[PATCH 25/32] softirq: Prepare for mixing all/per-vector masking

From: Frederic Weisbecker
Date: Tue Feb 12 2019 - 12:15:55 EST


In order to be able to mix and nest full and per vector softirq masking,
we need to be able to track the nesting state using a "full masking"
counter and a mask of "individual disabled vectors".

Start by introducing the full masking counter. For now it's a simple
mirror of softirq_count() because there is no per vector masking API
yet.

When this full masking counter is non-zero, all softirq vectors are
explicitly disabled.

Suggested-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
Cc: Mauro Carvalho Chehab <mchehab@xxxxxxxxxxxxxxxx>
Cc: Joel Fernandes <joel@xxxxxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Pavan Kondeti <pkondeti@xxxxxxxxxxxxxx>
Cc: Paul E . McKenney <paulmck@xxxxxxxxxxxxxxxxxx>
Cc: David S . Miller <davem@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
---
kernel/softirq.c | 33 ++++++++++++++++++++++-----------
1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 91dee716e139..4477a03afd94 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -57,6 +57,12 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

+struct softirq_nesting {
+ unsigned int disabled_all;
+};
+
+static DEFINE_PER_CPU(struct softirq_nesting, softirq_nesting);
+
const char * const softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
@@ -118,11 +124,11 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
* call the trace_preempt_off later.
*/
__preempt_count_add(cnt);
- /*
- * Were softirqs turned off above:
- */
- if (softirq_count() == (cnt & SOFTIRQ_MASK))
+
+ if (__this_cpu_inc_return(softirq_nesting.disabled_all) == 1) {
+ softirq_enabled_clear_mask(SOFTIRQ_ALL_MASK);
trace_softirqs_off(ip);
+ }

#ifdef CONFIG_TRACE_IRQFLAGS
raw_local_irq_restore(flags);
@@ -137,6 +143,15 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
}
EXPORT_SYMBOL(__local_bh_disable_ip);

+static void local_bh_enable_common(unsigned long ip, unsigned int cnt)
+{
+ if (__this_cpu_dec_return(softirq_nesting.disabled_all))
+ return;
+
+ softirq_enabled_set(SOFTIRQ_ALL_MASK);
+ trace_softirqs_on(ip);
+}
+
static void __local_bh_enable_no_softirq(unsigned int cnt)
{
lockdep_assert_irqs_disabled();
@@ -144,8 +159,7 @@ static void __local_bh_enable_no_softirq(unsigned int cnt)
if (preempt_count() == cnt)
trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

- if (softirq_count() == (cnt & SOFTIRQ_MASK))
- trace_softirqs_on(_RET_IP_);
+ local_bh_enable_common(_RET_IP_, cnt);

__preempt_count_sub(cnt);
}
@@ -168,11 +182,8 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
#ifdef CONFIG_TRACE_IRQFLAGS
local_irq_disable();
#endif
- /*
- * Are softirqs going to be turned on now:
- */
- if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
- trace_softirqs_on(ip);
+ local_bh_enable_common(ip, cnt);
+
/*
* Keep preemption disabled until we are done with
* softirq processing:
--
2.17.1