[RFC PATCH 29/30] softirq: Make softirq processing softinterruptible

From: Frederic Weisbecker
Date: Wed Oct 10 2018 - 19:14:06 EST


From: Frederic Weisbecker <fweisbec@xxxxxxxxx>

Make do_softirq() re-entrant so that a vector, whether it is currently
being processed or disabled, can be interrupted by another vector. This
way a single vector can no longer monopolize the CPU for a long while
at the expense of the others, which may rely on predictable latency.
This matters especially for softirq-disabled sections, which used to
mask all vectors at once.
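
To illustrate the intent, here is a minimal userspace sketch of the
per-vector masking (plain C, not the kernel implementation; the vector
names mirror the kernel's softirq enumeration, the mask values are
arbitrary):

	#include <stdio.h>

	/* Subset of vector bits, mirroring the kernel's softirq numbering */
	enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ, NR_VECTORS };

	int main(void)
	{
		unsigned int pending = (1U << TIMER_SOFTIRQ) | (1U << NET_RX_SOFTIRQ);
		unsigned int enabled = ~(1U << NET_RX_SOFTIRQ); /* NET_RX vector disabled */
		int i;

		/*
		 * Only vectors that are both raised and enabled get serviced.
		 * NET_RX stays pending until it is re-enabled, but it no
		 * longer prevents TIMER from running.
		 */
		for (i = 0; i < NR_VECTORS; i++)
			if ((pending & enabled) & (1U << i))
				printf("servicing vector %d\n", i);
		return 0;
	}

This is the same "pending & enabled" filter that every call site below
applies in place of a bare local_softirq_pending() check.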

Signed-off-by: Frederic Weisbecker <fweisbec@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: David S. Miller <davem@xxxxxxxxxxxxx>
Cc: Mauro Carvalho Chehab <mchehab@xxxxxxxxxxxxxxxx>
Cc: Paul E. McKenney <paulmck@xxxxxxxxxxxxxxxxxx>
---
kernel/softirq.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 457bf60..f4cb1ea 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -195,7 +195,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt, unsigned int bh)
 	if (cnt)
 		preempt_count_sub(cnt - 1);
 
-	if (unlikely(!in_interrupt() && local_softirq_pending())) {
+	if (unlikely(!in_irq() && (local_softirq_pending() & local_softirq_enabled()))) {
 		/*
 		 * Run softirq if any pending. And do it in its own stack
 		 * as we may be calling this deep in a task call stack already.
@@ -387,7 +387,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	lockdep_softirq_end(in_hardirq);
 	account_irq_exit_time(current);
 	local_bh_exit();
-	WARN_ON_ONCE(in_interrupt());
+	WARN_ON_ONCE(in_irq());
 	current_restore_flags(old_flags, PF_MEMALLOC);
 }
 
@@ -396,12 +396,12 @@ asmlinkage __visible void do_softirq(void)
 	__u32 pending;
 	unsigned long flags;
 
-	if (in_interrupt())
+	if (in_irq())
 		return;
 
 	local_irq_save(flags);
 
-	pending = local_softirq_pending();
+	pending = local_softirq_pending() & local_softirq_enabled();
 
 	if (pending && !ksoftirqd_running(pending))
 		do_softirq_own_stack();
@@ -432,7 +432,7 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-	if (ksoftirqd_running(local_softirq_pending()))
+	if (ksoftirqd_running(local_softirq_pending() & local_softirq_enabled()))
 		return;
 
 	if (!force_irqthreads) {
@@ -481,7 +481,7 @@ void irq_exit(void)
 #endif
 	account_irq_exit_time(current);
 	preempt_count_sub(HARDIRQ_OFFSET);
-	if (!in_interrupt() && local_softirq_pending())
+	if (!in_irq() && (local_softirq_pending() & local_softirq_enabled()))
 		invoke_softirq();
 
 	tick_irq_exit();
@@ -712,13 +712,13 @@ void __init softirq_init(void)
 
 static int ksoftirqd_should_run(unsigned int cpu)
 {
-	return local_softirq_pending();
+	return local_softirq_pending() & local_softirq_enabled();
}
 
 static void run_ksoftirqd(unsigned int cpu)
 {
 	local_irq_disable();
-	if (local_softirq_pending()) {
+	if (local_softirq_pending() & local_softirq_enabled()) {
 		/*
 		 * We can safely run softirq on inline stack, as we are not deep
 		 * in the task stack here.
--
2.7.4