[PATCH 29/32] locking/lockdep: Branch the new vec-finegrained softirq masking to lockdep

From: Frederic Weisbecker
Date: Tue Feb 12 2019 - 12:16:09 EST


Now that softirqs fully support per-vector masking, let's feed lockdep
the proper inputs and push the vector numbers involved in the base
softirq lock usages:

LOCK_ENABLED_SOFTIRQ: push local_softirq_enabled()
LOCK_USED_IN_SOFTIRQ: push curr->softirq_context, modified by
lockdep_softirq_enter/exit()

Signed-off-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
Cc: Mauro Carvalho Chehab <mchehab@xxxxxxxxxxxxxxxx>
Cc: Joel Fernandes <joel@xxxxxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Pavan Kondeti <pkondeti@xxxxxxxxxxxxxx>
Cc: Paul E . McKenney <paulmck@xxxxxxxxxxxxxxxxxx>
Cc: David S . Miller <davem@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
---
kernel/locking/lockdep.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index acd82145f6a6..570eea5376ec 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2877,6 +2877,7 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
*/
if (curr->softirqs_enabled) {
usage.bit = LOCK_ENABLED_SOFTIRQ;
+ usage.vector = local_softirq_enabled();
if (!mark_held_locks(curr, &usage))
return;
}
@@ -2964,6 +2965,7 @@ void trace_softirqs_on(unsigned long ip)
struct task_struct *curr = current;
struct lock_usage usage = {
.bit = LOCK_ENABLED_SOFTIRQ,
+ .vector = local_softirq_enabled()
};

if (unlikely(!debug_locks || current->lockdep_recursion))
@@ -3028,7 +3030,7 @@ void trace_softirqs_off(unsigned long ip)

static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
{
- struct lock_usage usage = { .vector = 0 };
+ struct lock_usage usage;
/*
* If non-trylock use in a hardirq or softirq context, then
* mark the lock as used in these contexts:
@@ -3037,22 +3039,26 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
if (hlock->read) {
if (curr->hardirq_context) {
usage.bit = LOCK_USED_IN_HARDIRQ_READ;
+ usage.vector = 0;
if (!mark_lock(curr, hlock, &usage))
return 0;
}
if (curr->softirq_context) {
usage.bit = LOCK_USED_IN_SOFTIRQ_READ;
+ usage.vector = curr->softirq_context;
if (!mark_lock(curr, hlock, &usage))
return 0;
}
} else {
if (curr->hardirq_context) {
usage.bit = LOCK_USED_IN_HARDIRQ;
+ usage.vector = 0;
if (!mark_lock(curr, hlock, &usage))
return 0;
}
if (curr->softirq_context) {
usage.bit = LOCK_USED_IN_SOFTIRQ;
+ usage.vector = curr->softirq_context;
if (!mark_lock(curr, hlock, &usage))
return 0;
}
@@ -3061,19 +3067,23 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
if (!hlock->hardirqs_off) {
if (hlock->read) {
usage.bit = LOCK_ENABLED_HARDIRQ_READ;
+ usage.vector = 0;
if (!mark_lock(curr, hlock, &usage))
return 0;
if (curr->softirqs_enabled) {
usage.bit = LOCK_ENABLED_SOFTIRQ_READ;
+ usage.vector = local_softirq_enabled();
if (!mark_lock(curr, hlock, &usage))
return 0;
}
} else {
usage.bit = LOCK_ENABLED_HARDIRQ;
+ usage.vector = 0;
if (!mark_lock(curr, hlock, &usage))
return 0;
if (curr->softirqs_enabled) {
usage.bit = LOCK_ENABLED_SOFTIRQ;
+ usage.vector = local_softirq_enabled();
if (!mark_lock(curr, hlock, &usage))
return 0;
}
--
2.17.1