[PATCH v5 08/19] locking/mutex: Split blocked_on logic into two states (blocked_on and blocked_on_waking)

From: John Stultz
Date: Sat Aug 19 2023 - 02:10:50 EST


This patch adds blocked_on_waking so we can track whether the task
should be allowed to try to acquire the lock separately from which
lock it is blocked on.

This avoids some of the subtle magic where the blocked_on state
gets cleared on wakeup, only to be re-set by the mutex lock
slowpath when the task retries acquiring the lock.

This should make dealing with the ww_mutex issue cleaner.
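
To illustrate the intent, here is a rough standalone sketch of the
resulting two-state tracking. The struct below is a stand-in for the
relevant task_struct fields, and mark_task_blocked_on_waking() is a
made-up helper for illustration; the patch itself just sets the flag
directly while holding the task's blocked_lock:

#include <stdbool.h>

struct mutex;				/* opaque stand-in for the kernel mutex */

struct task_struct {
	struct mutex *blocked_on;	/* lock we're blocked on */
	bool blocked_on_waking;		/* blocked on, but waking */
};

/* Blocking side: record the lock we are about to wait on. */
static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
	p->blocked_on = m;
	p->blocked_on_waking = false;
}

/*
 * Waker side (mutex unlock, ww_mutex die/wound): leave blocked_on in
 * place and only mark the task as waking, instead of clearing the
 * pointer and having the lock slowpath re-set it on wakeup.
 */
static inline void mark_task_blocked_on_waking(struct task_struct *p)
{
	p->blocked_on_waking = true;
}

/* A task only counts as blocked if it has a lock and is not yet waking. */
static inline bool task_is_blocked(struct task_struct *p)
{
	return p->blocked_on && !p->blocked_on_waking;
}

With this split, the unlock and die/wound paths only flip
blocked_on_waking, and __mutex_lock_common() clears the flag again
before retrying, so the blocked_on pointer stays stable for as long as
the waiter sits on the lock's wait list.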

Cc: Joel Fernandes <joelaf@xxxxxxxxxx>
Cc: Qais Yousef <qyousef@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Juri Lelli <juri.lelli@xxxxxxxxxx>
Cc: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Cc: Dietmar Eggemann <dietmar.eggemann@xxxxxxx>
Cc: Valentin Schneider <vschneid@xxxxxxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Ben Segall <bsegall@xxxxxxxxxx>
Cc: Zimuzo Ezeozue <zezeozue@xxxxxxxxxx>
Cc: Youssef Esmat <youssefesmat@xxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Daniel Bristot de Oliveira <bristot@xxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Waiman Long <longman@xxxxxxxxxx>
Cc: Boqun Feng <boqun.feng@xxxxxxxxx>
Cc: "Paul E . McKenney" <paulmck@xxxxxxxxxx>
Cc: kernel-team@xxxxxxxxxxx
Signed-off-by: John Stultz <jstultz@xxxxxxxxxx>
---
 include/linux/sched.h     |  2 ++
 kernel/fork.c             |  1 +
 kernel/locking/mutex.c    |  7 ++++---
 kernel/locking/ww_mutex.h | 12 ++++++------
 kernel/sched/sched.h      | 12 ++++++++++++
 5 files changed, 25 insertions(+), 9 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0f32bea47e5e..3b7f26df2496 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1141,6 +1141,7 @@ struct task_struct {
#endif

struct mutex *blocked_on; /* lock we're blocked on */
+ bool blocked_on_waking; /* blocked on, but waking */
raw_spinlock_t blocked_lock;

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
@@ -2241,6 +2242,7 @@ static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
WARN_ON((!m && !p->blocked_on) || (m && p->blocked_on));

p->blocked_on = m;
+ p->blocked_on_waking = false;
}

static inline struct mutex *get_task_blocked_on(struct task_struct *p)
diff --git a/kernel/fork.c b/kernel/fork.c
index 8bad899b6c6e..5b11ead90b12 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2460,6 +2460,7 @@ __latent_entropy struct task_struct *copy_process(
#endif

p->blocked_on = NULL; /* not blocked yet */
+ p->blocked_on_waking = false; /* not blocked yet */

#ifdef CONFIG_BCACHE
p->sequential_io = 0;
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 04b0ea45cc01..687009eca2d1 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -666,10 +666,11 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas

raw_spin_lock_irqsave(&lock->wait_lock, flags);
raw_spin_lock(&current->blocked_lock);
+
/*
- * Gets reset by unlock path().
+ * Clear blocked_on_waking flag set by the unlock path().
*/
- set_task_blocked_on(current, lock);
+ current->blocked_on_waking = false;
set_current_state(state);
/*
* Here we order against unlock; we must either see it change
@@ -948,7 +949,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
debug_mutex_wake_waiter(lock, waiter);
raw_spin_lock(&next->blocked_lock);
WARN_ON(next->blocked_on != lock);
- set_task_blocked_on(current, NULL);
+ next->blocked_on_waking = true;
raw_spin_unlock(&next->blocked_lock);
wake_q_add(&wake_q, next);
}
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index 44a532dda927..3b0a68d7e308 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -287,12 +287,12 @@ __ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
debug_mutex_wake_waiter(lock, waiter);
#endif
/*
- * When waking up the task to die, be sure to clear the
- * blocked_on pointer. Otherwise we can see circular
+ * When waking up the task to die, be sure to set the
+ * blocked_on_waking flag. Otherwise we can see circular
* blocked_on relationships that can't resolve.
*/
WARN_ON(waiter->task->blocked_on != lock);
- set_task_blocked_on(waiter->task, NULL);
+ waiter->task->blocked_on_waking = true;
wake_q_add(wake_q, waiter->task);
raw_spin_unlock(&waiter->task->blocked_lock);
}
@@ -345,11 +345,11 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
/* nested as we should hold current->blocked_lock already */
raw_spin_lock_nested(&owner->blocked_lock, SINGLE_DEPTH_NESTING);
/*
- * When waking up the task to wound, be sure to clear the
- * blocked_on pointer. Otherwise we can see circular
+ * When waking up the task to wound, be sure to set the
+ * blocked_on_waking flag. Otherwise we can see circular
* blocked_on relationships that can't resolve.
*/
- set_task_blocked_on(owner, NULL);
+ owner->blocked_on_waking = true;
wake_q_add(wake_q, owner);
raw_spin_unlock(&owner->blocked_lock);
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 33ad47a093ae..95900ccaaf82 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2111,6 +2111,18 @@ static inline int task_current(struct rq *rq, struct task_struct *p)
return rq->curr == p;
}

+#ifdef CONFIG_PROXY_EXEC
+static inline bool task_is_blocked(struct task_struct *p)
+{
+ return !!p->blocked_on && !p->blocked_on_waking;
+}
+#else /* !PROXY_EXEC */
+static inline bool task_is_blocked(struct task_struct *p)
+{
+ return false;
+}
+#endif /* PROXY_EXEC */
+
static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
--
2.42.0.rc1.204.g551eb34607-goog