[PATCH v2 06/11] locking/ww_mutex: Notify waiters that have to back off while adding tasks to wait list

From: Nicolai Hähnle
Date: Thu Dec 01 2016 - 09:07:24 EST


From: Nicolai Hähnle <Nicolai.Haehnle@xxxxxxx>

While adding our task as a waiter, detect if another task already on the
wait list should back off because of us, and wake it up so that it gets a
chance to do so.

With this patch, we establish the invariant that the wait list contains
at most one (sleeping) waiter with ww_ctx->acquired > 0, and this waiter
will be the first waiter with a context.

Since only waiters with ww_ctx->acquired > 0 ever have to back off, this
allows us to be much more economical with wakeups: waiters that hold no
locks yet can simply stay asleep.
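
As a rough illustration, here is a minimal userspace sketch (not kernel
code; toy_ctx and toy_must_back_off are made-up names, and the stamp
wraparound handling of __ww_mutex_stamp_after() is ignored) of the rule
that decides which waiter has to back off and therefore needs the wakeup:

#include <stdbool.h>
#include <stdio.h>

struct toy_ctx {
	unsigned long stamp;	/* lower stamp == older acquire context */
	unsigned int acquired;	/* number of ww_mutexes already held */
};

/* Simplified stand-in for __ww_mutex_stamp_after(): is a younger than b? */
static bool toy_stamp_after(const struct toy_ctx *a, const struct toy_ctx *b)
{
	return a->stamp > b->stamp;
}

/*
 * A waiter only has to back off (return -EDEADLK so its caller can release
 * the locks it holds) when an older context contends for the same lock AND
 * the waiter already holds other ww_mutexes; a waiter with acquired == 0
 * blocks nobody and can simply keep sleeping in its spot on the wait list.
 */
static bool toy_must_back_off(const struct toy_ctx *waiter,
			      const struct toy_ctx *contender)
{
	return waiter->acquired > 0 && toy_stamp_after(waiter, contender);
}

int main(void)
{
	struct toy_ctx older = { .stamp = 1, .acquired = 2 };
	struct toy_ctx newer = { .stamp = 5, .acquired = 3 };
	struct toy_ctx fresh = { .stamp = 7, .acquired = 0 };

	/* The younger waiter that already holds locks must be woken up. */
	printf("newer backs off for older: %d\n",
	       toy_must_back_off(&newer, &older));
	/* A waiter with acquired == 0 never needs a back-off wakeup. */
	printf("fresh backs off for older: %d\n",
	       toy_must_back_off(&fresh, &older));
	return 0;
}

With the invariant above, at most one waiter on the list can both have a
context and acquired > 0, so inserting a new, older waiter wakes at most
one task.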

v2: rebase on v2 of earlier patches

Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Maarten Lankhorst <dev@xxxxxxxxxxxxxx>
Cc: Daniel Vetter <daniel@xxxxxxxx>
Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: dri-devel@xxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Nicolai Hähnle <Nicolai.Haehnle@xxxxxxx>
---
kernel/locking/mutex.c | 42 ++++++++++++++++++++++++++++++++----------
1 file changed, 32 insertions(+), 10 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index d63eebb..01e9438 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -609,23 +609,34 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
 EXPORT_SYMBOL(ww_mutex_unlock);
 
 static inline int __sched
-__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
+			    struct ww_acquire_ctx *ctx)
 {
 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
+	struct mutex_waiter *cur;
 
-	if (!hold_ctx)
-		return 0;
+	if (hold_ctx && __ww_mutex_stamp_after(ctx, hold_ctx))
+		goto deadlock;
 
-	if (__ww_mutex_stamp_after(ctx, hold_ctx)) {
-#ifdef CONFIG_DEBUG_MUTEXES
-		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
-		ctx->contending_lock = ww;
-#endif
-		return -EDEADLK;
+	/*
+	 * If there is a waiter in front of us that has a context, then its
+	 * stamp is earlier than ours and we must back off.
+	 */
+	cur = waiter;
+	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
+		if (cur->ww_ctx)
+			goto deadlock;
 	}
 
 	return 0;
+
+deadlock:
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+	ctx->contending_lock = ww;
+#endif
+	return -EDEADLK;
 }
 
 static inline int __sched
@@ -666,6 +677,16 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
 		}
 
 		list_add_tail(&waiter->list, &cur->list);
+
+		/*
+		 * Wake up the waiter so that it gets a chance to back
+		 * off.
+		 */
+		if (cur->ww_ctx->acquired > 0) {
+			debug_mutex_wake_waiter(lock, cur);
+			wake_up_process(cur->task);
+		}
+
 		return 0;
 	}
 
@@ -767,7 +788,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		}
 
 		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
-			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
+			ret = __ww_mutex_lock_check_stamp(lock, &waiter,
+							  ww_ctx);
 			if (ret)
 				goto err;
 		}
--
2.7.4