[patch 5/6] rtmutex: Clarify the lock chain walk

From: Thomas Gleixner
Date: Wed May 21 2014 - 23:25:50 EST


Add a separate local variable for the boost/deboost logic to make the
code more readable. Add comments where appropriate.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
kernel/locking/rtmutex.c | 50 +++++++++++++++++++++++++++++++++++++++--------
1 file changed, 42 insertions(+), 8 deletions(-)

Index: tip/kernel/locking/rtmutex.c
===================================================================
--- tip.orig/kernel/locking/rtmutex.c
+++ tip/kernel/locking/rtmutex.c
@@ -302,9 +302,10 @@ static int rt_mutex_adjust_prio_chain(st
struct rt_mutex_waiter *orig_waiter,
struct task_struct *top_task)
{
- struct rt_mutex *lock;
struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
+ struct rt_mutex_waiter *prerequeue_top_waiter;
int detect_deadlock, ret = 0, depth = 0;
+ struct rt_mutex *lock;
unsigned long flags;

detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter,
@@ -379,6 +380,11 @@ static int rt_mutex_adjust_prio_chain(st
}

lock = waiter->lock;
+ /*
+ * We need to trylock here as we are holding task->pi_lock,
+ * which is the reverse lock order versus the other rtmutex
+ * operations.
+ */
if (!raw_spin_trylock(&lock->wait_lock)) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
cpu_relax();
@@ -398,7 +404,11 @@ static int rt_mutex_adjust_prio_chain(st
goto out_unlock_pi;
}

- top_waiter = rt_mutex_top_waiter(lock);
+ /*
+ * Store the top waiter before doing any requeue operation. We
+ * need it for the boost/deboost decision below.
+ */
+ prerequeue_top_waiter = rt_mutex_top_waiter(lock);

/* Requeue the waiter */
rt_mutex_dequeue(lock, waiter);
@@ -407,13 +417,18 @@ static int rt_mutex_adjust_prio_chain(st

/* Release the task */
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+ /*
+ * We must abort the chain walk if there is no lock owner even
+ * in the deadlock detection case, as we have nothing to
+ * follow here.
+ */
if (!rt_mutex_owner(lock)) {
/*
* If the requeue above changed the top waiter, then we need
* to wake the new top waiter up to try to get the lock.
*/
-
- if (top_waiter != rt_mutex_top_waiter(lock))
+ if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
wake_up_process(rt_mutex_top_waiter(lock)->task);
raw_spin_unlock(&lock->wait_lock);
goto out_put_task;
@@ -426,17 +441,31 @@ static int rt_mutex_adjust_prio_chain(st
raw_spin_lock_irqsave(&task->pi_lock, flags);

if (waiter == rt_mutex_top_waiter(lock)) {
- /* Boost the owner */
- rt_mutex_dequeue_pi(task, top_waiter);
+ /*
+ * The waiter became the top waiter on the
+ * lock. Remove the previous top waiter from the task's
+ * pi waiters list and add waiter to it.
+ */
+ rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
rt_mutex_enqueue_pi(task, waiter);
__rt_mutex_adjust_prio(task);

- } else if (top_waiter == waiter) {
- /* Deboost the owner */
+ } else if (prerequeue_top_waiter == waiter) {
+ /*
+ * The waiter was the top waiter on the lock. Remove
+ * waiter from the task's pi waiters list and add the
+ * new top waiter to it.
+ */
rt_mutex_dequeue_pi(task, waiter);
waiter = rt_mutex_top_waiter(lock);
rt_mutex_enqueue_pi(task, waiter);
__rt_mutex_adjust_prio(task);
+
+ } else {
+ /*
+ * Nothing changed. No need to do any priority
+ * adjustment.
+ */
}

raw_spin_unlock_irqrestore(&task->pi_lock, flags);
@@ -444,6 +473,11 @@ static int rt_mutex_adjust_prio_chain(st
top_waiter = rt_mutex_top_waiter(lock);
raw_spin_unlock(&lock->wait_lock);

+ /*
+ * If the current waiter is not the top waiter on the lock,
+ * then we can stop the chain walk here if we are not in full
+ * deadlock detection mode.
+ */
if (!detect_deadlock && waiter != top_waiter)
goto out_put_task;



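For review purposes, here is a minimal user-space sketch of the three-way
decision the new prerequeue_top_waiter variable enables: compare the lock's
top waiter sampled before the requeue with the top waiter after it to decide
whether the owner needs a boost, a deboost, or no adjustment. This is not
kernel code; the struct waiter type and the boost_decision() helper are
hypothetical stand-ins used only to illustrate the logic of the patch.

#include <stdio.h>

struct waiter {
	int prio;
	const char *name;
};

/*
 * Hypothetical stand-ins: 'prerequeue_top' is the lock's top waiter
 * sampled before the requeue, 'postrequeue_top' the top waiter after
 * it, and 'waiter' the one that was requeued.
 */
static void boost_decision(struct waiter *prerequeue_top,
			   struct waiter *postrequeue_top,
			   struct waiter *waiter)
{
	if (waiter == postrequeue_top) {
		/* Requeued waiter climbed to the top: boost the owner. */
		printf("boost owner to prio %d (top waiter %s)\n",
		       waiter->prio, waiter->name);
	} else if (waiter == prerequeue_top) {
		/* Requeued waiter dropped off the top: deboost the owner. */
		printf("deboost owner to prio %d (top waiter %s)\n",
		       postrequeue_top->prio, postrequeue_top->name);
	} else {
		/* Top waiter unchanged by the requeue: nothing to adjust. */
		printf("no priority adjustment\n");
	}
}

int main(void)
{
	struct waiter a = { 10, "A" }, b = { 20, "B" };

	boost_decision(&a, &b, &a);	/* deboost case */
	boost_decision(&a, &b, &b);	/* boost case */
	boost_decision(&b, &b, &a);	/* no-op case */
	return 0;
}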