[patch 40/61] lock validator: special locking: futex

From: Ingo Molnar
Date: Mon May 29 2006 - 17:35:11 EST


From: Ingo Molnar <mingo@xxxxxxx>

teach the lock validator about the special (recursive) locking used in the
futex code. Has no effect on non-lockdep kernels.

Signed-off-by: Ingo Molnar <mingo@xxxxxxx>
Signed-off-by: Arjan van de Ven <arjan@xxxxxxxxxxxxxxx>
---
kernel/futex.c | 44 ++++++++++++++++++++++++++------------------
1 file changed, 26 insertions(+), 18 deletions(-)

Index: linux/kernel/futex.c
===================================================================
--- linux.orig/kernel/futex.c
+++ linux/kernel/futex.c
@@ -604,6 +604,22 @@ static int unlock_futex_pi(u32 __user *u
}

/*
+ * Express the locking dependencies for lockdep:
+ */
+static inline void
+double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
+{
+ if (hb1 <= hb2) {
+ spin_lock(&hb1->lock);
+ if (hb1 < hb2)
+ spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
+ } else { /* hb1 > hb2 */
+ spin_lock(&hb2->lock);
+ spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
+ }
+}
+
+/*
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
*/
@@ -669,19 +685,15 @@ retryfull:
hb2 = hash_futex(&key2);

retry:
- if (hb1 < hb2)
- spin_lock(&hb1->lock);
- spin_lock(&hb2->lock);
- if (hb1 > hb2)
- spin_lock(&hb1->lock);
+ double_lock_hb(hb1, hb2);

op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
u32 dummy;

- spin_unlock(&hb1->lock);
+ spin_unlock_non_nested(&hb1->lock);
if (hb1 != hb2)
- spin_unlock(&hb2->lock);
+ spin_unlock_non_nested(&hb2->lock);

#ifndef CONFIG_MMU
/*
@@ -748,9 +760,9 @@ retry:
ret += op_ret;
}

- spin_unlock(&hb1->lock);
+ spin_unlock_non_nested(&hb1->lock);
if (hb1 != hb2)
- spin_unlock(&hb2->lock);
+ spin_unlock_non_nested(&hb2->lock);
out:
up_read(&current->mm->mmap_sem);
return ret;
@@ -782,11 +794,7 @@ static int futex_requeue(u32 __user *uad
hb1 = hash_futex(&key1);
hb2 = hash_futex(&key2);

- if (hb1 < hb2)
- spin_lock(&hb1->lock);
- spin_lock(&hb2->lock);
- if (hb1 > hb2)
- spin_lock(&hb1->lock);
+ double_lock_hb(hb1, hb2);

if (likely(cmpval != NULL)) {
u32 curval;
@@ -794,9 +802,9 @@ static int futex_requeue(u32 __user *uad
ret = get_futex_value_locked(&curval, uaddr1);

if (unlikely(ret)) {
- spin_unlock(&hb1->lock);
+ spin_unlock_non_nested(&hb1->lock);
if (hb1 != hb2)
- spin_unlock(&hb2->lock);
+ spin_unlock_non_nested(&hb2->lock);

/*
* If we would have faulted, release mmap_sem, fault
@@ -842,9 +850,9 @@ static int futex_requeue(u32 __user *uad
}

out_unlock:
- spin_unlock(&hb1->lock);
+ spin_unlock_non_nested(&hb1->lock);
if (hb1 != hb2)
- spin_unlock(&hb2->lock);
+ spin_unlock_non_nested(&hb2->lock);

/* drop_key_refs() must be called outside the spinlocks. */
while (--drop_count >= 0)
-
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/