[patch 49/61] lock validator: special locking: sched.c

From: Ingo Molnar
Date: Mon May 29 2006 - 17:31:03 EST


From: Ingo Molnar <mingo@xxxxxxx>

Teach the lock validator about the scheduler's special (recursive)
locking: all runqueue locks belong to the same lock class and can be
held and released in non-LIFO order, so the affected unlock sites are
annotated with spin_unlock_non_nested(), and the statically allocated
runqueue locks are initialized via spin_lock_init_static(). Has no
effect on non-lockdep kernels.
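
For illustration (not part of the patch), here is the acquisition side
of the pattern these annotations describe, condensed from the same file
with the sparse annotations dropped. Both locks are instances of the
same class (rq->lock), taken in ascending address order to avoid AB-BA
deadlocks between CPUs:

	/*
	 * Condensed sketch of double_rq_lock(): take two locks of
	 * the same lock class in a stable (address) order.
	 */
	static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
	{
		if (rq1 == rq2) {
			spin_lock(&rq1->lock);
		} else if (rq1 < rq2) {
			spin_lock(&rq1->lock);
			spin_lock(&rq2->lock);
		} else {
			spin_lock(&rq2->lock);
			spin_lock(&rq1->lock);
		}
	}

Since double_rq_unlock() always releases rq1->lock first, the release
order can be non-LIFO with respect to the acquisition order above
(whenever rq1 < rq2), which is exactly what spin_unlock_non_nested()
communicates to the validator.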

Signed-off-by: Ingo Molnar <mingo@xxxxxxx>
Signed-off-by: Arjan van de Ven <arjan@xxxxxxxxxxxxxxx>
---
kernel/sched.c | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)

Index: linux/kernel/sched.c
===================================================================
--- linux.orig/kernel/sched.c
+++ linux/kernel/sched.c
@@ -1963,7 +1963,7 @@ static void double_rq_unlock(runqueue_t
__releases(rq1->lock)
__releases(rq2->lock)
{
- spin_unlock(&rq1->lock);
+ spin_unlock_non_nested(&rq1->lock);
if (rq1 != rq2)
spin_unlock(&rq2->lock);
else
@@ -1980,7 +1980,7 @@ static void double_lock_balance(runqueue
{
if (unlikely(!spin_trylock(&busiest->lock))) {
if (busiest->cpu < this_rq->cpu) {
- spin_unlock(&this_rq->lock);
+ spin_unlock_non_nested(&this_rq->lock);
spin_lock(&busiest->lock);
spin_lock(&this_rq->lock);
} else
@@ -2602,7 +2602,7 @@ static int load_balance_newidle(int this
nr_moved = move_tasks(this_rq, this_cpu, busiest,
minus_1_or_zero(busiest->nr_running),
imbalance, sd, NEWLY_IDLE, NULL);
- spin_unlock(&busiest->lock);
+ spin_unlock_non_nested(&busiest->lock);
}

if (!nr_moved) {
@@ -2687,7 +2687,7 @@ static void active_load_balance(runqueue
else
schedstat_inc(sd, alb_failed);
out:
- spin_unlock(&target_rq->lock);
+ spin_unlock_non_nested(&target_rq->lock);
}

/*
@@ -3032,7 +3032,7 @@ static void wake_sleeping_dependent(int
}

for_each_cpu_mask(i, sibling_map)
- spin_unlock(&cpu_rq(i)->lock);
+ spin_unlock_non_nested(&cpu_rq(i)->lock);
/*
* We exit with this_cpu's rq still held and IRQs
* still disabled:
@@ -3068,7 +3068,7 @@ static int dependent_sleeper(int this_cp
* The same locking rules and details apply as for
* wake_sleeping_dependent():
*/
- spin_unlock(&this_rq->lock);
+ spin_unlock_non_nested(&this_rq->lock);
sibling_map = sd->span;
for_each_cpu_mask(i, sibling_map)
spin_lock(&cpu_rq(i)->lock);
@@ -3146,7 +3146,7 @@ check_smt_task:
}
out_unlock:
for_each_cpu_mask(i, sibling_map)
- spin_unlock(&cpu_rq(i)->lock);
+ spin_unlock_non_nested(&cpu_rq(i)->lock);
return ret;
}
#else
@@ -6680,7 +6680,7 @@ void __init sched_init(void)
prio_array_t *array;

rq = cpu_rq(i);
- spin_lock_init(&rq->lock);
+ spin_lock_init_static(&rq->lock);
rq->nr_running = 0;
rq->active = rq->arrays;
rq->expired = rq->arrays + 1;
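
(For completeness: on non-lockdep kernels the new primitives presumably
collapse into the plain ones, roughly as below; the exact config symbol
and header placement in this series may differ:

	#ifndef CONFIG_LOCKDEP
	/* hypothetical !lockdep fallbacks, for illustration only */
	# define spin_unlock_non_nested(lock)	spin_unlock(lock)
	# define spin_lock_init_static(lock)	spin_lock_init(lock)
	#endif

hence "has no effect on non-lockdep kernels".)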