From: Ingo Molnar

Teach special (recursive) locking code to the lock validator.  Has no
effect on non-lockdep kernels.

Signed-off-by: Ingo Molnar
Signed-off-by: Arjan van de Ven
Signed-off-by: Andrew Morton
---

 kernel/sched.c |   16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff -puN kernel/sched.c~lock-validator-special-locking-schedc kernel/sched.c
--- devel/kernel/sched.c~lock-validator-special-locking-schedc	2006-05-29 18:13:22.000000000 -0700
+++ devel-akpm/kernel/sched.c	2006-05-29 18:13:22.000000000 -0700
@@ -1920,7 +1920,7 @@ static void double_rq_unlock(runqueue_t
 	__releases(rq1->lock)
 	__releases(rq2->lock)
 {
-	spin_unlock(&rq1->lock);
+	spin_unlock_non_nested(&rq1->lock);
 	if (rq1 != rq2)
 		spin_unlock(&rq2->lock);
 	else
@@ -1937,7 +1937,7 @@ static void double_lock_balance(runqueue
 {
 	if (unlikely(!spin_trylock(&busiest->lock))) {
 		if (busiest->cpu < this_rq->cpu) {
-			spin_unlock(&this_rq->lock);
+			spin_unlock_non_nested(&this_rq->lock);
 			spin_lock(&busiest->lock);
 			spin_lock(&this_rq->lock);
 		} else
@@ -2559,7 +2559,7 @@ static int load_balance_newidle(int this
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					minus_1_or_zero(busiest->nr_running),
 					imbalance, sd, NEWLY_IDLE, NULL);
-		spin_unlock(&busiest->lock);
+		spin_unlock_non_nested(&busiest->lock);
 	}
 
 	if (!nr_moved) {
@@ -2644,7 +2644,7 @@ static void active_load_balance(runqueue
 	else
 		schedstat_inc(sd, alb_failed);
 out:
-	spin_unlock(&target_rq->lock);
+	spin_unlock_non_nested(&target_rq->lock);
 }
 
 /*
@@ -3005,7 +3005,7 @@ static void wake_sleeping_dependent(int
 	}
 
 	for_each_cpu_mask(i, sibling_map)
-		spin_unlock(&cpu_rq(i)->lock);
+		spin_unlock_non_nested(&cpu_rq(i)->lock);
 	/*
 	 * We exit with this_cpu's rq still held and IRQs
 	 * still disabled:
@@ -3041,7 +3041,7 @@ static int dependent_sleeper(int this_cp
 	 * The same locking rules and details apply as for
 	 * wake_sleeping_dependent():
 	 */
-	spin_unlock(&this_rq->lock);
+	spin_unlock_non_nested(&this_rq->lock);
 	sibling_map = sd->span;
 	for_each_cpu_mask(i, sibling_map)
 		spin_lock(&cpu_rq(i)->lock);
@@ -3119,7 +3119,7 @@ check_smt_task:
 	}
 out_unlock:
 	for_each_cpu_mask(i, sibling_map)
-		spin_unlock(&cpu_rq(i)->lock);
+		spin_unlock_non_nested(&cpu_rq(i)->lock);
 	return ret;
 }
 #else
@@ -6651,7 +6651,7 @@ void __init sched_init(void)
 		prio_array_t *array;
 
 		rq = cpu_rq(i);
-		spin_lock_init(&rq->lock);
+		spin_lock_init_static(&rq->lock);
 		rq->nr_running = 0;
 		rq->active = rq->arrays;
 		rq->expired = rq->arrays + 1;
_
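For context on why these annotations are needed: the scheduler routinely
holds two runqueue locks of the same lock class at once, acquiring them
in address order to avoid ABBA deadlocks, and may then release them in
an order that does not mirror the acquisition order.  A validator that
assumed strict LIFO nesting would flag that release as a bug, hence the
non-nested unlock annotation.  Below is a minimal userspace sketch of
that pattern, with pthread mutexes standing in for spinlock_t; it is an
illustration, not the kernel API, and the function names merely mimic
those in kernel/sched.c.

	/*
	 * Userspace sketch of the scheduler's address-ordered
	 * double-lock pattern (pthreads stand in for spinlock_t).
	 * Build with: cc sketch.c -lpthread
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct runqueue {
		pthread_mutex_t lock;
	};

	/* Always take the lower-addressed lock first (ABBA avoidance). */
	static void double_rq_lock(struct runqueue *rq1, struct runqueue *rq2)
	{
		if (rq1 == rq2) {
			pthread_mutex_lock(&rq1->lock);
		} else if (rq1 < rq2) {
			pthread_mutex_lock(&rq1->lock);
			pthread_mutex_lock(&rq2->lock);
		} else {
			pthread_mutex_lock(&rq2->lock);
			pthread_mutex_lock(&rq1->lock);
		}
	}

	/*
	 * rq1 is released first regardless of which lock was taken
	 * first: this is the "non-nested" release that the patch
	 * annotates for the validator, since it can violate the
	 * strict LIFO order a naive checker would expect.
	 */
	static void double_rq_unlock(struct runqueue *rq1, struct runqueue *rq2)
	{
		pthread_mutex_unlock(&rq1->lock);
		if (rq1 != rq2)
			pthread_mutex_unlock(&rq2->lock);
	}

	int main(void)
	{
		struct runqueue a = { PTHREAD_MUTEX_INITIALIZER };
		struct runqueue b = { PTHREAD_MUTEX_INITIALIZER };

		double_rq_lock(&a, &b);
		/* ... move tasks between the two runqueues ... */
		double_rq_unlock(&a, &b);
		puts("locked and unlocked both runqueues");
		return 0;
	}

The same reasoning covers double_lock_balance() above: it drops
this_rq->lock mid-sequence to reacquire both locks in the correct
order, so that unlock, too, is out of strict nesting order.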