From: Ingo Molnar

Teach special (recursive) locking code to the lock validator.  Has no
effect on non-lockdep kernels.

Signed-off-by: Ingo Molnar
Signed-off-by: Arjan van de Ven
Signed-off-by: Andrew Morton
---

 kernel/sched.c |    8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff -puN kernel/sched.c~lock-validator-special-locking-schedc kernel/sched.c
--- a/kernel/sched.c~lock-validator-special-locking-schedc
+++ a/kernel/sched.c
@@ -1927,7 +1927,7 @@ static void double_rq_unlock(runqueue_t
 	__releases(rq1->lock)
 	__releases(rq2->lock)
 {
-	spin_unlock(&rq1->lock);
+	spin_unlock_non_nested(&rq1->lock);
 	if (rq1 != rq2)
 		spin_unlock(&rq2->lock);
 	else
@@ -2641,7 +2641,7 @@ static int load_balance_newidle(int this
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					minus_1_or_zero(busiest->nr_running),
 					imbalance, sd, NEWLY_IDLE, NULL);
-		spin_unlock(&busiest->lock);
+		spin_unlock_non_nested(&busiest->lock);
 	}

 	if (!nr_moved) {
@@ -2727,7 +2727,7 @@ static void active_load_balance(runqueue
 	else
 		schedstat_inc(sd, alb_failed);
 out:
-	spin_unlock(&target_rq->lock);
+	spin_unlock_non_nested(&target_rq->lock);
 }

 /*
@@ -6798,7 +6798,7 @@ void __init sched_init(void)
 		prio_array_t *array;

 		rq = cpu_rq(i);
-		spin_lock_init(&rq->lock);
+		spin_lock_init_static(&rq->lock);
		rq->nr_running = 0;
 		rq->active = rq->arrays;
 		rq->expired = rq->arrays + 1;
_
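
For reference, both new primitives only change behavior when the lock
validator is configured in; otherwise they fall back to the plain
operations, which is why the patch has no effect on non-lockdep
kernels.  A minimal sketch of that pattern, under stated assumptions
(CONFIG_LOCKDEP as the config symbol, and the __lockdep_* helpers are
hypothetical illustrations, not the actual validator API):

/*
 * Sketch only -- hypothetical internals.  The changelog suggests the
 * "non_nested" unlock tells the validator that a lock is deliberately
 * being released out of strict acquisition (LIFO) order: e.g. in
 * double_rq_unlock(), rq1->lock is dropped while rq2->lock, a lock of
 * the same class, may still be held.  spin_lock_init_static()
 * presumably marks a lock object living in static storage.  Both
 * hypothetical helpers are assumed to do the validator bookkeeping in
 * addition to the underlying lock operation.
 */
#ifdef CONFIG_LOCKDEP
extern void __lockdep_unlock_non_nested(spinlock_t *lock);
extern void __lockdep_init_static(spinlock_t *lock);
# define spin_unlock_non_nested(lock)	__lockdep_unlock_non_nested(lock)
# define spin_lock_init_static(lock)	__lockdep_init_static(lock)
#else
/* validator disabled: the annotations compile away completely */
# define spin_unlock_non_nested(lock)	spin_unlock(lock)
# define spin_lock_init_static(lock)	spin_lock_init(lock)
#endif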