Index: linux-2.6.19-rc3/kernel/sched.c
===================================================================
--- linux-2.6.19-rc3.orig/kernel/sched.c	2006-10-31 17:12:34.708109060 -0600
+++ linux-2.6.19-rc3/kernel/sched.c	2006-10-31 17:12:47.341034439 -0600
@@ -1932,6 +1932,24 @@ task_hot(struct task_struct *p, unsigned
 }
 
 /*
+ * double_rq_trylock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock,
+ * you need to do so manually before calling.
+ */
+static int double_rq_trylock(struct rq *rq1, struct rq *rq2)
+	__acquires(rq1->lock)
+	__acquires(rq2->lock)
+{
+	if (spin_trylock(&rq1->lock)) {
+		if (rq1 == rq2 || spin_trylock(&rq2->lock))
+			return 1;
+		spin_unlock(&rq1->lock);
+	}
+	return 0;
+}
+
+/*
  * double_rq_lock - safely lock two runqueues
  *
  * Note this does not disable interrupts like task_rq_lock,
@@ -2579,11 +2597,12 @@ redo:
 	 * still unbalanced. nr_moved simply stays zero, so it is
 	 * correctly treated as an imbalance.
 	 */
-	double_rq_lock(this_rq, busiest);
-	nr_moved = move_tasks(this_rq, this_cpu, busiest,
+	if (double_rq_trylock(this_rq, busiest)) {
+		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 				minus_1_or_zero(busiest->nr_running),
 				imbalance, sd, idle, &all_pinned);
-	double_rq_unlock(this_rq, busiest);
+		double_rq_unlock(this_rq, busiest);
+	}
 
 	/* All tasks on this runqueue were pinned by CPU affinity */
 	if (unlikely(all_pinned)) {
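
For reference, a minimal usage sketch (not part of the patch): the comment on
double_rq_trylock() notes that, like double_rq_lock(), it does not disable
interrupts, so any caller other than the load_balance() path above would be
expected to do so itself first. The example_caller() function and the work
done under the locks are placeholders, not code from sched.c.

/* Hypothetical caller, assuming irqs must be off around the trylock. */
static void example_caller(struct rq *rq1, struct rq *rq2)
{
	unsigned long flags;

	local_irq_save(flags);			/* irqs off, as the comment requires */
	if (double_rq_trylock(rq1, rq2)) {
		/* ... operate on both runqueues ... */
		double_rq_unlock(rq1, rq2);	/* drops both (or the one) lock(s) */
	}
	/* on trylock failure no locks are held, so just restore irqs */
	local_irq_restore(flags);
}

Because double_rq_trylock() backs out of rq1->lock when it cannot get
rq2->lock, a return of 0 leaves the caller holding nothing, which is what
lets load_balance() simply skip the move_tasks() call instead of spinning.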