From: Gautham R Shenoy

In the hot-cpu callback function workqueue_cpu_callback(), lock the
workqueue_mutex under CPU_LOCK_ACQUIRE and release it under
CPU_LOCK_RELEASE.  This eliminates the handling of the redundant
events, namely CPU_DOWN_PREPARE and CPU_DOWN_FAILED.

Signed-off-by: Gautham R Shenoy
Signed-off-by: Andrew Morton
---

 kernel/workqueue.c |   18 +++++++-----------
 1 file changed, 7 insertions(+), 11 deletions(-)

diff -puN kernel/workqueue.c~handle-cpu_lock_acquire-and-cpu_lock_release-in-workqueue_cpu_callback kernel/workqueue.c
--- a/kernel/workqueue.c~handle-cpu_lock_acquire-and-cpu_lock_release-in-workqueue_cpu_callback
+++ a/kernel/workqueue.c
@@ -658,8 +658,11 @@ static int __devinit workqueue_cpu_callb
 	struct workqueue_struct *wq;
 
 	switch (action) {
-	case CPU_UP_PREPARE:
+	case CPU_LOCK_ACQUIRE:
 		mutex_lock(&workqueue_mutex);
+		break;
+
+	case CPU_UP_PREPARE:
 		/* Create a new workqueue thread for it. */
 		list_for_each_entry(wq, &workqueues, list) {
 			if (!create_workqueue_thread(wq, hotcpu, 0)) {
@@ -678,7 +681,6 @@ static int __devinit workqueue_cpu_callb
 			kthread_bind(cwq->thread, hotcpu);
 			wake_up_process(cwq->thread);
 		}
-		mutex_unlock(&workqueue_mutex);
 		break;
 
 	case CPU_UP_CANCELED:
@@ -690,15 +692,6 @@ static int __devinit workqueue_cpu_callb
 				any_online_cpu(cpu_online_map));
 			cleanup_workqueue_thread(wq, hotcpu);
 		}
-		mutex_unlock(&workqueue_mutex);
-		break;
-
-	case CPU_DOWN_PREPARE:
-		mutex_lock(&workqueue_mutex);
-		break;
-
-	case CPU_DOWN_FAILED:
-		mutex_unlock(&workqueue_mutex);
 		break;
 
 	case CPU_DEAD:
@@ -706,6 +699,9 @@ static int __devinit workqueue_cpu_callb
 		cleanup_workqueue_thread(wq, hotcpu);
 		list_for_each_entry(wq, &workqueues, list)
 			take_over_work(wq, hotcpu);
+		break;
+
+	case CPU_LOCK_RELEASE:
 		mutex_unlock(&workqueue_mutex);
 		break;
 	}
_
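
For readers following the series: the patch relies on the notifier core
sending CPU_LOCK_ACQUIRE before the first event of a hotplug operation
and CPU_LOCK_RELEASE after the last one, so a single lock/unlock pair
brackets the whole operation.  The fragment below is an illustrative
sketch of the resulting callback shape, not part of the patch;
example_cpu_callback and the bare NOTIFY_OK return are hypothetical
names used only to show the idea, assuming the event semantics
introduced earlier in this series.

/*
 * Sketch only: one lock/unlock pair brackets every hotplug operation,
 * which is why separate CPU_DOWN_PREPARE/CPU_DOWN_FAILED locking
 * becomes redundant.
 */
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_LOCK_ACQUIRE:
		/* Taken once, before CPU_UP_PREPARE or CPU_DOWN_PREPARE. */
		mutex_lock(&workqueue_mutex);
		break;

	case CPU_UP_PREPARE:
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		/* Per-event work runs with workqueue_mutex already held. */
		break;

	case CPU_LOCK_RELEASE:
		/* Dropped once, after the operation succeeds or fails. */
		mutex_unlock(&workqueue_mutex);
		break;
	}
	return NOTIFY_OK;
}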