From: Dipankar Sarma

Fix rcu_barrier() to work properly in a preemptive kernel environment.
Also, the ordering of callbacks must be preserved while moving callbacks
to another CPU during CPU hotplug.

Signed-off-by: Dipankar Sarma
Acked-by: Paul E. McKenney
Signed-off-by: Andrew Morton
---

 kernel/rcuclassic.c |    2 +-
 kernel/rcupdate.c   |   11 +++++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff -puN kernel/rcuclassic.c~rcu-fix-barriers kernel/rcuclassic.c
--- a/kernel/rcuclassic.c~rcu-fix-barriers
+++ a/kernel/rcuclassic.c
@@ -350,9 +350,9 @@ static void __rcu_offline_cpu(struct rcu
 	if (rcp->cur != rcp->completed)
 		cpu_quiet(rdp->cpu, rcp);
 	spin_unlock_bh(&rcp->lock);
+	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
 	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
-	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
 }
 
 static void rcu_offline_cpu(int cpu)
diff -puN kernel/rcupdate.c~rcu-fix-barriers kernel/rcupdate.c
--- a/kernel/rcupdate.c~rcu-fix-barriers
+++ a/kernel/rcupdate.c
@@ -117,7 +117,18 @@ void rcu_barrier(void)
 	mutex_lock(&rcu_barrier_mutex);
 	init_completion(&rcu_barrier_completion);
 	atomic_set(&rcu_barrier_cpu_count, 0);
+	/*
+	 * The queueing of callbacks in all CPUs must be
+	 * atomic with respect to RCU, otherwise one cpu may
+	 * queue a callback, wait for a grace period, decrement
+	 * barrier count and call complete(), while other CPUs
+	 * haven't yet queued anything.  So, we need to make sure
+	 * that no grace period happens until all the callbacks
+	 * are queued.
+	 */
+	rcu_read_lock();
 	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+	rcu_read_unlock();
 	wait_for_completion(&rcu_barrier_completion);
 	mutex_unlock(&rcu_barrier_mutex);
 }
_
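
To see why the hotplug hunk reorders the rcu_move_batch() calls, it helps
to look at the shape of the helper itself.  The sketch below is
reconstructed from kernels of this vintage, not taken from this patch:
each moved batch is spliced onto the tail of the surviving CPU's
->nxtlist, so the batches must be moved oldest-first (donelist, then
curlist, then nxtlist), or callbacks that have already passed through a
grace period would end up queued behind newer ones, breaking the per-CPU
FIFO ordering that callers of call_rcu() rely on.

/*
 * Reconstructed sketch of the hotplug helper (not part of this patch):
 * splice a dying CPU's callback batch onto this CPU's ->nxtlist.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
			   struct rcu_head **tail)
{
	local_irq_disable();
	/* Append the donor batch at our current tail... */
	*this_rdp->nxttail = list;
	/* ...and, if the batch was non-empty, adopt its tail as ours. */
	if (list)
		this_rdp->nxttail = tail;
	local_irq_enable();
}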
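
The rcu_read_lock()/rcu_read_unlock() pair around on_each_cpu() is best
understood next to the per-CPU helper it wraps.  Below is a reconstructed
sketch of how rcu_barrier_func() and its callback looked in kernels of
this era (the per-CPU head's name is an assumption): every CPU increments
rcu_barrier_cpu_count and queues a callback, and the callback that brings
the count back to zero completes the barrier.  On a preemptible kernel,
without a read-side critical section, a grace period could elapse after
an early CPU's callback had run and dropped the count to zero, letting
rcu_barrier() return before the remaining CPUs had queued anything.

/*
 * Reconstructed sketch (not part of this patch); the per-CPU head
 * name rcu_barrier_head is an assumption.
 */
static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head);

static void rcu_barrier_callback(struct rcu_head *notused)
{
	/* The callback that drops the count to zero releases the waiter. */
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}

static void rcu_barrier_func(void *notused)
{
	int cpu = smp_processor_id();
	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);

	/* Account for this CPU, then queue its barrier callback. */
	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu(head, rcu_barrier_callback);
}

With the reader lock held across on_each_cpu(), no grace period can
complete until every CPU has queued its callback, which closes that
window.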