Implement flushing directly from the cpu that owns the active slab instead
of remote flushing (which may be subject to races).

Signed-off-by: Christoph Lameter

Index: linux-2.6.18-rc5-mm1/mm/slabifier.c
===================================================================
--- linux-2.6.18-rc5-mm1.orig/mm/slabifier.c	2006-09-01 15:20:51.915297648 -0700
+++ linux-2.6.18-rc5-mm1/mm/slabifier.c	2006-09-02 13:27:24.799157503 -0700
@@ -22,7 +22,8 @@ struct slab {
 	struct slab_cache sc;
 #ifdef CONFIG_SMP
-	int flusher_active;
+	struct mutex flushing;
+	atomic_t active_cpus;	/* if >0 then flusher is scheduled */
 	struct work_struct flush;
 #endif
 	atomic_t refcount;	/* Refcount for destroy */
@@ -346,66 +347,79 @@ static void __always_inline deactivate_s
 }
 
 /*
- * Deactivate slab if we have an active slab.
+ * Flush active slab
+ * Called from IPI handler with interrupts disabled.
  */
-static void flush_active(struct slab *s, int cpu)
+static void flush_active(void *d)
 {
-	struct page *page;
-	unsigned long flags;
+	struct slab *s = d;
+	int cpu = smp_processor_id();
+	struct page *page = s->active[cpu];
 
-	local_irq_save(flags);
 	page = s->active[cpu];
 	if (likely(page)) {
 		slab_lock(page);
 		deactivate_slab(s, page, cpu);
 	}
-	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_SMP
 /*
- * Check active per cpu slabs and flush them if they are not in use.
+ * Called from IPI during flushing to check and flush active slabs.
  */
-void flusher(void *d)
+void check_flush_active(void *d)
{
 	struct slab *s = d;
 	int cpu = smp_processor_id();
-	struct page *page;
-	int nr_active = 0;
-
-	for_each_online_cpu(cpu) {
+	struct page *page = s->active[cpu];
 
-		page = s->active[cpu];
-		if (!page)
-			continue;
+	if (!page)
+		return;
 
-		if (PageReferenced(page)) {
-			ClearPageReferenced(page);
-			nr_active++;
-		} else
-			flush_active(s, cpu);
+	if (PageReferenced(page)) {
+		ClearPageReferenced(page);
+		atomic_inc(&s->active_cpus);
+	} else {
+		slab_lock(page);
+		deactivate_slab(s, page, cpu);
 	}
-	if (nr_active)
+}
+
+/*
+ * Called from eventd
+ */
+void flusher(void *d)
+{
+	struct slab *s = d;
+
+	if (!mutex_trylock(&s->flushing))
+		return;
+
+	atomic_set(&s->active_cpus, num_online_cpus());
+	on_each_cpu(check_flush_active, s, 1, 1);
+	if (atomic_read(&s->active_cpus))
 		schedule_delayed_work(&s->flush, 2 * HZ);
-	else
-		s->flusher_active = 0;
+	mutex_unlock(&s->flushing);
 }
 
 static void drain_all(struct slab *s)
 {
-	int cpu;
-
-	if (s->flusher_active) {
+	if (atomic_read(&s->active_cpus)) {
+		mutex_lock(&s->flushing);
 		cancel_delayed_work(&s->flush);
-		for_each_possible_cpu(cpu)
-			flush_active(s, cpu);
-		s->flusher_active = 0;
+		atomic_set(&s->active_cpus, 0);
+		on_each_cpu(flush_active, s, 1, 1);
+		mutex_unlock(&s->flushing);
 	}
 }
 #else
 static void drain_all(struct slab *s)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	flush_active(s, 0);
+	local_irq_restore(flags);
 }
 #endif
@@ -490,8 +504,8 @@ gotpage:
 	check_free_chain(s, page);
 
 #ifdef CONFIG_SMP
-	if (keventd_up() && !s->flusher_active) {
-		s->flusher_active = 1;
+	if (keventd_up() && !atomic_read(&s->active_cpus)) {
+		atomic_inc(&s->active_cpus);
 		schedule_delayed_work(&s->flush, 2 * HZ);
 	}
 #endif
@@ -680,7 +694,7 @@ static struct slab_cache *slab_create(st
 	atomic_long_set(&s->nr_slabs, 0);
 	s->nr_partial = 0;
 #ifdef CONFIG_SMP
-	s->flusher_active = 0;
+	atomic_set(&s->active_cpus, 0);
 	INIT_WORK(&s->flush, &flusher, s);
 #endif
 	if (!s->objects)
@@ -690,7 +704,7 @@ static struct slab_cache *slab_create(st
 	atomic_set(&s->refcount, 1);
 	spin_lock_init(&s->list_lock);
-
+	mutex_init(&s->flushing);
 	for_each_possible_cpu(cpu)
 		s->active[cpu] = NULL;
 	return &s->sc;
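
For illustration only, here is a minimal user-space sketch (not kernel code) of the scheme the patch switches to: each owner deactivates its own active slab when asked (the plain loop stands in for on_each_cpu() and the IPI handlers), a mutex serializes the periodic flusher against draining, and an atomic counter records whether another pass is still needed. The cpu_slab struct and the pthread/C11-atomics plumbing are assumptions of the sketch, and the counter bookkeeping is simplified relative to flusher()/check_flush_active() above.

/*
 * User-space sketch of "flush from the owning cpu".  Build with:
 *	cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4

struct cpu_slab {
	void *active;			/* simulated per-cpu active slab */
	atomic_bool referenced;		/* used since the last flush pass? */
};

static struct cpu_slab slabs[NCPUS];
static pthread_mutex_t flushing = PTHREAD_MUTEX_INITIALIZER;
static atomic_int active_cpus;		/* >0: another flush pass is needed */

/* Runs in the context of the owning cpu (stand-in for the IPI handler). */
static void check_flush_active(int cpu)
{
	struct cpu_slab *c = &slabs[cpu];

	if (!c->active)
		return;
	if (atomic_exchange(&c->referenced, false)) {
		/* Recently used: keep the slab but remember to rescan. */
		atomic_fetch_add(&active_cpus, 1);
	} else {
		/* Idle: the owner deactivates its own slab, no remote access. */
		c->active = NULL;
	}
}

/* Periodic flusher (stand-in for flusher() run from keventd). */
static void flusher(void)
{
	if (pthread_mutex_trylock(&flushing))
		return;			/* a flush or drain is already running */

	atomic_store(&active_cpus, 0);
	for (int cpu = 0; cpu < NCPUS; cpu++)
		check_flush_active(cpu);	/* stand-in for on_each_cpu() */

	if (atomic_load(&active_cpus))
		printf("reschedule flusher: %d cpus still active\n",
			atomic_load(&active_cpus));
	pthread_mutex_unlock(&flushing);
}

int main(void)
{
	slabs[1].active = &slabs[1];		/* cpu 1 holds an active slab */
	atomic_store(&slabs[1].referenced, true);

	flusher();	/* pass 1: cpu 1 was referenced, keep slab, reschedule */
	flusher();	/* pass 2: cpu 1 was idle, it flushes its own slab */
	printf("cpu 1 active slab after pass 2: %p\n", slabs[1].active);
	return 0;
}

As in the patch, the point is that no context ever locks or clears another owner's active slab; the "recently referenced" hint only delays the flush by one pass.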