From: Christoph Lameter

The list_lock also protects the shared array and we call drain_array() with
the shared array.  Therefore we cannot go as far as I wanted to but have to
take the lock in a way so that it also protects the array_cache in
drain_pages.

(Note: maybe we should make the array_cache locking more consistent?  I.e.
always take the array cache lock for shared arrays and disable interrupts
for the per cpu arrays?)

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
---

 mm/slab.c |   21 ++++++++++++---------
 1 files changed, 12 insertions(+), 9 deletions(-)

diff -puN mm/slab.c~slab-fix-drain_array-so-that-it-works-correctly-with-the-shared_array mm/slab.c
--- devel/mm/slab.c~slab-fix-drain_array-so-that-it-works-correctly-with-the-shared_array	2006-03-11 02:46:25.000000000 -0800
+++ devel-akpm/mm/slab.c	2006-03-11 02:46:25.000000000 -0800
@@ -3521,7 +3521,8 @@ static void enable_cpucache(struct kmem_
 
 /*
  * Drain an array if it contains any elements taking the l3 lock only if
- * necessary.
+ * necessary. Note that the l3 listlock also protects the array_cache
+ * if drain_array() is used on the shared array.
  */
 void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
 			 struct array_cache *ac, int force, int node)
@@ -3532,16 +3533,18 @@ void drain_array(struct kmem_cach
 		return;
 	if (ac->touched && !force) {
 		ac->touched = 0;
-	} else if (ac->avail) {
-		tofree = force ? ac->avail : (ac->limit + 4) / 5;
-		if (tofree > ac->avail)
-			tofree = (ac->avail + 1) / 2;
+	} else {
 		spin_lock_irq(&l3->list_lock);
-		free_block(cachep, ac->entry, tofree, node);
+		if (ac->avail) {
+			tofree = force ? ac->avail : (ac->limit + 4) / 5;
+			if (tofree > ac->avail)
+				tofree = (ac->avail + 1) / 2;
+			free_block(cachep, ac->entry, tofree, node);
+			ac->avail -= tofree;
+			memmove(ac->entry, &(ac->entry[tofree]),
+				sizeof(void *) * ac->avail);
+		}
 		spin_unlock_irq(&l3->list_lock);
-		ac->avail -= tofree;
-		memmove(ac->entry, &(ac->entry[tofree]),
-			sizeof(void *) * ac->avail);
 	}
 }
_
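
For reference, here is a sketch of how drain_array() reads after the patch is
applied, reconstructed only from the hunks above.  The lines in front of the
bare "return;" (the guard check and the tofree declaration) are not visible in
the hunk context, so they are elided or marked as assumptions here rather than
quoted from mm/slab.c; this is an illustration of the resulting control flow,
not the verbatim file.

	void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
			 struct array_cache *ac, int force, int node)
	{
		int tofree;	/* declaration assumed; not shown in the hunk */

		/* ... guard checks elided (only "return;" appears as hunk context) ... */

		if (ac->touched && !force) {
			/* Recently used: just clear the touched flag, free nothing. */
			ac->touched = 0;
		} else {
			/*
			 * Take the l3 list_lock before looking at ac->avail.  For the
			 * shared array this lock is also what protects the array_cache
			 * contents, so the check and the drain must both happen under it.
			 */
			spin_lock_irq(&l3->list_lock);
			if (ac->avail) {
				tofree = force ? ac->avail : (ac->limit + 4) / 5;
				if (tofree > ac->avail)
					tofree = (ac->avail + 1) / 2;
				free_block(cachep, ac->entry, tofree, node);
				ac->avail -= tofree;
				/* Slide the remaining pointers down to the front. */
				memmove(ac->entry, &(ac->entry[tofree]),
					sizeof(void *) * ac->avail);
			}
			spin_unlock_irq(&l3->list_lock);
		}
	}

The visible difference from the old code is that ac->avail is inspected and
updated only while the list_lock is held, instead of computing tofree and
adjusting the array outside the lock and only taking it around free_block().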