Index: linux-2.6.16-rc4-mm2/mm/slab.c
===================================================================
--- linux-2.6.16-rc4-mm2.orig/mm/slab.c	2006-02-24 10:33:54.000000000 -0800
+++ linux-2.6.16-rc4-mm2/mm/slab.c	2006-02-28 19:41:09.000000000 -0800
@@ -183,6 +183,12 @@
 #endif
 
 /*
+ * Maximum size of an alien arraycache. Alien arraycaches are used when
+ * an object located on a remote node is freed.
+ */
+#define MAX_ALIEN 12
+
+/*
  * kmem_bufctl_t:
  *
  * Bufctl's are used for linking objs within a slab
@@ -911,7 +917,7 @@ static struct array_cache **alloc_alien_
 	int i;
 
 	if (limit > 1)
-		limit = 12;
+		limit = MAX_ALIEN;
 	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
 	if (ac_ptr) {
 		for_each_node(i) {
@@ -948,10 +954,46 @@ static void __drain_alien_cache(struct k
 	struct kmem_list3 *rl3 = cachep->nodelists[node];
 
 	if (ac->avail) {
-		spin_lock(&rl3->list_lock);
-		free_block(cachep, ac->entry, ac->avail, node);
+		void *aliens[MAX_ALIEN];
+		int nr = ac->avail;
+		struct array_cache *shared;
+
+		/*
+		 * Copy the alien cache so that interrupts can be
+		 * reenabled. We are going to access off node
+		 * items following the unlock and would not want
+		 * those delays with interrupts disabled.
+		 */
+		BUG_ON(nr > MAX_ALIEN);
+		memcpy(aliens, ac->entry, ac->avail * sizeof(void *));
 		ac->avail = 0;
+		spin_unlock_irq(&ac->lock);
+
+		spin_lock(&rl3->list_lock);
+		/*
+		 * Try to shift alien objects into the shared arraycache
+		 * of the remote node.
+		 */
+		shared = rl3->shared;
+		while (nr && shared->avail < shared->limit) {
+			nr--;
+			shared->entry[shared->avail] = aliens[nr];
+			shared->avail++;
+		}
+
+		/*
+		 * Leftovers will still require free_block but
+		 * since it is remote we can do this without
+		 * disabling interrupts.
+		 */
+		if (nr)
+			free_block(cachep, aliens, nr, node);
 		spin_unlock(&rl3->list_lock);
+
+		/*
+		 * Reacquire the earlier lock and redisable interrupts
+		 */
+		spin_lock_irq(&ac->lock);
 	}
 }
 
@@ -3063,10 +3105,10 @@ static inline void __cache_free(struct k
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
 		spin_lock(&alien->lock);
+		alien->entry[alien->avail++] = objp;
 		if (unlikely(alien->avail == alien->limit))
 			__drain_alien_cache(cachep, alien, nodeid);
-		alien->entry[alien->avail++] = objp;
 		spin_unlock(&alien->lock);
 	} else {
 		spin_lock(&(cachep->nodelists[nodeid])->
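
For reference, here is a minimal userspace sketch of the locking pattern the new __drain_alien_cache follows: snapshot the pending pointers into a small on-stack batch while holding the source lock, drop that lock (in the kernel case so interrupts can be re-enabled), and only then push the batch into the destination under its own lock, with anything that does not fit handled the way free_block() handles leftovers. This is not kernel code; the names toy_cache and drain() are made up for illustration, and pthread mutexes stand in for the irq-disabling spinlocks.

/* Illustration only: mimic the drain pattern with pthread mutexes. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_ALIEN 12

/* Toy stand-in for the kernel's array_cache: a lock plus a pointer stack. */
struct toy_cache {
	pthread_mutex_t lock;
	int avail;
	int limit;
	void *entry[64];
};

/*
 * Drain src into dst without holding src->lock across the work on dst:
 * copy the pending pointers to a local batch, drop the source lock, then
 * push the batch into dst under dst's own lock.  Leftovers that do not
 * fit are reported where the kernel would call free_block().
 */
static void drain(struct toy_cache *src, struct toy_cache *dst)
{
	void *batch[MAX_ALIEN];
	int nr;

	pthread_mutex_lock(&src->lock);
	nr = src->avail;
	assert(nr <= MAX_ALIEN);		/* mirrors the BUG_ON() above */
	memcpy(batch, src->entry, nr * sizeof(void *));
	src->avail = 0;
	pthread_mutex_unlock(&src->lock);	/* source lock held only briefly */

	pthread_mutex_lock(&dst->lock);
	while (nr && dst->avail < dst->limit)
		dst->entry[dst->avail++] = batch[--nr];
	pthread_mutex_unlock(&dst->lock);

	if (nr)
		printf("%d leftover objects would go to free_block()\n", nr);
}

int main(void)
{
	static int objs[MAX_ALIEN];
	struct toy_cache alien  = { PTHREAD_MUTEX_INITIALIZER, 0, MAX_ALIEN, { 0 } };
	struct toy_cache shared = { PTHREAD_MUTEX_INITIALIZER, 0, 8, { 0 } };
	int i;

	/* Fill the "alien" cache, then drain it into the smaller "shared" one. */
	for (i = 0; i < MAX_ALIEN; i++)
		alien.entry[alien.avail++] = &objs[i];

	drain(&alien, &shared);
	printf("shared now holds %d objects\n", shared.avail);
	return 0;
}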