Slight optimization in off-node kfree: when the alien cache fills up in __cache_free(), do not throw the whole alien list away; drain only part of it and keep a piece. This is done by dropping the __drain_alien_cache() wrapper and calling drain_array() directly, passing force = 0 in the __cache_free() path. A rough sketch of the forced vs. non-forced drain follows the patch.

Index: linux-2.6.16-rc5-mm2/mm/slab.c
===================================================================
--- linux-2.6.16-rc5-mm2.orig/mm/slab.c	2006-03-03 17:49:25.000000000 -0800
+++ linux-2.6.16-rc5-mm2/mm/slab.c	2006-03-03 18:16:04.000000000 -0800
@@ -956,12 +956,6 @@ static void drain_array(struct kmem_cach
 			struct kmem_list3 *l3, struct array_cache *ac,
 			int force, int node);
 
-static void __drain_alien_cache(struct kmem_cache *cachep,
-				struct array_cache *ac, int node)
-{
-	drain_array(cachep, cachep->nodelists[node], ac, 1, node);
-}
-
 /*
  * Called from cache_reap() to regularly drain alien caches round robin.
  */
@@ -973,7 +967,8 @@ static void reap_alien(struct kmem_cache
 		struct array_cache *ac = l3->alien[node];
 		if (ac && ac->avail) {
 			spin_lock_irq(&ac->lock);
-			__drain_alien_cache(cachep, ac, node);
+			drain_array(cachep, cachep->nodelists[node],
+					ac, 1, node);
 			spin_unlock_irq(&ac->lock);
 		}
 	}
@@ -990,7 +985,7 @@ static void drain_alien_cache(struct kme
 		ac = alien[i];
 		if (ac) {
 			spin_lock_irqsave(&ac->lock, flags);
-			__drain_alien_cache(cachep, ac, i);
+			drain_array(cachep, cachep->nodelists[i], ac, 1, i);
 			spin_unlock_irqrestore(&ac->lock, flags);
 		}
 	}
@@ -3067,8 +3062,8 @@ static inline void __cache_free(struct k
 			alien = l3->alien[nodeid];
 			spin_lock(&alien->lock);
 			if (unlikely(alien->avail == alien->limit))
-				__drain_alien_cache(cachep,
-						alien, nodeid);
+				drain_array(cachep,
+					l3, alien, 0, nodeid);
 			alien->entry[alien->avail++] = objp;
 			spin_unlock(&alien->lock);
 		} else
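
For reference, here is a minimal user-space sketch of the forced vs. non-forced
drain that the patch relies on. The toy_cache struct and the toy_drain() /
free_obj() names are made up for illustration, and the "free about a fifth of
the limit" fraction is an assumption about drain_array()'s non-forced
behaviour, not something taken from this patch:

#include <string.h>

/*
 * Simplified stand-in for struct array_cache: a small per-node stack of
 * object pointers plus its fill level and capacity.
 */
struct toy_cache {
	unsigned int avail;		/* objects currently cached */
	unsigned int limit;		/* capacity of entry[] */
	void *entry[64];
};

/*
 * Hypothetical helper mirroring the force semantics used above: a forced
 * drain empties the cache completely, a non-forced drain frees only a
 * fraction of the limit so that later remote frees still hit the cache.
 * The exact fraction is illustrative.
 */
static void toy_drain(struct toy_cache *ac, int force, void (*free_obj)(void *))
{
	unsigned int tofree, i;

	if (!ac->avail)
		return;

	tofree = force ? ac->avail : (ac->limit + 4) / 5;
	if (tofree > ac->avail)
		tofree = (ac->avail + 1) / 2;

	for (i = 0; i < tofree; i++)
		free_obj(ac->entry[i]);

	/*
	 * Slide the surviving pointers down.  With force == 0 this is the
	 * "keep a piece" part: the remaining objects stay cached instead of
	 * being handed back to the remote node immediately.
	 */
	ac->avail -= tofree;
	memmove(ac->entry, &ac->entry[tofree], sizeof(void *) * ac->avail);
}

The removed __drain_alien_cache() corresponds to the forced drain; the new
drain_array() call in __cache_free() with force = 0 corresponds to the partial
drain, which keeps part of the alien array warm for the next burst of off-node
frees instead of emptying it every time it hits the limit.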