Index: linux-2.6.17-rc6-mm2/mm/slab.c
===================================================================
--- linux-2.6.17-rc6-mm2.orig/mm/slab.c	2006-06-17 14:32:37.418007798 -0700
+++ linux-2.6.17-rc6-mm2/mm/slab.c	2006-06-17 17:11:27.217078762 -0700
@@ -222,7 +222,10 @@ struct slab {
 	void *s_mem;		/* including colour offset */
 	unsigned int inuse;	/* num of objs active in slab */
 	kmem_bufctl_t free;
-	unsigned short nodeid;
+	short nodeid;		/* The node number for the l3 list structure
+				 * that this slab is a member of. -1 means
+				 * not on any lists.
+				 */
 };
 
 /*
@@ -3103,20 +3106,21 @@ static void free_block(struct kmem_cache
 
 	/* fixup slab chains */
 	if (slabp->inuse == 0) {
-		if (l3->free_objects > l3->free_limit) {
-			l3->free_objects -= cachep->num;
-			/*
-			 * It is safe to drop the lock. The slab is
-			 * no longer linked to the cache. cachep
-			 * cannot disappear - we are using it and
-			 * all destruction of caches must be
-			 * serialized properly by the user.
-			 */
-			spin_unlock(&l3->list_lock);
-			slab_destroy(cachep, slabp);
-			spin_lock(&l3->list_lock);
-		} else {
-			list_add(&slabp->list, &l3->slabs_free);
+		if (slabp->nodeid >= 0) {
+			if (l3->free_objects > l3->free_limit) {
+				l3->free_objects -= cachep->num;
+				/*
+				 * It is safe to drop the lock. The slab is
+				 * no longer linked to the cache. cachep
+				 * cannot disappear - we are using it and
+				 * all destruction of caches must be
+				 * serialized properly by the user.
+				 */
+				spin_unlock(&l3->list_lock);
+				slab_destroy(cachep, slabp);
+				spin_lock(&l3->list_lock);
+			} else
+				list_add(&slabp->list, &l3->slabs_free);
 		}
 	} else {
 		/* Unconditionally move a slab to the end of the
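
For readers outside the kernel tree, here is a minimal userspace sketch of the decision the patched free_block() now makes: a completely free slab is only destroyed or parked on the free list while it is still attached to a node (nodeid >= 0); a detached slab (nodeid == -1) is left for its current owner to dispose of. The toy_slab and toy_node_lists structures and fixup_slab() are hypothetical stand-ins invented for this sketch, not kernel APIs, and the list_lock handling and slabs_free list manipulation of the real code are omitted.

#include <stdio.h>

/* Hypothetical stand-in for struct slab: only the fields the branch reads. */
struct toy_slab {
	unsigned int inuse;	/* objects still allocated from this slab */
	short nodeid;		/* owning node for the l3 lists, -1 = detached */
};

/* Hypothetical stand-in for the per-node accounting in struct kmem_list3. */
struct toy_node_lists {
	unsigned long free_objects;
	unsigned long free_limit;
};

/*
 * Mirrors the patched branch in free_block(): slabs with live objects stay
 * on the partial list, detached slabs (nodeid < 0) are skipped, and attached
 * free slabs are either destroyed (node over free_limit) or kept around on
 * the node's free list.
 */
static const char *fixup_slab(struct toy_slab *slab,
			      struct toy_node_lists *l3,
			      unsigned long objs_per_slab)
{
	if (slab->inuse != 0)
		return "still in use, keep on partial list";
	if (slab->nodeid < 0)
		return "detached slab, not on any l3 list, leave it alone";
	if (l3->free_objects > l3->free_limit) {
		l3->free_objects -= objs_per_slab;
		return "over free_limit, destroy slab";
	}
	return "park slab on slabs_free";
}

int main(void)
{
	struct toy_node_lists l3 = { .free_objects = 40, .free_limit = 32 };
	struct toy_slab attached = { .inuse = 0, .nodeid = 0 };
	struct toy_slab detached = { .inuse = 0, .nodeid = -1 };

	printf("%s\n", fixup_slab(&attached, &l3, 8));	/* destroyed */
	printf("%s\n", fixup_slab(&detached, &l3, 8));	/* skipped */
	return 0;
}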