Index: linux-2.6.18-rc4/mm/slab.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/slab.c	2006-08-08 18:48:18.652797400 -0700
+++ linux-2.6.18-rc4/mm/slab.c	2006-08-08 18:51:37.748889483 -0700
@@ -2646,7 +2646,6 @@ static int cache_grow(struct kmem_cache
 	/* Make slab active. */
 	list_add_tail(&slabp->list, &(l3->slabs_free));
 	STATS_INC_GROWN(cachep);
-	l3->free_objects += cachep->num;
 	spin_unlock(&l3->list_lock);
 	return 1;
 opps1:
@@ -2819,6 +2818,19 @@ static struct slab *get_slab(struct kmem
 	return slab;
 }
 
+/*
+ * Move slabp to correct list.
+ * Must be called under list_lock.
+ */
+static void put_slab(struct kmem_list3 *l3, struct slab *slab)
+{
+	l3->free_objects += slab->inuse;
+	if (slab->free == BUFCTL_END)
+		list_add(&slab->list, &l3->slabs_full);
+	else
+		list_add(&slab->list, &l3->slabs_partial);
+}
+
 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 {
 	int batchcount;
@@ -2862,12 +2874,7 @@ retry:
 						    numa_node_id());
 		}
 		check_slabp(cachep, slabp);
-
-		/* move slabp to correct slabp list: */
-		if (slabp->free == BUFCTL_END)
-			list_add(&slabp->list, &l3->slabs_full);
-		else
-			list_add(&slabp->list, &l3->slabs_partial);
+		put_slab(l3, slabp);
 	}
 
 alloc_done:
@@ -3055,12 +3062,7 @@ retry:
 
 	obj = slab_get_obj(cachep, slabp, nodeid);
 	check_slabp(cachep, slabp);
-	/* move slabp to correct slabp list: */
-	if (slabp->free == BUFCTL_END)
-		list_add(&slabp->list, &l3->slabs_full);
-	else
-		list_add(&slabp->list, &l3->slabs_partial);
-
+	put_slab(l3, slabp);
 	spin_unlock(&l3->list_lock);
 	goto done;
 
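
For readers who want to see the shape of the refactoring outside the kernel tree, below is a minimal, self-contained userspace sketch of the pattern the patch applies: two open-coded "move this slab to the right list" sequences collapsed into one put_slab() helper. The types here (a struct slab with a plain next pointer instead of struct list_head, a cut-down struct kmem_list3, and the BUFCTL_END value) are hypothetical stand-ins for illustration, not the kernel's definitions, and the l3->list_lock requirement is omitted.

/*
 * Illustrative sketch only, not kernel code: simplified stand-in types
 * showing the refactoring pattern the patch applies. A singly linked
 * next pointer replaces struct list_head, and locking is left out.
 */
#include <stdio.h>

#define BUFCTL_END (~0U)	/* sentinel: no free objects left on the slab */

struct slab {
	unsigned int free;	/* index of first free object, or BUFCTL_END */
	unsigned int inuse;	/* objects currently allocated from this slab */
	struct slab *next;	/* stand-in for struct list_head */
};

struct kmem_list3 {
	struct slab *slabs_full;
	struct slab *slabs_partial;
	unsigned long free_objects;
};

/* Single helper mirroring the patch's put_slab(): one place decides
 * which list the slab belongs on and does the accounting. */
static void put_slab(struct kmem_list3 *l3, struct slab *slab)
{
	l3->free_objects += slab->inuse;	/* accounting as in the patch */
	if (slab->free == BUFCTL_END) {
		slab->next = l3->slabs_full;
		l3->slabs_full = slab;
	} else {
		slab->next = l3->slabs_partial;
		l3->slabs_partial = slab;
	}
}

int main(void)
{
	struct kmem_list3 l3 = { 0 };
	struct slab full = { .free = BUFCTL_END, .inuse = 8 };
	struct slab partial = { .free = 3, .inuse = 5 };

	put_slab(&l3, &full);		/* no free objects -> slabs_full */
	put_slab(&l3, &partial);	/* free objects left -> slabs_partial */

	printf("full list head inuse:    %u\n", l3.slabs_full->inuse);
	printf("partial list head inuse: %u\n", l3.slabs_partial->inuse);
	printf("free_objects:            %lu\n", l3.free_objects);
	return 0;
}

The point of the factoring is that the slabs_full/slabs_partial decision lives in a single helper, so the two call sites in cache_alloc_refill() and the NUMA allocation path cannot drift apart.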