Index: linux-2.6.18-rc4/mm/slab.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/slab.c	2006-08-08 16:31:05.730000310 -0700
+++ linux-2.6.18-rc4/mm/slab.c	2006-08-08 18:48:18.652797400 -0700
@@ -2797,6 +2797,28 @@ bad:
 #define check_slabp(x,y) do { } while(0)
 #endif
 
+/*
+ * Get a slab from the indicated cache.
+ * We hold list_lock when called.
+ */
+static struct slab *get_slab(struct kmem_list3 *l3)
+{
+	struct list_head *entry;
+	struct slab *slab;
+
+	entry = l3->slabs_partial.next;
+	if (entry == &l3->slabs_partial) {
+		entry = l3->slabs_free.next;
+		if (entry == &l3->slabs_free)
+			return NULL;
+	}
+	l3->free_touched = 1;
+	slab = list_entry(entry, struct slab, list);
+	list_del(&slab->list);
+	l3->free_objects -= slab->inuse;
+	return slab;
+}
+
 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 {
 	int batchcount;
@@ -2825,18 +2847,10 @@ retry:
 		goto alloc_done;
 
 	while (batchcount > 0) {
-		struct list_head *entry;
-		struct slab *slabp;
-		/* Get slab alloc is to come from. */
-		entry = l3->slabs_partial.next;
-		if (entry == &l3->slabs_partial) {
-			l3->free_touched = 1;
-			entry = l3->slabs_free.next;
-			if (entry == &l3->slabs_free)
-				goto must_grow;
-		}
+		struct slab *slabp = get_slab(l3);
 
-		slabp = list_entry(entry, struct slab, list);
+		if (!slabp)
+			goto alloc_done;
 		check_slabp(cachep, slabp);
 		check_spinlock_acquired(cachep);
 		while (slabp->inuse < cachep->num && batchcount--) {
@@ -2850,15 +2864,12 @@ retry:
 		check_slabp(cachep, slabp);
 
 		/* move slabp to correct slabp list: */
-		list_del(&slabp->list);
 		if (slabp->free == BUFCTL_END)
 			list_add(&slabp->list, &l3->slabs_full);
 		else
 			list_add(&slabp->list, &l3->slabs_partial);
 	}
 
-must_grow:
-	l3->free_objects -= ac->avail;
 alloc_done:
 	spin_unlock(&l3->list_lock);
 
@@ -3018,7 +3029,6 @@ static void *alternate_node_alloc(struct
 static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 				int nodeid)
 {
-	struct list_head *entry;
 	struct slab *slabp;
 	struct kmem_list3 *l3;
 	void *obj;
@@ -3030,15 +3040,10 @@ static void *__cache_alloc_node(struct k
 retry:
 	check_irq_off();
 	spin_lock(&l3->list_lock);
-	entry = l3->slabs_partial.next;
-	if (entry == &l3->slabs_partial) {
-		l3->free_touched = 1;
-		entry = l3->slabs_free.next;
-		if (entry == &l3->slabs_free)
-			goto must_grow;
-	}
+	slabp = get_slab(l3);
+	if (!slabp)
+		goto must_grow;
 
-	slabp = list_entry(entry, struct slab, list);
 	check_spinlock_acquired_node(cachep, nodeid);
 	check_slabp(cachep, slabp);
 
@@ -3050,10 +3055,7 @@ retry:
 	obj = slab_get_obj(cachep, slabp, nodeid);
 	check_slabp(cachep, slabp);
-	l3->free_objects--;
 	/* move slabp to correct slabp list: */
-	list_del(&slabp->list);
-
 	if (slabp->free == BUFCTL_END)
 		list_add(&slabp->list, &l3->slabs_full);
 	else
 		list_add(&slabp->list, &l3->slabs_partial);
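
For readers who want to try the helper outside the kernel tree, below is a minimal
user-space sketch of the pattern get_slab() factors out: prefer a partially used
slab, fall back to a fully free one, detach it from whichever list it sat on, and
return it to the caller. The types and helpers (struct node_lists, list_init, the
simplified list_add/list_del/list_entry) are stand-ins written for this sketch,
not the kernel's definitions, and locking is omitted since the caller is assumed
to already hold the list lock.

	/* sketch.c -- simplified stand-in types, not kernel code */
	#include <stddef.h>
	#include <stdio.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	static void list_init(struct list_head *h) { h->next = h->prev = h; }

	/* Insert 'new' right after 'head' (same effect as the kernel's list_add). */
	static void list_add(struct list_head *new, struct list_head *head)
	{
		new->next = head->next;
		new->prev = head;
		head->next->prev = new;
		head->next = new;
	}

	/* Unlink 'entry' from its list. */
	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
	}

	/* Recover the containing structure from an embedded list_head. */
	#define list_entry(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct slab {
		struct list_head list;
		int inuse;		/* objects handed out from this slab */
	};

	struct node_lists {		/* stand-in for struct kmem_list3 */
		struct list_head slabs_partial;
		struct list_head slabs_free;
		int free_objects;
		int free_touched;
	};

	/* Same shape as the patch's get_slab(); caller holds the lock. */
	static struct slab *get_slab(struct node_lists *l3)
	{
		struct list_head *entry = l3->slabs_partial.next;
		struct slab *slab;

		if (entry == &l3->slabs_partial) {
			entry = l3->slabs_free.next;
			if (entry == &l3->slabs_free)
				return NULL;	/* both lists empty: caller must grow */
		}
		l3->free_touched = 1;
		slab = list_entry(entry, struct slab, list);
		list_del(&slab->list);
		l3->free_objects -= slab->inuse;
		return slab;
	}

	int main(void)
	{
		struct node_lists l3;
		struct slab s = { .inuse = 0 };
		struct slab *got;

		list_init(&l3.slabs_partial);
		list_init(&l3.slabs_free);
		l3.free_objects = 4;
		l3.free_touched = 0;

		list_add(&s.list, &l3.slabs_free);	/* one fully free slab */
		got = get_slab(&l3);
		printf("got slab: %s, free_objects now %d\n",
		       got == &s ? "yes" : "no", l3.free_objects);
		return 0;
	}

The sketch mirrors the helper's contract as stated in the patch comment: the slab
is removed from its list before being returned, so the caller is responsible for
re-adding it to slabs_full or slabs_partial once it has allocated from it.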