slab: Remove gotos from __cache_alloc_node and cache_alloc_refill

Extract the common get_slab and put_slab functions.  Move the logic that
puts a slab onto the freelists out of cache_grow() and make cache_grow()
return a pointer to the slab it allocated.

Signed-off-by: Christoph Lameter

Index: linux-2.6.18-rc4/mm/slab.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/slab.c	2006-08-08 18:01:00.160222458 -0700
+++ linux-2.6.18-rc4/mm/slab.c	2006-08-08 18:01:15.171990181 -0700
@@ -2568,23 +2568,30 @@ static void slab_map_pages(struct kmem_c
 /*
  * Grow (by 1) the number of slabs within a cache. This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
+ * When cache_grow is called the list_lock is held. We drop the lock
+ * before calling the page allocator.
+ *
+ * Return with the object and the list_lock held if successful.
+ * Otherwise return NULL and do not take the list_lock.
  */
-static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static struct slab *cache_grow(struct kmem_cache *cachep, struct kmem_list3 *l3,
+				gfp_t flags, int nodeid)
 {
 	struct slab *slabp;
 	void *objp;
 	size_t offset;
 	gfp_t local_flags;
 	unsigned long ctor_flags;
-	struct kmem_list3 *l3;
 
 	/*
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
 	BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
-	if (flags & SLAB_NO_GROW)
-		return 0;
+	if (flags & SLAB_NO_GROW) {
+		spin_unlock(&l3->list_lock);
+		return NULL;
+	}
 
 	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 	local_flags = (flags & SLAB_LEVEL_MASK);
@@ -2597,8 +2604,6 @@ static int cache_grow(struct kmem_cache
 
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
-	l3 = cachep->nodelists[nodeid];
-	spin_lock(&l3->list_lock);
 
 	/* Get colour for the slab, and cal the next value. */
 	offset = l3->colour_next;
@@ -2643,18 +2648,15 @@ static int cache_grow(struct kmem_cache
 	check_irq_off();
 	spin_lock(&l3->list_lock);
 
-	/* Make slab active. */
-	list_add_tail(&slabp->list, &(l3->slabs_free));
 	STATS_INC_GROWN(cachep);
 	l3->free_objects += cachep->num;
-	spin_unlock(&l3->list_lock);
-	return 1;
+	return slabp;
 opps1:
 	kmem_freepages(cachep, objp);
 failed:
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
-	return 0;
+	return NULL;
 }
 
 #if DEBUG
@@ -2818,19 +2820,28 @@ static struct slab *get_slab(struct kmem
 	l3->free_touched = 1;
 	slab = list_entry(entry, struct slab, list);
 	list_del(&slab->list);
-	l3->free_objects -= slab->inuse;
+	l3->free_objects-= slab->inuse;
 	return slab;
 }
 
+/* move slabp to correct slabp list: */
+static void put_slab(struct kmem_list3 *l3, struct slab *slabp)
+{
+	if (slabp->free == BUFCTL_END)
+		list_add(&slabp->list, &l3->slabs_full);
+	else
+		list_add(&slabp->list, &l3->slabs_partial);
+}
+
 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 {
 	int batchcount;
 	struct kmem_list3 *l3;
 	struct array_cache *ac;
+	struct slab *slabp;
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
-retry:
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
 		/*
@@ -2843,17 +2854,22 @@ retry:
 	l3 = cachep->nodelists[numa_node_id()];
 
 	BUG_ON(ac->avail > 0 || !l3);
-	spin_lock(&l3->list_lock);
+	spin_lock(&l3->list_lock);
 
 	/* See if we can refill from the shared array */
 	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
 		goto alloc_done;
 
 	while (batchcount > 0) {
-		struct slab *slabp = get_slab(l3);
-
-		if (!slabp)
-			goto alloc_done;
+		slabp = get_slab(l3);
+		if (unlikely(!slabp)) {
+			slabp = cache_grow(cachep, l3, flags, numa_node_id());
+			if (!slabp) {
+				if (ac->avail)
+					goto done_no_lock;
+				return NULL;
+			}
+		}
 		check_slabp(cachep, slabp);
 		check_spinlock_acquired(cachep);
 		while (slabp->inuse < cachep->num && batchcount--) {
@@ -2865,29 +2881,12 @@ retry:
 						    numa_node_id());
 		}
 		check_slabp(cachep, slabp);
-
-		/* move slabp to correct slabp list: */
-		if (slabp->free == BUFCTL_END)
-			list_add(&slabp->list, &l3->slabs_full);
-		else
-			list_add(&slabp->list, &l3->slabs_partial);
+		put_slab(l3, slabp);
 	}
 
 alloc_done:
 	spin_unlock(&l3->list_lock);
-
-	if (unlikely(!ac->avail)) {
-		int x;
-		x = cache_grow(cachep, flags, numa_node_id());
-
-		/* cache_grow can reenable interrupts, then ac could change. */
-		ac = cpu_cache_get(cachep);
-		if (!x && ac->avail == 0)	/* no objects in sight? abort */
-			return NULL;
-
-		if (!ac->avail)		/* objects refilled by interrupt? */
-			goto retry;
-	}
+done_no_lock:
 	ac->touched = 1;
 	return ac->entry[--ac->avail];
 }
@@ -3035,47 +3034,31 @@ static void *__cache_alloc_node(struct k
 	struct slab *slabp;
 	struct kmem_list3 *l3;
 	void *obj;
-	int x;
 
 	l3 = cachep->nodelists[nodeid];
 	BUG_ON(!l3);
 
-retry:
 	check_irq_off();
 	spin_lock(&l3->list_lock);
 	slabp = get_slab(l3);
-	if (!slabp)
-		goto must_grow;
-
+	if (!slabp) {
+		slabp = cache_grow(cachep, l3, flags, nodeid);
+		if (!slabp)
+			return NULL;
+	}
 	check_spinlock_acquired_node(cachep, nodeid);
 	check_slabp(cachep, slabp);
 
 	STATS_INC_NODEALLOCS(cachep);
 	STATS_INC_ACTIVE(cachep);
 	STATS_SET_HIGH(cachep);
 
-	BUG_ON(slabp->inuse == cachep->num);
 
 	obj = slab_get_obj(cachep, slabp, nodeid);
 	check_slabp(cachep, slabp);
-	/* move slabp to correct slabp list: */
-	if (slabp->free == BUFCTL_END)
-		list_add(&slabp->list, &l3->slabs_full);
-	else
-		list_add(&slabp->list, &l3->slabs_partial);
+	put_slab(l3, slabp);
 
 	spin_unlock(&l3->list_lock);
-	goto done;
-
-must_grow:
-	spin_unlock(&l3->list_lock);
-	x = cache_grow(cachep, flags, nodeid);
-
-	if (!x)
-		return NULL;
-
-	goto retry;
-done:
 	return obj;
 }
 #endif
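
For reference, the lock-handoff convention documented in the new cache_grow()
comment is: the caller enters cache_grow() with l3->list_lock held, the lock
is dropped around the page allocation, and the function returns either a
pointer to the new slab with the lock re-taken, or NULL with the lock left
dropped.  The standalone sketch below only models that convention in
userspace C with pthreads; grow_slab(), refill() and struct node_list are
invented names for illustration and are not part of this patch or of
mm/slab.c.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct slab_stub { int objects; };

struct node_list {
	pthread_mutex_t list_lock;
	int free_objects;
};

/* Called with list_lock held; mirrors the patched cache_grow() contract. */
static struct slab_stub *grow_slab(struct node_list *l3, int fail)
{
	/* Drop the lock around the (potentially sleeping) allocation. */
	pthread_mutex_unlock(&l3->list_lock);

	struct slab_stub *slabp = fail ? NULL : malloc(sizeof(*slabp));
	if (!slabp)
		return NULL;		/* failure: lock stays dropped */

	slabp->objects = 16;
	pthread_mutex_lock(&l3->list_lock);	/* success: return with lock held */
	l3->free_objects += slabp->objects;
	return slabp;
}

/* Caller pattern, shaped like the reworked cache_alloc_refill() path. */
static int refill(struct node_list *l3, int fail)
{
	pthread_mutex_lock(&l3->list_lock);

	struct slab_stub *slabp = grow_slab(l3, fail);
	if (!slabp)
		return -1;	/* no unlock here: grow_slab() left the lock dropped */

	printf("grew slab, free_objects=%d\n", l3->free_objects);
	free(slabp);
	pthread_mutex_unlock(&l3->list_lock);
	return 0;
}

int main(void)
{
	struct node_list l3 = { PTHREAD_MUTEX_INITIALIZER, 0 };

	refill(&l3, 0);		/* success path: grow_slab() hands the lock back */
	refill(&l3, 1);		/* failure path: returns with the lock already dropped */
	return 0;
}

Returning the slab pointer with the lock still held is what lets the callers
drop the retry/must_grow gotos: the slab handed back by cache_grow() can be
used immediately under the same lock instead of relooping to search the
lists again.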