Index: linux-2.6.18-rc4/mm/slab.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/slab.c	2006-08-08 18:51:37.748889483 -0700
+++ linux-2.6.18-rc4/mm/slab.c	2006-08-08 18:55:04.024225668 -0700
@@ -2568,15 +2568,20 @@ static void slab_map_pages(struct kmem_c
 /*
  * Grow (by 1) the number of slabs within a cache.  This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
+ * cache_grow() is called with the list_lock held; we drop the lock
+ * before calling the page allocator and reacquire it before returning.
+ *
+ * Return the new slab with the list_lock held if successful. Otherwise
+ * return NULL, also with the list_lock reacquired; the caller unlocks.
  */
-static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static struct slab *cache_grow(struct kmem_cache *cachep, struct kmem_list3 *l3,
+			gfp_t flags, int nodeid)
 {
-	struct slab *slabp;
+	struct slab *slabp = NULL;
 	void *objp;
 	size_t offset;
 	gfp_t local_flags;
 	unsigned long ctor_flags;
-	struct kmem_list3 *l3;
 
 	/*
 	 * Be lazy and only check for valid flags here,  keeping it out of the
@@ -2584,7 +2589,7 @@
 	 */
 	BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
 	if (flags & SLAB_NO_GROW)
-		return 0;
+		return NULL;
 
 	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 	local_flags = (flags & SLAB_LEVEL_MASK);
@@ -2597,8 +2602,6 @@
 
 	/* Take the l3 list lock to change the colour_next on this node */
 	check_irq_off();
-	l3 = cachep->nodelists[nodeid];
-	spin_lock(&l3->list_lock);
 
 	/* Get colour for the slab, and cal the next value. */
 	offset = l3->colour_next;
@@ -2626,34 +2629,26 @@
 	 */
 	objp = kmem_getpages(cachep, flags, nodeid);
 	if (!objp)
-		goto failed;
+		goto out;
 
 	/* Get slab management. */
 	slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
-	if (!slabp)
-		goto opps1;
-
+	if (!slabp) {
+		kmem_freepages(cachep, objp);
+		goto out;
+	}
 	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
 	cache_init_objs(cachep, slabp, ctor_flags);
+	STATS_INC_GROWN(cachep);
 
+out:
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
 	check_irq_off();
 	spin_lock(&l3->list_lock);
-
-	/* Make slab active. */
-	list_add_tail(&slabp->list, &(l3->slabs_free));
-	STATS_INC_GROWN(cachep);
-	spin_unlock(&l3->list_lock);
-	return 1;
-opps1:
-	kmem_freepages(cachep, objp);
-failed:
-	if (local_flags & __GFP_WAIT)
-		local_irq_disable();
-	return 0;
+	return slabp;
 }
 
 #if DEBUG
@@ -2839,7 +2834,6 @@ static void *cache_alloc_refill(struct k
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
-retry:
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
 		/*
@@ -2861,8 +2855,17 @@ retry:
 	while (batchcount > 0) {
 		struct slab *slabp = get_slab(l3);
 
-		if (!slabp)
-			goto alloc_done;
+		if (!slabp) {
+			slabp = cache_grow(cachep, l3, flags, numa_node_id());
+			ac = cpu_cache_get(cachep);
+			if (!slabp) {
+				if (ac->avail)
+					goto alloc_done;
+				spin_unlock(&l3->list_lock);
+				return NULL;
+			}
+		}
+
 		check_slabp(cachep, slabp);
 		check_spinlock_acquired(cachep);
 		while (slabp->inuse < cachep->num && batchcount--) {
@@ -2876,22 +2879,8 @@ retry:
 		check_slabp(cachep, slabp);
 		put_slab(l3, slabp);
 	}
-
 alloc_done:
 	spin_unlock(&l3->list_lock);
-
-	if (unlikely(!ac->avail)) {
-		int x;
-		x = cache_grow(cachep, flags, numa_node_id());
-
-		/* cache_grow can reenable interrupts, then ac could change. */
-		ac = cpu_cache_get(cachep);
-		if (!x && ac->avail == 0)	/* no objects in sight? abort */
-			return NULL;
-
-		if (!ac->avail)		/* objects refilled by interrupt? */
-			goto retry;
-	}
 	ac->touched = 1;
 	return ac->entry[--ac->avail];
 }
@@ -3039,18 +3028,20 @@ static void *__cache_alloc_node(struct k
 	struct slab *slabp;
 	struct kmem_list3 *l3;
 	void *obj;
-	int x;
 
 	l3 = cachep->nodelists[nodeid];
 	BUG_ON(!l3);
 
-retry:
 	check_irq_off();
 	spin_lock(&l3->list_lock);
 	slabp = get_slab(l3);
-	if (!slabp)
-		goto must_grow;
-
+	if (!slabp) {
+		slabp = cache_grow(cachep, l3, flags, nodeid);
+		if (!slabp) {
+			spin_unlock(&l3->list_lock);
+			return NULL;
+		}
+	}
	check_spinlock_acquired_node(cachep, nodeid);
 	check_slabp(cachep, slabp);
 
@@ -3064,17 +3055,6 @@ retry:
 	check_slabp(cachep, slabp);
 	put_slab(l3, slabp);
 	spin_unlock(&l3->list_lock);
-	goto done;
-
-must_grow:
-	spin_unlock(&l3->list_lock);
-	x = cache_grow(cachep, flags, nodeid);
-
-	if (!x)
-		return NULL;
-
-	goto retry;
-done:
 	return obj;
 }
 #endif
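
The change is easiest to read as a locking contract: cache_grow() is now entered
with l3->list_lock held, drops the lock across the page allocation (which may
sleep), reacquires it, and returns the new slab (or NULL) with the lock still
held, so the caller owns the single unlock. Below is a minimal standalone sketch
of that contract in userspace C with pthreads; every name in it (struct node,
list_lock, freelist_grow, freelist_get) is made up for illustration and none of
it is kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static struct node *free_head;		/* protected by list_lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with list_lock held; returns with list_lock held. */
static struct node *freelist_grow(void)
{
	struct node *n;

	pthread_mutex_unlock(&list_lock);	/* drop: allocation may block */
	n = malloc(sizeof(*n));			/* stands in for kmem_getpages() */
	pthread_mutex_lock(&list_lock);		/* reacquire before returning */
	return n;	/* NULL on failure; lock is held either way */
}

static struct node *freelist_get(void)
{
	struct node *n;

	pthread_mutex_lock(&list_lock);
	n = free_head;
	if (n)
		free_head = n->next;	/* fast path: pop an existing node */
	else
		n = freelist_grow();	/* slow path: lock dropped and retaken inside */
	pthread_mutex_unlock(&list_lock);	/* caller owns the single unlock */
	return n;
}

int main(void)
{
	struct node *n = freelist_get();

	printf("got %p\n", (void *)n);
	free(n);
	return 0;
}

As in the patch, the grow path hands the fresh item straight back to the caller
instead of parking it on the free list first (the old
list_add_tail(&slabp->list, &(l3->slabs_free)) is gone). And because the lock is
dropped in the middle, the real code must assume the world changed while it
slept: that is why cache_alloc_refill() re-fetches ac = cpu_cache_get(cachep)
after cache_grow() and checks ac->avail before giving up.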