---
 mm/slub.c |  123 ++++++++++++++++++++++++++++++++------------------------------
 1 file changed, 65 insertions(+), 58 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-06-10 15:51:57.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-06-10 16:14:45.000000000 -0700
@@ -227,8 +227,13 @@ static enum {
 } slab_state = DOWN;
 
 /* A list of all slab caches on the system */
-static DECLARE_RWSEM(slub_lock);
 LIST_HEAD(slab_caches);
+/*
+ * Lock protecting
+ * 1. slab_caches list
+ * 2. Dynamic population of dma caches
+ */
+static DECLARE_RWSEM(slub_lock);
 
 /*
  * Tracking user of a slab.
@@ -2112,12 +2117,13 @@ void kmem_cache_destroy(struct kmem_cach
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
+		up_write(&slub_lock);
 		if (kmem_cache_close(s))
 			WARN_ON(1);
 		sysfs_slab_remove(s);
 		kfree(s);
-	}
-	up_write(&slub_lock);
+	} else
+		up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -2167,29 +2173,6 @@ static int __init setup_slub_nomerge(cha
 
 __setup("slub_nomerge", setup_slub_nomerge);
 
-static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
-		const char *name, int size, gfp_t gfp_flags)
-{
-	unsigned int flags = 0;
-
-	if (gfp_flags & SLUB_DMA)
-		flags = SLAB_CACHE_DMA;
-
-	down_write(&slub_lock);
-	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags, NULL))
-		goto panic;
-
-	list_add(&s->list, &slab_caches);
-	up_write(&slub_lock);
-	if (sysfs_slab_add(s))
-		goto panic;
-	return s;
-
-panic:
-	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
-}
-
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
 	int index = kmalloc_index(size);
@@ -2203,18 +2186,17 @@ static struct kmem_cache *get_slab(size_
 #ifdef CONFIG_ZONE_DMA
 	if ((flags & SLUB_DMA)) {
 		struct kmem_cache *s;
-		struct kmem_cache *x;
 		char *text;
-		size_t realsize;
+		size_t realsize = 0;
 
 		s = kmalloc_caches_dma[index];
 		if (s)
 			return s;
 
 		/* Dynamically create dma cache */
-		x = kmalloc(kmem_size, flags & ~SLUB_DMA);
-		if (!x)
-			panic("Unable to allocate memory for dma cache\n");
+		s = kmalloc(kmem_size, flags & ~SLUB_DMA);
+		if (!s)
+			goto panic;
 
 		if (index <= KMALLOC_SHIFT_HIGH)
 			realsize = 1 << index;
@@ -2225,10 +2207,31 @@ static struct kmem_cache *get_slab(size_
 				realsize = 192;
 		}
 
-		text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
-				(unsigned int)realsize);
-		s = create_kmalloc_cache(x, text, realsize, flags);
-		kmalloc_caches_dma[index] = s;
+		text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%td",
+				realsize);
+		if (!text)
+			goto panic;
+
+		if (!kmem_cache_open(s, flags, text, realsize,
+				ARCH_KMALLOC_MINALIGN, SLAB_CACHE_DMA, NULL))
+			goto panic;
+
+		down_write(&slub_lock);
+		if (kmalloc_caches_dma[index]) {
+			/* Concurrent alloc */
+			up_write(&slub_lock);
+			kmem_cache_close(s);
+			kfree(s);
+			kfree(text);
+			return kmalloc_caches_dma[index];
+		} else {
+			kmalloc_caches_dma[index] = s;
+			list_add(&s->list, &slab_caches);
+			up_write(&slub_lock);
+			if (sysfs_slab_add(s))
+panic:
+				panic("Cannot create dma cache for size %td", realsize);
+		}
 		return s;
 	}
 #endif
@@ -2432,6 +2435,14 @@ EXPORT_SYMBOL(krealloc);
 /********************************************************************
  *			Basic setup of slabs
  *******************************************************************/
+static void create_kmalloc_cache(int i, const char *name, int size)
+{
+	if (kmem_cache_open(&kmalloc_caches[i], 0, name, size,
+			ARCH_KMALLOC_MINALIGN, GFP_KERNEL, NULL))
+		return;
+
+	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
+}
 
 void __init kmem_cache_init(void)
 {
@@ -2443,8 +2454,8 @@ void __init kmem_cache_init(void)
 	 * struct kmem_cache_node's. There is special bootstrap code in
 	 * kmem_cache_open for slab_state == DOWN.
	 */
-	create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
-		sizeof(struct kmem_cache_node), GFP_KERNEL);
+	create_kmalloc_cache(0, "kmem_cache_node",
+		sizeof(struct kmem_cache_node));
 	kmalloc_caches[0].refcount = -1;
 #endif
 
@@ -2452,14 +2463,11 @@ void __init kmem_cache_init(void)
 	slab_state = PARTIAL;
 
 	/* Caches that are not of the two-to-the-power-of size */
-	create_kmalloc_cache(&kmalloc_caches[1],
-		"kmalloc-96", 96, GFP_KERNEL);
-	create_kmalloc_cache(&kmalloc_caches[2],
-		"kmalloc-192", 192, GFP_KERNEL);
+	create_kmalloc_cache(1, "kmalloc-96", 96);
+	create_kmalloc_cache(2, "kmalloc-192", 192);
 
 	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
-		create_kmalloc_cache(&kmalloc_caches[i],
-			"kmalloc", 1 << i, GFP_KERNEL);
+		create_kmalloc_cache(i, "kmalloc", 1 << i);
 
 	slab_state = UP;
 
@@ -2564,25 +2572,24 @@ struct kmem_cache *kmem_cache_create(con
 		 */
 		s->objsize = max(s->objsize, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
-		if (sysfs_slab_alias(s, name))
-			goto err;
-	} else {
-		s = kmalloc(kmem_size, GFP_KERNEL);
-		if (s && kmem_cache_open(s, GFP_KERNEL, name,
-				size, align, flags, ctor)) {
-			if (sysfs_slab_add(s)) {
-				kfree(s);
-				goto err;
-			}
-			list_add(&s->list, &slab_caches);
-		} else
-			kfree(s);
+		up_write(&slub_lock);
+		if (!sysfs_slab_alias(s, name))
+			return s;
+		goto err;
 	}
 	up_write(&slub_lock);
-	return s;
+	s = kmalloc(kmem_size, GFP_KERNEL);
+	if (s && kmem_cache_open(s, GFP_KERNEL, name,
+			size, align, flags, ctor)) {
+		down_write(&slub_lock);
+		list_add(&s->list, &slab_caches);
+		up_write(&slub_lock);
+		if (!sysfs_slab_add(s))
+			return s;
+	}
+	kfree(s);
 
 err:
-	up_write(&slub_lock);
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slabcache %s\n", name);
 	else