Index: linux-2.6.21-rc2/include/linux/slub_def.h
===================================================================
--- linux-2.6.21-rc2.orig/include/linux/slub_def.h	2007-03-01 19:11:08.000000000 -0800
+++ linux-2.6.21-rc2/include/linux/slub_def.h	2007-03-01 19:43:17.000000000 -0800
@@ -27,6 +27,6 @@ struct kmem_cache {
 	int size;		/* Total size of an object */
 	int objects;		/* Number of objects in slab */
 	struct kmem_cache_node local_node;
-	atomic_t refcount;	/* Refcount for destroy */
+	int refcount;		/* Refcount for destroy */
 	void (*ctor)(void *, struct kmem_cache *, unsigned long);
 	void (*dtor)(void *, struct kmem_cache *, unsigned long);
Index: linux-2.6.21-rc2/mm/slub.c
===================================================================
--- linux-2.6.21-rc2.orig/mm/slub.c	2007-03-01 19:07:29.000000000 -0800
+++ linux-2.6.21-rc2/mm/slub.c	2007-03-01 19:45:24.000000000 -0800
@@ -82,12 +82,6 @@
 
 static int kmem_size = sizeof(struct kmem_cache);
 
-/*
- * Forward declarations
- */
-static void register_slab(struct kmem_cache *s);
-static void unregister_slab(struct kmem_cache *s);
-
 #ifdef CONFIG_SMP
 static struct notifier_block slab_notifier;
 #endif
@@ -103,7 +97,7 @@ int slab_is_available(void) {
 }
 
 /* A list of all slab caches on the system */
-static DECLARE_RWSEM(slabstat_sem);
+static DECLARE_RWSEM(slub_lock);
 LIST_HEAD(slab_caches);
 
 /********************************************************************
@@ -1251,7 +1245,7 @@ static unsigned long calculate_alignment
 	return ALIGN(align, sizeof(void *));
 }
 
-void free_kmem_cache_nodes(struct kmem_cache *s)
+static void free_kmem_cache_nodes(struct kmem_cache *s)
 {
 #ifdef CONFIG_NUMA
 	int node;
@@ -1273,7 +1267,7 @@ static void init_kmem_cache_node(struct 
 	INIT_LIST_HEAD(&n->partial);
 }
 
-int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
+static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
 #ifdef CONFIG_NUMA
 	int node;
@@ -1330,7 +1324,7 @@ int init_kmem_cache_nodes(struct kmem_ca
 	return 1;
 }
 
-int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
+static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 		const char *name, size_t size,
 		size_t align, unsigned long flags,
 		void (*ctor)(void *, struct kmem_cache *, unsigned long),
@@ -1417,7 +1411,7 @@ int kmem_cache_open(struct kmem_cache *s
 	if (!s->objects || s->objects > 65535)
 		goto error;
 
-	atomic_set(&s->refcount, 1);
+	s->refcount = 1;
 
 #ifdef CONFIG_SMP
 	mutex_init(&s->flushing);
@@ -1425,7 +1419,6 @@ int kmem_cache_open(struct kmem_cache *s
 	INIT_DELAYED_WORK(&s->flush, flusher);
 #endif
 	if (init_kmem_cache_nodes(s, gfpflags)) {
-		register_slab(s);
 		return 1;
 	}
 error:
@@ -1508,7 +1501,7 @@ static int free_list(struct kmem_cache *
 /*
  * Release all resources used by slab cache
  * (if possible...)
  */
-int kmem_cache_close(struct kmem_cache *s)
+static int kmem_cache_close(struct kmem_cache *s)
 {
 	int node;
@@ -1523,7 +1516,5 @@ int kmem_cache_close(struct kmem_cache *
 		return 1;
 	}
 	free_kmem_cache_nodes(s);
-	unregister_slab(s);
 	return 0;
 }
-EXPORT_SYMBOL(kmem_cache_close);
@@ -1534,10 +1526,13 @@ EXPORT_SYMBOL(kmem_cache_close);
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	if (!atomic_dec_and_test(&s->refcount))
-		return;
-
-	BUG_ON(kmem_cache_close(s));
-	kfree(s);
+	down_write(&slub_lock);
+	s->refcount--;
+	if (s->refcount == 0) {
+		list_del(&s->list);
+		BUG_ON(kmem_cache_close(s));
+		kfree(s);
+	}
+	up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
@@ -1652,10 +1644,12 @@ __setup("slub_debug", setup_slub_debug);
 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 		const char *name, int size, gfp_t flags)
 {
-
+	down_write(&slub_lock);
 	if (!kmem_cache_open(s, flags, name, size,
 			ARCH_KMALLOC_MINALIGN, 0, NULL, NULL))
 		panic("Creation of kmalloc slab %s size=%d failed.\n",
 			name, size);
+	list_add(&s->list, &slab_caches);
+	up_write(&slub_lock);
 	return s;
 }
@@ -1819,16 +1813,17 @@ void __init kmem_cache_init(void)
 
 static struct kmem_cache
 *kmem_cache_dup(struct kmem_cache *s, gfp_t flags, const char *name)
 {
-	atomic_inc(&s->refcount);
-	down_write(&slabstat_sem);
-	if (!s->aliases)
-		s->aliases = kstrdup(name, flags);
-	else {
-		char *x = s->aliases;
-		s->aliases = kasprintf(flags, "%s/%s", s->aliases, name);
-		kfree(x);
-	}
-	up_write(&slabstat_sem);
+	if (s->refcount == 1) {
+		s->refcount++;
+		if (!s->aliases)
+			s->aliases = kstrdup(name, flags);
+		else {
+			char *x = s->aliases;
+			s->aliases = kasprintf(flags, "%s/%s", s->aliases, name);
+			kfree(x);
+		}
+	} else
+		s = NULL;
 	return s;
 }
@@ -1852,7 +1847,6 @@ static struct kmem_cache *find_mergeable
 	align = calculate_alignment(flags, align);
 	size = ALIGN(size, align);
 
-	down_read(&slabstat_sem);
 	list_for_each(h, &slab_caches) {
 		struct kmem_cache *s =
 			container_of(h, struct kmem_cache, list);
@@ -1879,10 +1873,8 @@ static struct kmem_cache *find_mergeable
 		if (s->size - size >= sizeof(void *))
 			continue;
 
-		up_read(&slabstat_sem);
 		return s;
 	}
-	up_read(&slabstat_sem);
 	return NULL;
 }
 
@@ -1893,18 +1885,27 @@ struct kmem_cache *kmem_cache_create(con
 {
 	struct kmem_cache *s;
 
+	down_write(&slub_lock);
 	s = find_mergeable(size, align, flags, dtor, ctor);
 	if (s) {
-		printk(KERN_INFO "SLUB: Merging slab_cache %s size %d"
+		s = kmem_cache_dup(s, GFP_KERNEL, name);
+		if (s) {
+			printk(KERN_INFO "SLUB: Merging slab_cache %s size %d"
 			" with slab_cache %s size %d\n",
 			name, (int)size, s->name, s->size);
-		return kmem_cache_dup(s, GFP_KERNEL, name);
+			goto out;
+		}
 	}
 	s = kmalloc(kmem_size, GFP_KERNEL);
 	if (s && kmem_cache_open(s, GFP_KERNEL, kstrdup(name, GFP_KERNEL),
-			size, align, flags, ctor, dtor))
-		return s;
-	kfree(s);
-	return NULL;
+			size, align, flags, ctor, dtor)) {
+		list_add(&s->list, &slab_caches);
+	} else {
+		kfree(s);
+		s = NULL;
+	}
+out:
+	up_write(&slub_lock);
+	return s;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -1924,20 +1924,6 @@ EXPORT_SYMBOL(kmem_cache_zalloc);
  * Slab proc interface
  *******************************************************************/
 
-static void register_slab(struct kmem_cache *s)
-{
-	down_write(&slabstat_sem);
-	list_add(&s->list, &slab_caches);
-	up_write(&slabstat_sem);
-}
-
-static void unregister_slab(struct kmem_cache *s)
-{
-	down_write(&slabstat_sem);
-	list_del(&s->list);
-	up_write(&slabstat_sem);
-}
-
 static void print_slubinfo_header(struct seq_file *m)
 {
 	/*
@@ -1958,7 +1944,7 @@ static void *s_start(struct seq_file *m,
 	loff_t n = *pos;
 	struct list_head *p;
 
-	down_read(&slabstat_sem);
+	down_read(&slub_lock);
 	if (!n)
 		print_slubinfo_header(m);
 	p = slab_caches.next;
@@ -1980,7 +1966,7 @@ static void *s_next(struct seq_file *m, 
 
 static void s_stop(struct seq_file *m, void *p)
 {
-	up_read(&slabstat_sem);
+	up_read(&slub_lock);
 }
 
 static void display_nodes(struct seq_file *m, unsigned long *nodes)
@@ -2072,14 +2058,14 @@ static void for_all_slabs(void (*func)(s
 {
 	struct list_head *h;
 
-	down_read(&slabstat_sem);
+	down_read(&slub_lock);
 	list_for_each(h, &slab_caches) {
 		struct kmem_cache *s =
 			container_of(h, struct kmem_cache, list);
 
 		func(s, cpu);
 	}
-	up_read(&slabstat_sem);
+	up_read(&slub_lock);
 }
 
 /*