---
 include/linux/slub_def.h |   18 ++++-
 mm/slub.c                |  147 ++++++++++++++++++++++++-----------------------
 2 files changed, 91 insertions(+), 74 deletions(-)

Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h	2007-06-02 16:31:19.000000000 -0700
+++ slub/include/linux/slub_def.h	2007-06-02 20:28:43.000000000 -0700
@@ -59,13 +59,15 @@ struct kmem_cache {
  */
 #define KMALLOC_SHIFT_LOW 3

+#define KMALLOC_SHIFT_STATIC 4
+
 #define KMALLOC_MIN_SIZE (1UL << KMALLOC_SHIFT_LOW)

 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];

 /*
  * Determine the kmalloc array index given the object size.
@@ -120,7 +122,7 @@ static inline struct kmem_cache *kmalloc
 	 * If this triggers then the amount of memory requested was too large.
 	 */
 	BUG_ON(index < 0);
-	return &kmalloc_caches[index];
+	return kmalloc_caches[index];
 }

 #ifdef CONFIG_ZONE_DMA
@@ -144,7 +146,9 @@ static inline struct kmem_cache *kmalloc

 static inline void *kmalloc(size_t size, gfp_t flags)
 {
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
+	if (__builtin_constant_p(size) &&
+			size <= (1 << KMALLOC_SHIFT_STATIC) &&
+			!(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);

 		if (!s)
@@ -157,7 +161,9 @@ static inline void *kmalloc(size_t size,

 static inline void *kzalloc(size_t size, gfp_t flags)
 {
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
+	if (__builtin_constant_p(size) &&
+			size <= (1 << KMALLOC_SHIFT_STATIC) &&
+			!(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);

 		if (!s)
@@ -173,7 +179,9 @@ extern void *__kmalloc_node(size_t size,

 static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
+	if (__builtin_constant_p(size) &&
+			size <= (1 << KMALLOC_SHIFT_STATIC) &&
+			!(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);

 		if (!s)
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-06-02 16:32:35.000000000 -0700
+++ slub/mm/slub.c	2007-06-02 20:47:57.000000000 -0700
@@ -220,7 +220,7 @@ static inline void ClearSlabDebug(struct
 #define cache_line_size() L1_CACHE_BYTES
 #endif

-static int kmem_size = sizeof(struct kmem_cache);
+static struct kmem_cache kmem_cache;

 #ifdef CONFIG_SMP
 static struct notifier_block slab_notifier;
@@ -2062,7 +2062,6 @@ static int kmem_cache_open(struct kmem_c
 		size_t align, unsigned long flags,
 		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
-	memset(s, 0, kmem_size);
 	s->name = name;
 	s->ctor = ctor;
 	s->objsize = size;
@@ -2087,7 +2086,6 @@ error:
 			s->offset, flags);
 	return 0;
 }
-EXPORT_SYMBOL(kmem_cache_open);

 /*
  * Check if a given pointer is valid
@@ -2183,12 +2181,13 @@ void kmem_cache_destroy(struct kmem_cach
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
+		up_write(&slub_lock);
 		if (kmem_cache_close(s))
 			WARN_ON(1);
 		sysfs_slab_remove(s);
-		kfree(s);
-	}
-	up_write(&slub_lock);
+		kmem_cache_free(&kmem_cache, s);
+	} else
+		up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
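
[Aside for reviewers: the effect of the new size <= (1 << KMALLOC_SHIFT_STATIC)
gate in the inline kmalloc() above can be modeled in user space. The sketch
below is not kernel code; model_kmalloc(), model_index(), table_alloc() and
slow_alloc() are hypothetical stand-ins for kmalloc(), kmalloc_index(), the
static kmalloc_slab() lookup and __kmalloc(). It only illustrates how a
compile-time constant size at or below 16 bytes folds into a direct table
lookup while every other request takes the slow path.]

/* Minimal user-space model of the constant-folded kmalloc() fast path. */
#include <stdio.h>
#include <stdlib.h>

#define KMALLOC_SHIFT_STATIC	4	/* sizes up to 16 are statically seeded */

static void *table_alloc(int index)
{
	printf("fast path, index %d\n", index);
	return malloc((size_t)1 << index);
}

static void *slow_alloc(size_t size)
{
	printf("slow path, size %zu\n", size);
	return malloc(size);
}

static inline int model_index(size_t size)
{
	if (size <= 8)
		return 3;
	if (size <= 16)
		return 4;
	return -1;	/* beyond the statically seeded caches */
}

static inline void *model_kmalloc(size_t size)
{
	/* Same shape as the patched inline: constant AND small enough */
	if (__builtin_constant_p(size) &&
			size <= (1 << KMALLOC_SHIFT_STATIC)) {
		int index = model_index(size);

		if (index >= 0)
			return table_alloc(index);
	}
	return slow_alloc(size);
}

int main(void)
{
	void *a = model_kmalloc(16);	/* constant, <= 16: fast path */
	void *b = model_kmalloc(64);	/* constant but too large: slow path */

	free(a);
	free(b);
	return 0;
}
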
@@ -2196,7 +2195,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/

-struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
+struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 EXPORT_SYMBOL(kmalloc_caches);

 #ifdef CONFIG_ZONE_DMA
@@ -2238,14 +2237,19 @@ static int __init setup_slub_nomerge(cha

 __setup("slub_nomerge", setup_slub_nomerge);

-static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
+static struct kmem_cache *create_kmalloc_cache(struct kmem_cache **sp,
 		const char *name, int size, gfp_t gfp_flags)
 {
 	unsigned int flags = 0;
+	struct kmem_cache *s;

 	if (gfp_flags & SLUB_DMA)
 		flags = SLAB_CACHE_DMA;

+	*sp = s = kmem_cache_zalloc(&kmem_cache, gfp_flags);
+	if (!s)
+		goto panic;
+
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
 			flags, NULL))
@@ -2264,6 +2268,10 @@ panic:
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
 	int index = kmalloc_index(size);
+	struct kmem_cache **sp;
+	struct kmem_cache *s;
+	char *text;
+	size_t realsize;

 	if (!index)
 		return NULL;
@@ -2271,32 +2279,32 @@ static struct kmem_cache *get_slab(size_

 	/* Allocation too large? */
 	BUG_ON(index < 0);

-#ifdef CONFIG_ZONE_DMA
-	if ((flags & SLUB_DMA)) {
-		struct kmem_cache *s;
-		struct kmem_cache *x;
-		char *text;
-		size_t realsize;
-
-		s = kmalloc_caches_dma[index];
-		if (s)
-			return s;
+	if (unlikely(flags & SLUB_DMA))
+		sp = kmalloc_caches_dma;
+	else
+		sp = kmalloc_caches;

-		/* Dynamically create dma cache */
-		x = kmalloc(kmem_size, flags & ~SLUB_DMA);
-		if (!x)
-			panic("Unable to allocate memory for dma cache\n");
-
-		realsize = kmalloc_caches[index].objsize;
-
-		text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
-				(unsigned int)realsize);
-		s = create_kmalloc_cache(x, text, realsize, flags);
-		kmalloc_caches_dma[index] = s;
+	sp += index;
+	s = *sp;
+	if (likely(s))
 		return s;
-	}
-#endif
-	return &kmalloc_caches[index];
+
+	if (index >= KMALLOC_SHIFT_LOW)
+		realsize = 1 << index;
+	else if (index == 2)
+		realsize = 192;
+	else
+		realsize = 96;
+
+	/*
+	 * The kmalloc slab backing the name string allocated here cannot
+	 * itself be created on demand, so the kmalloc slab of size 16
+	 * must be set up manually at boot.
+	 */
+	text = kasprintf(flags & ~SLUB_DMA,
+		(flags & SLUB_DMA) ? "kmalloc_dma-%d" : "kmalloc-%d",
+			(unsigned int)realsize);
+	return create_kmalloc_cache(sp, text, realsize, flags);
 }

 void *__kmalloc(size_t size, gfp_t flags)
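
[Aside for reviewers: the on-demand creation in get_slab() above boils down
to "array of NULL pointers, fill a slot on first use". The user-space sketch
below is not kernel code and ignores locking and the DMA table; get_cache(),
create_cache() and struct cache are hypothetical stand-ins. It shows the
index-to-realsize mapping (96 and 192 for indices 1 and 2, powers of two
otherwise) and that a slot is created exactly once.]

/* User-space sketch of lazy per-size-class cache creation. */
#include <stdio.h>
#include <stdlib.h>

#define SHIFT_LOW	3
#define SHIFT_HIGH	11

struct cache {
	char name[32];
	size_t objsize;
};

static struct cache *caches[SHIFT_HIGH + 1];	/* all NULL at start */

static struct cache *create_cache(struct cache **sp, const char *name,
					size_t size)
{
	struct cache *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	snprintf(s->name, sizeof(s->name), "%s", name);
	s->objsize = size;
	*sp = s;	/* publish through the caller's table slot */
	return s;
}

static struct cache *get_cache(int index)
{
	struct cache **sp = &caches[index];
	size_t realsize;
	char name[32];

	if (*sp)	/* common case: size class already exists */
		return *sp;

	/* Same realsize mapping as get_slab() in the patch */
	if (index >= SHIFT_LOW)
		realsize = (size_t)1 << index;
	else if (index == 2)
		realsize = 192;
	else
		realsize = 96;

	snprintf(name, sizeof(name), "kmalloc-%zu", realsize);
	return create_cache(sp, name, realsize);
}

int main(void)
{
	struct cache *s = get_cache(7);

	printf("%s: %zu bytes\n", s->name, s->objsize);	/* kmalloc-128 */
	printf("created twice? %s\n", get_cache(7) == s ? "no" : "yes");
	return 0;
}
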
@@ -2495,7 +2504,9 @@ EXPORT_SYMBOL(krealloc);

 void __init kmem_cache_init(void)
 {
-	int i;
+	int kmem_size = offsetof(struct kmem_cache, cpu_slab) +
+			nr_cpu_ids * sizeof(struct page *);

 	if (!page_group_by_mobility_disabled && !user_override) {
 		/*
@@ -2512,38 +2523,35 @@ void __init kmem_cache_init(void)
 	 * struct kmem_cache_node's. There is special bootstrap code in
 	 * kmem_cache_open for slab_state == DOWN.
 	 */
-	create_kmalloc_cache(&node_cache, "kmem_cache_node",
-		sizeof(struct kmem_cache_node), GFP_KERNEL);
+	BUG_ON(!kmem_cache_open(&node_cache, GFP_KERNEL, "kmem_cache_node",
+		sizeof(struct kmem_cache_node), ARCH_KMALLOC_MINALIGN,
+		0, NULL));
 	node_cache.refcount = -1;
+	list_add(&node_cache.list, &slab_caches);
 #endif

-	/* Able to allocate the per node structures */
-	slab_state = PARTIAL;
+	BUG_ON(!kmem_cache_open(&kmem_cache, GFP_KERNEL, "kmem_cache",
+			kmem_size, ARCH_KMALLOC_MINALIGN,
+			0, NULL));
+	kmem_cache.refcount = -1;
+	list_add(&kmem_cache.list, &slab_caches);
+
+	slab_state = PARTIAL;	/* Able to create slabs */
+
+	/*
+	 * Seed the first kmalloc slabs. Size 16 must be functional so
+	 * that the kasprintf() in get_slab() works. This must stay in
+	 * sync with KMALLOC_SHIFT_STATIC.
+	 */
+	create_kmalloc_cache(&kmalloc_caches[3], "kmalloc-8", 8, GFP_KERNEL);
+	create_kmalloc_cache(&kmalloc_caches[4], "kmalloc-16", 16, GFP_KERNEL);

-	/* Caches that are not of the two-to-the-power-of size */
-	create_kmalloc_cache(&kmalloc_caches[1],
-		"kmalloc-96", 96, GFP_KERNEL);
-	create_kmalloc_cache(&kmalloc_caches[2],
-		"kmalloc-192", 192, GFP_KERNEL);
-
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
-		create_kmalloc_cache(&kmalloc_caches[i],
-			"kmalloc", 1 << i, GFP_KERNEL);
-
-	slab_state = UP;
-
-	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
-		kmalloc_caches[i].name =
-			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
+	slab_state = UP;	/* Able to use the kmalloc array */

 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
 #endif

-	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
-				nr_cpu_ids * sizeof(struct page *);
-
 	devel_printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, "
 		"MinObjects=%d, Processors=%d, Nodes=%d\n",
 		KMALLOC_SHIFT_HIGH, cache_line_size(),
@@ -2633,26 +2641,30 @@ struct kmem_cache *kmem_cache_create(con
 	 */
 		s->objsize = max(s->objsize, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
+		up_write(&slub_lock);
 		if (sysfs_slab_alias(s, name))
 			goto err;
-	} else {
-		s = kmalloc(kmem_size, GFP_KERNEL);
-		if (s && kmem_cache_open(s, GFP_KERNEL, name,
-				size, align, flags, ctor)) {
-			if (sysfs_slab_add(s)) {
-				kfree(s);
-				goto err;
-			}
+		else
+			return s;
+	}
+
+	s = kmem_cache_zalloc(&kmem_cache, GFP_KERNEL);
+	if (s) {
+		if (kmem_cache_open(s, GFP_KERNEL, name, size, align,
+				flags, ctor)) {
 			list_add(&s->list, &slab_caches);
+			up_write(&slub_lock);
 			raise_kswapd_order(s->order);
-		} else
-			kfree(s);
+			if (!sysfs_slab_add(s))
+				return s;
+			/* Retake the lock and unlink before freeing */
+			down_write(&slub_lock);
+			list_del(&s->list);
+		}
+		kmem_cache_free(&kmem_cache, s);
 	}
 	up_write(&slub_lock);
-	return s;
-
 err:
-	up_write(&slub_lock);
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slabcache %s\n", name);
 	else
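
[Aside for reviewers: the kmem_cache_init() changes above hinge on a
bootstrap ordering: a statically allocated "cache of caches" descriptor
exists before any allocator works, so every later descriptor can come from
it, and the size-16 kmalloc class is seeded by hand because the name strings
for on-demand caches are themselves small allocations. The sketch below is
not kernel code; cache_cache, create_cache() and the literal-named seed
cache are hypothetical stand-ins. It only illustrates that ordering.]

/* User-space sketch of the cache-of-caches bootstrap ordering. */
#include <stdio.h>
#include <stdlib.h>

struct cache {
	const char *name;
	size_t objsize;
};

/* Statically allocated: usable before any dynamic allocation works */
static struct cache cache_cache = {
	.name = "kmem_cache",
	.objsize = sizeof(struct cache),
};

static struct cache *create_cache(const char *name, size_t size)
{
	/* Every other descriptor is sized by the static cache_cache */
	struct cache *s = calloc(1, cache_cache.objsize);

	if (!s)
		return NULL;
	s->name = name;
	s->objsize = size;
	return s;
}

int main(void)
{
	/* Step 1: seed the small class by hand; its name is a string
	 * literal, so no allocation is needed yet. */
	struct cache *seed = create_cache("kmalloc-16", 16);

	/* Step 2: from here on, caches created on demand could allocate
	 * their kasprintf()-style name strings from the seeded class. */
	struct cache *later = create_cache("kmalloc-128", 128);

	printf("%s then %s\n", seed->name, later->name);
	free(seed);
	free(later);
	return 0;
}
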