---
 include/linux/slub_def.h |   41 ++++++++++++++++-----------------
 mm/slub.c                |   58 +++++++++++++++++++++--------------------
 2 files changed, 46 insertions(+), 53 deletions(-)

Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h	2007-06-02 20:28:43.000000000 -0700
+++ slub/include/linux/slub_def.h	2007-06-03 14:36:07.000000000 -0700
@@ -57,17 +57,25 @@ struct kmem_cache {
 /*
  * Kmalloc subsystem.
  */
-#define KMALLOC_SHIFT_LOW 3
-
-#define KMALLOC_SHIFT_STATIC 4
-
-#define KMALLOC_MIN_SIZE (1UL << KMALLOC_SHIFT_LOW)
+#define KMALLOC_UNIT_SHIFT 4
+#define KMALLOC_UNIT (1UL << KMALLOC_UNIT_SHIFT)
+#define KMALLOC_SHIFT_STATIC 8
+
+#define KMALLOC_P2_SHIFT 11
+#define KMALLOC_P2_CACHES (KMALLOC_SHIFT_HIGH - KMALLOC_P2_SHIFT + 1)
+#define KMALLOC_LINEAR_CACHES (1 << (KMALLOC_P2_SHIFT - KMALLOC_UNIT_SHIFT))
 
 /*
  * We keep the general caches in an array of slab caches that are used for
- * 2^x bytes of allocations.
+ * kmalloc allocations.
+ * Indices 0 ... KMALLOC_LINEAR_CACHES - 1 are used for slabs stepped in
+ * KMALLOC_UNIT sized increments up to 2^KMALLOC_P2_SHIFT bytes.
+ * The remaining KMALLOC_P2_CACHES indices are used for power of two
+ * sized slabs larger than 2^KMALLOC_P2_SHIFT bytes.
  */
-extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmem_cache *kmalloc_caches[KMALLOC_LINEAR_CACHES +
+						KMALLOC_P2_CACHES];
 
 /*
  * Determine the kmalloc array index given the object size.
@@ -76,7 +84,6 @@ extern struct kmem_cache *kmalloc_caches
  */
 static inline int kmalloc_index(size_t size)
 {
-
 	/*
 	 * The behavior for zero sized allocs changes. We no longer
 	 * allocate memory but return ZERO_SIZE_PTR.
@@ -85,24 +92,16 @@ static inline int kmalloc_index(size_t s
 	DEVEL_WARN_ON_ONCE(size == 0);
 
 	if (!size)
-		return 0;
+		return -2;
 
 	if (size > KMALLOC_MAX_SIZE)
 		return -1;
 
-	if (size <= KMALLOC_MIN_SIZE)
-		return KMALLOC_SHIFT_LOW;
-
-	/*
-	 * We map the non power of two slabs to the unused
-	 * log2 values in the kmalloc array.
-	 */
-	if (size > 64 && size <= 96)
-		return 1;
-	if (size > 128 && size <= 192)
-		return 2;
+	if (size > (1UL << KMALLOC_P2_SHIFT))
+		return ilog2(size - 1) - KMALLOC_P2_SHIFT +
+						KMALLOC_LINEAR_CACHES;
 
-	return ilog2(size - 1) + 1;
+	return (size - 1) >> KMALLOC_UNIT_SHIFT;
 }
 
 /*
@@ -115,7 +114,7 @@ static inline struct kmem_cache *kmalloc
 {
 	int index = kmalloc_index(size);
 
-	if (index == 0)
+	if (index == -2)
 		return NULL;
 
 	/*
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-06-02 20:47:57.000000000 -0700
+++ slub/mm/slub.c	2007-06-03 14:36:41.000000000 -0700
@@ -2195,11 +2195,12 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 *		Kmalloc subsystem
 *******************************************************************/
 
-struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+struct kmem_cache *kmalloc_caches[KMALLOC_LINEAR_CACHES + KMALLOC_P2_CACHES];
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1];
+static struct kmem_cache *kmalloc_caches_dma[KMALLOC_LINEAR_CACHES +
+						KMALLOC_P2_CACHES];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2240,29 +2241,15 @@ __setup("slub_nomerge", setup_slub_nomer
 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache **sp,
 		const char *name, int size, gfp_t gfp_flags)
 {
-	unsigned int flags = 0;
+	unsigned int flags = SLAB_PANIC;
 	struct kmem_cache *s;
 
 	if (gfp_flags & SLUB_DMA)
 		flags = SLAB_CACHE_DMA;
 
-	*sp = s = kmem_cache_zalloc(&kmem_cache, gfp_flags);
-	if (!s)
-		goto panic;
-
-	down_write(&slub_lock);
-	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags, NULL))
-		goto panic;
-
-	list_add(&s->list, &slab_caches);
-	up_write(&slub_lock);
-	if (sysfs_slab_add(s))
-		goto panic;
+	*sp = s = kmem_cache_create(name, size, ARCH_KMALLOC_MINALIGN, flags,
+								NULL, NULL);
 	return s;
-
-panic:
-	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
 }
 
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
@@ -2273,7 +2260,8 @@ static struct kmem_cache *get_slab(size_
 	char *text;
 	size_t realsize;
 
-	if (!index)
+	/* zero sized alloc */
+	if (index == -2)
 		return NULL;
 
 	/* Allocation too large? */
@@ -2290,12 +2278,12 @@ static struct kmem_cache *get_slab(size_
 		return s;
 
-	if (index >= KMALLOC_SHIFT_LOW)
-		realsize = 1 << index;
-	else if (index == 2)
-		realsize = 192;
-	else
-		realsize = 96;
+	if (index >= KMALLOC_LINEAR_CACHES) {
+		realsize = 1 << (index - KMALLOC_LINEAR_CACHES +
+						KMALLOC_P2_SHIFT + 1);
+		printk(KERN_ERR "index=%d realsize=%zu\n", index, realsize);
+	} else
+		realsize = (index + 1) << KMALLOC_UNIT_SHIFT;
 
 	/*
 	 * The kmalloc slabs for this string allocation cannot be created
@@ -2303,8 +2291,8 @@ static struct kmem_cache *get_slab(size_
 	 * to be setup manually.
 	 */
 	text = kasprintf(flags & ~SLUB_DMA,
-		(flags & SLUB_DMA) ? "kmalloc_dma-%d" : "kmalloc-%d",
-		(unsigned int)realsize);
+		(flags & SLUB_DMA) ? "kmalloc_dma-%05zu" : "kmalloc-%05zu",
+		realsize);
 
 	return create_kmalloc_cache(sp, text, realsize, flags);
 }
@@ -2506,6 +2494,7 @@ void __init kmem_cache_init(void)
 {
 	int kmem_size = offsetof(struct kmem_cache, cpu_slab) +
 				nr_cpu_ids * sizeof(struct page *);
+	int i;
 
 	if (!page_group_by_mobility_disabled &&
 			!user_override) {
@@ -2540,12 +2529,17 @@ void __init kmem_cache_init(void)
 
 	/*
 	 * Seed the first kmalloc slabs. We need size 16 to be functional
-	 * so that the kasprintf in kmalloc_slab() works. This needs to
-	 * sync with KMALLOC_SHIFT_STATIC.
+	 * so that the kasprintf works.
 	 */
-	create_kmalloc_cache(&kmalloc_caches[3], "kmalloc-8", 8, GFP_KERNEL);
-	create_kmalloc_cache(&kmalloc_caches[4], "kmalloc-16", 16, GFP_KERNEL);
+	create_kmalloc_cache(&kmalloc_caches[0], "kmalloc-00016", 16, GFP_KERNEL);
+	for (i = 2 * KMALLOC_UNIT; i <= (1 << KMALLOC_SHIFT_STATIC); i += KMALLOC_UNIT) {
+		char *x = kasprintf(GFP_KERNEL, "kmalloc-%05d", i);
+
+		create_kmalloc_cache(
+			&kmalloc_caches[(i - 1) >> KMALLOC_UNIT_SHIFT],
+			x, i, GFP_KERNEL);
+	}
 
 	slab_state = UP; /* Able to use kmalloc array */
 
 #ifdef CONFIG_SMP
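
For illustration only (not part of the patch): a stand-alone user-space sketch of the size-to-index mapping the patch introduces. The constants mirror the patch (KMALLOC_UNIT_SHIFT = 4, KMALLOC_P2_SHIFT = 11); ilog2(), sketch_index() and sketch_size() are local stand-ins for the kernel's ilog2(), kmalloc_index() and the realsize computation in get_slab(), and the KMALLOC_MAX_SIZE and zero-size checks are omitted.

/*
 * Sketch of the kmalloc index mapping introduced above, runnable in
 * user space. Constants copied from the patch; limit checks left out.
 */
#include <stdio.h>
#include <stddef.h>

#define KMALLOC_UNIT_SHIFT	4
#define KMALLOC_P2_SHIFT	11
#define KMALLOC_LINEAR_CACHES	(1 << (KMALLOC_P2_SHIFT - KMALLOC_UNIT_SHIFT))

/* ilog2() stand-in for this sketch: index of the highest set bit. */
static int ilog2(size_t n)
{
	int bit = -1;

	while (n) {
		n >>= 1;
		bit++;
	}
	return bit;
}

/* Same arithmetic as kmalloc_index() in the patch, minus the limit checks. */
static int sketch_index(size_t size)
{
	if (size > (1UL << KMALLOC_P2_SHIFT))
		return ilog2(size - 1) - KMALLOC_P2_SHIFT + KMALLOC_LINEAR_CACHES;
	return (size - 1) >> KMALLOC_UNIT_SHIFT;
}

/* Object size served by a given index, as computed in get_slab(). */
static size_t sketch_size(int index)
{
	if (index >= KMALLOC_LINEAR_CACHES)
		return 1UL << (index - KMALLOC_LINEAR_CACHES + KMALLOC_P2_SHIFT + 1);
	return (size_t)(index + 1) << KMALLOC_UNIT_SHIFT;
}

int main(void)
{
	size_t sizes[] = { 1, 16, 17, 96, 192, 2048, 2049, 4096, 8192 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int idx = sketch_index(sizes[i]);

		printf("request %5zu -> index %3d -> slab size %5zu\n",
		       sizes[i], idx, sketch_size(idx));
	}
	return 0;
}

With these constants a 96 byte request lands in a dedicated 96 byte slab at index 5 instead of the old special-cased index 1, and requests above 2048 bytes fall back to power of two slabs starting at index KMALLOC_LINEAR_CACHES (128).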