Index: linux-2.6.21-rc2-mm1/mm/slub.c
===================================================================
--- linux-2.6.21-rc2-mm1.orig/mm/slub.c	2007-03-06 18:57:59.000000000 -0800
+++ linux-2.6.21-rc2-mm1/mm/slub.c	2007-03-06 21:40:20.000000000 -0800
@@ -1720,12 +1720,12 @@ static struct kmem_cache *get_slab(size_
 			panic("Unable to allocate memory for dma cache\n");
 
 #ifdef KMALLOC_EXTRA
-		if (index <= KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW)
+		if (index < PAGE_SHIFT - KMALLOC_SHIFT_LOW)
 #endif
 			realsize = 1 << (index + KMALLOC_SHIFT_LOW);
 #ifdef KMALLOC_EXTRA
 		else {
-			index -= KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW +1;
+			index -= PAGE_SHIFT - KMALLOC_SHIFT_LOW;
 			if (!index)
 				realsize = 96;
 			else
@@ -1860,7 +1860,7 @@ void __init kmem_cache_init(void)
 	kmem_cache_node_cache =
 		kmalloc_index(sizeof(struct kmem_cache_node));
 	BUG_ON(kmem_cache_node_cache < 0 ||
-		kmem_cache_node_cache > KMALLOC_SHIFT_HIGH);
+		kmem_cache_node_cache >= PAGE_SHIFT);
 
 	/*
 	 * Must first have the slab cache available for the allocations of the
@@ -1876,7 +1876,7 @@ void __init kmem_cache_init(void)
 	/* Now we are able to allocate the per node structures */
 	slab_state = PARTIAL;
 
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
 		if (i == kmem_cache_node_cache)
 			continue;
 
@@ -1888,16 +1888,16 @@ void __init kmem_cache_init(void)
 #ifdef KMALLOC_EXTRA
 	/* Caches that are not of the two-to-the-power-of size */
 	create_kmalloc_cache(&kmalloc_caches
-		[KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW + 1],
+		[PAGE_SHIFT - KMALLOC_SHIFT_LOW],
 		"kmalloc-96", 96, GFP_KERNEL);
 	create_kmalloc_cache(&kmalloc_caches
-		[KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW + 2],
+		[PAGE_SHIFT - KMALLOC_SHIFT_LOW + 1],
 		"kmalloc-192", 192, GFP_KERNEL);
 #endif
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
 		char *name = kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
 		BUG_ON(!name);
@@ -1912,20 +1912,21 @@ void __init kmem_cache_init(void)
 			+ nr_cpu_ids * sizeof(struct page *);
 
 	printk(KERN_INFO "SLUB V4: General Slabs=%d, HW alignment=%d, Processors=%d, Nodes=%d\n",
-		KMALLOC_SHIFT_HIGH + KMALLOC_EXTRAS + 1 - KMALLOC_SHIFT_LOW,
+		PAGE_SHIFT + KMALLOC_EXTRAS - KMALLOC_SHIFT_LOW,
 		L1_CACHE_BYTES, nr_cpu_ids, nr_node_ids);
 }
 
 static struct kmem_cache *kmem_cache_dup(struct kmem_cache *s,
 		gfp_t flags, const char *name)
 {
-	if (s->refcount == 1) {
+	if (s->refcount >= 1) {
 		s->refcount++;
 		if (!s->aliases)
 			s->aliases = kstrdup(name, flags);
 		else {
 			char *x = s->aliases;
-			s->aliases = kasprintf(flags, "%s/%s", s->aliases, name);
+
+			s->aliases = kasprintf(flags, "%s/%s", x, name);
 			kfree(x);
 		}
 	} else
Index: linux-2.6.21-rc2-mm1/include/linux/slub_def.h
===================================================================
--- linux-2.6.21-rc2-mm1.orig/include/linux/slub_def.h	2007-03-06 20:00:08.000000000 -0800
+++ linux-2.6.21-rc2-mm1/include/linux/slub_def.h	2007-03-06 21:41:52.000000000 -0800
@@ -55,8 +55,6 @@ struct kmem_cache {
  */
 #define KMALLOC_SHIFT_LOW 3
 
-#define KMALLOC_SHIFT_HIGH 11
-
 #if L1_CACHE_BYTES <= 64
 #define KMALLOC_EXTRAS 2
 #define KMALLOC_EXTRA
@@ -64,8 +62,8 @@ struct kmem_cache {
 #define KMALLOC_EXTRAS 0
 #endif
 
-#define KMALLOC_NR_CACHES (KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW \
-			+ 1 + KMALLOC_EXTRAS)
+#define KMALLOC_NR_CACHES (PAGE_SHIFT - KMALLOC_SHIFT_LOW \
+			+ KMALLOC_EXTRAS)
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
@@ -78,21 +76,18 @@ extern struct kmem_cache kmalloc_caches[
  */
 static inline int kmalloc_index(int size)
 {
-	if (size <= 8) return 3;
-	if (size <= 16) return 4;
-	if (size <= 32) return 5;
-	if (size <= 64) return 6;
-#ifdef KMALLOC_EXTRA
-	if (size <= 96) return KMALLOC_SHIFT_HIGH + 1;
-#endif
-	if (size <= 128) return 7;
+	int i = 3;
+
 #ifdef KMALLOC_EXTRA
-	if (size <= 192) return KMALLOC_SHIFT_HIGH + 2;
+	if (size > 64 && size <= 96) return PAGE_SHIFT;
+	if (size > 128 && size <= 192) return PAGE_SHIFT + 1;
 #endif
-	if (size <= 256) return 8;
-	if (size <= 512) return 9;
-	if (size <= 1024) return 10;
-	if (size <= 2048) return 11;
+
+	while (i < PAGE_SHIFT) {
+		if (size <= (1 << i))
+			return i;
+		i++;
+	}
 	return -1;
 }