---
 include/linux/kmalloc_sizes.h |    6 ------
 include/linux/slub_def.h      |   12 ------------
 mm/slab.c                     |    6 +++++-
 3 files changed, 5 insertions(+), 19 deletions(-)

Index: slub/include/linux/kmalloc_sizes.h
===================================================================
--- slub.orig/include/linux/kmalloc_sizes.h	2007-05-14 16:48:01.000000000 -0700
+++ slub/include/linux/kmalloc_sizes.h	2007-05-14 16:48:44.000000000 -0700
@@ -19,17 +19,11 @@
 	CACHE(32768)
 	CACHE(65536)
 	CACHE(131072)
-#if (NR_CPUS > 512) || (MAX_NUMNODES > 256) || !defined(CONFIG_MMU)
 	CACHE(262144)
-#endif
-#ifndef CONFIG_MMU
 	CACHE(524288)
 	CACHE(1048576)
-#ifdef CONFIG_LARGE_ALLOCS
 	CACHE(2097152)
 	CACHE(4194304)
 	CACHE(8388608)
 	CACHE(16777216)
 	CACHE(33554432)
-#endif /* CONFIG_LARGE_ALLOCS */
-#endif /* CONFIG_MMU */
Index: slub/include/linux/slub_def.h
===================================================================
--- slub.orig/include/linux/slub_def.h	2007-05-14 16:51:59.000000000 -0700
+++ slub/include/linux/slub_def.h	2007-05-14 16:53:22.000000000 -0700
@@ -58,16 +58,8 @@ struct kmem_cache {
  */
 #define KMALLOC_SHIFT_LOW 3

-#ifdef CONFIG_LARGE_ALLOCS
 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) < 25 ? \
 				MAX_ORDER + PAGE_SHIFT - 1 : 25)
-#else
-#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
-#define KMALLOC_SHIFT_HIGH 20
-#else
-#define KMALLOC_SHIFT_HIGH 18
-#endif
-#endif

 /*
  * We keep the general caches in an array of slab caches that are used for
@@ -110,17 +102,13 @@ static inline int kmalloc_index(int size
 	if (size <= 64 * 1024) return 16;
 	if (size <= 128 * 1024) return 17;
 	if (size <= 256 * 1024) return 18;
-#if KMALLOC_SHIFT_HIGH > 18
 	if (size <= 512 * 1024) return 19;
 	if (size <= 1024 * 1024) return 20;
-#endif
-#if KMALLOC_SHIFT_HIGH > 20
 	if (size <= 2 * 1024 * 1024) return 21;
 	if (size <= 4 * 1024 * 1024) return 22;
 	if (size <= 8 * 1024 * 1024) return 23;
 	if (size <= 16 * 1024 * 1024) return 24;
 	if (size <= 32 * 1024 * 1024) return 25;
-#endif
 	return -1;

 /*
Index: slub/mm/slab.c
===================================================================
--- slub.orig/mm/slab.c	2007-05-14 16:48:49.000000000 -0700
+++ slub/mm/slab.c	2007-05-14 17:00:40.000000000 -0700
@@ -328,6 +328,8 @@ static __always_inline int index_of(cons
 	if (__builtin_constant_p(size)) {
 		int i = 0;

+		if (size >= MAX_SLAB_SIZE)
+			__bad_size();
 #define CACHE(x) \
 	if (size <=x) \
 		return i; \
@@ -1508,7 +1510,7 @@ void __init kmem_cache_init(void)

 	slab_early_init = 0;

-	while (sizes->cs_size != ULONG_MAX) {
+	while (sizes->cs_size <= (1UL << min(MAX_ORDER + PAGE_SHIFT - 1, 25))) {
 		/*
 		 * For performance, all the general caches are L1 aligned.
 		 * This should be particularly beneficial on SMP boxes, as it
@@ -1535,6 +1537,8 @@ void __init kmem_cache_init(void)
 		sizes++;
 		names++;
 	}
+	sizes->cs_size = ULONG_MAX;
+
 	/* 4) Replace the bootstrap head arrays */
 	{
 		struct array_cache *ptr;
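
As a sanity check, here is a small userspace sketch of what the single remaining
KMALLOC_SHIFT_HIGH formula evaluates to. This is illustrative only and not part
of the patch; the PAGE_SHIFT and MAX_ORDER values are assumptions for a common
x86 configuration, and the kernel of course takes them from its own headers:

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumption: 4KB pages */
#define MAX_ORDER	11	/* assumption: default buddy allocator limit */

/* Same expression the patch leaves behind in slub_def.h */
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) < 25 ? \
				MAX_ORDER + PAGE_SHIFT - 1 : 25)

int main(void)
{
	/* 11 + 12 = 23 < 25, so the highest shift is 22: a 4MB cache. */
	printf("KMALLOC_SHIFT_HIGH = %d, largest cache = %lu bytes\n",
		KMALLOC_SHIFT_HIGH, 1UL << KMALLOC_SHIFT_HIGH);
	return 0;
}

With these assumed values kmalloc() tops out at 4MB, the largest size an
order MAX_ORDER - 1 page allocation can back; only configurations with
MAX_ORDER + PAGE_SHIFT >= 25 use the table all the way up to CACHE(33554432).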
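
The kmem_cache_init() change follows the same idea: the general cache table is
walked only up to the largest size the page allocator can supply, and the
terminator is written at the cutoff so the size lookup in kmalloc() still stops
at the last usable entry. A minimal userspace sketch of that pattern, with a
trimmed table and an assumed 4MB cutoff:

#include <stdio.h>
#include <limits.h>

struct cache_sizes { unsigned long cs_size; };

static struct cache_sizes malloc_sizes[] = {
	{ 1048576 }, { 2097152 }, { 4194304 }, { 8388608 },
	{ 16777216 }, { 33554432 },
	{ ULONG_MAX }			/* sentinel from CACHE(ULONG_MAX) */
};

int main(void)
{
	unsigned long limit = 1UL << 22;	/* assumed: MAX_ORDER + PAGE_SHIFT - 1 = 22 */
	struct cache_sizes *sizes = malloc_sizes;

	/* Create only the caches the page allocator can actually back. */
	while (sizes->cs_size <= limit) {
		printf("create kmalloc-%lu\n", sizes->cs_size);
		sizes++;
	}
	/* Re-terminate the table at the cutoff, as the patch does. */
	sizes->cs_size = ULONG_MAX;
	return 0;
}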