Index: linux-2.6.21-rc4-mm1/include/linux/slub_def.h
===================================================================
--- linux-2.6.21-rc4-mm1.orig/include/linux/slub_def.h	2007-03-22 19:20:16.000000000 -0700
+++ linux-2.6.21-rc4-mm1/include/linux/slub_def.h	2007-03-22 19:33:53.000000000 -0700
@@ -62,7 +62,7 @@ struct kmem_cache {
  * Kmalloc subsystem.
  */
 #define KMALLOC_SHIFT_LOW 3
-
+#define KMALLOC_SHIFT_INLINE 16
 #define KMALLOC_SHIFT_HIGH 18
 
 #if L1_CACHE_BYTES <= 64
@@ -86,6 +86,8 @@ extern struct kmem_cache kmalloc_caches[
  */
 static inline int kmalloc_index(int size)
 {
+	if (size > 65536)
+		return -1;
 #ifdef KMALLOC_EXTRA
 	if (size > 64 && size <= 96)
 		return KMALLOC_SHIFT_HIGH + 1;
@@ -105,11 +107,7 @@ static inline int kmalloc_index(int size
 	if (size <= 8 * 1024) return 13;
 	if (size <= 16 * 1024) return 14;
 	if (size <= 32 * 1024) return 15;
-	if (size <= 64 * 1024) return 16;
-	if (size <= 128 * 1024) return 17;
-	if (size <= 256 * 1024) return 18;
-
-	return -1;
+	return 16;
 
 /*
  * What we really wanted to do and cannot do because of compiler issues is:
@@ -130,14 +128,8 @@ static inline struct kmem_cache *kmalloc
 {
 	int index = kmalloc_index(size) - KMALLOC_SHIFT_LOW;
 
-	if (index < 0) {
-		/*
-		 * Generate a link failure. Would be great if we could
-		 * do something to stop the compile here.
-		 */
-		extern void __kmalloc_size_too_large(void);
-		__kmalloc_size_too_large();
-	}
+	if (index < 0 || index > KMALLOC_SHIFT_INLINE - KMALLOC_SHIFT_LOW)
+		return NULL;
 	return &kmalloc_caches[index];
 }
 
@@ -153,9 +145,10 @@ static inline void *kmalloc(size_t size,
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
-		return kmem_cache_alloc(s, flags);
-	} else
-		return __kmalloc(size, flags);
+		if (s)
+			return kmem_cache_alloc(s, flags);
+	}
+	return __kmalloc(size, flags);
 }
 
 static inline void *kzalloc(size_t size, gfp_t flags)
@@ -163,9 +156,10 @@ static inline void *kzalloc(size_t size,
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
-		return kmem_cache_zalloc(s, flags);
-	} else
-		return __kzalloc(size, flags);
+		if (s)
+			return kmem_cache_zalloc(s, flags);
+	}
+	return __kzalloc(size, flags);
 }
 
 #ifdef CONFIG_NUMA
@@ -176,9 +170,10 @@ static inline void *kmalloc_node(size_t
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
-		return kmem_cache_alloc_node(s, flags, node);
-	} else
-		return __kmalloc_node(size, flags, node);
+		if (s)
+			return kmem_cache_alloc_node(s, flags, node);
+	}
+	return __kmalloc_node(size, flags, node);
 }
 #endif
 
Index: linux-2.6.21-rc4-mm1/mm/slub.c
===================================================================
--- linux-2.6.21-rc4-mm1.orig/mm/slub.c	2007-03-22 19:34:35.000000000 -0700
+++ linux-2.6.21-rc4-mm1/mm/slub.c	2007-03-22 20:08:27.000000000 -0700
@@ -1724,12 +1724,11 @@ static unsigned long slab_objects(struct
  * Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[KMALLOC_NR_CACHES] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_INLINE - KMALLOC_SHIFT_LOW + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
-#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[KMALLOC_NR_CACHES];
-#endif
+/* Non-inlined caches (DMA and large caches) */
+static struct kmem_cache *kmalloc_caches_misc[2][KMALLOC_NR_CACHES];
 
 static int __init setup_slub_min_order(char *str)
 {
@@ -1821,50 +1820,67 @@ panic:
 static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 {
 	int index = kmalloc_index(size) - KMALLOC_SHIFT_LOW;
+	struct kmem_cache *s;
+	struct kmem_cache **ss;
+	struct kmem_cache *x;
+	char *text;
+	size_t realsize;
 
+	/* SLAB allows allocations with zero size. So warn on those */
 	WARN_ON(size == 0);
 
+	if (index >= 0 && index <= KMALLOC_SHIFT_INLINE - KMALLOC_SHIFT_LOW &&
+					!(flags & SLUB_DMA))
+		return &kmalloc_caches[index];
+
 	/* Allocation too large? */
-	BUG_ON(index < 0);
+	BUG_ON(size > (PAGE_SIZE << MAX_ORDER));
 
-#ifdef CONFIG_ZONE_DMA
-	if ((flags & SLUB_DMA)) {
-		struct kmem_cache *s;
-		struct kmem_cache *x;
-		char *text;
-		size_t realsize;
-
-		s = kmalloc_caches_dma[index];
-		if (s)
-			return s;
-
-		/* Dynamically create dma cache */
-		x = kmalloc(kmem_size, flags & ~SLUB_DMA);
-		if (!x)
-			panic("Unable to allocate memory for dma cache\n");
+	if (index < 0) {
+		/*
+		 * Beyond the inline caches: derive the index of the
+		 * dynamically created power of two cache from the size.
+		 */
+		int shift = KMALLOC_SHIFT_INLINE + 1;
+
+		while (size > (1UL << shift))
+			shift++;
+		index = shift - KMALLOC_SHIFT_LOW;
+		BUG_ON(index > KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW);
+	}
+
+	ss = &kmalloc_caches_misc[!!(flags & SLUB_DMA)][index];
+	s = *ss;
+	if (s)
+		return s;
+
+	/* Dynamically create cache */
+	x = kmalloc(kmem_size, flags & ~SLUB_DMA);
+	if (!x)
+		panic("Unable to allocate memory for dynamic cache size=%zd\n", size);
 
 #ifdef KMALLOC_EXTRA
-		if (index <= KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW)
+	if (index <= KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW)
 #endif
-			realsize = 1 << (index + KMALLOC_SHIFT_LOW);
+		realsize = 1 << (index + KMALLOC_SHIFT_LOW);
 #ifdef KMALLOC_EXTRA
-		else {
-			index -= KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW +1;
-			if (!index)
-				realsize = 96;
-			else
-				realsize = 192;
-		}
+	else {
+		index -= KMALLOC_SHIFT_HIGH - KMALLOC_SHIFT_LOW + 1;
+		if (!index)
+			realsize = 96;
+		else
+			realsize = 192;
+	}
 #endif
 
+	if (flags & SLUB_DMA)
 		text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
-			(unsigned int)realsize);
-		s = create_kmalloc_cache(x, text, realsize, flags);
-		kmalloc_caches_dma[index] = s;
-		return s;
-	}
-#endif
-	return &kmalloc_caches[index];
+				(unsigned int)realsize);
+	else
+		text = kasprintf(flags, "kmalloc-%d", (unsigned int)realsize);
+
+	*ss = s = create_kmalloc_cache(x, text, realsize, flags);
+	return s;
 }
 
 void *__kmalloc(size_t size, gfp_t flags)
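
As a quick illustration of the sizing policy this patch introduces, here is a
small stand-alone sketch (userspace C, not kernel code): requests up to
2^KMALLOC_SHIFT_INLINE resolve to one of the statically allocated
kmalloc_caches, while larger requests (and, at runtime, SLUB_DMA requests)
fall through to get_slab(), which creates a power of two cache on demand. The
constants and helper names below are illustrative restatements of
KMALLOC_SHIFT_LOW/KMALLOC_SHIFT_INLINE from the patch, not kernel interfaces.

#include <stdio.h>

#define SHIFT_LOW	3	/* smallest cache: 8 byte objects */
#define SHIFT_INLINE	16	/* largest inline cache: 64k objects */

/* Same mapping as the patched kmalloc_index(); -1 means "not inline" */
static int sim_kmalloc_index(size_t size)
{
	int shift;

	if (size == 0 || size > (1UL << SHIFT_INLINE))
		return -1;
	for (shift = SHIFT_LOW; (1UL << shift) < size; shift++)
		;
	return shift;
}

/* Stands in for get_slab()'s dynamic path: size of the cache created */
static size_t sim_dynamic_cache_size(size_t size)
{
	int shift = SHIFT_INLINE + 1;

	while (size > (1UL << shift))
		shift++;
	return 1UL << shift;
}

int main(void)
{
	size_t sizes[] = { 8, 100, 4096, 65536, 65537, 262144 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int index = sim_kmalloc_index(sizes[i]);

		if (index >= 0)
			printf("%7zu bytes -> kmalloc_caches[%d] (%lu byte objects)\n",
				sizes[i], index - SHIFT_LOW, 1UL << index);
		else
			printf("%7zu bytes -> dynamic kmalloc-%zu cache\n",
				sizes[i], sim_dynamic_cache_size(sizes[i]));
	}
	return 0;
}

Compiled and run, this shows the fourteen inline caches (8 bytes to 64k)
absorbing everything the current static array serves, with the 65537 and
262144 byte requests instead routed to dynamically created caches, which is
the same decision get_slab() now makes at runtime.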