Unlimited kmalloc size and removal of general caches >= 4K. We can directly use the page allocator for all allocations 4K and larger. This means that no general slabs are necessary and the size of the allocation passed to kmalloc() can be arbitrarily large. Remove the useless general caches over 4k. Signed-off-by: Christoph Lameter Index: linux-2.6.21-rc2/mm/slub.c =================================================================== --- linux-2.6.21-rc2.orig/mm/slub.c 2007-03-05 20:22:09.000000000 -0800 +++ linux-2.6.21-rc2/mm/slub.c 2007-03-05 20:48:55.000000000 -0800 @@ -1097,6 +1097,13 @@ if (unlikely(PageCompound(page))) page = page->first_page; + if (unlikely(!PageSlab(page))) { + if (x == page_address(page)) { + put_page(page); + return; + } + } + if (!s) s = page->slab; @@ -1669,7 +1676,8 @@ /* SLAB allows allocations with zero size. So warn on those */ WARN_ON(size == 0); /* Allocation too large? */ - BUG_ON(index < 0); + if (index < 0) + return NULL; #ifdef CONFIG_ZONE_DMA if ((flags & SLUB_DMA)) { @@ -1716,15 +1724,32 @@ void *__kmalloc(size_t size, gfp_t flags) { - return kmem_cache_alloc(get_slab(size, flags), flags); + struct kmem_cache *s = get_slab(size, flags); + struct page *page; + + if (s) + return kmem_cache_alloc(s, flags); + + page = alloc_pages(flags, get_order(size)); + if (!page) + return NULL; + return page_address(page); } EXPORT_SYMBOL(__kmalloc); #ifdef CONFIG_NUMA void *__kmalloc_node(size_t size, gfp_t flags, int node) { - return kmem_cache_alloc_node(get_slab(size, flags), - flags, node); + struct kmem_cache *s = get_slab(size, flags); + struct page *page; + + if (s) + return kmem_cache_alloc_node(s, flags, node); + + page = alloc_pages_node(node, flags, get_order(size)); + if (!page) + return NULL; + return page_address(page); } EXPORT_SYMBOL(__kmalloc_node); #endif Index: linux-2.6.21-rc2/include/linux/slub_def.h =================================================================== --- linux-2.6.21-rc2.orig/include/linux/slub_def.h
2007-03-05 18:21:15.000000000 -0800 +++ linux-2.6.21-rc2/include/linux/slub_def.h 2007-03-05 20:48:55.000000000 -0800 @@ -55,7 +55,7 @@ */ #define KMALLOC_SHIFT_LOW 3 -#define KMALLOC_SHIFT_HIGH 18 +#define KMALLOC_SHIFT_HIGH 11 #if L1_CACHE_BYTES <= 64 #define KMALLOC_EXTRAS 2 @@ -93,13 +93,6 @@ if (size <= 512) return 9; if (size <= 1024) return 10; if (size <= 2048) return 11; - if (size <= 4096) return 12; - if (size <= 8 * 1024) return 13; - if (size <= 16 * 1024) return 14; - if (size <= 32 * 1024) return 15; - if (size <= 64 * 1024) return 16; - if (size <= 128 * 1024) return 17; - if (size <= 256 * 1024) return 18; return -1; } @@ -113,14 +106,8 @@ { int index = kmalloc_index(size) - KMALLOC_SHIFT_LOW; - if (index < 0) { - /* - * Generate a link failure. Would be great if we could - * do something to stop the compile here. - */ - extern void __kmalloc_size_too_large(void); - __kmalloc_size_too_large(); - } + if (index < 0) + return NULL; return &kmalloc_caches[index]; } @@ -136,9 +123,10 @@ if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) { struct kmem_cache *s = kmalloc_slab(size); - return kmem_cache_alloc(s, flags); - } else - return __kmalloc(size, flags); + if (s) + return kmem_cache_alloc(s, flags); + } + return __kmalloc(size, flags); } static inline void *kzalloc(size_t size, gfp_t flags) @@ -146,9 +134,10 @@ if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) { struct kmem_cache *s = kmalloc_slab(size); - return kmem_cache_zalloc(s, flags); - } else - return __kzalloc(size, flags); + if (s) + return kmem_cache_zalloc(s, flags); + } + return __kzalloc(size, flags); } #ifdef CONFIG_NUMA @@ -159,9 +148,10 @@ if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) { struct kmem_cache *s = kmalloc_slab(size); - return kmem_cache_alloc_node(s, flags, node); - } else - return __kmalloc_node(size, flags, node); + if (s) + return kmem_cache_alloc_node(s, flags, node); + } + return __kmalloc_node(size, flags, node); } #endif