---
 include/linux/slub_def.h |    8 +++++---
 mm/slub.c                |   16 ++++++++--------
 2 files changed, 13 insertions(+), 11 deletions(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2008-02-09 10:51:25.100378710 -0800
+++ linux-2.6/include/linux/slub_def.h	2008-02-09 11:04:27.689964680 -0800
@@ -106,11 +106,13 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+#define KMALLOC_SHIFT_END (PAGE_SHIFT + PAGE_ALLOC_COSTLY_ORDER)
+#define KMALLOC_END_SIZE (1 << KMALLOC_SHIFT_END)
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_END];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -191,7 +193,7 @@ void *__kmalloc(size_t size, gfp_t flags
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
-		if (size > PAGE_SIZE / 2)
+		if (size > KMALLOC_END_SIZE / 2)
 			return (void *)__get_free_pages(flags | __GFP_COMP,
 							get_order(size));
 
@@ -214,7 +216,7 @@ void *kmem_cache_alloc_node(struct kmem_
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+		size <= KMALLOC_END_SIZE / 2 && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-02-09 10:51:25.108378657 -0800
+++ linux-2.6/mm/slub.c	2008-02-09 11:40:34.491786757 -0800
@@ -2484,11 +2484,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
 *		Kmalloc subsystem
 *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_END] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
+static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_END];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2670,7 +2670,7 @@ void *__kmalloc(size_t size, gfp_t flags
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
+	if (unlikely(size > KMALLOC_END_SIZE / 2))
 		return (void *)__get_free_pages(flags | __GFP_COMP,
 						get_order(size));
 
@@ -2688,7 +2688,7 @@ void *__kmalloc_node(size_t size, gfp_t
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
+	if (unlikely(size > KMALLOC_END_SIZE / 2))
 		return (void *)__get_free_pages(flags | __GFP_COMP,
 						get_order(size));
 
@@ -3001,7 +3001,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < KMALLOC_SHIFT_END; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3028,7 +3028,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < KMALLOC_SHIFT_END; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3218,7 +3218,7 @@ void *__kmalloc_track_caller(size_t size
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
+	if (unlikely(size > KMALLOC_END_SIZE / 2))
 		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
 						get_order(size));
 	s = get_slab(size, gfpflags);
@@ -3234,7 +3234,7 @@ void *__kmalloc_node_track_caller(size_t
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
+	if (unlikely(size > KMALLOC_END_SIZE / 2))
 		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
 						get_order(size));
 	s = get_slab(size, gfpflags);