--- include/linux/slub_def.h | 9 ++++++--- mm/slub.c | 16 ++++++++-------- 2 files changed, 14 insertions(+), 11 deletions(-) Index: linux-2.6/include/linux/slub_def.h =================================================================== --- linux-2.6.orig/include/linux/slub_def.h 2007-11-09 09:06:57.765222740 -0800 +++ linux-2.6/include/linux/slub_def.h 2007-11-09 09:19:26.556078675 -0800 @@ -73,12 +73,15 @@ struct kmem_cache { #endif #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE) +#define MAX_OBJECT_SHIFT (PAGE_SHIFT + 2) +#define MAX_OBJECT_SIZE (1 << MAX_OBJECT_SHIFT) + /* * We keep the general caches in an array of slab caches that are used for * 2^x bytes of allocations. */ -extern struct kmem_cache kmalloc_caches[PAGE_SHIFT]; +extern struct kmem_cache kmalloc_caches[MAX_OBJECT_SHIFT + 1]; /* * Sorry that the following has to be that ugly but some versions of GCC @@ -159,7 +162,7 @@ void *__kmalloc(size_t size, gfp_t flags static __always_inline void *kmalloc(size_t size, gfp_t flags) { if (__builtin_constant_p(size)) { - if (size > PAGE_SIZE / 2) + if (size >= MAX_OBJECT_SIZE) return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); @@ -182,7 +185,7 @@ void *kmem_cache_alloc_node(struct kmem_ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) { if (__builtin_constant_p(size) && - size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) { + size < MAX_OBJECT_SIZE && !(flags & SLUB_DMA)) { struct kmem_cache *s = kmalloc_slab(size); if (!s) Index: linux-2.6/mm/slub.c =================================================================== --- linux-2.6.orig/mm/slub.c 2007-11-09 09:07:43.673972941 -0800 +++ linux-2.6/mm/slub.c 2007-11-09 09:20:08.725578307 -0800 @@ -2355,11 +2355,11 @@ EXPORT_SYMBOL(kmem_cache_destroy); * Kmalloc subsystem *******************************************************************/ -struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned; +struct kmem_cache kmalloc_caches[MAX_OBJECT_SHIFT + 1] 
__cacheline_aligned; EXPORT_SYMBOL(kmalloc_caches); #ifdef CONFIG_ZONE_DMA -static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT]; +static struct kmem_cache *kmalloc_caches_dma[MAX_OBJECT_SHIFT + 1]; #endif static int __init setup_slub_min_order(char *str) @@ -2540,7 +2540,7 @@ void *__kmalloc(size_t size, gfp_t flags { struct kmem_cache *s; - if (unlikely(size > PAGE_SIZE / 2)) + if (unlikely(size >= MAX_OBJECT_SIZE)) return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); @@ -2558,7 +2558,7 @@ void *__kmalloc_node(size_t size, gfp_t { struct kmem_cache *s; - if (unlikely(size > PAGE_SIZE / 2)) + if (unlikely(size >= MAX_OBJECT_SIZE)) return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); @@ -2852,7 +2852,7 @@ void __init kmem_cache_init(void) caches++; } - for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) { + for (i = KMALLOC_SHIFT_LOW; i < MAX_OBJECT_SHIFT + 1; i++) { create_kmalloc_cache(&kmalloc_caches[i], "kmalloc", 1 << i, GFP_KERNEL); caches++; @@ -2879,7 +2879,7 @@ void __init kmem_cache_init(void) slab_state = UP; /* Provide the correct kmalloc names now that the caches are up */ - for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) + for (i = KMALLOC_SHIFT_LOW; i < MAX_OBJECT_SHIFT + 1; i++) kmalloc_caches[i]. name = kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i); #ifdef CONFIG_SMP @@ -3055,7 +3055,7 @@ void *__kmalloc_track_caller(size_t size { struct kmem_cache *s; - if (unlikely(size > PAGE_SIZE / 2)) + if (unlikely(size >= MAX_OBJECT_SIZE)) return (void *)__get_free_pages(gfpflags | __GFP_COMP, get_order(size)); s = get_slab(size, gfpflags); @@ -3071,7 +3071,7 @@ void *__kmalloc_node_track_caller(size_t { struct kmem_cache *s; - if (unlikely(size > PAGE_SIZE / 2)) + if (unlikely(size >= MAX_OBJECT_SIZE)) return (void *)__get_free_pages(gfpflags | __GFP_COMP, get_order(size)); s = get_slab(size, gfpflags);