From: Christoph Hellwig

Signed-off-by: Christoph Hellwig
Acked-by: Christoph Lameter
Signed-off-by: Andrew Morton
---

 include/linux/slab.h |   25 ++++++++++++++++++++++++-
 mm/slab.c            |    2 +-
 2 files changed, 25 insertions(+), 2 deletions(-)

diff -puN include/linux/slab.h~slab-optimize-kmalloc_node-the-same-way-as-kmalloc include/linux/slab.h
--- a/include/linux/slab.h~slab-optimize-kmalloc_node-the-same-way-as-kmalloc
+++ a/include/linux/slab.h
@@ -202,7 +202,30 @@ extern int slab_is_available(void);
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
-extern void *kmalloc_node(size_t size, gfp_t flags, int node);
+extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
+
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	if (__builtin_constant_p(size)) {
+		int i = 0;
+#define CACHE(x) \
+		if (size <= x) \
+			goto found; \
+		else \
+			i++;
+#include "kmalloc_sizes.h"
+#undef CACHE
+		{
+			extern void __you_cannot_kmalloc_that_much(void);
+			__you_cannot_kmalloc_that_much();
+		}
+found:
+		return kmem_cache_alloc_node((flags & GFP_DMA) ?
+			malloc_sizes[i].cs_dmacachep :
+			malloc_sizes[i].cs_cachep, flags, node);
+	}
+	return __kmalloc_node(size, flags, node);
+}
 #else
 static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int node)
 {

diff -puN mm/slab.c~slab-optimize-kmalloc_node-the-same-way-as-kmalloc mm/slab.c
--- a/mm/slab.c~slab-optimize-kmalloc_node-the-same-way-as-kmalloc
+++ a/mm/slab.c
@@ -3348,7 +3348,7 @@ void *kmem_cache_alloc_node(struct kmem_
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-void *kmalloc_node(size_t size, gfp_t flags, int node)
+void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *cachep;
_
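
For reference, here is a small standalone userspace sketch (not part of the patch) of the compile-time trick the new inline kmalloc_node relies on: expanding a CACHE(x) macro over a size table so that, when the size is a compile-time constant, the compiler can fold the whole if/else chain down to a single cache index. The SIZES list and pick_cache_index() below are made-up stand-ins for kmalloc_sizes.h and the goto-found logic in the patch.

/* Illustrative only: mimics the constant-folding in the inline kmalloc_node. */
#include <stdio.h>

/* Stand-in for kmalloc_sizes.h: a short table of cache sizes. */
#define SIZES(CACHE) \
	CACHE(32) CACHE(64) CACHE(128) CACHE(256) CACHE(512)

static inline int pick_cache_index(size_t size)
{
	int i = 0;
#define CACHE(x)		\
	if (size <= x)		\
		return i;	\
	else			\
		i++;
	SIZES(CACHE)
#undef CACHE
	return -1;	/* size too large for any fixed-size cache */
}

int main(void)
{
	/* With a constant size and optimization enabled, this folds to 2. */
	printf("cache index for 100 bytes: %d\n", pick_cache_index(100));
	return 0;
}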