Index: linux-2.6.21-rc5-mm2/include/linux/slub_def.h
===================================================================
--- linux-2.6.21-rc5-mm2.orig/include/linux/slub_def.h	2007-03-30 18:38:11.000000000 -0700
+++ linux-2.6.21-rc5-mm2/include/linux/slub_def.h	2007-03-30 18:43:36.000000000 -0700
@@ -85,6 +85,8 @@ extern struct kmem_cache kmalloc_caches[
  */
 static inline int kmalloc_index(int size)
 {
+	if (size == 0)
+		return 0;
 	if (size > 64 && size <= 96)
 		return 1;
 	if (size > 128 && size <= 192)
@@ -137,6 +139,9 @@ static inline struct kmem_cache *kmalloc
 {
 	int index = kmalloc_index(size);
 
+	if (index == 0)
+		return NULL;
+
 	if (index < 0) {
 		/*
 		 * Generate a link failure. Would be great if we could
@@ -160,6 +165,9 @@ static inline void *kmalloc(size_t size,
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
+		if (!s)
+			return NULL;
+
 		return kmem_cache_alloc(s, flags);
 	} else
 		return __kmalloc(size, flags);
@@ -170,6 +178,9 @@ static inline void *kzalloc(size_t size,
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
+		if (!s)
+			return NULL;
+
 		return kmem_cache_zalloc(s, flags);
 	} else
 		return __kzalloc(size, flags);
@@ -183,6 +194,9 @@ static inline void *kmalloc_node(size_t
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
 
+		if (!s)
+			return NULL;
+
 		return kmem_cache_alloc_node(s, flags, node);
 	} else
 		return __kmalloc_node(size, flags, node);
Index: linux-2.6.21-rc5-mm2/mm/slub.c
===================================================================
--- linux-2.6.21-rc5-mm2.orig/mm/slub.c	2007-03-30 18:35:02.000000000 -0700
+++ linux-2.6.21-rc5-mm2/mm/slub.c	2007-03-30 18:47:24.000000000 -0700
@@ -2261,10 +2261,16 @@ __initcall(cpucache_init);
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
-	void *object = kmem_cache_alloc(s, gfpflags);
+	void *object;
+
+	if (!s)
+		return NULL;
+
+	object = kmem_cache_alloc(s, gfpflags);
 
 	if (object && (s->flags & SLAB_STORE_USER))
 		set_track(s, object, 0, caller);
+
 	return object;
 }
 
@@ -2272,10 +2278,16 @@ void *__kmalloc_node_track_caller(size_t
 	int node, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
-	void *object = kmem_cache_alloc_node(s, gfpflags, node);
+	void *object;
+
+	if (!s)
+		return NULL;
+
+	object = kmem_cache_alloc_node(s, gfpflags, node);
 
 	if (object && (s->flags & SLAB_STORE_USER))
 		set_track(s, object, 0, caller);
+
 	return object;
 }
 