Index: linux-2.6.21-rc7-mm1/mm/slub.c
===================================================================
--- linux-2.6.21-rc7-mm1.orig/mm/slub.c	2007-04-25 11:38:20.000000000 -0700
+++ linux-2.6.21-rc7-mm1/mm/slub.c	2007-04-25 11:59:24.000000000 -0700
@@ -107,7 +107,7 @@
  */
 
 /* Enable to test recovery from slab corruption on boot */
-#undef SLUB_RESILIENCY_TEST
+#define SLUB_RESILIENCY_TEST
 
 #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
 				SLAB_POISON | SLAB_STORE_USER)
@@ -2532,7 +2532,8 @@ static void resiliency_test(void)
 	validate_slab_cache(kmalloc_caches + 9);
 
 	printk(KERN_ERR "\n4. Test that kmalloc fails\n");
-	p = kmalloc (1 << 30, GFP_KERNEL);
+	p = kmalloc(64 * 1024 * 1024, GFP_KERNEL);
+	printk(KERN_ERR "\n5. blubb = %p\n", p);
 }
 #else
 static void resiliency_test(void) {};
Index: linux-2.6.21-rc7-mm1/include/linux/slub_def.h
===================================================================
--- linux-2.6.21-rc7-mm1.orig/include/linux/slub_def.h	2007-04-25 11:42:10.000000000 -0700
+++ linux-2.6.21-rc7-mm1/include/linux/slub_def.h	2007-04-25 11:59:05.000000000 -0700
@@ -84,7 +84,7 @@ extern struct kmem_cache kmalloc_caches[
  * Sorry that the following has to be that ugly but some versions of GCC
  * have trouble with constant propagation and loops.
  */
-static inline int kmalloc_index(int size)
+static __always_inline int kmalloc_index(int size)
 {
 	if (size == 0)
 		return 0;
@@ -120,7 +120,6 @@ static inline int kmalloc_index(int size
 	if (size <= 32 * 1024 * 1024) return 25;
 #endif
 	return -1;
-
 /*
  * What we really wanted to do and cannot do because of compiler issues is:
  *	int i;
@@ -136,7 +135,7 @@ static inline int kmalloc_index(int size
  * This ought to end up with a global pointer to the right cache
  * in kmalloc_caches.
  */
-static inline struct kmem_cache *kmalloc_slab(size_t size)
+static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 {
 	int index = kmalloc_index(size);
 
@@ -155,7 +154,7 @@ static inline struct kmem_cache *kmalloc
 #define SLUB_DMA 0
 #endif
 
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
@@ -168,7 +167,7 @@ static inline void *kmalloc(size_t size,
 	return __kmalloc(size, flags);
 }
 
-static inline void *kzalloc(size_t size, gfp_t flags)
+static __always_inline void *kzalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
@@ -184,7 +183,7 @@ static inline void *kzalloc(size_t size,
 #ifdef CONFIG_NUMA
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
 		struct kmem_cache *s = kmalloc_slab(size);
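
For anyone wondering why the patch uses __always_inline rather than plain
inline: the kmalloc() fast path only pays off if GCC actually inlines the
whole chain down to kmalloc_index(), so that the if-cascade folds to a
constant cache index whenever the size is a compile-time constant. If GCC's
inlining heuristics decline, __builtin_constant_p() may still be true but
the cascade survives as runtime code. Here is a minimal userspace sketch of
the same idiom, not kernel code -- the names index_for_size, slow_path and
lookup are made up for illustration:

/* Build with: gcc -O2 -o fold fold.c */
#include <stdio.h>

#define ALWAYS_INLINE inline __attribute__((always_inline))

/* Mirrors the shape of kmalloc_index(): a chain of size checks. */
static ALWAYS_INLINE int index_for_size(int size)
{
	if (size <= 8)   return 3;
	if (size <= 16)  return 4;
	if (size <= 32)  return 5;
	if (size <= 64)  return 6;
	if (size <= 128) return 7;
	return -1;
}

/* Out-of-line fallback, standing in for __kmalloc(). */
static int slow_path(int size)
{
	printf("slow path for size %d\n", size);
	return index_for_size(size);
}

/* Dispatch like SLUB's kmalloc(): constant sizes fold at compile time. */
static ALWAYS_INLINE int lookup(int size)
{
	if (__builtin_constant_p(size))
		return index_for_size(size);	/* folds to a constant */
	return slow_path(size);
}

int main(void)
{
	volatile int runtime_size = 100;	/* unknown at compile time */

	printf("constant 64 -> index %d\n", lookup(64));
	printf("runtime %d -> index %d\n", runtime_size,
						lookup(runtime_size));
	return 0;
}

At -O2 the disassembly shows lookup(64) reduced to loading the constant 6,
while the volatile size goes through slow_path() -- the same effect the
patch wants for constant-size kmalloc() calls.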