Index: linux-2.6.18-rc4-mm2/mm/slabifier.c
===================================================================
--- linux-2.6.18-rc4-mm2.orig/mm/slabifier.c	2006-08-25 23:48:19.209830001 -0700
+++ linux-2.6.18-rc4-mm2/mm/slabifier.c	2006-08-26 10:52:18.557967843 -0700
@@ -664,16 +664,18 @@ load:
 	return object;
 }
 
-static void *slab_alloc(struct slab_cache *sc, gfp_t gfpflags)
+void *slab_alloc(struct slab_cache *sc, gfp_t gfpflags)
 {
 	return __slab_alloc(sc, gfpflags, -1);
 }
+EXPORT_SYMBOL(slab_alloc);
 
-static void *slab_alloc_node(struct slab_cache *sc, gfp_t gfpflags,
+void *slab_alloc_node(struct slab_cache *sc, gfp_t gfpflags,
 	int node)
 {
 	return __slab_alloc(sc, gfpflags, node);
 }
+EXPORT_SYMBOL(slab_alloc_node);
 
 /* Figure out on which slab object the object resides */
 static __always_inline struct page *get_object_page(const void *x)
@@ -689,7 +691,7 @@ static __always_inline struct page *get_
 	return page;
 }
 
-static void slab_free(struct slab_cache *sc, const void *x)
+void slab_free(struct slab_cache *sc, const void *x)
 {
 	struct slab *s = (void *)sc;
 	struct page * page;
@@ -780,6 +782,7 @@ out_unlock:
 out:
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(slab_free);
 
 /*
  * Check if a given pointer is valid
Index: linux-2.6.18-rc4-mm2/include/linux/kmalloc.h
===================================================================
--- linux-2.6.18-rc4-mm2.orig/include/linux/kmalloc.h	2006-08-25 23:48:24.369666763 -0700
+++ linux-2.6.18-rc4-mm2/include/linux/kmalloc.h	2006-08-26 10:32:31.342383161 -0700
@@ -72,6 +72,10 @@ static inline int kmalloc_index(int size
 	return -1;
 }
 
+extern void *slab_alloc(struct slab_cache *, gfp_t flags);
+extern void *slab_alloc_node(struct slab_cache *, gfp_t, int);
+extern void slab_free(struct slab_cache *, const void *);
+
 /*
  * Find the slab cache for a given combination of allocation flags and size.
  *
@@ -100,8 +104,8 @@ static inline void *kmalloc(size_t size,
 {
 	if (__builtin_constant_p(size)) {
 		struct slab_cache *s = kmalloc_slab(size, flags);
-
-		return KMALLOC_ALLOCATOR.alloc(s, flags);
+		return slab_alloc(s, flags);
+//		return KMALLOC_ALLOCATOR.alloc(s, flags);
 	} else
 		return __kmalloc(size, flags);
 }
@@ -113,7 +117,8 @@ static inline void *kmalloc_node(size_t
 	if (__builtin_constant_p(size)) {
 		struct slab_cache *s = kmalloc_slab(size, flags);
 
-		return KMALLOC_ALLOCATOR.alloc_node(s, flags, node);
+		return slab_alloc_node(s, flags, node);
+//		return KMALLOC_ALLOCATOR.alloc_node(s, flags, node);
 	} else
 		return __kmalloc_node(size, flags, node);
 }
@@ -124,7 +129,8 @@ static inline void *kmalloc_node(size_t
 /* Free an object */
 static inline void kfree(const void *x)
 {
-	return KMALLOC_ALLOCATOR.free(NULL, x);
+	slab_free(NULL, x);
+//	return KMALLOC_ALLOCATOR.free(NULL, x);
 }
 
 /* Allocate and zero the specified number of bytes */
Index: linux-2.6.18-rc4-mm2/mm/kmalloc.c
===================================================================
--- linux-2.6.18-rc4-mm2.orig/mm/kmalloc.c	2006-08-25 23:48:24.370643265 -0700
+++ linux-2.6.18-rc4-mm2/mm/kmalloc.c	2006-08-26 10:34:27.603748685 -0700
@@ -25,15 +25,17 @@ static struct slab_cache *get_slab(size_
 
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return KMALLOC_ALLOCATOR.alloc(get_slab(size, flags), flags);
+	return slab_alloc(get_slab(size, flags), flags);
+//	return KMALLOC_ALLOCATOR.alloc(get_slab(size, flags), flags);
 }
 EXPORT_SYMBOL(__kmalloc);
 
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return KMALLOC_ALLOCATOR.alloc_node(get_slab(size, flags),
-							flags, node);
+	return slab_alloc_node(get_slab(size, flags), flags, node);
+//	return KMALLOC_ALLOCATOR.alloc_node(get_slab(size, flags),
+//							flags, node);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
Index: linux-2.6.18-rc4-mm2/include/linux/slabulator.h
===================================================================
--- linux-2.6.18-rc4-mm2.orig/include/linux/slabulator.h	2006-08-25 23:48:26.243574171 -0700
+++ linux-2.6.18-rc4-mm2/include/linux/slabulator.h	2006-08-26 10:31:44.592344360 -0700
@@ -73,20 +73,23 @@ static inline const char *kmem_cache_nam
 
 static inline void *kmem_cache_alloc(struct slab_cache *s, gfp_t flags)
 {
-	return SLABULATOR_ALLOCATOR.alloc(s, flags);
+	return slab_alloc(s, flags);
+	//return SLABULATOR_ALLOCATOR.alloc(s, flags);
 }
 
 static inline void *kmem_cache_alloc_node(struct slab_cache *s,
 						gfp_t flags, int node)
 {
-	return SLABULATOR_ALLOCATOR.alloc_node(s, flags, node);
+	return slab_alloc_node(s, flags, node);
+//	return SLABULATOR_ALLOCATOR.alloc_node(s, flags, node);
 }
 
 extern void *kmem_cache_zalloc(struct slab_cache *s, gfp_t flags);
 
 static inline void kmem_cache_free(struct slab_cache *s, const void *x)
 {
-	SLABULATOR_ALLOCATOR.free(s, x);
+	slab_free(s, x);
+//	SLABULATOR_ALLOCATOR.free(s, x);
 }
 
 static inline int kmem_ptr_validate(struct slab_cache *s, void *x)
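
For context, the effect of the patch from a caller's point of view: allocations that
used to dispatch through the allocator operations table (KMALLOC_ALLOCATOR.alloc and
friends, an indirect call) now compile to a plain direct call to the exported
slab_alloc()/slab_alloc_node()/slab_free() fastpaths. Below is a minimal, hypothetical
caller sketch, not part of the patch; the names my_cache, my_struct and my_do_work are
made up for illustration, and creation of the slab_cache itself is assumed to happen
elsewhere via the slabulator API:

	#include <linux/kmalloc.h>
	#include <linux/gfp.h>

	struct my_struct {
		int value;
	};

	/* Assumed to have been set up elsewhere (slabulator cache creation). */
	static struct slab_cache *my_cache;

	static int my_do_work(void)
	{
		struct my_struct *obj;

		/*
		 * Direct call into the exported fastpath: the compiler emits a
		 * plain call instruction rather than an indirect jump through
		 * the allocator methods table.
		 */
		obj = slab_alloc(my_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;

		obj->value = 42;

		slab_free(my_cache, obj);
		return 0;
	}

The same substitution applies to the NUMA-aware path: a kmem_cache_alloc_node() caller
now ends up in slab_alloc_node() directly, which is why all three symbols are exported
from mm/slabifier.c.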