Index: linux-2.6.18-rc4-mm3/mm/slabifier.c
===================================================================
--- linux-2.6.18-rc4-mm3.orig/mm/slabifier.c	2006-08-31 18:38:38.250779613 -0700
+++ linux-2.6.18-rc4-mm3/mm/slabifier.c	2006-08-31 18:57:32.485264258 -0700
@@ -484,12 +484,13 @@ gotpage:
 	goto redo;
 }
 
-static void *slab_alloc(struct slab_cache *sc, gfp_t gfpflags)
+void *slab_alloc(struct slab_cache *sc, gfp_t gfpflags)
 {
 	return __slab_alloc(sc, gfpflags, -1);
 }
+EXPORT_SYMBOL(slab_alloc);
 
-static void *slab_alloc_node(struct slab_cache *sc, gfp_t gfpflags,
+void *slab_alloc_node(struct slab_cache *sc, gfp_t gfpflags,
 	int node)
 {
 #ifdef CONFIG_NUMA
@@ -498,8 +499,9 @@ static void *slab_alloc_node(struct slab
 	return slab_alloc(sc, gfpflags);
 #endif
 }
+EXPORT_SYMBOL(slab_alloc_node);
 
-static void slab_free(struct slab_cache *sc, const void *x)
+void slab_free(struct slab_cache *sc, const void *x)
 {
 	struct slab *s = (void *)sc;
 	struct page * page;
@@ -602,6 +604,7 @@ dumpret:
 	return;
 #endif
 }
+EXPORT_SYMBOL(slab_free);
 
 /* Figure out on which slab object the object resides */
 static __always_inline struct page *get_object_page(const void *x)
Index: linux-2.6.18-rc4-mm3/include/linux/kmalloc.h
===================================================================
--- linux-2.6.18-rc4-mm3.orig/include/linux/kmalloc.h	2006-08-31 18:38:29.920239847 -0700
+++ linux-2.6.18-rc4-mm3/include/linux/kmalloc.h	2006-08-31 18:45:25.108443704 -0700
@@ -67,6 +67,10 @@ static inline int kmalloc_index(int size
 	return -1;
 }
 
+extern void *slab_alloc(struct slab_cache *, gfp_t flags);
+extern void *slab_alloc_node(struct slab_cache *, gfp_t, int);
+extern void slab_free(struct slab_cache *, const void *);
+
 /*
  * Find the slab cache for a given combination of allocation flags and size.
 *
@@ -96,7 +100,7 @@ static inline void *kmalloc(size_t size,
 	if (__builtin_constant_p(size) && !(flags & __GFP_DMA)) {
 		struct slab_cache *s = kmalloc_slab(size);
 
-		return KMALLOC_ALLOCATOR.alloc(s, flags);
+		return slab_alloc(s, flags);
 	} else
 		return __kmalloc(size, flags);
 }
@@ -108,7 +112,7 @@ static inline void *kmalloc_node(size_t
 	if (__builtin_constant_p(size) && !(flags & __GFP_DMA)) {
 		struct slab_cache *s = kmalloc_slab(size);
 
-		return KMALLOC_ALLOCATOR.alloc_node(s, flags, node);
+		return slab_alloc_node(s, flags, node);
 	} else
 		return __kmalloc_node(size, flags, node);
 }
@@ -119,7 +123,7 @@ static inline void *kmalloc_node(size_t
 /* Free an object */
 static inline void kfree(const void *x)
 {
-	return KMALLOC_ALLOCATOR.free(NULL, x);
+	slab_free(NULL, x);
 }
 
 /* Allocate and zero the specified number of bytes */
Index: linux-2.6.18-rc4-mm3/mm/kmalloc.c
===================================================================
--- linux-2.6.18-rc4-mm3.orig/mm/kmalloc.c	2006-08-31 18:38:29.921216349 -0700
+++ linux-2.6.18-rc4-mm3/mm/kmalloc.c	2006-08-31 18:44:24.237206235 -0700
@@ -105,15 +105,14 @@ static struct slab_cache *get_slab(size_
 
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return KMALLOC_ALLOCATOR.alloc(get_slab(size, flags), flags);
+	return slab_alloc(get_slab(size, flags), flags);
 }
 EXPORT_SYMBOL(__kmalloc);
 
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	return KMALLOC_ALLOCATOR.alloc_node(get_slab(size, flags),
-		flags, node);
+	return slab_alloc_node(get_slab(size, flags), flags, node);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 #endif
Index: linux-2.6.18-rc4-mm3/include/linux/slabulator.h
===================================================================
--- linux-2.6.18-rc4-mm3.orig/include/linux/slabulator.h	2006-08-31 18:38:38.248826609 -0700
+++ linux-2.6.18-rc4-mm3/include/linux/slabulator.h	2006-08-31 18:40:47.150038742 -0700
@@ -73,20 +73,20 @@ static inline const char *kmem_cache_nam
 static inline void
 *kmem_cache_alloc(struct slab_cache *s, gfp_t flags)
 {
-	return SLABULATOR_ALLOCATOR.alloc(s, flags);
+	return slab_alloc(s, flags);
 }
 
 static inline void *kmem_cache_alloc_node(struct slab_cache *s,
 	gfp_t flags, int node)
 {
-	return SLABULATOR_ALLOCATOR.alloc_node(s, flags, node);
+	return slab_alloc_node(s, flags, node);
 }
 
 extern void *kmem_cache_zalloc(struct slab_cache *s, gfp_t flags);
 
 static inline void kmem_cache_free(struct slab_cache *s, const void *x)
 {
-	SLABULATOR_ALLOCATOR.free(s, x);
+	slab_free(s, x);
 }
 
 static inline int kmem_ptr_validate(struct slab_cache *s, void *x)