SLUB: Hooks for alloc and free

This adds alloc and free hooks to SLUB, so that one can provide one's
own page allocation and freeing functions if a slab cache needs
special memory.

Signed-off-by: Christoph Lameter

---
 include/linux/slab.h |    5 ++
 mm/slub.c            |  113 +++++++++++++++++++++++++++++++++------------------
 2 files changed, 79 insertions(+), 39 deletions(-)

Index: slub/include/linux/slab.h
===================================================================
--- slub.orig/include/linux/slab.h	2007-05-09 16:25:36.000000000 -0700
+++ slub/include/linux/slab.h	2007-05-09 16:45:31.000000000 -0700
@@ -56,6 +56,11 @@ struct kmem_cache_ops {
 	 * Any operation may be performed in kick_object.
 	 */
 	void (*kick_object)(struct kmem_cache *, void *);
+
+	/* Redirection of allocations and frees */
+	struct page *(*alloc)(int node, gfp_t flags, int order,
+					unsigned long slabflags);
+	void (*free)(struct page *page, int order, unsigned long slabflags);
 };
 
 struct kmem_cache *__kmem_cache_create(const char *, size_t, size_t,
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-05-09 16:45:24.000000000 -0700
+++ slub/mm/slub.c	2007-05-09 16:45:31.000000000 -0700
@@ -291,11 +291,58 @@ static inline int check_valid_pointer(st
 	return 1;
 }
 
+/*
+ * Page allocator interface. These functions provide the same functionality
+ * as the two methods in the kmem_cache_ops structure.
+ *
+ * If these methods are provided, the logic implemented there must mirror
+ * what we have here.
+ */
+static inline struct page *slub_alloc_pages(int node, gfp_t flags,
+				int order, unsigned long slabflags)
+{
+	struct page *page;
+	int pages = 1 << order;
+
+	/* Setup the allocation flags */
+	flags &= GFP_LEVEL_MASK;
+	if (order)
+		flags |= __GFP_COMP;
+
+	if (slabflags & SLAB_CACHE_DMA)
+		flags |= SLUB_DMA;
+
+	if (node == -1)
+		page = alloc_pages(flags, order);
+	else
+		page = alloc_pages_node(node, flags, order);
+
+	if (!page)
+		return NULL;
+
+	mod_zone_page_state(page_zone(page),
+		(slabflags & SLAB_RECLAIM_ACCOUNT) ?
+			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+		pages);
+
+	return page;
+}
+
+static inline void slub_free_pages(struct page *page, int order,
+				unsigned long slabflags)
+{
+	int pages = 1 << order;
+
+	mod_zone_page_state(page_zone(page),
+		(slabflags & SLAB_RECLAIM_ACCOUNT) ?
+			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+		- pages);
+	__free_pages(page, order);
+}
+
 struct kmem_cache_ops slub_default_ops = {
-	NULL,
-	NULL,
-	NULL,
-	NULL
+	.alloc = slub_alloc_pages,
+	.free = slub_free_pages
 };
 
 /*
@@ -969,33 +1016,6 @@ static inline void kmem_cache_open_debug
 /*
  * Slab allocation and freeing
  */
-static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
-	struct page * page;
-	int pages = 1 << s->order;
-
-	if (s->order)
-		flags |= __GFP_COMP;
-
-	if (s->flags & SLAB_CACHE_DMA)
-		flags |= SLUB_DMA;
-
-	if (node == -1)
-		page = alloc_pages(flags, s->order);
-	else
-		page = alloc_pages_node(node, flags, s->order);
-
-	if (!page)
-		return NULL;
-
-	mod_zone_page_state(page_zone(page),
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		pages);
-
-	return page;
-}
-
 static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
@@ -1022,7 +1042,11 @@ static struct page *new_slab(struct kmem
 	if (flags & __GFP_WAIT)
 		local_irq_enable();
 
-	page = allocate_slab(s, flags & GFP_LEVEL_MASK, node);
+	if (s->ops->alloc)
+		page = s->ops->alloc(node, flags, s->order, s->flags);
+	else
+		page = slub_alloc_pages(node, flags, s->order, s->flags);
+
 	if (!page)
 		goto out;
 
@@ -1070,8 +1094,6 @@ static struct page *new_slab(struct kmem
 
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
-	int pages = 1 << s->order;
-
 	if (unlikely(SlabDebug(page) || s->ops->dtor)) {
 		void *p;
 
@@ -1083,13 +1105,12 @@ static void __free_slab(struct kmem_cach
 		}
 	}
 
-	mod_zone_page_state(page_zone(page),
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		- pages);
 	page->mapping = NULL;
-	__free_pages(page, s->order);
+	if (s->ops->free)
+		s->ops->free(page, s->order, s->flags);
+	else
+		slub_free_pages(page, s->order, s->flags);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
 {
@@ -3423,6 +3444,20 @@ static ssize_t ops_show(struct kmem_cach
 			(unsigned long)s->ops->kick_object);
 		x += sprintf(buf + x, "\n");
 	}
+
+	if (s->ops->alloc) {
+		x += sprintf(buf + x, "alloc : ");
+		x += sprint_symbol(buf + x,
+				(unsigned long)s->ops->alloc);
+		x += sprintf(buf + x, "\n");
+	}
+
+	if (s->ops->free) {
+		x += sprintf(buf + x, "free : ");
+		x += sprint_symbol(buf + x,
+				(unsigned long)s->ops->free);
+		x += sprintf(buf + x, "\n");
+	}
 	return x;
 }
 SLAB_ATTR_RO(ops);
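
For illustration only, and not part of the patch: a minimal sketch of what
a user of the new hooks could look like. Everything named mynode_* below is
hypothetical, and __GFP_DMA is used directly because SLUB_DMA is private to
mm/slub.c. As the comment above slub_alloc_pages() requires, the hooks
mirror the flag setup and zone state accounting of the defaults; on top of
that they force every slab page of the cache onto node 0.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmstat.h>

/* Hypothetical hook: allocate all slab pages of this cache on node 0 */
static struct page *mynode_alloc(int node, gfp_t flags, int order,
					unsigned long slabflags)
{
	struct page *page;

	/* Mirror the flag handling of slub_alloc_pages() */
	flags &= GFP_LEVEL_MASK;
	if (order)
		flags |= __GFP_COMP;
	if (slabflags & SLAB_CACHE_DMA)
		flags |= __GFP_DMA;	/* SLUB_DMA is not visible here */

	/* Ignore the node preference passed in; always use node 0 */
	page = alloc_pages_node(0, flags, order);
	if (!page)
		return NULL;

	/* Keep the per-zone slab counters in sync, as the default does */
	mod_zone_page_state(page_zone(page),
		(slabflags & SLAB_RECLAIM_ACCOUNT) ?
			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		1 << order);
	return page;
}

static void mynode_free(struct page *page, int order,
					unsigned long slabflags)
{
	/* Undo the accounting done in mynode_alloc() */
	mod_zone_page_state(page_zone(page),
		(slabflags & SLAB_RECLAIM_ACCOUNT) ?
			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		-(1 << order));
	__free_pages(page, order);
}

static struct kmem_cache_ops mynode_ops = {
	.alloc	= mynode_alloc,
	.free	= mynode_free,
};

A cache set up with mynode_ops then gets all of its slab pages through the
hooks instead of slub_alloc_pages()/slub_free_pages(), and both hooks
become visible in the ops file that this patch adds to sysfs.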