SLUB: Hooks for alloc and free

This adds alloc and free hooks to SLUB, so that one can provide custom
allocation and free functions for slab pages when special memory is
needed.

Signed-off-by: Christoph Lameter

---
 include/linux/slab.h |    5 ++
 mm/slub.c            |  111 +++++++++++++++++++++++++++++++++------------------
 2 files changed, 78 insertions(+), 38 deletions(-)

Index: slub/include/linux/slab.h
===================================================================
--- slub.orig/include/linux/slab.h	2007-05-15 21:34:09.000000000 -0700
+++ slub/include/linux/slab.h	2007-05-15 21:34:52.000000000 -0700
@@ -72,6 +72,11 @@ struct kmem_cache_ops {
 	 * set to synchronize_rcu().
 	 */
 	void (*sync)(void);
+
+	/* Redirection of allocations and frees */
+	struct page *(*alloc)(int node, gfp_t flags, int order,
+					unsigned long slabflags);
+	void (*free)(struct page *page, int order, unsigned long slabflags);
 };
 
 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
Index: slub/mm/slub.c
===================================================================
--- slub.orig/mm/slub.c	2007-05-15 21:34:09.000000000 -0700
+++ slub/mm/slub.c	2007-05-15 21:46:12.000000000 -0700
@@ -294,6 +294,56 @@ static inline int check_valid_pointer(st
 	return 1;
 }
 
+/*
+ * Page allocator interface. These functions provide the same functionality
+ * as the two methods in the kmem_cache_ops structure.
+ *
+ * If those methods are provided then the logic implemented there must mirror
+ * what we have here.
+ */
+static inline struct page *slub_alloc_pages(int node, gfp_t flags,
+				int order, unsigned long slabflags)
+{
+	struct page *page;
+	int pages = 1 << order;
+
+	/* Set up the allocation flags */
+	flags &= GFP_LEVEL_MASK;
+	if (order)
+		flags |= __GFP_COMP;
+
+	if (slabflags & SLAB_CACHE_DMA)
+		flags |= SLUB_DMA;
+
+	if (slabflags & SLAB_RECLAIM_ACCOUNT)
+		flags |= __GFP_RECLAIMABLE;
+
+	if (node == -1)
+		page = alloc_pages(flags, order);
+	else
+		page = alloc_pages_node(node, flags, order);
+
+	if (!page)
+		return NULL;
+
+	mod_zone_page_state(page_zone(page),
+		(slabflags & SLAB_RECLAIM_ACCOUNT) ?
+			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+			pages);
+
+	return page;
+}
+
+static inline void slub_free_pages(struct page *page, int order,
+					unsigned long slabflags)
+{
+	mod_zone_page_state(page_zone(page),
+		(slabflags & SLAB_RECLAIM_ACCOUNT) ?
+			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
+		- (1UL << order));
+	__free_pages(page, order);
+}
+
 struct kmem_cache_ops slub_default_ops = {
 };
 
@@ -984,36 +1034,6 @@ static inline void kmem_cache_open_debug
 /*
  * Slab allocation and freeing
  */
-static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
-{
-	struct page * page;
-	int pages = 1 << s->order;
-
-	if (s->order)
-		flags |= __GFP_COMP;
-
-	if (s->flags & SLAB_CACHE_DMA)
-		flags |= SLUB_DMA;
-
-	if (s->flags & SLAB_RECLAIM_ACCOUNT)
-		flags |= __GFP_RECLAIMABLE;
-
-	if (node == -1)
-		page = alloc_pages(flags, s->order);
-	else
-		page = alloc_pages_node(node, flags, s->order);
-
-	if (!page)
-		return NULL;
-
-	mod_zone_page_state(page_zone(page),
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-			pages);
-
-	return page;
-}
-
 static void setup_object(struct kmem_cache *s, struct page *page,
 				void *object)
 {
@@ -1036,7 +1056,11 @@ static struct page *new_slab(struct kmem
 	if (flags & __GFP_WAIT)
 		local_irq_enable();
 
-	page = allocate_slab(s, flags & GFP_LEVEL_MASK, node);
+	if (s->ops->alloc)
+		page = s->ops->alloc(node, flags, s->order, s->flags);
+	else
+		page = slub_alloc_pages(node, flags, s->order, s->flags);
+
 	if (!page)
 		goto out;
 
@@ -1084,8 +1108,6 @@ static struct page *new_slab(struct kmem
 
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
-	int pages = 1 << s->order;
-
 	if (unlikely(SlabDebug(page))) {
 		void *p;
 
@@ -1094,13 +1116,12 @@ static void __free_slab(struct kmem_cach
 			check_object(s, page, p, 0);
 	}
 
-	mod_zone_page_state(page_zone(page),
-		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		- pages);
 
 	page->mapping = NULL;
-	__free_pages(page, s->order);
+	if (s->ops->free)
+		s->ops->free(page, s->order, s->flags);
+	else
+		slub_free_pages(page, s->order, s->flags);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
@@ -3510,6 +3531,20 @@ static ssize_t ops_show(struct kmem_cach
 			(unsigned long)s->ops->sync);
 		x += sprintf(buf + x, "\n");
 	}
+
+	if (s->ops->alloc) {
+		x += sprintf(buf + x, "alloc : ");
+		x += sprint_symbol(buf + x,
+				(unsigned long)s->ops->alloc);
+		x += sprintf(buf + x, "\n");
+	}
+
+	if (s->ops->free) {
+		x += sprintf(buf + x, "free : ");
+		x += sprint_symbol(buf + x,
+				(unsigned long)s->ops->free);
+		x += sprintf(buf + x, "\n");
+	}
 	return x;
 }
 SLAB_ATTR_RO(ops);
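
For illustration only (not part of the patch): a user of the hooks has to
mirror the flag setup and NR_SLAB_* accounting done by slub_alloc_pages()
and slub_free_pages(). The sketch below assumes a hypothetical cache that
pins its slab pages to one NUMA node; the names MY_SLAB_NODE, my_slab_alloc,
my_slab_free and my_slab_ops are made up for the example, and GFP_LEVEL_MASK
and SLUB_DMA are used as in slub_alloc_pages() above, so the hooks would have
to live somewhere those definitions are visible.

/*
 * Hypothetical example: a cache whose slab pages always come from one
 * fixed node, with the same flag setup and per-zone slab accounting
 * that slub_alloc_pages()/slub_free_pages() perform.
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#define MY_SLAB_NODE 0		/* assumed: node the special memory lives on */

static struct page *my_slab_alloc(int node, gfp_t flags, int order,
					unsigned long slabflags)
{
	struct page *page;

	/* Same flag setup as slub_alloc_pages() */
	flags &= GFP_LEVEL_MASK;
	if (order)
		flags |= __GFP_COMP;
	if (slabflags & SLAB_CACHE_DMA)
		flags |= SLUB_DMA;
	if (slabflags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

	/* Ignore the requested node and allocate from the special node */
	page = alloc_pages_node(MY_SLAB_NODE, flags, order);
	if (!page)
		return NULL;

	/* Keep the per-zone slab counters consistent, as SLUB expects */
	mod_zone_page_state(page_zone(page),
		(slabflags & SLAB_RECLAIM_ACCOUNT) ?
			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		1 << order);
	return page;
}

static void my_slab_free(struct page *page, int order,
					unsigned long slabflags)
{
	mod_zone_page_state(page_zone(page),
		(slabflags & SLAB_RECLAIM_ACCOUNT) ?
			NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		-(1 << order));
	__free_pages(page, order);
}

static struct kmem_cache_ops my_slab_ops = {
	.alloc	= my_slab_alloc,
	.free	= my_slab_free,
};

How the ops structure is handed to cache creation follows the kmem_cache_ops
interface introduced earlier in this series and is not shown here. Caches
that leave .alloc and .free NULL are unaffected: new_slab() and __free_slab()
fall back to slub_alloc_pages() and slub_free_pages().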