From 9327cbc0b15b2920e94bacf179e3041eb8a093d3 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Mon, 14 Apr 2008 19:15:43 +0300
Subject: [PATCH] SLUB: Add get() and kick() methods

Add the two methods needed for defragmentation and add the display of the
methods via the sysfs interface.

Add documentation explaining the use of these methods.

Reviewed-by: Rik van Riel
Signed-off-by: Christoph Lameter
Signed-off-by: Pekka Enberg
---
 include/linux/slab.h     | 45 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/slub_def.h |  3 +++
 mm/slab.c                |  3 +++
 mm/slob.c                |  3 +++
 mm/slub.c                | 29 ++++++++++++++++++++++++++++-
 5 files changed, 82 insertions(+), 1 deletion(-)

Index: linux-2.6/include/linux/slub_def.h
===================================================================
--- linux-2.6.orig/include/linux/slub_def.h	2008-04-28 21:22:24.921140299 -0700
+++ linux-2.6/include/linux/slub_def.h	2008-04-28 21:22:31.871140325 -0700
@@ -86,6 +86,9 @@ struct kmem_cache {
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(struct kmem_cache *, void *);
+	kmem_defrag_get_func *get;
+	kmem_defrag_kick_func *kick;
+
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
 	int defrag_ratio;
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2008-04-28 21:22:28.499889496 -0700
+++ linux-2.6/mm/slub.c	2008-04-28 21:22:31.871140325 -0700
@@ -2768,6 +2768,19 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+void kmem_cache_setup_defrag(struct kmem_cache *s,
+	kmem_defrag_get_func get, kmem_defrag_kick_func kick)
+{
+	/*
+	 * Defragmentable slabs must have a ctor, otherwise objects may be
+	 * in an undetermined state after they are allocated.
+	 */
+	BUG_ON(!s->ctor);
+	s->get = get;
+	s->kick = kick;
+}
+EXPORT_SYMBOL(kmem_cache_setup_defrag);
+
 /*
  * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  * the remaining slabs by the number of items in use. The slabs with the
@@ -3053,7 +3066,7 @@ static int slab_unmergeable(struct kmem_
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
-	if (s->ctor)
+	if (s->ctor || s->kick || s->get)
 		return 1;
 
 	/*
@@ -3839,6 +3852,20 @@ static ssize_t ops_show(struct kmem_cach
 		x += sprint_symbol(buf + x, (unsigned long)s->ctor);
 		x += sprintf(buf + x, "\n");
 	}
+
+	if (s->get) {
+		x += sprintf(buf + x, "get : ");
+		x += sprint_symbol(buf + x,
+				(unsigned long)s->get);
+		x += sprintf(buf + x, "\n");
+	}
+
+	if (s->kick) {
+		x += sprintf(buf + x, "kick : ");
+		x += sprint_symbol(buf + x,
+				(unsigned long)s->kick);
+		x += sprintf(buf + x, "\n");
+	}
 	return x;
 }
 SLAB_ATTR_RO(ops);
Index: linux-2.6/include/linux/slab.h
===================================================================
--- linux-2.6.orig/include/linux/slab.h	2008-04-28 21:20:44.462390206 -0700
+++ linux-2.6/include/linux/slab.h	2008-04-28 21:22:31.911149313 -0700
@@ -96,6 +96,51 @@ void kfree(const void *);
 size_t ksize(const void *);
 
 /*
+ * Function prototypes passed to kmem_cache_setup_defrag() to enable
+ * defragmentation and targeted reclaim in slab caches.
+ */
+
+/*
+ * kmem_defrag_get_func() is called with locks held so that the slab
+ * objects cannot be freed. We are in an atomic context and no slab
+ * operations may be performed. The purpose of kmem_defrag_get_func()
+ * is to obtain a stable refcount on the objects, so that they cannot be
+ * removed until kmem_defrag_kick_func() has handled them.
+ *
+ * Parameters passed are the number of objects to process and an array of
+ * pointers to objects for which we need references.
+ *
+ * Returns a pointer that is passed to the kick function. If any objects
+ * cannot be moved then the pointer may indicate a failure, in which case
+ * kick() can simply drop the references that were already obtained.
+ *
+ * The object pointer array passed here is also passed to
+ * kmem_defrag_kick_func(). The function may remove objects from the array
+ * by setting pointers to NULL. This is useful if we can determine that an
+ * object is already about to be removed. In that case it is often
+ * impossible to obtain the necessary refcount.
+ */
+typedef void *kmem_defrag_get_func(struct kmem_cache *, int, void **);
+
+/*
+ * kmem_defrag_kick_func() is called with no locks held and interrupts
+ * enabled. Sleeping is possible. Any operation may be performed in kick().
+ * kick() should free all the objects in the pointer array.
+ *
+ * Parameters passed are the number of objects in the array, the array of
+ * pointers to the objects and the pointer returned by kmem_defrag_get_func().
+ *
+ * Success is checked by examining the number of remaining objects in the slab.
+ */
+typedef void kmem_defrag_kick_func(struct kmem_cache *, int, void **, void *);
+
+/*
+ * kmem_cache_setup_defrag() is used to set up the callbacks for a slab cache.
+ */
+void kmem_cache_setup_defrag(struct kmem_cache *, kmem_defrag_get_func,
+	kmem_defrag_kick_func);
+
+/*
 * Allocator specific definitions. These are mainly used to establish optimized
 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
 * selecting the appropriate general cache at compile time.
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c	2008-04-28 21:20:44.482390701 -0700
+++ linux-2.6/mm/slab.c	2008-04-28 21:22:31.931149244 -0700
@@ -313,6 +313,9 @@ static void free_block(struct kmem_cache
 static int enable_cpucache(struct kmem_cache *cachep);
 static void cache_reap(struct work_struct *unused);
 
+void kmem_cache_setup_defrag(struct kmem_cache *s,
+	kmem_defrag_get_func get, kmem_defrag_kick_func kick) {}
+
 /*
  * This function must be completely optimized away if a constant is passed to
  * it. Mostly the same as what is in linux/slab.h except it returns an index.
Index: linux-2.6/mm/slob.c
===================================================================
--- linux-2.6.orig/mm/slob.c	2008-04-28 21:20:44.492390805 -0700
+++ linux-2.6/mm/slob.c	2008-04-28 21:22:31.941149232 -0700
@@ -107,6 +107,9 @@ struct slob_page {
 static inline void struct_slob_page_wrong_size(void)
 { BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }
 
+void kmem_cache_setup_defrag(struct kmem_cache *s,
+	kmem_defrag_get_func get, kmem_defrag_kick_func kick) {}
+
 /*
  * free_slob_page: call before a slob_page is returned to the page allocator.
 */
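
Usage sketch (illustrative only, not part of the patch): the comments added to
slab.h describe the get()/kick() protocol, so the following shows roughly how a
subsystem could implement and register the two callbacks. Everything named
foo_* below is hypothetical; only kmem_cache_create(), kmem_cache_setup_defrag()
and the two typedefs come from the kernel and this patch, and pinning objects
with a kref is just one way to satisfy the "stable refcount" requirement of
get().

#include <linux/slab.h>
#include <linux/kref.h>
#include <linux/init.h>
#include <linux/errno.h>

/* Hypothetical object type of a cache that supports defragmentation. */
struct foo {
	struct kref ref;
	/* ... subsystem payload ... */
};

static struct kmem_cache *foo_cachep;

static void foo_release(struct kref *ref)
{
	kmem_cache_free(foo_cachep, container_of(ref, struct foo, ref));
}

/* ctor: objects must be in a determined state before get()/kick() see them. */
static void foo_ctor(struct kmem_cache *s, void *object)
{
	kref_init(&((struct foo *)object)->ref);
}

/*
 * get(): atomic context, slab locks held. Pin each object so it cannot be
 * freed before kick() has dealt with it. No private state is needed for
 * kick() in this sketch, so return NULL.
 */
static void *foo_get(struct kmem_cache *s, int nr, void **v)
{
	int i;

	for (i = 0; i < nr; i++)
		kref_get(&((struct foo *)v[i])->ref);
	return NULL;
}

/*
 * kick(): no locks held, may sleep. Evict each object from the subsystem's
 * data structures and drop the reference taken in get(). Slots that a more
 * elaborate get() cleared to NULL are skipped.
 */
static void foo_kick(struct kmem_cache *s, int nr, void **v, void *private)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct foo *f = v[i];

		if (!f)
			continue;
		/* ... unhash/unlink f from the subsystem here ... */
		kref_put(&f->ref, foo_release);
	}
}

static int __init foo_init(void)
{
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
					SLAB_RECLAIM_ACCOUNT, foo_ctor);
	if (!foo_cachep)
		return -ENOMEM;
	kmem_cache_setup_defrag(foo_cachep, foo_get, foo_kick);
	return 0;
}

Note that the ctor is mandatory here: kmem_cache_setup_defrag() BUG()s without
one, since get() and kick() may be handed objects that were never initialized
by the subsystem otherwise.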