[SLUB] Sort slab list and establish maximum objects for defrag slabs

When we defragment slabs, it is advantageous to have all defragmentable
slabs together. When adding a slab cache, put defragmentable caches first
and the others last.

Determine the maximum number of objects in defragmentable slabs. This
allows us to size the allocation of the arrays holding refs to these
objects later.

Signed-off-by: Christoph Lameter

---
 mm/slub.c |   19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c	2007-08-08 20:35:23.000000000 -0700
+++ linux-2.6/mm/slub.c	2007-08-08 20:35:54.000000000 -0700
@@ -235,6 +235,9 @@ static enum {
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
 
+/* Maximum objects in defragmentable slabs */
+static unsigned int max_defrag_slab_objects = 0;
+
 /*
  * Tracking user of a slab.
  */
@@ -2268,7 +2271,7 @@ static struct kmem_cache *create_kmalloc
 				flags, NULL))
 		goto panic;
 
-	list_add(&s->list, &slab_caches);
+	list_add_tail(&s->list, &slab_caches);
 	up_write(&slub_lock);
 	if (sysfs_slab_add(s))
 		goto panic;
@@ -2448,6 +2451,13 @@ void kfree(const void *x)
 }
 EXPORT_SYMBOL(kfree);
 
+static inline void *alloc_scratch(void)
+{
+	return kmalloc(max_defrag_slab_objects * sizeof(void *) +
+		BITS_TO_LONGS(max_defrag_slab_objects) * sizeof(unsigned long),
+		GFP_KERNEL);
+}
+
 void kmem_cache_setup_defrag(struct kmem_cache *s,
 	void *(*get)(struct kmem_cache *, int nr, void **),
 	void (*kick)(struct kmem_cache *, int nr, void **, void *private))
@@ -2459,6 +2469,11 @@ void kmem_cache_setup_defrag(struct kmem
 	BUG_ON(!s->ctor);
 	s->get = get;
 	s->kick = kick;
+	down_write(&slub_lock);
+	list_move(&s->list, &slab_caches);
+	if (s->objects > max_defrag_slab_objects)
+		max_defrag_slab_objects = s->objects;
+	up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_setup_defrag);
 
@@ -2721,7 +2736,7 @@ struct kmem_cache *kmem_cache_create(con
 	if (s) {
 		if (kmem_cache_open(s, GFP_KERNEL, name, size,
 				align, flags, ctor)) {
-			list_add(&s->list, &slab_caches);
+			list_add_tail(&s->list, &slab_caches);
 			up_write(&slub_lock);
 			if (sysfs_slab_add(s))
 				goto err;
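
Note: the sketch below is illustration only, not part of this patch. It shows
roughly how a later defrag pass could consume the ordering and the scratch
sizing introduced here: the walk over slab_caches stops at the first cache
without a kick() callback because defragmentable caches are now sorted to the
front, and the buffer from alloc_scratch() holds one object pointer per object
plus a bitmap of max_defrag_slab_objects bits. The function name
kmem_cache_defrag_all() is hypothetical.

/*
 * Sketch: defragment all caches that registered get()/kick() callbacks.
 * Assumes it lives in mm/slub.c next to the code above.
 */
static void kmem_cache_defrag_all(void)	/* hypothetical helper */
{
	struct kmem_cache *s;
	void *scratch = alloc_scratch();	/* refs array + bitmap */

	if (!scratch)
		return;

	down_read(&slub_lock);
	list_for_each_entry(s, &slab_caches, list) {
		if (!s->kick)
			break;	/* remaining caches are not defragmentable */
		/* ... reclaim partially filled slabs of s using scratch ... */
	}
	up_read(&slub_lock);
	kfree(scratch);
}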