Slab defrag: Sort slab list and establish maximum objects for defrag slabs

When defragmenting slabs it is advantageous to have all defragmentable
slab caches together at the front of the slab cache list. Tracking the
maximum number of objects found in any defragmentable slab also allows
us to size the array holding references to these objects when it is
allocated later.

Signed-off-by: Christoph Lameter

---
 mm/slub.c |   20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

Index: linux-2.6.23-rc1/mm/slub.c
===================================================================
--- linux-2.6.23-rc1.orig/mm/slub.c	2007-07-29 00:33:28.000000000 -0700
+++ linux-2.6.23-rc1/mm/slub.c	2007-07-29 00:33:33.000000000 -0700
@@ -235,6 +235,9 @@ static enum {
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
 
+/* Maximum objects in defragmentable slabs */
+static unsigned int max_defrag_slab_objects = 0;
+
 /*
  * Tracking user of a slab.
  */
@@ -2270,7 +2273,7 @@ static struct kmem_cache *create_kmalloc
 			flags, &slub_default_ops))
 		goto panic;
 
-	list_add(&s->list, &slab_caches);
+	list_add_tail(&s->list, &slab_caches);
 	up_write(&slub_lock);
 	if (sysfs_slab_add(s))
 		goto panic;
@@ -2712,7 +2715,18 @@ struct kmem_cache *kmem_cache_create(con
 	if (kmem_cache_open(s, GFP_KERNEL, name,
 			size, align, flags, ops)) {
-		list_add(&s->list, &slab_caches);
-		up_write(&slub_lock);
+
+		/*
+		 * Reclaimable slabs first because we may have
+		 * to scan them repeatedly.
+		 */
+		if (ops->kick) {
+			list_add(&s->list, &slab_caches);
+			if (s->objects > max_defrag_slab_objects)
+				max_defrag_slab_objects = s->objects;
+		} else
+			list_add_tail(&s->list, &slab_caches);
+
+		up_write(&slub_lock);
 		if (sysfs_slab_add(s))
 			goto err;
 		return s;
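
A note on how the sorted list is meant to be used (a minimal sketch, not
part of this patch; kmem_cache_defrag() and the s->ops->kick test are
assumptions about later patches in this series): because all
defragmentable caches now sit at the head of slab_caches, a defrag pass
can stop at the first cache without a kick() method instead of walking
the whole list.

	/* Sketch: scan only the defragmentable head of slab_caches. */
	static void defrag_reclaimable_caches(void)
	{
		struct kmem_cache *s;

		down_read(&slub_lock);
		list_for_each_entry(s, &slab_caches, list) {
			/* Sorted order: first cache without kick() ends the scan. */
			if (!s->ops->kick)
				break;
			kmem_cache_defrag(s);	/* hypothetical per-cache pass */
		}
		up_read(&slub_lock);
	}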
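
Similarly, max_defrag_slab_objects is tracked so that a later pass can
allocate a single scratch vector large enough to hold a reference to
every object in any defragmentable slab. A hedged sketch, assuming one
pointer per object as described in the changelog above (the helper name
and allocation site are illustrative only):

	/* Sketch: one allocation covers the largest defragmentable slab. */
	static void **alloc_defrag_vector(gfp_t flags)
	{
		return kmalloc(max_defrag_slab_objects * sizeof(void *), flags);
	}

A caller would pass GFP_KERNEL and kfree() the vector after handing it
to the cache's defrag callbacks.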