SLUB defrag: Only scan the defraggable caches on kmem_cache_defrag.

Make sure that the defragmentable slab caches are at the top of the
list of slab caches. We can then stop scanning as soon as we find a
non-defragmentable slab cache on the list.

Signed-off-by: Christoph Lameter

---
 mm/slub.c |   20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

Index: linux-2.6.22-rc6-mm1/mm/slub.c
===================================================================
--- linux-2.6.22-rc6-mm1.orig/mm/slub.c	2007-07-04 11:06:25.000000000 -0700
+++ linux-2.6.22-rc6-mm1/mm/slub.c	2007-07-04 11:07:26.000000000 -0700
@@ -2280,7 +2280,7 @@ static struct kmem_cache *create_kmalloc
 			flags, NULL, &slub_default_ops))
 		goto panic;
 
-	list_add(&s->list, &slab_caches);
+	list_add_tail(&s->list, &slab_caches);
 	up_write(&slub_lock);
 	if (sysfs_slab_add(s))
 		goto panic;
@@ -2737,10 +2737,11 @@ int kmem_cache_defrag(int node)
 	list_for_each_entry(s, &slab_caches, list) {
 		/*
-		 * The slab cache must have defrag methods.
+		 * The list of slab caches is sorted so that we only
+		 * have to scan the caches capable of defragmentation.
 		 */
-		if (!s->ops || !s->ops->kick)
-			continue;
+		if (!s->ops->kick)
+			break;
 
 		scratch = kmalloc(sizeof(struct list_head) * s->objects,
 								GFP_KERNEL);
@@ -2946,7 +2947,16 @@ struct kmem_cache *kmem_cache_create(con
 	if (s) {
 		if (kmem_cache_open(s, GFP_KERNEL, name,
 				size, align, flags, ctor, ops)) {
-			list_add(&s->list, &slab_caches);
+
+			/*
+			 * Reclaimable slabs first because we may have
+			 * to scan them repeatedly.
+			 */
+			if (ops->kick)
+				list_add(&s->list, &slab_caches);
+			else
+				list_add_tail(&s->list, &slab_caches);
+
 			up_write(&slub_lock);
 			raise_kswapd_order(s->order);
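
A minimal userspace sketch of the invariant this patch establishes may
help: caches whose ops provide a kick method are inserted at the head
of the cache list and everything else at the tail, so the defrag pass
can stop at the first cache without a kick method instead of walking
the whole list. The names below (struct cache, has_kick,
register_cache, the example cache names) are illustrative stand-ins
for struct kmem_cache, s->ops->kick and real caches, not kernel code.

#include <stdio.h>
#include <stdlib.h>

struct cache {
	const char *name;
	int has_kick;			/* stands in for s->ops->kick != NULL */
	struct cache *next;
};

static struct cache *head;		/* stands in for slab_caches */

/* list_add() analogue: insert at the head, used for defraggable caches. */
static void add_head(struct cache *c)
{
	c->next = head;
	head = c;
}

/* list_add_tail() analogue: insert at the tail, used for all others. */
static void add_tail(struct cache *c)
{
	struct cache **p = &head;

	while (*p)
		p = &(*p)->next;
	c->next = NULL;
	*p = c;
}

static void register_cache(const char *name, int has_kick)
{
	struct cache *c = malloc(sizeof(*c));

	c->name = name;
	c->has_kick = has_kick;
	/* Defraggable caches go first so the defrag scan can stop early. */
	if (has_kick)
		add_head(c);
	else
		add_tail(c);
}

int main(void)
{
	struct cache *c;

	register_cache("kmalloc-64", 0);	/* like create_kmalloc_cache() */
	register_cache("dentry", 1);
	register_cache("inode_cache", 1);
	register_cache("task_struct", 0);

	/* The defrag pass: break at the first non-defraggable cache. */
	for (c = head; c; c = c->next) {
		if (!c->has_kick)
			break;
		printf("defragmenting %s\n", c->name);
	}
	return 0;
}

Head insertion keeps the partition invariant without ever sorting:
every insert is O(1) in the kernel (the toy tail walk above is O(n),
but list_add_tail on the doubly linked list_head is constant time),
and repeated defrag scans only ever touch the defraggable prefix of
the list.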