From: Christoph Lameter

If there is page mobility then we can defragment memory, so it is possible to
use higher order pages for slab allocations.  If the defaults were not
overridden, set the max order to 4 and guarantee 16 objects per slab.  This
will put some stress on Mel's antifrag approaches.  If these defaults are too
large then they should be reduced later.

Cc: Mel Gorman
Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
---

 include/linux/mmzone.h |    2 ++
 mm/slub.c              |   27 ++++++++++++++++++++++++---
 2 files changed, 26 insertions(+), 3 deletions(-)

diff -puN include/linux/mmzone.h~slub-exploit-page-mobility-to-increase-allocation-order include/linux/mmzone.h
--- a/include/linux/mmzone.h~slub-exploit-page-mobility-to-increase-allocation-order
+++ a/include/linux/mmzone.h
@@ -25,6 +25,8 @@
 #endif
 #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
 
+extern int page_group_by_mobility_disabled;
+
 /*
  * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
  * costly to service. That is between allocation orders which should
diff -puN mm/slub.c~slub-exploit-page-mobility-to-increase-allocation-order mm/slub.c
--- a/mm/slub.c~slub-exploit-page-mobility-to-increase-allocation-order
+++ a/mm/slub.c
@@ -168,6 +168,13 @@ static inline void ClearSlabDebug(struct
 #endif
 
 /*
+ * If antifragmentation methods are in effect then increase the
+ * slab sizes to increase performance
+ */
+#define DEFAULT_ANTIFRAG_MAX_ORDER 4
+#define DEFAULT_ANTIFRAG_MIN_OBJECTS 16
+
+/*
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
  */
@@ -1691,6 +1698,11 @@ static struct page *get_object_page(cons
  */
 
 /*
+ * Set if the user has overridden any of the order related defaults.
+ */
+static int user_override;
+
+/*
  * Mininum / Maximum order of slab pages. This influences locking overhead
  * and slab fragmentation. A higher order reduces the number of partial slabs
  * and increases the number of allocations possible without having to
@@ -2333,7 +2345,7 @@ static struct kmem_cache *kmalloc_caches
 static int __init setup_slub_min_order(char *str)
 {
 	get_option (&str, &slub_min_order);
-
+	user_override = 1;
 	return 1;
 }
 
@@ -2342,7 +2354,7 @@ __setup("slub_min_order=", setup_slub_mi
 static int __init setup_slub_max_order(char *str)
 {
 	get_option (&str, &slub_max_order);
-
+	user_override = 1;
 	return 1;
 }
 
@@ -2351,7 +2363,7 @@ __setup("slub_max_order=", setup_slub_ma
 static int __init setup_slub_min_objects(char *str)
 {
 	get_option (&str, &slub_min_objects);
-
+	user_override = 1;
 	return 1;
 }
 
@@ -2677,6 +2689,15 @@ void __init kmem_cache_init(void)
 
 	init_alloc_cpu();
 
+	if (!page_group_by_mobility_disabled && !user_override) {
+		/*
+		 * Antifrag support available. Increase usable
+		 * page order and generate slabs with more objects.
+		 */
+		slub_max_order = DEFAULT_ANTIFRAG_MAX_ORDER;
+		slub_min_objects = DEFAULT_ANTIFRAG_MIN_OBJECTS;
+	}
+
 #ifdef CONFIG_NUMA
 	/*
 	 * Must first have the slab cache available for the allocations of the
_
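
As a rough illustration of how the two defaults interact (not kernel code; the
helper name and the object size below are made up for this sketch):
slub_min_objects asks for at least that many objects in each slab, and
slub_max_order caps the page order used to satisfy it, so raising the defaults
to 16 objects / order 4 pushes medium-sized caches onto larger pages.

/*
 * Minimal user-space sketch (not the kernel's calculate_order()):
 * find the smallest page order whose slab holds at least min_objects
 * objects, capped at max_order.  Values are illustrative only.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned int sketch_slab_order(unsigned long object_size,
				      unsigned int min_objects,
				      unsigned int max_order)
{
	unsigned int order;

	for (order = 0; order <= max_order; order++) {
		unsigned long slab_size = PAGE_SIZE << order;

		if (slab_size / object_size >= min_objects)
			return order;
	}
	/* Object too large to reach min_objects: settle for the cap. */
	return max_order;
}

int main(void)
{
	/* e.g. a 512-byte object: an 8k (order 1) slab holds 16 objects */
	printf("order for 512-byte objects: %u\n",
	       sketch_slab_order(512, 16, 4));
	return 0;
}

Compiled with any C compiler, this prints order 1 for a 512-byte object,
i.e. the 16-object minimum is what pulls such caches above order 0.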