SLUB: Manual slab order specification

Add a new slab flag, SLAB_FORCE_ORDER(n). If it is set, the slab order
that must be used is taken from the lowest nibble of the cache's flags.
This can, for example, be used to force a slab to use order-0
allocations. That is useful when allocations from a slab are done in
atomic context and we want to avoid the potential complications that
the use of higher-order pages may cause. Use the option for the radix
tree node cache.

Signed-off-by: Christoph Lameter

---
 include/linux/slab.h |    4 ++++
 lib/radix-tree.c     |    2 +-
 mm/slab.c            |    6 ++++--
 mm/slub.c            |   10 +++++++---
 4 files changed, 16 insertions(+), 6 deletions(-)

Index: linux-2.6.23-rc8-mm2/include/linux/slab.h
===================================================================
--- linux-2.6.23-rc8-mm2.orig/include/linux/slab.h	2007-10-02 17:19:41.000000000 -0700
+++ linux-2.6.23-rc8-mm2/include/linux/slab.h	2007-10-03 11:51:46.000000000 -0700
@@ -18,7 +18,9 @@
  * Flags to pass to kmem_cache_create().
  * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
  */
+#define SLAB_ORDER_MASK		0x0000000fUL
 #define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
+#define __SLAB_FORCE_ORDER	0x00000200UL	/* Set order of slabs (in lower 4 bits) */
 #define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
@@ -29,6 +31,8 @@
 #define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
 #define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */
 
+#define SLAB_FORCE_ORDER(n)	(__SLAB_FORCE_ORDER | ((n) & SLAB_ORDER_MASK))
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
 #define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
Index: linux-2.6.23-rc8-mm2/lib/radix-tree.c
===================================================================
--- linux-2.6.23-rc8-mm2.orig/lib/radix-tree.c	2007-10-02 17:19:41.000000000 -0700
+++ linux-2.6.23-rc8-mm2/lib/radix-tree.c	2007-10-03 11:48:23.000000000 -0700
@@ -1091,7 +1091,7 @@ void __init radix_tree_init(void)
 {
 	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
 			sizeof(struct radix_tree_node), 0,
-			SLAB_PANIC, radix_tree_node_ctor);
+			SLAB_PANIC | SLAB_FORCE_ORDER(0), radix_tree_node_ctor);
 	radix_tree_init_maxindex();
 	hotcpu_notifier(radix_tree_callback, 0);
 }
Index: linux-2.6.23-rc8-mm2/mm/slub.c
===================================================================
--- linux-2.6.23-rc8-mm2.orig/mm/slub.c	2007-10-02 17:19:41.000000000 -0700
+++ linux-2.6.23-rc8-mm2/mm/slub.c	2007-10-03 11:52:08.000000000 -0700
@@ -2182,9 +2182,13 @@ static int calculate_sizes(struct kmem_c
 	size = ALIGN(size, align);
 	s->size = size;
 
-	s->order = calculate_order(size);
-	if (s->order < 0)
-		return 0;
+	if (s->flags & __SLAB_FORCE_ORDER)
+		s->order = s->flags & SLAB_ORDER_MASK;
+	else {
+		s->order = calculate_order(size);
+		if (s->order < 0)
+			return 0;
+	}
 
 	/*
 	 * Determine the number of objects per slab
Index: linux-2.6.23-rc8-mm2/mm/slab.c
===================================================================
--- linux-2.6.23-rc8-mm2.orig/mm/slab.c	2007-10-02 17:19:41.000000000 -0700
+++ linux-2.6.23-rc8-mm2/mm/slab.c	2007-10-02 17:20:03.000000000 -0700
@@ -178,12 +178,14 @@
 			 SLAB_CACHE_DMA | \
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+			 __SLAB_FORCE_ORDER | SLAB_ORDER_MASK)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+			 __SLAB_FORCE_ORDER | SLAB_ORDER_MASK)
 #endif
 
 /*
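
For illustration, here is a minimal, hypothetical user of the new flag.
The struct, cache name and init function below are invented for this
sketch; only kmem_cache_create() and the SLAB_FORCE_ORDER() flag come
from this patch:

#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical object allocated with GFP_ATOMIC, e.g. from IRQ context. */
struct foo {
	unsigned long state;
	struct list_head list;
};

static struct kmem_cache *foo_cachep;

void __init foo_cache_init(void)
{
	/*
	 * SLAB_FORCE_ORDER(0) sets __SLAB_FORCE_ORDER and encodes the
	 * order in the low nibble of the flags. calculate_sizes() then
	 * skips calculate_order(), so every slab of this cache is backed
	 * by a single order-0 page and atomic allocations never need
	 * contiguous higher-order pages.
	 */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
			0, SLAB_PANIC | SLAB_FORCE_ORDER(0), NULL);
}

Because the order is taken from the low nibble, any order from 0 to 15
can be forced. The encoding relies on the flag bits below 0x100 being
otherwise unused, so a forced order does not collide with the existing
SLAB_* flag values.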