Restrict special slab casing to arches using page->index. Mark the arches using page->index of slab pages. We can then switch off the special casing for all other arches. Signed-off-by: Christoph Lameter Index: linux-2.6.21-rc5-mm2/arch/frv/Kconfig =================================================================== --- linux-2.6.21-rc5-mm2.orig/arch/frv/Kconfig 2007-03-30 08:47:01.000000000 -0700 +++ linux-2.6.21-rc5-mm2/arch/frv/Kconfig 2007-03-30 09:07:51.000000000 -0700 @@ -53,6 +53,10 @@ config ARCH_HAS_ILOG2_U64 bool default y +config ARCH_USES_SLAB_PAGE_INDEX + bool + default y + mainmenu "Fujitsu FR-V Kernel Configuration" source "init/Kconfig" Index: linux-2.6.21-rc5-mm2/arch/i386/Kconfig =================================================================== --- linux-2.6.21-rc5-mm2.orig/arch/i386/Kconfig 2007-03-30 08:47:01.000000000 -0700 +++ linux-2.6.21-rc5-mm2/arch/i386/Kconfig 2007-03-30 09:07:51.000000000 -0700 @@ -79,6 +79,10 @@ config ARCH_MAY_HAVE_PC_FDC bool default y +config ARCH_USES_SLAB_PAGE_INDEX + bool + default y + config DMI bool default y Index: linux-2.6.21-rc5-mm2/mm/slub.c =================================================================== --- linux-2.6.21-rc5-mm2.orig/mm/slub.c 2007-03-30 09:07:44.000000000 -0700 +++ linux-2.6.21-rc5-mm2/mm/slub.c 2007-03-30 09:07:51.000000000 -0700 @@ -1363,6 +1363,7 @@ static int calculate_order(int size) int order; int rem; +#ifdef CONFIG_ARCH_USES_SLAB_PAGE_INDEX /* * If this is an order 0 page then there are no issues with * fragmentation. We can then create a slab with a single object. 
@@ -1372,6 +1373,7 @@ static int calculate_order(int size) */ if (size == PAGE_SIZE) return 0; +#endif for (order = max(slub_min_order, fls(size - 1) - PAGE_SHIFT); order < MAX_ORDER; order++) { @@ -1503,6 +1505,7 @@ int calculate_sizes(struct kmem_cache *s tentative_size = ALIGN(size, calculate_alignment(align, flags)); +#ifdef CONFIG_ARCH_USES_SLAB_PAGE_INDEX /* * PAGE_SIZEd slabs are special because some arches use them for * page table pages. Do not do any debugging in order to avoid @@ -1511,6 +1514,7 @@ int calculate_sizes(struct kmem_cache *s if (size == PAGE_SIZE) flags &= ~(SLAB_RED_ZONE| SLAB_DEBUG_FREE | \ SLAB_STORE_USER | SLAB_POISON | __OBJECT_POISON); +#endif size = ALIGN(size, sizeof(void *));