Index: linux-2.6.21-rc7-mm2/include/linux/mmzone.h
===================================================================
--- linux-2.6.21-rc7-mm2.orig/include/linux/mmzone.h	2007-04-26 13:08:58.000000000 -0700
+++ linux-2.6.21-rc7-mm2/include/linux/mmzone.h	2007-04-26 13:09:38.000000000 -0700
@@ -25,6 +25,8 @@
 #endif
 #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
 
+extern int page_group_by_mobility_disabled;
+
 /*
  * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
  * costly to service. That is between allocation orders which should
Index: linux-2.6.21-rc7-mm2/mm/slub.c
===================================================================
--- linux-2.6.21-rc7-mm2.orig/mm/slub.c	2007-04-26 13:09:42.000000000 -0700
+++ linux-2.6.21-rc7-mm2/mm/slub.c	2007-04-26 14:17:02.000000000 -0700
@@ -1925,7 +1925,7 @@ static int kmem_cache_close(struct kmem_
 	for_each_online_node(node) {
 		struct kmem_cache_node *n = get_node(s, node);
 
-		free_list(s, n, &n->partial);
+		n->nr_partial -= free_list(s, n, &n->partial);
 		if (atomic_long_read(&n->nr_slabs))
 			return 1;
 	}
@@ -2220,6 +2220,7 @@ int kmem_cache_shrink(struct kmem_cache
 		list_for_each_entry_safe(page, t, &n->partial, lru) {
 			if (!page->inuse) {
 				list_del(&page->lru);
+				n->nr_partial--;
 				discard_slab(s, page);
 			} else
 				if (n->nr_partial > MAX_PARTIAL)
@@ -2299,6 +2300,17 @@ void __init kmem_cache_init(void)
 {
 	int i;
 
+#if PAGE_SHIFT <= 12
+	/*
+	 * Small page size. Make sure that we do not fragment memory
+	 */
+	if (page_group_by_mobility_disabled) {
+		printk(KERN_WARNING "SLUB: No antifrag support. Restricting page orders used.\n");
+		slub_max_order = min(slub_max_order, 2);
+		slub_min_objects = min(slub_min_objects, 4);
+		slub_min_order = min(slub_min_order, slub_max_order);
+	}
+#endif
 #ifdef CONFIG_NUMA
 	/*
 	 * Must first have the slab cache available for the allocations of the
@@ -2337,9 +2349,10 @@ void __init kmem_cache_init(void)
 	kmem_size = offsetof(struct kmem_cache, cpu_slab) +
 				nr_cpu_ids * sizeof(struct page *);
 
-	printk(KERN_INFO "SLUB: General Slabs=%d, HW alignment=%d, "
-		"Processors=%d, Nodes=%d\n",
+	printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
+		" Processors=%d, Nodes=%d\n",
 		KMALLOC_SHIFT_HIGH, L1_CACHE_BYTES,
+		slub_min_order, slub_max_order, slub_min_objects,
 		nr_cpu_ids, nr_node_ids);
 }
 
@@ -2693,17 +2706,17 @@ static void validate_slab_slab(struct km
 	if (slab_trylock(page)) {
 		validate_slab(s, page);
 		slab_unlock(page);
 	} else
-		printk(KERN_INFO "SLUB %s: Skipped busy slab %p\n",
+		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
 			s->name, page);
 
 	if (s->flags & DEBUG_DEFAULT_FLAGS) {
 		if (!PageError(page))
 			printk(KERN_ERR "SLUB %s: PageError not set "
-				"on slab %p\n", s->name, page);
+				"on slab 0x%p\n", s->name, page);
 	} else {
 		if (PageError(page))
 			printk(KERN_ERR "SLUB %s: PageError set on "
-				"slab %p\n", s->name, page);
+				"slab 0x%p\n", s->name, page);
 	}
 }
Index: linux-2.6.21-rc7-mm2/mm/page_alloc.c
===================================================================
--- linux-2.6.21-rc7-mm2.orig/mm/page_alloc.c	2007-04-26 13:05:53.000000000 -0700
+++ linux-2.6.21-rc7-mm2/mm/page_alloc.c	2007-04-26 13:59:27.000000000 -0700
@@ -2256,7 +2256,8 @@ void __meminit build_all_zonelists(void)
 	if (vm_total_pages < (MAX_ORDER_NR_PAGES * MIGRATE_TYPES))
 		page_group_by_mobility_disabled = 1;
 	else
-		page_group_by_mobility_disabled = 0;
+		/* debug */
+		page_group_by_mobility_disabled = 1;
 
 	printk("Built %i zonelists, mobility grouping %s. Total pages: %ld\n",
 			num_online_nodes(),
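
For illustration only, here is a minimal user-space sketch of the bookkeeping rule the slub.c hunks above enforce: every path that adds a slab to or removes a slab from a node's partial list must keep n->nr_partial in sync, which is why kmem_cache_close() now subtracts the count returned by free_list() and kmem_cache_shrink() decrements the counter when it unlinks an empty slab. All names below (demo_node, demo_free_list, ...) are made up for the sketch and are not part of the patch or the kernel API.

/*
 * Illustrative user-space model (not kernel code) of the invariant:
 * nr_partial must always equal the number of entries on the partial list.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_slab {
	struct demo_slab *next;
	int inuse;			/* objects still allocated from this slab */
};

struct demo_node {
	struct demo_slab *partial;	/* singly linked partial list */
	long nr_partial;		/* must mirror the list length */
};

/* Like free_list(): release every entry and report how many were freed. */
static long demo_free_list(struct demo_node *n)
{
	long freed = 0;

	while (n->partial) {
		struct demo_slab *s = n->partial;

		n->partial = s->next;
		free(s);
		freed++;
	}
	return freed;
}

/* Like the kmem_cache_shrink() hunk: drop empty slabs, keeping the count. */
static void demo_shrink(struct demo_node *n)
{
	struct demo_slab **pp = &n->partial;

	while (*pp) {
		struct demo_slab *s = *pp;

		if (!s->inuse) {
			*pp = s->next;		/* unlink ... */
			n->nr_partial--;	/* ... and account for it */
			free(s);
		} else
			pp = &s->next;
	}
}

int main(void)
{
	struct demo_node node = { .partial = NULL, .nr_partial = 0 };

	for (int i = 0; i < 4; i++) {
		struct demo_slab *s = calloc(1, sizeof(*s));

		s->inuse = i & 1;	/* half empty, half still in use */
		s->next = node.partial;
		node.partial = s;
		node.nr_partial++;
	}

	demo_shrink(&node);
	printf("after shrink: nr_partial=%ld\n", node.nr_partial);	/* 2 */

	/* The kmem_cache_close() fix: subtract what free_list() released. */
	node.nr_partial -= demo_free_list(&node);
	printf("after close:  nr_partial=%ld\n", node.nr_partial);	/* 0 */
	return 0;
}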