From: Mel Gorman

Per-cpu pages can accidentally cause fragmentation because they are free
but pinned pages sitting in an otherwise contiguous block.  When this
patch is applied, the per-cpu caches are drained after direct reclaim is
entered if the requested order is greater than 0.  It simply reuses the
code used by suspend and hotplug.

Signed-off-by: Mel Gorman
Signed-off-by: Andrew Morton
---

 mm/page_alloc.c |   28 +++++++++++++++++++++++++++-
 1 file changed, 27 insertions(+), 1 deletion(-)

diff -puN mm/page_alloc.c~drain-per-cpu-lists-when-high-order-allocations-fail mm/page_alloc.c
--- a/mm/page_alloc.c~drain-per-cpu-lists-when-high-order-allocations-fail
+++ a/mm/page_alloc.c
@@ -902,7 +902,9 @@ void mark_free_pages(struct zone *zone)
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
+#endif /* CONFIG_PM */
 
+#if defined(CONFIG_PM) || defined(CONFIG_PAGE_GROUP_BY_MOBILITY)
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  */
@@ -914,7 +916,28 @@ void drain_local_pages(void)
 	__drain_pages(smp_processor_id());
 	local_irq_restore(flags);
 }
-#endif /* CONFIG_PM */
+
+void smp_drain_local_pages(void *arg)
+{
+	drain_local_pages();
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ */
+void drain_all_local_pages(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__drain_pages(smp_processor_id());
+	local_irq_restore(flags);
+
+	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
+}
+#else
+void drain_all_local_pages(void) {}
+#endif /* CONFIG_PM || CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Free a 0-order page
@@ -1483,6 +1506,9 @@ nofail_alloc:
 
 		cond_resched();
 
+		if (order != 0)
+			drain_all_local_pages();
+
 		if (likely(did_some_progress)) {
 			page = get_page_from_freelist(gfp_mask, order,
 						zonelist, alloc_flags);
_
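
For context, the __drain_pages() helper that both drain paths call sits
outside this diff.  Below is a minimal sketch of what it does in kernels
of this vintage; zone_pcp(), free_pages_bulk() and the pageset layout are
assumptions taken from the surrounding 2.6-era code base, not part of the
patch itself.

	/*
	 * Sketch of the helper the patch reuses (not part of the diff):
	 * walk every zone's per-cpu pageset for @cpu and hand the cached
	 * 0-order pages back to the buddy allocator.
	 */
	static void __drain_pages(unsigned int cpu)
	{
		unsigned long flags;
		struct zone *zone;
		int i;

		for_each_zone(zone) {
			struct per_cpu_pageset *pset = zone_pcp(zone, cpu);

			for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
				struct per_cpu_pages *pcp = &pset->pcp[i];

				local_irq_save(flags);
				/* return cached pages to the buddy lists */
				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
				pcp->count = 0;
				local_irq_restore(flags);
			}
		}
	}

In drain_all_local_pages(), the local CPU is drained directly and
smp_call_function() then runs smp_drain_local_pages() on every other
online CPU; the final argument (wait == 1) means the caller does not
return until the remote drains have completed.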