Index: linux-2.6.16-rc1-mm4/mm/page_alloc.c
===================================================================
--- linux-2.6.16-rc1-mm4.orig/mm/page_alloc.c	2006-01-30 17:37:24.000000000 -0800
+++ linux-2.6.16-rc1-mm4/mm/page_alloc.c	2006-01-31 11:12:31.000000000 -0800
@@ -635,20 +635,29 @@ out:
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
  */
-static struct page *__rmqueue(struct zone *zone, unsigned int order, int last)
+static struct page *__rmqueue(struct zone *zone, unsigned int order, int zero)
 {
 	struct free_area * area;
 	unsigned int current_order;
 	struct page *page;
 
+again:
 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
 		area = zone->free_area + current_order;
 		if (list_empty(&area->free_list))
 			continue;
 
 		page = list_entry(
-			last ? area->free_list.prev : area->free_list.next,
+			zero ? area->free_list.prev : area->free_list.next,
 			struct page, lru);
+
+		/*
+		 * Break up higher zeroed allocations rather than
+		 * zeroing inline
+		 */
+		if (zero && !PageZeroed(page))
+			continue;
+
 		list_del(&page->lru);
 		rmv_page_order(page);
 		area->nr_free--;
@@ -658,6 +667,12 @@ static struct page *__rmqueue(struct zon
 		expand(zone, page, order, current_order, area);
 		return page;
 	}
+	/*
+	 * No zeroed page. Try an unzeroed one */
+	if (zero) {
+		zero = 0;
+		goto again;
+	}
 
 	return NULL;
 }
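
For reference, here is a minimal user-space sketch (my own illustration, not part of the patch) of the lookup order the hunks above implement: scan the free lists for a page already marked zeroed, preferring to break up a higher-order zeroed block over zeroing inline, and fall back to any page only when no zeroed one exists. The names (free_area, the zeroed flag, rmqueue_sketch) are simplified stand-ins; locking, buddy splitting and the head/tail placement of zeroed pages on the real free lists are omitted.

#include <stdbool.h>
#include <stddef.h>

#define MAX_ORDER 11

/* Simplified stand-ins for the kernel's struct page and struct free_area. */
struct page {
	struct page *next;
	bool zeroed;			/* stand-in for PageZeroed() */
};

struct free_area {
	struct page *free_list;		/* singly linked per-order free list */
};

static struct free_area free_area[MAX_ORDER];

/* Sketch of __rmqueue()'s lookup order: zeroed pages first, then any page. */
static struct page *rmqueue_sketch(unsigned int order, bool zero)
{
again:
	for (unsigned int o = order; o < MAX_ORDER; o++) {
		struct page *page = free_area[o].free_list;

		if (!page)
			continue;
		/*
		 * A zeroed page was requested but this order's candidate is
		 * not zeroed: keep looking at higher orders, i.e. break up a
		 * larger zeroed block rather than zeroing inline.
		 */
		if (zero && !page->zeroed)
			continue;
		free_area[o].free_list = page->next;
		return page;		/* splitting down to 'order' omitted */
	}
	/* No zeroed page at any order: retry, accepting an unzeroed one. */
	if (zero) {
		zero = false;
		goto again;
	}
	return NULL;
}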