---
 include/linux/mmzone.h |    4 +++-
 mm/page_alloc.c        |   35 +++++++++++++++++++----------------
 2 files changed, 22 insertions(+), 17 deletions(-)

Index: linux-2.6.24-rc6-mm1/include/linux/mmzone.h
===================================================================
--- linux-2.6.24-rc6-mm1.orig/include/linux/mmzone.h	2008-01-10 20:18:48.133858283 -0800
+++ linux-2.6.24-rc6-mm1/include/linux/mmzone.h	2008-01-10 20:20:05.577985011 -0800
@@ -112,8 +112,10 @@ struct per_cpu_pages {
 	struct list_head list;	/* the list of pages */
 };
 
+#define PCP_CACHED_ORDERS 3	/* Orders cached by PCPs */
+
 struct per_cpu_pageset {
-	struct per_cpu_pages pcp;
+	struct per_cpu_pages pcp[PCP_CACHED_ORDERS];
 #ifdef CONFIG_NUMA
 	s8 expire;
 #endif
Index: linux-2.6.24-rc6-mm1/mm/page_alloc.c
===================================================================
--- linux-2.6.24-rc6-mm1.orig/mm/page_alloc.c	2008-01-10 20:20:22.070013506 -0800
+++ linux-2.6.24-rc6-mm1/mm/page_alloc.c	2008-01-10 20:29:10.923136674 -0800
@@ -930,11 +930,14 @@ static void drain_pages(unsigned int cpu
 
 		pset = zone_pcp(zone, cpu);
 
-		pcp = &pset->pcp;
-		local_irq_save(flags);
-		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
-		pcp->count = 0;
-		local_irq_restore(flags);
+		for (i = 0; i < PCP_CACHED_ORDERS; i++) {
+			pcp = &pset->pcp[i];
+			local_irq_save(flags);
+			free_pages_bulk(zone, pcp->count, &pcp->list,
+							cached_order[i]);
+			pcp->count = 0;
+			local_irq_restore(flags);
+		}
 	}
 }
 
@@ -993,7 +996,7 @@ void mark_free_pages(struct zone *zone)
 /*
- * Free a 0-order page
+ * Free a page of an order cached in the per-cpu pagesets
  */
-void free_a_page(struct page *page)
+void free_a_page(struct page *page, int order)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
@@ -1010,7 +1013,7 @@ void free_a_page(struct page *page)
-	arch_free_page(page, 0);
-	kernel_map_pages(page, 1, 0);
+	arch_free_page(page, order);
+	kernel_map_pages(page, 1 << order, 0);
 
-	pcp = &zone_pcp(zone, get_cpu())->pcp;
+	pcp = &zone_pcp(zone, get_cpu())->pcp[CACHED_ORDER_INDEX(order)];
 	local_irq_save(flags);
-	__count_vm_event(PGFREE);
+	__count_vm_events(PGFREE, 1 << order);
 	list_add(&page->lru, &pcp->list);
@@ -1057,10 +1060,10 @@ static struct page *buffered_rmqueue(str
 
 again:
 	cpu = get_cpu();
-	if (likely(order == 0)) {
+	if (likely(IS_CACHED_ORDER(order))) {
 		struct per_cpu_pages *pcp;
 
-		pcp = &zone_pcp(zone, cpu)->pcp;
+		pcp = &zone_pcp(zone, cpu)->pcp[CACHED_ORDER_INDEX(order)];
 		local_irq_save(flags);
 		if (!pcp->count) {
-			pcp->count = rmqueue_bulk(zone, 0,
+			pcp->count = rmqueue_bulk(zone, order,
@@ -1741,8 +1744,8 @@ void __pagevec_free(struct pagevec *pvec
 void __free_pages(struct page *page, unsigned int order)
 {
 	if (put_page_testzero(page)) {
-		if (order == 0)
-			free_a_page(page);
+		if (IS_CACHED_ORDER(order))
+			free_a_page(page, order);
 		else
 			__free_pages_ok(page, order);
 	}
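
The hunks above reference a cached_order[] array, an IS_CACHED_ORDER() predicate, a CACHED_ORDER_INDEX() mapping and a loop index i in drain_pages(), none of which are defined in this excerpt; presumably they come from elsewhere in the series. As a rough sketch only, assuming the simplest scheme where pcp[i] caches order i, definitions consistent with their use here could look like:

	/*
	 * Hypothetical helpers, NOT part of the patch above, shown only to
	 * make the hunks self-contained.  PCP_CACHED_ORDERS (3) comes from
	 * the mmzone.h hunk; the identity mapping below is an assumption.
	 */

	/* The buddy order held on per-cpu list i; here simply the index. */
	static const int cached_order[PCP_CACHED_ORDERS] = { 0, 1, 2 };

	/* Is this allocation order served from the per-cpu lists? */
	#define IS_CACHED_ORDER(order)	((order) < PCP_CACHED_ORDERS)

	/* Index into per_cpu_pageset.pcp[] for a cached order. */
	#define CACHED_ORDER_INDEX(order)	(order)

With this mapping, buffered_rmqueue() serves order-0 through order-2 allocations from the per-cpu lists and falls through to the buddy allocator for order >= 3. A non-identity cached_order[] (say, caching only order 0 and the huge page order) would work the same way, provided CACHED_ORDER_INDEX() inverts whatever mapping cached_order[] encodes.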