---
 include/linux/gfp.h |    4 ++
 mm/page_alloc.c     |   92 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 96 insertions(+)

Index: linux-2.6/include/linux/gfp.h
===================================================================
--- linux-2.6.orig/include/linux/gfp.h	2008-02-11 16:43:17.000000000 -0800
+++ linux-2.6/include/linux/gfp.h	2008-02-11 17:40:06.000000000 -0800
@@ -223,4 +223,8 @@ void drain_zone_pages(struct zone *zone,
 void drain_all_pages(void);
 void drain_local_pages(void *dummy);
 
+struct page *fast_alloc(gfp_t gfp_mask);
+void fast_free(struct page *page);
+void flush_fast_pages(void);
+
 #endif /* __LINUX_GFP_H */
Index: linux-2.6/mm/page_alloc.c
===================================================================
--- linux-2.6.orig/mm/page_alloc.c	2008-02-11 16:45:59.000000000 -0800
+++ linux-2.6/mm/page_alloc.c	2008-02-11 17:40:53.000000000 -0800
@@ -1465,6 +1465,7 @@ restart:
 	if (page)
 		goto got_pg;
 
+	flush_fast_pages();
 	/*
 	 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
 	 * __GFP_NOWARN set) should not cause reclaim since the subsystem
@@ -4530,3 +4531,94 @@ __offline_isolated_pages(unsigned long s
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
 #endif
+
+/*
+ * Per-cpu lists of free order-0 pages, one list per migrate type.
+ * Pages are chained through page->lru.next.  The lists act as a
+ * cpu-local (irqs-disabled) front end to the page allocator.
+ */
+struct fast_queue {
+	struct page *head[MIGRATE_TYPES];
+};
+
+static DEFINE_PER_CPU(struct fast_queue, fq);
+
+/* Return all pages queued on this cpu to the page allocator. */
+static void flush_fast_pages_cpu(void *dummy)
+{
+	int i;
+	struct page *page;
+
+	for (i = 0; i < MIGRATE_TYPES; i++) {
+		struct page **p = &__get_cpu_var(fq).head[i];
+
+		while (*p) {
+			page = *p;
+			*p = (struct page *)page->lru.next;
+			__free_page(page);
+		}
+	}
+}
+
+/* Drain the fast queues on all cpus; called when memory gets tight. */
+void flush_fast_pages(void)
+{
+	on_each_cpu(flush_fast_pages_cpu, NULL, 0, 1);
+}
+
+/*
+ * Allocate an order-0 page from the cpu-local fast queue.
+ * Falls back to alloc_page() for constrained allocations (DMA,
+ * highmem, cpuset spreading, memory policies) or when the local
+ * queue is empty.
+ */
+struct page *fast_alloc(gfp_t mask)
+{
+	unsigned long flags;
+	struct page **p;
+	struct page *page;
+
+	if (current->flags & (PF_SPREAD_PAGE | PF_SPREAD_SLAB | PF_MEMPOLICY))
+		goto slow;
+	if (mask & (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_HARDWALL|__GFP_THISNODE))
+		goto slow;
+
+	local_irq_save(flags);
+	p = &__get_cpu_var(fq).head[allocflags_to_migratetype(mask)];
+	page = *p;
+	if (!page) {
+		local_irq_restore(flags);
+		goto slow;
+	}
+
+	*p = (struct page *)page->lru.next;
+	local_irq_restore(flags);
+	if (mask & __GFP_ZERO)
+		memset(page_address(page), 0, PAGE_SIZE);
+	return page;
+
+slow:
+	return alloc_page(mask);
+}
+
+/*
+ * Free an order-0 page via the cpu-local fast queue.  Remote-node
+ * and highmem pages go straight back to the page allocator.
+ */
+void fast_free(struct page *page)
+{
+	unsigned long flags;
+	struct page **p;
+
+	if (page_to_nid(page) != numa_node_id() || PageHighMem(page)) {
+		/* Not eligible for the local queue: free directly and
+		 * do NOT fall through to queueing the freed page. */
+		__free_pages(page, 0);
+		return;
+	}
+
+	local_irq_save(flags);
+	p = &__get_cpu_var(fq).head[get_pageblock_migratetype(page)];
+	page->lru.next = (void *)*p;
+	*p = page;
+	local_irq_restore(flags);
+}