---
 mm/internal.h       |   10 -------
 mm/page_alloc.c     |   70 ++++++++++++++++++++++++++++++++++------------------
 mm/page_isolation.c |    2 -
 3 files changed, 47 insertions(+), 35 deletions(-)

Index: linux-2.6/mm/page_alloc.c
===================================================================
--- linux-2.6.orig/mm/page_alloc.c	2007-11-14 20:24:34.000000000 -0800
+++ linux-2.6/mm/page_alloc.c	2007-11-14 20:53:49.000000000 -0800
@@ -270,8 +270,17 @@ static void prep_compound_page(struct pa
        int nr_pages = 1 << order;

        set_compound_page_dtor(page, free_compound_page);
-       set_compound_order(page, order);
-       __SetPageHead(page);
+
+       /*
+        * The buddy allocator prepares the head page for us.
+        * So there is nothing to do.
+        */
+
+       /* Already set up? (The buddy allocator preserves these flags) */
+       if (PageTail(page + 1))
+               return;
+
+       /* Set up the tail pages */
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

@@ -280,8 +289,13 @@ static void prep_compound_pa
        }
 }

+/*
+ * Compound pages can directly enter the buddy allocator.
+ * The primary use of this function is to check for errors.
+ */
 static void destroy_compound_page(struct page *page, unsigned long order)
 {
+#ifdef CONFIG_DEBUG_VM
        int i;
        int nr_pages = 1 << order;

@@ -290,15 +304,14 @@ static void destroy_compound_page(struct

        if (unlikely(!PageHead(page)))
                bad_page(page);
-       __ClearPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                if (unlikely(!PageTail(p) |
                                (p->first_page != page)))
                        bad_page(page);
-               __ClearPageTail(p);
        }
+#endif
 }

 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
@@ -315,18 +328,29 @@ static inline void prep_zero_page(struct
                clear_highpage(page + i);
 }

-static inline void set_page_order(struct page *page, int order)
+/*
+ * Set up a page to fit into the buddy allocator. The buddy allocator uses
+ * the compound page conventions for higher order pages.
+ */
+static inline void set_buddy(struct page *page, int order)
 {
-       set_page_private(page, order);
+       if (compound_order(page) != order) {
+               /* Order mismatch. Fix up the header */
+               if (order) {
+                       __SetPageHead(page);
+                       /*
+                        * Clearing the tail causes a reinitialization
+                        * on allocation.
+                        */
+                       __ClearPageTail(page + 1);
+                       set_compound_order(page, order);
+               } else
+                       /* Order 0 pages have PageHead cleared */
+                       __ClearPageHead(page);
+       }
        __SetPageBuddy(page);
 }

-static inline void rmv_page_order(struct page *page)
-{
-       __ClearPageBuddy(page);
-       set_page_private(page, 0);
-}
-
 /*
  * Locate the struct page for both the matching buddy in our
  * pair (buddy1) and the combined O(n+1) page they form (page).
@@ -380,7 +404,7 @@ static inline int page_is_buddy(struct p
        if (page_zone_id(page) != page_zone_id(buddy))
                return 0;

-       if (PageBuddy(buddy) && page_order(buddy) == order) {
+       if (PageBuddy(buddy) && compound_order(buddy) == order) {
                BUG_ON(page_count(buddy) != 0);
                return 1;
        }
@@ -437,13 +461,13 @@ static inline void __free_one_page(struc

                list_del(&buddy->lru);
                zone->free_area[order].nr_free--;
-               rmv_page_order(buddy);
+               __ClearPageBuddy(buddy);
                combined_idx = __find_combined_index(page_idx, order);
                page = page + (combined_idx - page_idx);
                page_idx = combined_idx;
                order++;
        }
-       set_page_order(page, order);
+       set_buddy(page, order);
        list_add(&page->lru,
                &zone->free_area[order].free_list[migratetype]);
        zone->free_area[order].nr_free++;
@@ -591,7 +615,7 @@ static inline void expand(struct zone *z
                VM_BUG_ON(bad_range(zone, &page[size]));
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
-               set_page_order(&page[size], high);
+               set_buddy(&page[size], high);
        }
 }

@@ -661,7 +685,7 @@ static struct page *__rmqueue_smallest(s
                page = list_entry(area->free_list[migratetype].next,
                                                        struct page, lru);
                list_del(&page->lru);
-               rmv_page_order(page);
+               __ClearPageBuddy(page);
                area->nr_free--;
                __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
                expand(zone, page, order, current_order, area, migratetype);
@@ -708,16 +732,14 @@ int move_freepages(struct zone *zone,
 #endif

        for (page = start_page; page <= end_page; page += (1 << order)) {
+               order = compound_order(page);
                if (pfn_valid_within(page_to_pfn(page)) &&
                        PageBuddy(page)) {
-                       order = page_order(page);
                        list_move(&page->lru,
                                &zone->free_area[order].free_list[migratetype]);
                        pages_moved += 1 << order;
-
-               } else
-                       order = compound_order(page);
+               }
        }

        return pages_moved;
@@ -792,7 +814,7 @@ static struct page *__rmqueue_fallback(s

                        /* Remove the page from the freelists */
                        list_del(&page->lru);
-                       rmv_page_order(page);
+                       __ClearPageBuddy(page);
                        __mod_zone_page_state(zone, NR_FREE_PAGES,
                                                        -(1UL << order));

@@ -4492,13 +4514,13 @@ __offline_isolated_pages(unsigned long s
                page = pfn_to_page(pfn);
                BUG_ON(page_count(page));
                BUG_ON(!PageBuddy(page));
-               order = page_order(page);
+               order = compound_order(page);
 #ifdef CONFIG_DEBUG_VM
                printk(KERN_INFO "remove from free list %lx %d %lx\n",
                       pfn, 1 << order, end_pfn);
 #endif
                list_del(&page->lru);
-               rmv_page_order(page);
+               __ClearPageBuddy(page);
                zone->free_area[order].nr_free--;
                __mod_zone_page_state(zone, NR_FREE_PAGES,
                                      - (1UL << order));
Index: linux-2.6/mm/internal.h
===================================================================
--- linux-2.6.orig/mm/internal.h	2007-11-14 20:23:55.000000000 -0800
+++ linux-2.6/mm/internal.h	2007-11-14 20:52:25.000000000 -0800
@@ -37,14 +37,4 @@ static inline void __put_page(struct pag
 extern void fastcall __init __free_pages_bootmem(struct page *page,
                                                unsigned int order);

-/*
- * function for dealing with page's order in buddy system.
- * zone->lock is already acquired when we use these.
- * So, we don't need atomic page->flags operations here.
- */
-static inline unsigned long page_order(struct page *page)
-{
-       VM_BUG_ON(!PageBuddy(page));
-       return page_private(page);
-}
 #endif
Index: linux-2.6/mm/page_isolation.c
===================================================================
--- linux-2.6.orig/mm/page_isolation.c	2007-11-14 20:23:55.000000000 -0800
+++ linux-2.6/mm/page_isolation.c	2007-11-14 20:52:25.000000000 -0800
@@ -101,7 +101,7 @@ __test_page_isolated_in_pageblock(unsign
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
-                       pfn += 1 << page_order(page);
+                       pfn += compound_pages(page);
                else if (page_count(page) == 0 &&
                                page_private(page) == MIGRATE_ISOLATE)
                        pfn += 1;
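
The core of the change above is that the buddy allocator stops keeping a free block's order in page->private (the removed page_order()/set_page_order()/rmv_page_order() helpers) and instead reuses the compound-page metadata, so the same compound_order() lookup answers for compound allocations and for free buddy blocks. The following is a minimal userspace sketch of that idea only; the struct layout, flag bits and helper bodies are hypothetical simplifications, not the kernel's definitions.

/* buddy_order_model.c - a minimal sketch, NOT kernel code.
 *
 * Models the patch's idea: a free block's order is no longer stored in
 * page->private; the head flag plus a compound order field are reused,
 * so one compound_order() lookup serves compound pages and free buddies.
 */
#include <assert.h>
#include <stdio.h>

#define PG_head  (1u << 0)
#define PG_tail  (1u << 1)
#define PG_buddy (1u << 2)

struct page {
	unsigned int flags;
	unsigned long order;		/* stands in for the compound order slot */
};

static unsigned long compound_order(const struct page *page)
{
	return (page->flags & PG_head) ? page->order : 0;
}

/* Rough analogue of the patch's set_buddy(): make the compound metadata
 * describe the free block's order, then mark the page as a buddy. */
static void set_buddy(struct page *page, unsigned long order)
{
	if (compound_order(page) != order) {
		if (order) {
			page->flags |= PG_head;
			page[1].flags &= ~PG_tail;	/* forces tail re-init on allocation */
			page->order = order;
		} else {
			page->flags &= ~PG_head;	/* order-0 pages carry no head flag */
		}
	}
	page->flags |= PG_buddy;
}

int main(void)
{
	static struct page pages[8];	/* zero-initialized toy memory map */

	set_buddy(&pages[0], 2);	/* free an order-2 block at index 0 */
	set_buddy(&pages[4], 0);	/* free a single page at index 4 */

	assert(compound_order(&pages[0]) == 2);
	assert(compound_order(&pages[4]) == 0);
	printf("block at 0 spans %lu pages\n", 1UL << compound_order(&pages[0]));
	return 0;
}

With the order readable through compound_order(), the callers touched by the patch (page_is_buddy(), __free_one_page(), move_freepages(), __offline_isolated_pages()) no longer need the private page_order() helper, which is why mm/internal.h can drop it entirely.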
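The move_freepages() and page_isolation.c hunks also change how a pfn range is walked: a free buddy block is skipped in one step of its full size, with the size read from the compound metadata. The sketch below models only that stepping; compound_pages() is not defined in this patch (presumably it comes from earlier in the series) and is assumed here to mean 1 << compound_order(page), with all other names again being simplified stand-ins.

/* range_walk_model.c - a minimal sketch, NOT kernel code. */
#include <stdio.h>

#define PG_head  (1u << 0)
#define PG_buddy (1u << 1)

struct page {
	unsigned int flags;
	unsigned long order;
};

static unsigned long compound_order(const struct page *page)
{
	return (page->flags & PG_head) ? page->order : 0;
}

/* Assumed semantics of the compound_pages() used in the patch. */
static unsigned long compound_pages(const struct page *page)
{
	return 1UL << compound_order(page);
}

/* Count free pages in [0, nr) by skipping whole buddy blocks at once. */
static unsigned long count_free(const struct page *map, unsigned long nr)
{
	unsigned long pfn = 0, free = 0;

	while (pfn < nr) {
		const struct page *page = &map[pfn];

		if (page->flags & PG_buddy) {
			free += compound_pages(page);
			pfn += compound_pages(page);	/* one step per free block */
		} else {
			pfn += 1;
		}
	}
	return free;
}

int main(void)
{
	static struct page map[8];

	/* An order-2 free block at pfn 0 and an order-0 one at pfn 6. */
	map[0].flags = PG_head | PG_buddy;
	map[0].order = 2;
	map[6].flags = PG_buddy;

	printf("free pages: %lu\n", count_free(map, 8));	/* prints 5 */
	return 0;
}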