Optimise bad_page slightly by not passing in the function name (it will be
displayed by dump_stack()).

Index: linux-2.6/mm/page_alloc.c
===================================================================
--- linux-2.6.orig/mm/page_alloc.c
+++ linux-2.6/mm/page_alloc.c
@@ -102,14 +102,13 @@ static inline int bad_range(struct zone
 }
 #endif
 
-static void bad_page(const char *function, struct page *page)
+static void bad_page(struct page *page)
 {
-        printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
-                function, current->comm, page);
-        printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
-                (int)(2*sizeof(page_flags_t)), (unsigned long)page->flags,
+        printk(KERN_EMERG "Bad page state in process '%s'\n", current->comm);
+        printk(KERN_EMERG "page:%p flags:0x%0*lx "
+                        "mapping:%p mapcount:%d count:%d\n",
+                page, (int)(2*sizeof(page_flags_t)), (unsigned long)page->flags,
                 page->mapping, page_mapcount(page), page_count(page));
-        printk(KERN_EMERG "Backtrace:\n");
         dump_stack();
         printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
         page->flags &= ~(1 << PG_lru |
@@ -169,19 +168,15 @@ static void destroy_compound_page(struct
         int i;
         int nr_pages = 1 << order;
 
-        if (!PageCompound(page))
-                return;
-
-        if (page[1].index != order)
-                bad_page(__FUNCTION__, page);
+        if (unlikely(page[1].index != order))
+                bad_page(page);
 
         for (i = 0; i < nr_pages; i++) {
                 struct page *p = page + i;
 
-                if (!PageCompound(p))
-                        bad_page(__FUNCTION__, page);
-                if (page_private(p) != (unsigned long)page)
-                        bad_page(__FUNCTION__, page);
+                if (unlikely(!PageCompound(p) ||
+                                page_private(p) != (unsigned long)page))
+                        bad_page(page);
                 ClearPageCompound(p);
         }
 }
@@ -286,7 +281,7 @@ static inline void __free_pages_bulk (st
         unsigned long page_idx;
         int order_size = 1 << order;
 
-        if (unlikely(order))
+        if (unlikely(PageCompound(page)))
                 destroy_compound_page(page, order);
 
         page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
@@ -320,11 +315,11 @@ static inline void __free_pages_bulk (st
         zone->free_area[order].nr_free++;
 }
 
-static inline void free_pages_check(const char *function, struct page *page)
+static inline void free_pages_check(struct page *page)
 {
-        if ( page_mapcount(page) ||
-                page->mapping != NULL ||
-                page_count(page) != 0 ||
+        if (unlikely(page_mapcount(page) |
+                (page->mapping != NULL) |
+                (page_count(page) != 0) |
                 (page->flags & (
                         1 << PG_lru |
                         1 << PG_private |
@@ -334,8 +329,8 @@ static inline void free_pages_check(cons
                         1 << PG_slab |
                         1 << PG_swapcache |
                         1 << PG_writeback |
-                        1 << PG_reserved )))
-                bad_page(function, page);
+                        1 << PG_reserved ))))
+                bad_page(page);
         if (PageDirty(page))
                 __ClearPageDirty(page);
 }
@@ -389,7 +384,7 @@ void __free_pages_ok(struct page *page,
 #endif
 
         for (i = 0 ; i < (1 << order) ; ++i)
-                free_pages_check(__FUNCTION__, page + i);
+                free_pages_check(page + i);
         list_add(&page->lru, &list);
         kernel_map_pages(page, 1<<order, 0);
@@ -645,7 +640,7 @@ static void fastcall free_hot_cold_page(
         inc_page_state(pgfree);
         if (PageAnon(page))
                 page->mapping = NULL;
-        free_pages_check(__FUNCTION__, page);
+        free_pages_check(page);
         pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
         local_irq_save(flags);
         list_add(&page->lru, &pcp->list);
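
Not part of the patch, but for anyone wondering about the "||" -> "|" change in
free_pages_check: with "||" each term is a separate short-circuit test, while
with "|" every (cheap, side-effect-free) term is evaluated and folded into one
value, so the compiler can emit a single branch on the common good-page path.
Below is a minimal user-space sketch of the idea; the struct, field names and
BAD_FLAGS mask are invented for illustration and are not the kernel's.

/*
 * Illustrative only -- not kernel code.  Build with e.g.:
 *   gcc -O2 -o check-demo check-demo.c
 */
#include <stdio.h>

/* GCC builtin, used the same way the kernel's unlikely() macro uses it. */
#define unlikely(x)     __builtin_expect(!!(x), 0)

/* Stand-in for struct page; fields and mask are made up for the demo. */
struct demo_page {
        unsigned long flags;
        void *mapping;
        int mapcount;
        int count;
};
#define BAD_FLAGS       0xf0UL

/* "||" style: up to four separate short-circuit tests and branches. */
static int check_logical(const struct demo_page *p)
{
        return p->mapcount ||
                p->mapping != NULL ||
                p->count != 0 ||
                (p->flags & BAD_FLAGS);
}

/* "|" style: all terms evaluated, then one combined test. */
static int check_bitwise(const struct demo_page *p)
{
        return unlikely(p->mapcount |
                (p->mapping != NULL) |
                (p->count != 0) |
                (p->flags & BAD_FLAGS));
}

int main(void)
{
        struct demo_page good = { 0, NULL, 0, 0 };
        struct demo_page bad  = { 0, NULL, 0, 1 };      /* nonzero refcount */

        printf("good page: logical=%d bitwise=%d\n",
                check_logical(&good), check_bitwise(&good));
        printf("bad page:  logical=%d bitwise=%d\n",
                check_logical(&bad), check_bitwise(&bad));
        return 0;
}

Both variants report the same pages as bad; the difference is only in the code
generated on the fast path.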