Index: linux-2.6.16-rc1-mm4/Makefile
===================================================================
--- linux-2.6.16-rc1-mm4.orig/Makefile	2006-01-30 11:27:37.000000000 -0800
+++ linux-2.6.16-rc1-mm4/Makefile	2006-01-31 11:12:48.000000000 -0800
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 16
-EXTRAVERSION =-rc1-mm4
+EXTRAVERSION =-rc1-mm4-debug
 NAME=Sliding Snow Leopard
 
 # *DOCUMENTATION*
Index: linux-2.6.16-rc1-mm4/mm/page_alloc.c
===================================================================
--- linux-2.6.16-rc1-mm4.orig/mm/page_alloc.c	2006-01-31 11:12:31.000000000 -0800
+++ linux-2.6.16-rc1-mm4/mm/page_alloc.c	2006-01-31 17:17:06.000000000 -0800
@@ -377,6 +377,7 @@ static void __free_one_page(struct page
 			zone->zeroed_pages -= 1 << order;
 			__ClearPageZeroed(page);
 			__ClearPageZeroed(buddy);
+			add_page_state(zeroed_merge_waste, 1 << order);
 		}
 		list_del(&buddy->lru);
 		area = zone->free_area + order;
@@ -384,14 +385,15 @@ static void __free_one_page(struct page
 		rmv_page_order(buddy);
 		combined_idx = __find_combined_index(page_idx, order);
 		page = page + (combined_idx - page_idx);
-		if (both_zeroed)
+		if (both_zeroed) {
 			/*
 			 * The first page's status determines the zero status
 			 * of the combined page. We know that that is set.
 			 * However, the zero bit of the second half must
 			 * be cleared
 			 */
-			__SetPageZeroed(page + (1 << order));
+			__ClearPageZeroed(page + (1 << order));
+		}
 		page_idx = combined_idx;
 		order++;
 	}
@@ -875,6 +877,28 @@ static inline void prep_zero_page(struct
 		clear_highpage(page + i);
 }
 
+/*
+ * Debug check: a page flagged PageZeroed must contain only zero bytes.
+ * If it does not then report the corruption and zero the page for real.
+ */
+static inline void verify_zero_page(struct page *page, int order, gfp_t gfp_flags)
+{
+	int i;
+
+	unsigned char *a = page_address(page);
+
+	/* Highmem pages may have no kernel mapping through which to check. */
+	if (!a)
+		return;
+
+	for (i = 0; i < (PAGE_SIZE << order); i++)
+		if (a[i]) {
+			printk(KERN_ERR "Page %p not zeroed. Order %d Byte offset %d\n",
+				page, order, i);
+			prep_zero_page(page, order, gfp_flags);
+		}
+}
+
 #ifdef CONFIG_MMU
 /*
  * split_page takes a non-compound higher-order page, and splits it into
@@ -948,8 +972,15 @@ again:
 
 	BUG_ON(bad_range(zone, page));
 
-	if (gfp_flags & __GFP_ZERO && !PageZeroed(page))
-		prep_zero_page(page, order, gfp_flags);
+	if (gfp_flags & __GFP_ZERO) {
+		if (!PageZeroed(page)) {
+			prep_zero_page(page, order, gfp_flags);
+			add_page_state(zeroed_sync, 1 << order);
+		} else {
+			add_page_state(zero_avoid, 1 << order);
+			verify_zero_page(page, order, gfp_flags);
+		}
+	}
 
 	if (prep_new_page(page, order))
 		goto again;
@@ -2225,6 +2256,7 @@ static void scrub_pgdat(pg_data_t *pgdat
 			__SetPageZeroed(page);
 			zone->zeroed_pages += 1 << order;
 			__free_one_page(page, zone, order);
+			add_page_state(zeroed_scrubd, 1 << order);
 		}
 	}
 }
@@ -2661,6 +2693,11 @@ static char *vmstat_text[] = {
 
 	"pgrotated",
 	"nr_bounce",
+
+	"zeroed_scrubd",
+	"zeroed_sync",
+	"zero_avoid",
+	"zeroed_merge_waste",
 };
 
 static void *vmstat_start(struct seq_file *m, loff_t *pos)
Index: linux-2.6.16-rc1-mm4/include/linux/page-flags.h
===================================================================
--- linux-2.6.16-rc1-mm4.orig/include/linux/page-flags.h	2006-01-30 17:23:11.000000000 -0800
+++ linux-2.6.16-rc1-mm4/include/linux/page-flags.h	2006-01-31 11:12:48.000000000 -0800
@@ -152,6 +152,11 @@ struct page_state {
 
 	unsigned long pgrotated;	/* pages rotated to tail of the LRU */
 	unsigned long nr_bounce;	/* pages for bounce buffers */
+
+	unsigned long zeroed_scrubd;	/* zeroed by scrubd */
+	unsigned long zeroed_sync;	/* synchronous zeroing */
+	unsigned long zero_avoid;	/* avoided zeroing */
+	unsigned long zeroed_merge_waste; /* zeroed state lost in buddy merges */
 };
 
 extern void get_page_state(struct page_state *ret);
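
For eyeballing the new counters on a running kernel, a throwaway userspace reader like the sketch below is handy. It is not part of the patch and only assumes what the patch itself provides: that zeroed_scrubd, zeroed_sync, zero_avoid and zeroed_merge_waste show up as "name value" lines in /proc/vmstat, with values counted in pages (the kernel side accumulates 1 << order per event).

/* zerostat.c - print the page-zeroing counters added by this patch.
 *
 * Build: gcc -o zerostat zerostat.c
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Counter names as listed in vmstat_text[] by the patch. */
	static const char *names[] = {
		"zeroed_scrubd", "zeroed_sync",
		"zero_avoid", "zeroed_merge_waste",
	};
	FILE *f = fopen("/proc/vmstat", "r");
	char line[128];
	unsigned int i;

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	/* /proc/vmstat is one "name value" pair per line. */
	while (fgets(line, sizeof(line), f)) {
		char name[64];
		unsigned long val;

		if (sscanf(line, "%63s %lu", name, &val) != 2)
			continue;
		for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
			if (!strcmp(name, names[i]))
				printf("%-20s %10lu pages\n", name, val);
	}
	fclose(f);
	return 0;
}

Watching zero_avoid against zeroed_sync over time gives a rough measure of how often scrubd-prepared pages actually satisfy __GFP_ZERO allocations, while zeroed_merge_waste indicates how much pre-zeroing work is discarded again when buddies merge.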