vmstat.c: Support accounting for compound pages

Compound pages must increment the counters in terms of base pages. If we
detect a compound page then we add the number of base pages that the
compound page spans to the counter.

This avoids numerous changes in the VM to fix up page accounting as we
support more compound pages. It also simplifies page accounting in SLUB,
since we use compound pages there.

Signed-off-by: Christoph Lameter

Index: linux-2.6.21-rc6/mm/vmstat.c
===================================================================
--- linux-2.6.21-rc6.orig/mm/vmstat.c	2007-04-20 18:06:45.000000000 -0700
+++ linux-2.6.21-rc6/mm/vmstat.c	2007-04-20 21:31:16.000000000 -0700
@@ -223,7 +223,8 @@ void __inc_zone_state(struct zone *zone,
 
 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
 {
-	__inc_zone_state(page_zone(page), item);
+	__mod_zone_page_state(page_zone(page), item,
+				1 << compound_order(page));
 }
 EXPORT_SYMBOL(__inc_zone_page_state);
 
@@ -244,7 +245,8 @@ void __dec_zone_state(struct zone *zone,
 
 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 {
-	__dec_zone_state(page_zone(page), item);
+	__mod_zone_page_state(page_zone(page), item,
+				-(1 << compound_order(page)));
 }
 EXPORT_SYMBOL(__dec_zone_page_state);
 
@@ -260,11 +262,9 @@ void inc_zone_state(struct zone *zone, e
 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
 {
 	unsigned long flags;
-	struct zone *zone;
 
-	zone = page_zone(page);
 	local_irq_save(flags);
-	__inc_zone_state(zone, item);
+	__inc_zone_page_state(page, item);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(inc_zone_page_state);

Index: linux-2.6.21-rc6/mm/slub.c
===================================================================
--- linux-2.6.21-rc6.orig/mm/slub.c	2007-04-20 19:25:40.000000000 -0700
+++ linux-2.6.21-rc6/mm/slub.c	2007-04-22 23:51:23.000000000 -0700
@@ -754,7 +754,6 @@ fail:
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page * page;
-	int pages = 1 << s->order;
 
 	if (s->order)
 		flags |= __GFP_COMP;
@@ -770,10 +769,9 @@ static struct page *allocate_slab(struct
 	if (!page)
 		return NULL;
 
-	mod_zone_page_state(page_zone(page),
+	inc_zone_page_state(page,
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		pages);
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE);
 
 	return page;
 }
@@ -852,11 +850,9 @@ out:
 
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
-	int pages = 1 << s->order;
-
 	if (unlikely(PageError(page) || s->dtor)) {
 		void *start = page_address(page);
-		void *end = start + (pages << PAGE_SHIFT);
+		void *end = start + (1 << (PAGE_SHIFT + s->order));
 		void *p;
 
 		slab_pad_check(s, page);
@@ -867,10 +863,9 @@ static void __free_slab(struct kmem_cach
 		}
 	}
 
-	mod_zone_page_state(page_zone(page),
+	dec_zone_page_state(page,
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		- pages);
+		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE);
 	page->mapping = NULL;
 	__free_pages(page, s->order);
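
For illustration (not part of the patch): the SLUB conversion is only
equivalent because allocate_slab() sets __GFP_COMP whenever s->order > 0,
so the returned head page carries its order and compound_order(page) ==
s->order. The counters therefore move by the same number of base pages
that the removed explicit "pages" argument used to carry. A minimal
sketch of the delta both patched wrappers compute (the helper name is
hypothetical; assumes the 2.6.21-era compound_order(), which returns 0
for a plain base page):

	/*
	 * Hypothetical helper, for illustration only: the per-call
	 * counter delta in base pages.  A plain page has
	 * compound_order() == 0 and thus a delta of 1; an order-N
	 * compound page (allocated with __GFP_COMP) has a delta
	 * of 1 << N.
	 */
	static inline long zone_stat_delta(struct page *page)
	{
		return 1L << compound_order(page);
	}

E.g. for an order-3 slab, allocation adds 8 to NR_SLAB_RECLAIMABLE or
NR_SLAB_UNRECLAIMABLE and __free_slab() subtracts 8 again, matching the
old mod_zone_page_state(..., pages) and mod_zone_page_state(..., - pages)
calls.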