Index: linux-2.6.20-rc6/mm/shmem.c
===================================================================
--- linux-2.6.20-rc6.orig/mm/shmem.c	2007-01-25 18:36:34.000000000 -0800
+++ linux-2.6.20-rc6/mm/shmem.c	2007-01-25 18:49:16.000000000 -0800
@@ -94,11 +94,18 @@ static inline struct page *shmem_dir_all
 	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
 	 * might be reconsidered if it ever diverges from PAGE_SIZE.
 	 */
-	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
+	struct page *page = alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
+
+	/* Account directory pages as shared memory; skip on allocation failure. */
+	if (page)
+		mod_zone_page_state(page_zone(page), NR_SHM,
+				1 << (PAGE_CACHE_SHIFT - PAGE_SHIFT));
+	return page;
 }
 
 static inline void shmem_dir_free(struct page *page)
 {
+	/* Undo the NR_SHM accounting done in shmem_dir_alloc(). */
+	mod_zone_page_state(page_zone(page), NR_SHM,
+			-(1 << (PAGE_CACHE_SHIFT - PAGE_SHIFT)));
 	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
 }
 
@@ -983,6 +990,8 @@ shmem_alloc_page(gfp_t gfp, struct shmem
 	pvma.vm_end = PAGE_SIZE;
 	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
 	mpol_free(pvma.vm_policy);
+	if (page)
+		inc_zone_page_state(page, NR_SHM);
 	return page;
 }
 #else
@@ -1001,7 +1010,11 @@ shmem_swapin(struct shmem_inode_info *in
 static inline struct page *
 shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
 {
-	return alloc_page(gfp | __GFP_ZERO);
+	struct page *page = alloc_page(gfp | __GFP_ZERO);
+
+	if (page)
+		inc_zone_page_state(page, NR_SHM);
+	return page;
 }
 #endif
Index: linux-2.6.20-rc6/include/linux/mmzone.h
===================================================================
--- linux-2.6.20-rc6.orig/include/linux/mmzone.h	2007-01-25 18:39:11.000000000 -0800
+++ linux-2.6.20-rc6/include/linux/mmzone.h	2007-01-25 18:39:46.000000000 -0800
@@ -59,6 +59,7 @@ enum zone_stat_item {
 	NR_WRITEBACK,
 	/* Second 128 byte cacheline */
 	NR_MLOCK,	/* Mlocked pages */
+	NR_SHM,		/* Shared Memory */
 	NR_SLAB_RECLAIMABLE,
 	NR_SLAB_UNRECLAIMABLE,
 	NR_PAGETABLE,	/* used for pagetables */