Track unreclaimable pages

Tracking unreclaimable pages lets us calculate the dirty ratio
correctly. If a large number of unreclaimable pages is allocated
(through the slab allocator or through huge pages), write throttling
no longer works, because the dirty limit can never be reached. To fix
this, we simply subtract the number of unreclaimable pages from the
total when computing the limits.

Signed-off-by: Christoph Lameter

Index: linux-2.6.20-rc3/include/linux/mmzone.h
===================================================================
--- linux-2.6.20-rc3.orig/include/linux/mmzone.h	2007-01-05 20:07:27.830918285 -0600
+++ linux-2.6.20-rc3/include/linux/mmzone.h	2007-01-05 20:08:38.958264921 -0600
@@ -53,6 +53,7 @@ enum zone_stat_item {
 	NR_FILE_PAGES,
 	NR_SLAB_RECLAIMABLE,
 	NR_SLAB_UNRECLAIMABLE,
+	NR_UNRECLAIMABLE,
 	NR_PAGETABLE,		/* used for pagetables */
 	NR_FILE_DIRTY,
 	NR_WRITEBACK,
Index: linux-2.6.20-rc3/fs/proc/proc_misc.c
===================================================================
--- linux-2.6.20-rc3.orig/fs/proc/proc_misc.c	2007-01-05 20:07:27.901236967 -0600
+++ linux-2.6.20-rc3/fs/proc/proc_misc.c	2007-01-05 20:08:38.982681129 -0600
@@ -175,6 +175,7 @@ static int meminfo_read_proc(char *page,
 		"Slab:         %8lu kB\n"
 		"SReclaimable: %8lu kB\n"
 		"SUnreclaim:   %8lu kB\n"
+		"Unreclaimabl: %8lu kB\n"
 		"PageTables:   %8lu kB\n"
 		"NFS_Unstable: %8lu kB\n"
 		"Bounce:       %8lu kB\n"
@@ -206,6 +207,7 @@ static int meminfo_read_proc(char *page,
 			global_page_state(NR_SLAB_UNRECLAIMABLE)),
 		K(global_page_state(NR_SLAB_RECLAIMABLE)),
 		K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
+		K(global_page_state(NR_UNRECLAIMABLE)),
 		K(global_page_state(NR_PAGETABLE)),
 		K(global_page_state(NR_UNSTABLE_NFS)),
 		K(global_page_state(NR_BOUNCE)),
Index: linux-2.6.20-rc3/mm/hugetlb.c
===================================================================
--- linux-2.6.20-rc3.orig/mm/hugetlb.c	2007-01-05 20:07:28.316312523 -0600
+++ linux-2.6.20-rc3/mm/hugetlb.c	2007-01-05 20:08:39.007097338 -0600
@@ -115,6 +115,8 @@ static int alloc_fresh_huge_page(void)
 		nr_huge_pages_node[page_to_nid(page)]++;
 		spin_unlock(&hugetlb_lock);
 		put_page(page); /* free it into the hugepage allocator */
+		mod_zone_page_state(page_zone(page), NR_UNRECLAIMABLE,
+			HPAGE_SIZE / PAGE_SIZE);
 		return 1;
 	}
 	return 0;
@@ -183,6 +185,8 @@ static void update_and_free_page(struct
 				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
 				1 << PG_private | 1 << PG_writeback);
 	}
+	mod_zone_page_state(page_zone(page), NR_UNRECLAIMABLE,
+		- (HPAGE_SIZE / PAGE_SIZE));
 	page[1].lru.next = NULL;
 	set_page_refcounted(page);
 	__free_pages(page, HUGETLB_PAGE_ORDER);
Index: linux-2.6.20-rc3/mm/vmstat.c
===================================================================
--- linux-2.6.20-rc3.orig/mm/vmstat.c	2007-01-05 20:07:28.426673789 -0600
+++ linux-2.6.20-rc3/mm/vmstat.c	2007-01-05 20:08:39.024677008 -0600
@@ -459,6 +459,7 @@ static const char * const vmstat_text[]
 	"nr_file_pages",
 	"nr_slab_reclaimable",
 	"nr_slab_unreclaimable",
+	"nr_unreclaimable",
 	"nr_page_table_pages",
 	"nr_dirty",
 	"nr_writeback",
Index: linux-2.6.20-rc3/mm/page-writeback.c
===================================================================
--- linux-2.6.20-rc3.orig/mm/page-writeback.c	2007-01-05 20:08:38.091001186 -0600
+++ linux-2.6.20-rc3/mm/page-writeback.c	2007-01-05 20:13:46.511654602 -0600
@@ -164,7 +164,9 @@ get_dirty_limits(struct dirty_limits *dl
 		dl->nr_writeback += node_page_state(node, NR_WRITEBACK);
 		available_memory +=
-			NODE_DATA(node)->node_present_pages;
+			NODE_DATA(node)->node_present_pages
+			- node_page_state(node, NR_UNRECLAIMABLE)
+			- node_page_state(node, NR_SLAB_UNRECLAIMABLE);
 #ifdef CONFIG_HIGHMEM
 		high_memory += NODE_DATA(node)
 			->node_zones[ZONE_HIGHMEM].present_pages;
#endif
@@ -179,7 +181,9 @@ get_dirty_limits(struct dirty_limits *dl
 	dl->nr_dirty = global_page_state(NR_FILE_DIRTY);
 	dl->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
 	dl->nr_writeback = global_page_state(NR_WRITEBACK);
-	available_memory = vm_total_pages;
+	available_memory = vm_total_pages
+		- global_page_state(NR_UNRECLAIMABLE)
+		- global_page_state(NR_SLAB_UNRECLAIMABLE);
 	high_memory = totalhigh_pages;
 	nr_mapped = global_page_state(NR_FILE_MAPPED) +
 		global_page_state(NR_ANON_PAGES);
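
For illustration, here is a minimal user-space sketch (not part of the
patch; all figures are invented) of the arithmetic that
get_dirty_limits() performs after this change. With 60% of memory
pinned in unreclaimable allocations, a dirty_ratio of 40% computed
over the raw page count yields a limit that dirtyable memory can never
reach, so throttling never triggers; subtracting the unreclaimable
pages first restores a reachable limit:

#include <stdio.h>

int main(void)
{
	/* Hypothetical machine: 1M pages, 60% of them pinned. */
	unsigned long vm_total_pages = 1000000;
	unsigned long nr_unreclaimable = 500000;	/* e.g. huge pages */
	unsigned long nr_slab_unreclaimable = 100000;
	int dirty_ratio = 40;				/* vm.dirty_ratio */

	/* At most this many pages can ever become dirty. */
	unsigned long dirtyable = vm_total_pages - nr_unreclaimable -
					nr_slab_unreclaimable;

	/* Old limit: computed over the raw total. */
	unsigned long old_limit = vm_total_pages * dirty_ratio / 100;

	/* New limit: computed over memory that can be dirtied at all. */
	unsigned long new_limit = dirtyable * dirty_ratio / 100;

	printf("dirtyable pages: %lu\n", dirtyable);
	printf("old limit:       %lu%s\n", old_limit,
		old_limit > dirtyable ? " (unreachable, no throttling)" : "");
	printf("new limit:       %lu\n", new_limit);
	return 0;
}

Here old_limit comes out at 400000 pages against only 300000 dirtyable
pages, so throttling would never engage; new_limit is 120000 pages,
which can actually be hit. The real get_dirty_limits() also accounts
for mapped pages and highmem, which this sketch ignores.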