Index: linux-2.6.20-rc3/include/linux/hugetlb.h
===================================================================
--- linux-2.6.20-rc3.orig/include/linux/hugetlb.h	2006-12-31 18:53:20.000000000 -0600
+++ linux-2.6.20-rc3/include/linux/hugetlb.h	2007-01-05 15:29:02.786446291 -0600
@@ -93,6 +93,9 @@ pte_t huge_ptep_get_and_clear(struct mm_
 void hugetlb_prefault_arch_hook(struct mm_struct *mm);
 #endif
 
+unsigned long global_huge_page_pages(void);
+unsigned long node_huge_page_pages(int node);
+
 #else /* !CONFIG_HUGETLB_PAGE */
 
 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
@@ -125,6 +128,9 @@ static inline unsigned long hugetlb_tota
 #define HPAGE_SIZE PAGE_SIZE
 #endif
 
+#define global_huge_page_pages() 0
+#define node_huge_page_pages(__n) 0
+
 #endif /* !CONFIG_HUGETLB_PAGE */
 
 #ifdef CONFIG_HUGETLBFS
Index: linux-2.6.20-rc3/mm/hugetlb.c
===================================================================
--- linux-2.6.20-rc3.orig/mm/hugetlb.c	2006-12-31 18:53:20.000000000 -0600
+++ linux-2.6.20-rc3/mm/hugetlb.c	2007-01-05 15:31:00.369876250 -0600
@@ -27,6 +27,17 @@ unsigned long max_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
+
+unsigned long global_huge_page_pages(void)
+{
+	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
+}
+
+unsigned long node_huge_page_pages(int node)
+{
+	return nr_huge_pages_node[node] * (HPAGE_SIZE / PAGE_SIZE);
+}
+
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
  */
Index: linux-2.6.20-rc3/mm/page-writeback.c
===================================================================
--- linux-2.6.20-rc3.orig/mm/page-writeback.c	2007-01-05 14:51:00.000000000 -0600
+++ linux-2.6.20-rc3/mm/page-writeback.c	2007-01-05 18:49:48.192187815 -0600
@@ -34,6 +34,7 @@
 #include <linux/syscalls.h>
 #include <linux/buffer_head.h>
 #include <linux/pagevec.h>
+#include <linux/hugetlb.h>
 
 /*
  * The maximum number of pages to writeout in a single bdflush/kupdate
@@ -160,12 +161,15 @@ get_dirty_limits(struct dirty_limits *dl
 		dl->nr_dirty += node_page_state(node, NR_FILE_DIRTY);
 		dl->nr_unstable += node_page_state(node, NR_UNSTABLE_NFS);
 		dl->nr_writeback += node_page_state(node, NR_WRITEBACK);
-		available_memory += NODE_DATA(node)->node_present_pages;
+		available_memory += NODE_DATA(node)->node_present_pages
+			- node_huge_page_pages(node)
+			- node_page_state(node, NR_SLAB_UNRECLAIMABLE);
 #ifdef CONFIG_HIGHMEM
 		high_memory +=
 			NODE_DATA(node)->node_zones[ZONE_HIGHMEM].present_pages;
 #endif
 		nr_mapped += node_page_state(node, NR_FILE_MAPPED) +
 			node_page_state(node, NR_ANON_PAGES);
+	}
 	} else
 #endif
@@ -174,7 +178,9 @@ get_dirty_limits(struct dirty_limits *dl
 		dl->nr_dirty = global_page_state(NR_FILE_DIRTY);
 		dl->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
 		dl->nr_writeback = global_page_state(NR_WRITEBACK);
-		available_memory = vm_total_pages;
+		available_memory = vm_total_pages
+			- global_huge_page_pages()
+			- global_page_state(NR_SLAB_UNRECLAIMABLE);
 		high_memory = totalhigh_pages;
 		nr_mapped = global_page_state(NR_FILE_MAPPED) +
 			global_page_state(NR_ANON_PAGES);
@@ -188,8 +194,12 @@ get_dirty_limits(struct dirty_limits *dl
 	available_memory -= high_memory;
 #endif
 
-	unmapped_ratio = 100 - (nr_mapped * 100) / vm_total_pages;
+	/*
+	 * We should subtract the amount of mlocked pages but we do not have
+	 * any way to account for it.
+	 */
+	unmapped_ratio = 100 - (nr_mapped * 100) / available_memory;
 
 	dirty_ratio = vm_dirty_ratio;
 	if (dirty_ratio > unmapped_ratio / 2)
 		dirty_ratio = unmapped_ratio / 2;
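---

Not part of the patch, just an illustration: a standalone userspace sketch of
what the unmapped_ratio change does, using made-up page counts. The variable
names mirror the kernel's but nothing below is kernel code; it only shows why
subtracting the huge page pool and unreclaimable slab from available_memory
lowers unmapped_ratio on a box that reserves a lot of memory the page cache
can never use.

#include <stdio.h>

int main(void)
{
	unsigned long vm_total_pages = 1UL << 20; /* 4GB in 4KB pages */
	unsigned long huge_pages     = 1UL << 18; /* 1GB huge page pool */
	unsigned long slab_unrecl    = 1UL << 15; /* 128MB unreclaimable slab */
	unsigned long nr_mapped      = 1UL << 17; /* 512MB mapped into pagetables */

	/* Old calculation: divide by total memory. */
	unsigned long old_ratio = 100 - (nr_mapped * 100) / vm_total_pages;

	/* New calculation: divide by memory that can hold dirty page cache. */
	unsigned long avail = vm_total_pages - huge_pages - slab_unrecl;
	unsigned long new_ratio = 100 - (nr_mapped * 100) / avail;

	printf("unmapped_ratio: old %lu%%, new %lu%%\n", old_ratio, new_ratio);
	return 0;
}

This prints "unmapped_ratio: old 88%, new 83%". Since dirty_ratio is capped at
unmapped_ratio / 2 right below the changed line, the cap drops from 44 to 41 in
this example, and the limit is now computed against memory that can actually be
dirtied.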