From: Christoph Lameter

Ensure that the dirtyable memory calculation always returns a positive
number.

In order to avoid division by zero and strange results, we ensure that
the memory calculation of dirtyable memory always returns at least 1.

We need to make sure that highmem_dirtyable_memory() never returns a
number larger than the total dirtyable memory.  Counter deferrals and
strange VM situations with unimaginably small lowmem may make the count
go negative.

Also base the calculation of the mapped_ratio on the amount of
dirtyable memory.

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
---

 mm/page-writeback.c |   18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff -puN mm/page-writeback.c~use-zvc-counters-to-establish-exact-size-of-dirtyable-pages-fix mm/page-writeback.c
--- a/mm/page-writeback.c~use-zvc-counters-to-establish-exact-size-of-dirtyable-pages-fix
+++ a/mm/page-writeback.c
@@ -120,7 +120,7 @@ static void background_writeout(unsigned
  * clamping level.
  */
-static unsigned long highmem_dirtyable_memory(void)
+static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
 	int node;
@@ -134,7 +134,13 @@ static unsigned long highmem_dirtyable_m
 			+ zone_page_state(z, NR_INACTIVE)
 			+ zone_page_state(z, NR_ACTIVE);
 	}
-	return x;
+	/*
+	 * Make sure that the number of highmem pages is never larger
+	 * than the number of the total dirtyable memory. This can only
+	 * occur in very strange VM situations but we want to make sure
+	 * that this does not occur.
+	 */
+	return min(x, total);
 #else
 	return 0;
 #endif
@@ -146,9 +152,9 @@ static unsigned long determine_dirtyable
 	x = global_page_state(NR_FREE_PAGES)
 		+ global_page_state(NR_INACTIVE)
-		+ global_page_state(NR_ACTIVE)
-		- highmem_dirtyable_memory();
-	return x;
+		+ global_page_state(NR_ACTIVE);
+	x -= highmem_dirtyable_memory(x);
+	return x + 1;	/* Ensure that we never return 0 */
 }
 
 static void
@@ -165,7 +171,7 @@ get_dirty_limits(long *pbackground, long
 	unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
 				global_page_state(NR_ANON_PAGES)) * 100) /
-					vm_total_pages;
+					available_memory;
 
 	dirty_ratio = vm_dirty_ratio;
 	if (dirty_ratio > unmapped_ratio / 2)
_
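
To see why both changes are needed, here is a minimal standalone C
sketch of the two safeguards.  This is a userspace model, not kernel
code: the min() macro and the made-up page counts stand in for the
kernel's min() and the ZVC counters.

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

/* Clamp the highmem estimate so it can never exceed the total. */
static unsigned long highmem_dirtyable_memory(unsigned long highmem,
					      unsigned long total)
{
	return min(highmem, total);
}

static unsigned long determine_dirtyable_memory(unsigned long free,
						unsigned long inactive,
						unsigned long active,
						unsigned long highmem)
{
	unsigned long x = free + inactive + active;

	x -= highmem_dirtyable_memory(highmem, x);
	return x + 1;	/* never 0, so dividing by it is safe */
}

int main(void)
{
	/* Normal case: highmem is below the total, nothing is clamped. */
	printf("%lu\n", determine_dirtyable_memory(100, 200, 300, 150));

	/*
	 * Pathological case: counter deferrals make the highmem count
	 * exceed the naive total.  Without the clamp the unsigned
	 * subtraction would wrap around; with it, the result bottoms
	 * out at 1.
	 */
	printf("%lu\n", determine_dirtyable_memory(10, 20, 30, 100));
	return 0;
}

The floor of 1 matters because get_dirty_limits() now divides by this
value when computing unmapped_ratio, so returning 0 would mean a
division by zero.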