Index: linux-2.6.19-rc6-mm1/mm/page-writeback.c
===================================================================
--- linux-2.6.19-rc6-mm1.orig/mm/page-writeback.c	2006-11-27 19:45:41.000000000 -0800
+++ linux-2.6.19-rc6-mm1/mm/page-writeback.c	2006-11-27 19:48:41.000000000 -0800
@@ -33,6 +33,7 @@
 #include <linux/syscalls.h>
 #include <linux/buffer_head.h>
 #include <linux/pagevec.h>
+#include <linux/cpuset.h>
 
 /*
  * The maximum number of pages to writeout in a single bdflush/kupdate
@@ -103,8 +104,8 @@
 static void background_writeout(unsigned long _min_pages);
 
 struct dirty_limits {
-	long background;
-	long dirty;
+	long thresh_background;
+	long thresh_dirty;
 	unsigned long nr_dirty;
 	unsigned long nr_unstable;
 	unsigned long nr_writeback;
@@ -135,22 +136,55 @@
 	int unmapped_ratio;
 	long background;
 	long dirty;
-	unsigned long available_memory = vm_total_pages;
+	unsigned long available_memory;
+	unsigned long high_memory;
+	unsigned long nr_mapped;
 	struct task_struct *tsk;
 
+	/*
+	 * Respect the boundaries of the current cpuset otherwise dirty
+	 * writeout will not work properly in a cpuset.
+	 */
+	if (nodes_full(cpuset_current_mems_allowed)) {
+		dl->nr_dirty = global_page_state(NR_FILE_DIRTY);
+		dl->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
+		dl->nr_writeback = global_page_state(NR_WRITEBACK);
+		available_memory = vm_total_pages;
+		high_memory = totalhigh_pages;
+		nr_mapped = global_page_state(NR_FILE_MAPPED) +
+			global_page_state(NR_ANON_PAGES);
+	} else {
+		int node;
+
+		memset(dl, 0, sizeof(struct dirty_limits));
+		available_memory = 0;
+		high_memory = 0;
+		nr_mapped = 0;
+
+		for_each_node_mask(node, cpuset_current_mems_allowed) {
+			dl->nr_dirty += node_page_state(node, NR_FILE_DIRTY);
+			dl->nr_unstable += node_page_state(node, NR_UNSTABLE_NFS);
+			dl->nr_writeback += node_page_state(node, NR_WRITEBACK);
+			available_memory += NODE_DATA(node)->node_present_pages;
+#ifdef CONFIG_HIGHMEM
+			high_memory += NODE_DATA(node)->node_zones[ZONE_HIGHMEM].present_pages;
+#endif
+			nr_mapped += node_page_state(node, NR_FILE_MAPPED) +
+				node_page_state(node, NR_ANON_PAGES);
+		}
+	}
+
 #ifdef CONFIG_HIGHMEM
 	/*
 	 * If this mapping can only allocate from low memory,
 	 * we exclude high memory from our count.
 	 */
 	if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
-		available_memory -= totalhigh_pages;
+		available_memory -= high_memory;
 #endif
 
-	unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
-				global_page_state(NR_ANON_PAGES)) * 100) /
-				vm_total_pages;
+	unmapped_ratio = 100 - (nr_mapped * 100) / (available_memory + 1);
 
 	dirty_ratio = vm_dirty_ratio;
 	if (dirty_ratio > unmapped_ratio / 2)
 		dirty_ratio = unmapped_ratio / 2;
@@ -170,11 +204,8 @@
 		background += background / 4;
 		dirty += dirty / 4;
 	}
-	dl->background = background;
-	dl->dirty = dirty;
-	dl->nr_dirty = global_page_state(NR_FILE_DIRTY);
-	dl->nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-	dl->nr_writeback = global_page_state(NR_WRITEBACK);
+	dl->thresh_background = background;
+	dl->thresh_dirty = dirty;
 }
 
 /*
@@ -205,7 +236,7 @@
 		get_dirty_limits(&dl, mapping);
 		nr_reclaimable = dl.nr_dirty + dl.nr_unstable;
 		if (nr_reclaimable + dl.nr_writeback <=
-			dl.dirty)
+			dl.thresh_dirty)
 			break;
 
 		if (!dirty_exceeded)
@@ -221,7 +252,7 @@
 			writeback_inodes(&wbc);
 			get_dirty_limits(&dl, mapping);
 			nr_reclaimable = dl.nr_dirty + dl.nr_unstable;
-			if (nr_reclaimable + dl.nr_writeback <= dl.dirty)
+			if (nr_reclaimable + dl.nr_writeback <= dl.thresh_dirty)
 				break;
 			pages_written += write_chunk - wbc.nr_to_write;
 			if (pages_written >= write_chunk)
@@ -231,7 +262,7 @@
 	}
 
 	if (nr_reclaimable + dl.nr_writeback
-		<= dl.dirty && dirty_exceeded)
+		<= dl.thresh_dirty && dirty_exceeded)
 		dirty_exceeded = 0;
 
 	if (writeback_in_progress(bdi))
@@ -246,7 +277,7 @@
 	 * background_thresh, to keep the amount of dirty memory low.
 	 */
 	if ((laptop_mode && pages_written) ||
-	    (!laptop_mode && (nr_reclaimable > dl.background)))
+	    (!laptop_mode && (nr_reclaimable > dl.thresh_background)))
 		pdflush_operation(background_writeout, 0);
 }
 
@@ -313,9 +344,9 @@
 		 * Boost the allowable dirty threshold a bit for page
 		 * allocators so they don't get DoS'ed by heavy writers
 		 */
-		dl.dirty += dl.dirty / 10;	/* wheeee... */
+		dl.thresh_dirty += dl.thresh_dirty / 10;	/* wheeee... */
 
-		if (dl.nr_unstable + dl.nr_writeback <= dl.dirty)
+		if (dl.nr_unstable + dl.nr_writeback <= dl.thresh_dirty)
 			break;
 		congestion_wait(WRITE, HZ/10);
 	}
@@ -342,7 +373,7 @@
 		struct dirty_limits dl;
 
 		get_dirty_limits(&dl, NULL);
-		if (dl.nr_dirty + dl.nr_unstable < dl.background
+		if (dl.nr_dirty + dl.nr_unstable < dl.thresh_background
 				&& min_pages <= 0)
 			break;
 		wbc.encountered_congestion = 0;