From: Andrew Morton

Cc: Christoph Lameter
Cc: Con Kolivas
Signed-off-by: Andrew Morton
---

 mm/swap_prefetch.c |   13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff -puN mm/swap_prefetch.c~swap_prefetch-vs-zoned-counters mm/swap_prefetch.c
--- a/mm/swap_prefetch.c~swap_prefetch-vs-zoned-counters
+++ a/mm/swap_prefetch.c
@@ -19,6 +19,7 @@
 #include
 #include
 #include
+#include

 /*
  * Time to delay prefetching if vm is busy or prefetching unsuccessful. There
@@ -357,7 +358,6 @@ static int prefetch_suitable(void)
 	 */
 	for_each_node_mask(node, sp_stat.prefetch_nodes) {
 		struct node_stats *ns = &sp_stat.node[node];
-		struct page_state ps;

 		/*
 		 * We check to see that pages are not being allocated
@@ -378,10 +378,8 @@ static int prefetch_suitable(void)
 		if (!test_pagestate)
 			continue;

-		get_page_state_node(&ps, node);
-
 		/* We shouldn't prefetch when we are doing writeback */
-		if (ps.nr_writeback) {
+		if (node_page_state(node, NR_WRITEBACK)) {
 			node_clear(node, sp_stat.prefetch_nodes);
 			continue;
 		}
@@ -394,8 +392,11 @@ static int prefetch_suitable(void)
 		 * even if the slab is being allocated on a remote node. This
 		 * would be expensive to fix and not of great significance.
 		 */
-		limit = ps.nr_mapped + ps.nr_slab + ps.nr_dirty +
-			ps.nr_unstable + total_swapcache_pages;
+		limit = node_page_state(node, NR_FILE_PAGES);
+		limit += node_page_state(node, NR_SLAB);
+		limit += node_page_state(node, NR_FILE_DIRTY);
+		limit += node_page_state(node, NR_UNSTABLE_NFS);
+		limit += total_swapcache_pages;
 		if (limit > ns->prefetch_watermark) {
 			node_clear(node, sp_stat.prefetch_nodes);
 			continue;
_
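
For readers not following the -ck tree, here is a minimal sketch of what the converted per-node check in prefetch_suitable() amounts to after this patch. It is not a copy of mm/swap_prefetch.c; node_prefetch_suitable() and its prefetch_watermark parameter are made up for illustration, and only the counter reads shown in the hunks above (node_page_state() on the zoned counters plus total_swapcache_pages) are taken from the patch itself.

/*
 * Sketch of the per-node suitability test after the conversion from
 * struct page_state to zoned per-node counters.  Assumes the
 * 2.6.18-era counters used in the patch (NR_SLAB not yet split);
 * kernel context only, not standalone code.
 */
static int node_prefetch_suitable(int node, unsigned long prefetch_watermark)
{
	unsigned long limit;

	/* Never prefetch while this node is doing writeback. */
	if (node_page_state(node, NR_WRITEBACK))
		return 0;

	/*
	 * Sum the allocations that compete with prefetched pages on this
	 * node.  The swap cache is only accounted globally, so it is
	 * added as-is, exactly as in the patch.
	 */
	limit = node_page_state(node, NR_FILE_PAGES);
	limit += node_page_state(node, NR_SLAB);
	limit += node_page_state(node, NR_FILE_DIRTY);
	limit += node_page_state(node, NR_UNSTABLE_NFS);
	limit += total_swapcache_pages;

	/* Only prefetch while we are below the node's watermark. */
	return limit <= prefetch_watermark;
}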