From: Andrew Morton
Cc: Christoph Lameter
Signed-off-by: Andrew Morton
---

 include/linux/vmstat.h |    3 +--
 mm/vmscan.c            |    2 +-
 mm/vmstat.c            |    0
 3 files changed, 2 insertions(+), 3 deletions(-)

diff -puN include/linux/vmstat.h~zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter-fix include/linux/vmstat.h
--- a/include/linux/vmstat.h~zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter-fix
+++ a/include/linux/vmstat.h
@@ -26,8 +26,7 @@ struct page_state {
 	unsigned long nr_writeback;	/* Pages under writeback */
 	unsigned long nr_unstable;	/* NFS unstable pages */
 	unsigned long nr_page_table_pages;/* Pages used for pagetables */
-	unsigned long nr_slab;		/* In slab */
-#define GET_PAGE_STATE_LAST nr_slab
+#define GET_PAGE_STATE_LAST nr_page_table_pages
 
 /*
  * The below are zeroed by get_page_state().  Use get_full_page_state()
diff -puN mm/vmscan.c~zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter-fix mm/vmscan.c
--- a/mm/vmscan.c~zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter-fix
+++ a/mm/vmscan.c
@@ -1362,7 +1362,7 @@ unsigned long shrink_all_memory(unsigned
 	for_each_zone(zone)
		lru_pages += zone->nr_active + zone->nr_inactive;
 
-	nr_slab = read_page_state(nr_slab);
+	nr_slab = global_page_state(NR_SLAB);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
 		reclaim_state.reclaimed_slab = 0;
diff -puN mm/vmstat.c~zoned-vm-counters-conversion-of-nr_slab-to-per-zone-counter-fix mm/vmstat.c
_
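
For readers following the conversion, below is a minimal, self-contained userspace sketch of the idea behind the change: the old read_page_state(nr_slab) read a single global counter from struct page_state, while the new global_page_state(NR_SLAB) derives the global value from per-zone counters. The struct zone layout, the vm_stat array, and the summing loop here are illustrative assumptions for the sketch, not the kernel's actual implementation; only the names global_page_state, NR_SLAB, and nr_slab come from the patch itself.

/*
 * Sketch only: model each zone keeping its own NR_SLAB count, with the
 * global value obtained by summing across zones. Not kernel code.
 */
#include <stdio.h>

enum zone_stat_item {
	NR_SLAB,			/* pages used by the slab allocator */
	NR_VM_ZONE_STAT_ITEMS
};

struct zone {
	const char *name;
	unsigned long vm_stat[NR_VM_ZONE_STAT_ITEMS];	/* per-zone counters */
};

/* Two made-up zones with example slab page counts. */
static struct zone zones[] = {
	{ "DMA",    { [NR_SLAB] =  12 } },
	{ "Normal", { [NR_SLAB] = 340 } },
};

/* Sum one per-zone counter over all zones to get the global value. */
static unsigned long global_page_state(enum zone_stat_item item)
{
	unsigned long total = 0;
	size_t i;

	for (i = 0; i < sizeof(zones) / sizeof(zones[0]); i++)
		total += zones[i].vm_stat[item];
	return total;
}

int main(void)
{
	/* Analogue of the new call site in shrink_all_memory(). */
	unsigned long nr_slab = global_page_state(NR_SLAB);

	printf("nr_slab = %lu\n", nr_slab);	/* prints 352 */
	return 0;
}

One consequence of this shape, visible in the vmstat.h hunk above: once nr_slab leaves struct page_state, GET_PAGE_STATE_LAST must move back to the new final zeroed field, nr_page_table_pages, which is exactly what this fix does.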