Index: linux-2.6.18-rc4/mm/vmstat.c
===================================================================
--- linux-2.6.18-rc4.orig/mm/vmstat.c	2006-08-18 19:15:05.238552259 -0700
+++ linux-2.6.18-rc4/mm/vmstat.c	2006-08-18 19:27:01.978473704 -0700
@@ -110,7 +110,7 @@ void vm_events_fold_cpu(int cpu)
  *
  * vm_stat contains the global counters
  */
-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned;
 EXPORT_SYMBOL(vm_stat);
 
 #ifdef CONFIG_SMP
@@ -179,11 +179,29 @@ static void refresh_zone_stat_thresholds
 }
 
 #ifdef CONFIG_NUMA
-#define UPDATE_ALL(__z) (unlikely((__z)->stat_threshold > 32))
+#define UPDATE_GROUP(__z) (unlikely((__z)->stat_threshold > 32))
 #else
-#define UPDATE_ALL(__z) 0
+#define UPDATE_GROUP(__z) 0
 #endif
 
+/*
+ * Flush the sizeof(long)-sized group of per-cpu counter differentials
+ * that contains @item back into the zone and global counters.
+ * The caller must have disabled interrupts.
+ */
+static void update_group(struct zone *zone, struct per_cpu_pageset *pcp,
+			enum zone_stat_item item)
+{
+	int i = item & ~(sizeof(long) - 1);
+	int j;
+
+	for (j = i; j < i + sizeof(long); j++) {
+		zone_page_state_add(pcp->vm_stat_diff[j],
+			zone, j);
+		pcp->vm_stat_diff[j] = 0;
+	}
+}
+
 /*
  * Update all counters that have some differentials. This is called
  * when the counter threshold becomes very large to limit the number
@@ -192,7 +210,7 @@ static void refresh_zone_stat_thresholds
 static void update_all_counters(struct per_cpu_pageset *pcp,
 					struct zone *zone)
 {
-	int i, j;
+	enum zone_stat_item i;
 	unsigned long flags;
 
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i += sizeof(long)) {
@@ -206,12 +224,7 @@ static void update_all_counters(struct p
 			continue;
 
 		local_irq_save(flags);
-
-		for(j = i; j < i + sizeof(long); j++) {
-			zone_page_state_add(pcp->vm_stat_diff[j],
-				zone, j);
-			pcp->vm_stat_diff[j] = 0;
-		}
+		update_group(zone, pcp, i);
 		local_irq_restore(flags);
 	}
 }
@@ -281,8 +294,8 @@ static void __inc_zone_state(struct zone
 	if (unlikely(*p > zone->stat_threshold)) {
 		int overstep = zone->stat_threshold / 2;
 
-		if (UPDATE_ALL(zone)) {
-			update_all_counters(pcp, zone);
+		if (UPDATE_GROUP(zone)) {
+			update_group(zone, pcp, item);
 			zone_page_state_add(overstep, zone, item);
 		} else
 			zone_page_state_add(*p + overstep, zone, item);
@@ -308,8 +321,8 @@ void __dec_zone_page_state(struct page *
 	if (unlikely(*p < -zone->stat_threshold)) {
 		int overstep = zone->stat_threshold / 2;
 
-		if (UPDATE_ALL(zone)) {
-			update_all_counters(pcp, zone);
+		if (UPDATE_GROUP(zone)) {
+			update_group(zone, pcp, item);
 			zone_page_state_add(-overstep, zone, item);
 		} else
 			zone_page_state_add(*p - overstep, zone, item);
@@ -358,13 +371,9 @@ void refresh_cpu_vm_stats(int cpu)
 	struct zone *zone;
 
 	for_each_zone(zone) {
-		struct per_cpu_pageset *pcp;
-
 		if (!populated_zone(zone))
 			continue;
 
-		pcp = zone_pcp(zone, cpu);
-
 		update_all_counters(zone_pcp(zone, cpu), zone);
 	}
 }