ZVC: Scale thresholds depending on the size of the system

The ZVC counter update threshold is currently set to a fixed value of 32.
This patch sets up the threshold depending on the number of processors and
the sizes of the zones in the system.

With the current threshold of 32, I was only able to observe slight
contention when more than 130-140 processors concurrently updated the
counters. The contention vanished when I either increased the threshold to
64 or used Andrew's idea of overstepping the interval (see next patch).

However, the current default is a bit of an overkill for smaller systems.
A lower threshold means fewer deferred counter updates and therefore more
precise counters. Small systems (this patch only affects SMP
configurations) may not have gobs of memory, so the additional precision
is useful there.

Some systems also have tiny zones where precision matters. For example
i386 and x86_64 have a 16M DMA zone and either a 900M ZONE_NORMAL or
ZONE_DMA32. On a system with 16 processors and a threshold of 32 we may
defer updates for 16*32 pages, which covers 2M of the 16M DMA zone (1/8th
of the zone as counter variance). Such precious memory needs to be
frugally managed, and it is rarely allocated from, so frequent counter
updates cost little there.

The patch therefore sets up the threshold based on the number of
processors in the system and the size of the zone that the counters are
used for. The threshold should grow logarithmically, so we use fls() as a
cheap approximation.

For the 16M DMA zone this reduces the threshold to 5. With 16 processors
we then defer at most 16*5 pages = 320k. In the typical dual processor
case we use a threshold of 2, for a maximum counter variance of 16k.

For the 900M i386/x86_64 zone the scheme yields a threshold of 8 in a
dual processor configuration, i.e. a maximum variance of 64k. For the 16
processor case we reach a threshold of 20, yielding a maximum counter
variance of 1.25M.

Larger systems can take much larger thresholds. F.e. a typical 64
processor NUMA configuration with 4GB per node will have a threshold of
35. This leads to a maximum variance of roughly 9M per zone, which is not
an issue given 4GB per node and 256GB of main memory, especially since it
is unlikely that all processors allocate from the same node (that is much
more likely in SMP configurations with high processor counts).

Finally, a configuration with 1k processors, 1k nodes and 8GB per node
will have a huge threshold of 80, which makes it possible to defer
updates covering up to 320M per zone, or roughly 320GB summed over all
nodes, out of the 8TB of memory that the system has. On these systems the
VM counters can get way out of sync with the reality on the ground.
However, this is a price one is willing to pay for scalable counter
operations.
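The arithmetic above is easy to double check with a small user-space model
of the calculation. This is only an illustrative sketch and not part of the
patch: fls_approx() stands in for the kernel's fls(), a 4K page size is
assumed, and the helper names (threshold(), fls_approx()) are made up for
the example.

/*
 * Illustrative only -- not part of the patch.  Models the threshold
 * calculation so the numbers quoted above can be reproduced.
 * fls_approx() stands in for the kernel's fls(); 4K pages are assumed.
 */
#include <stdio.h>

static int fls_approx(unsigned int x)
{
	int bit = 0;

	while (x) {		/* position of the highest set bit, 1-based */
		bit++;
		x >>= 1;
	}
	return bit;
}

static int threshold(int cpus, unsigned long long zone_bytes)
{
	int mem = zone_bytes >> 27;	/* zone size in 128 MB units */
	int t = fls_approx(cpus) * (1 + fls_approx(mem));

	return t < 125 ? t : 125;	/* same cap as in the patch */
}

int main(void)
{
	/* 16M DMA zone, 16 processors: threshold 5, variance 16*5*4K = 320K */
	int t = threshold(16, 16ULL << 20);
	printf("16 cpus, 16M zone:  threshold %d, max variance %dK\n",
	       t, 16 * t * 4);

	/* 900M zone, 2 processors: threshold 8, variance 2*8*4K = 64K */
	t = threshold(2, 900ULL << 20);
	printf(" 2 cpus, 900M zone: threshold %d, max variance %dK\n",
	       t, 2 * t * 4);
	return 0;
}

Running it prints a threshold of 5 and a 320k maximum variance for the 16
processor DMA case, and a threshold of 8 with a 64k maximum variance for
the dual processor 900M case, matching the figures above.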
Signed-off-by: Christoph Lameter <clameter@sgi.com>

Index: linux-2.6.18-rc3/mm/vmstat.c
===================================================================
--- linux-2.6.18-rc3.orig/mm/vmstat.c	2006-07-29 23:15:36.000000000 -0700
+++ linux-2.6.18-rc3/mm/vmstat.c	2006-08-02 16:48:50.910708665 -0700
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/cpu.h>
 
 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
 			unsigned long *free, struct pglist_data *pgdat)
@@ -114,17 +115,71 @@ EXPORT_SYMBOL(vm_stat);
 
 #ifdef CONFIG_SMP
 
-#define STAT_THRESHOLD 32
+static s8 calculate_threshold(struct zone *zone)
+{
+	int threshold;
+	int mem;	/* memory in 128 MB units */
+
+	/*
+	 * The threshold scales with the number of processors and the amount
+	 * of memory per zone. More memory means that we can defer updates for
+	 * longer, more processors could lead to more contention.
+	 * fls() is used to have a cheap way of logarithmic scaling.
+	 *
+	 * Some sample thresholds:
+	 *
+	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
+	 * ------------------------------------------------------------------
+	 * 4		1		1	0.9-1 GB	4
+	 * 8		2		2	0.9-1 GB	4
+	 * 10		2		2	1-2 GB		5
+	 * 12		2		2	2-4 GB		6
+	 * 14		2		2	4-8 GB		7
+	 * 16		2		2	8-16 GB		8
+	 * 2		2		2	<128M		1
+	 * 15		4		3	2-4 GB		5
+	 * 24		4		3	8-16 GB		8
+	 * 16		8		4	1-2 GB		4
+	 * 16		8		4	0.9-1GB		4
+	 * 5		16		5	<128M		1
+	 * 20		16		5	900M		4
+	 * 35		64		7	2-4 GB		5
+	 * 42		64		7	4-8 GB		6
+	 * 54		512		9	4-8 GB		6
+	 * 80		1024		10	8-16 GB		8
+	 * 90		1024		10	16-32 GB	9
+	 */
+
+	mem = zone->present_pages >> (27 - PAGE_SHIFT);
+
+	threshold = fls(num_online_cpus()) * (1 + fls(mem));
+
+	/*
+	 * Maximum threshold is 125
+	 */
+	threshold = min(125, threshold);
+
+	return threshold;
+}
 
 /*
- * Determine pointer to currently valid differential byte given a zone and
- * the item number.
- *
- * Preemption must be off
+ * Refresh the thresholds for each zone.
  */
-static inline s8 *diff_pointer(struct zone *zone, enum zone_stat_item item)
+static void refresh_zone_stat_thresholds(void)
 {
-	return &zone_pcp(zone, smp_processor_id())->vm_stat_diff[item];
+	struct zone *zone;
+	int cpu;
+	int threshold;
+
+	for_each_zone(zone) {
+		if (!zone->present_pages)
+			continue;
+
+		threshold = calculate_threshold(zone);
+
+		for_each_online_cpu(cpu)
+			zone_pcp(zone, cpu)->stat_threshold = threshold;
+	}
 }
 
 /*
@@ -133,17 +188,16 @@ static inline s8 *diff_pointer(struct zo
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 				int delta)
 {
-	s8 *p;
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = pcp->vm_stat_diff + item;
 	long x;
 
-	p = diff_pointer(zone, item);
 	x = delta + *p;
 
-	if (unlikely(x > STAT_THRESHOLD || x < -STAT_THRESHOLD)) {
+	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
 		zone_page_state_add(x, zone, item);
 		x = 0;
 	}
-
 	*p = x;
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
@@ -185,11 +239,12 @@ EXPORT_SYMBOL(mod_zone_page_state);
  */
 static void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-	s8 *p = diff_pointer(zone, item);
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = pcp->vm_stat_diff + item;
 
 	(*p)++;
 
-	if (unlikely(*p > STAT_THRESHOLD)) {
+	if (unlikely(*p > pcp->stat_threshold)) {
 		zone_page_state_add(*p, zone, item);
 		*p = 0;
 	}
@@ -204,11 +259,12 @@ EXPORT_SYMBOL(__inc_zone_page_state);
 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 {
 	struct zone *zone = page_zone(page);
-	s8 *p = diff_pointer(zone, item);
+	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+	s8 *p = pcp->vm_stat_diff + item;
 
 	(*p)--;
 
-	if (unlikely(*p < -STAT_THRESHOLD)) {
+	if (unlikely(*p < -pcp->stat_threshold)) {
 		zone_page_state_add(*p, zone, item);
 		*p = 0;
 	}
@@ -239,19 +295,9 @@ EXPORT_SYMBOL(inc_zone_page_state);
 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
 {
 	unsigned long flags;
-	struct zone *zone;
-	s8 *p;
 
-	zone = page_zone(page);
 	local_irq_save(flags);
-	p = diff_pointer(zone, item);
-
-	(*p)--;
-
-	if (unlikely(*p < -STAT_THRESHOLD)) {
-		zone_page_state_add(*p, zone, item);
-		*p = 0;
-	}
+	__dec_zone_page_state(page, item);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(dec_zone_page_state);
@@ -525,6 +571,8 @@ static int zoneinfo_show(struct seq_file
 			   pageset->pcp[j].high,
 			   pageset->pcp[j].batch);
 		}
+		seq_printf(m, "\n  vm stats threshold: %d",
+				pageset->stat_threshold);
 	}
 	seq_printf(m,
 		   "\n  all_unreclaimable: %u"
@@ -613,3 +661,34 @@ struct seq_operations vmstat_op = {
 };
 
 #endif /* CONFIG_PROC_FS */
+/*
+ * Use the cpu notifier to insure that the thresholds are recalculated
+ * when necessary.
+ */
+static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
+		unsigned long action,
+		void *hcpu)
+{
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		refresh_zone_stat_thresholds();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata vmstat_notifier =
+	{ &vmstat_cpuup_callback, NULL, 0 };
+
+int __init setup_vmstat(void)
+{
+	refresh_zone_stat_thresholds();
+	register_cpu_notifier(&vmstat_notifier);
+	return 0;
+}
+module_init(setup_vmstat)
+
Index: linux-2.6.18-rc3/include/linux/mmzone.h
===================================================================
--- linux-2.6.18-rc3.orig/include/linux/mmzone.h	2006-07-29 23:15:36.000000000 -0700
+++ linux-2.6.18-rc3/include/linux/mmzone.h	2006-08-02 16:32:25.111290370 -0700
@@ -77,6 +77,7 @@ struct per_cpu_pages {
 struct per_cpu_pageset {
 	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
 #ifdef CONFIG_SMP
+	s8 stat_threshold;	/* maximum diff before update */
 	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
 #endif
 } ____cacheline_aligned_in_smp;
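As an illustration of why the maximum counter variance quoted above is
simply cpus * threshold pages, the sketch below models in plain user-space
C what the patched __mod_zone_page_state() does with the per-cpu
stat_threshold. It is not part of the patch: the names (NCPUS, THRESHOLD,
mod_state(), global_count, diff[]) are made up, a fixed array replaces the
per-cpu pagesets, and there is no locking or preemption handling.

/*
 * Illustrative only -- not part of the patch.  A user-space model of the
 * deferred update done by __mod_zone_page_state(): each "cpu" accumulates
 * a small signed diff and folds it into the global counter only once the
 * per-cpu threshold is exceeded, so the global count can lag the true
 * value by at most ncpus * threshold pages.
 */
#include <stdio.h>

#define NCPUS		4
#define THRESHOLD	8	/* stands in for pcp->stat_threshold */

static long global_count;	/* stands in for the global zone counter */
static signed char diff[NCPUS];	/* stands in for pcp->vm_stat_diff[item] */

static void mod_state(int cpu, int delta)
{
	long x = delta + diff[cpu];

	if (x > THRESHOLD || x < -THRESHOLD) {
		global_count += x;	/* fold accumulated diff back in */
		x = 0;
	}
	diff[cpu] = x;
}

int main(void)
{
	long exact = 0;
	int cpu, i;

	for (cpu = 0; cpu < NCPUS; cpu++)
		for (i = 0; i < 10; i++) {
			mod_state(cpu, 1);	/* e.g. a page was allocated */
			exact++;
		}

	printf("exact %ld, deferred view %ld, worst-case drift %d\n",
	       exact, global_count, NCPUS * THRESHOLD);
	return 0;
}

With the patch applied, the thresholds actually chosen per zone by
calculate_threshold() show up as the new "vm stats threshold:" line that
zoneinfo_show() adds to /proc/zoneinfo.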