From: Ravikiran G Thirumalai - Add percpu_counter_exceeds() as suggested by Andrew Morton. - Move percpu_counter routines from mm/swap.c to lib/percpu_counter.c Signed-off-by: Ravikiran Thirumalai Signed-off-by: Andrew Morton --- include/linux/percpu_counter.h | 7 ++ lib/Makefile | 1 lib/percpu_counter.c | 79 +++++++++++++++++++++++++++++++ mm/swap.c | 56 --------------------- 4 files changed, 87 insertions(+), 56 deletions(-) diff -puN include/linux/percpu_counter.h~percpu-counters-add-percpu_counter_exceeds include/linux/percpu_counter.h --- devel/include/linux/percpu_counter.h~percpu-counters-add-percpu_counter_exceeds 2006-03-27 21:46:05.000000000 -0800 +++ devel-akpm/include/linux/percpu_counter.h 2006-03-27 21:46:05.000000000 -0800 @@ -42,6 +42,7 @@ static inline void percpu_counter_destro void percpu_counter_mod(struct percpu_counter *fbc, long amount); long percpu_counter_sum(struct percpu_counter *fbc); void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount); +int percpu_counter_exceeds(struct percpu_counter *fbc, long limit); static inline long percpu_counter_read(struct percpu_counter *fbc) { @@ -106,6 +107,12 @@ static inline void percpu_counter_mod_bh fbc->count += amount; local_bh_enable(); } + +static inline int percpu_counter_exceeds(struct percpu_counter *fbc, long limit) +{ + return percpu_counter_read(fbc) > limit; +} + #endif /* CONFIG_SMP */ static inline void percpu_counter_inc(struct percpu_counter *fbc) diff -puN lib/Makefile~percpu-counters-add-percpu_counter_exceeds lib/Makefile --- devel/lib/Makefile~percpu-counters-add-percpu_counter_exceeds 2006-03-27 21:46:05.000000000 -0800 +++ devel-akpm/lib/Makefile 2006-03-27 21:46:05.000000000 -0800 @@ -46,6 +46,7 @@ obj-$(CONFIG_TEXTSEARCH) += textsearch.o obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o +obj-$(CONFIG_SMP) += percpu_counter.o obj-$(CONFIG_SWIOTLB) += swiotlb.o diff -puN 
lib/percpu_counter.c~percpu-counters-add-percpu_counter_exceeds lib/percpu_counter.c --- devel/lib/percpu_counter.c~percpu-counters-add-percpu_counter_exceeds 2006-03-27 21:46:05.000000000 -0800 +++ devel-akpm/lib/percpu_counter.c 2006-03-27 21:46:05.000000000 -0800 @@ -0,0 +1,79 @@ +/* + * Fast batching percpu counters. + */ + +#include <linux/percpu_counter.h> +#include <linux/module.h> + +static void __percpu_counter_mod(struct percpu_counter *fbc, long amount) +{ + long count; + long *pcount; + int cpu = smp_processor_id(); + + pcount = per_cpu_ptr(fbc->counters, cpu); + count = *pcount + amount; + if (count >= FBC_BATCH || count <= -FBC_BATCH) { + spin_lock(&fbc->lock); + fbc->count += count; + *pcount = 0; + spin_unlock(&fbc->lock); + } else { + *pcount = count; + } +} + +void percpu_counter_mod(struct percpu_counter *fbc, long amount) +{ + preempt_disable(); + __percpu_counter_mod(fbc, amount); + preempt_enable(); +} +EXPORT_SYMBOL(percpu_counter_mod); + +void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount) +{ + local_bh_disable(); + __percpu_counter_mod(fbc, amount); + local_bh_enable(); +} +EXPORT_SYMBOL(percpu_counter_mod_bh); + +/* + * Add up all the per-cpu counts, return the result. This is a more accurate + * but much slower version of percpu_counter_read_positive() + */ +long percpu_counter_sum(struct percpu_counter *fbc) +{ + long ret; + int cpu; + + spin_lock(&fbc->lock); + ret = fbc->count; + for_each_possible_cpu(cpu) { + long *pcount = per_cpu_ptr(fbc->counters, cpu); + ret += *pcount; + } + spin_unlock(&fbc->lock); + return ret < 0 ? 0 : ret; +} +EXPORT_SYMBOL(percpu_counter_sum); + +/* + * Returns zero if the counter is within limit. Returns non zero if counter + * is over limit. + * + * The idea is that we avoid doing the expensive percpu_counter_sum() unless + * some limit looks like it is exceeded, which will cause some serious failure. + * In that case we do the percpu_counter_sum() to make the counts accurate. 
If + * it turns out that the limit wasn't exceeded, there will be no more calls to + * percpu_counter_sum() until significant counter skew has reoccurred. + */ +int percpu_counter_exceeds(struct percpu_counter *fbc, long limit) +{ + if (percpu_counter_read(fbc) > limit) + if (percpu_counter_sum(fbc) > limit) + return 1; + return 0; +} +EXPORT_SYMBOL(percpu_counter_exceeds); diff -puN mm/swap.c~percpu-counters-add-percpu_counter_exceeds mm/swap.c --- devel/mm/swap.c~percpu-counters-add-percpu_counter_exceeds 2006-03-27 21:46:05.000000000 -0800 +++ devel-akpm/mm/swap.c 2006-03-27 21:46:05.000000000 -0800 @@ -480,62 +480,6 @@ static int cpu_swap_callback(struct noti #endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_SMP */ -#ifdef CONFIG_SMP -static void __percpu_counter_mod(struct percpu_counter *fbc, long amount) -{ - long count; - long *pcount; - int cpu = smp_processor_id(); - - pcount = per_cpu_ptr(fbc->counters, cpu); - count = *pcount + amount; - if (count >= FBC_BATCH || count <= -FBC_BATCH) { - spin_lock(&fbc->lock); - fbc->count += count; - *pcount = 0; - spin_unlock(&fbc->lock); - } else { - *pcount = count; - } -} - -void percpu_counter_mod(struct percpu_counter *fbc, long amount) -{ - preempt_disable(); - __percpu_counter_mod(fbc, amount); - preempt_enable(); -} -EXPORT_SYMBOL(percpu_counter_mod); - -void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount) -{ - local_bh_disable(); - __percpu_counter_mod(fbc, amount); - local_bh_enable(); -} -EXPORT_SYMBOL(percpu_counter_mod_bh); - -/* - * Add up all the per-cpu counts, return the result. This is a more accurate - * but much slower version of percpu_counter_read_positive() - */ -long percpu_counter_sum(struct percpu_counter *fbc) -{ - long ret; - int cpu; - - spin_lock(&fbc->lock); - ret = fbc->count; - for_each_possible_cpu(cpu) { - long *pcount = per_cpu_ptr(fbc->counters, cpu); - ret += *pcount; - } - spin_unlock(&fbc->lock); - return ret < 0 ? 
0 : ret; -} -EXPORT_SYMBOL(percpu_counter_sum); -#endif - /* * Perform any setup for the swap system */ _