From: Ravikiran G Thirumalai

Move percpu_counter routines from mm/swap.c to lib/percpu_counter.c

Signed-off-by: Ravikiran Thirumalai
Signed-off-by: Andrew Morton
---

 lib/Makefile                   |    1 
 lib/percpu_counter.c           |   46 +++++++++++++++++++++++++++++++++++++++
 mm/swap.c                      |   42 -----------------------------------
 include/linux/percpu_counter.h |    0 
 4 files changed, 47 insertions(+), 42 deletions(-)

diff -puN include/linux/percpu_counter.h~percpu-counters-add-percpu_counter_exceeds include/linux/percpu_counter.h
diff -puN lib/Makefile~percpu-counters-add-percpu_counter_exceeds lib/Makefile
--- 25/lib/Makefile~percpu-counters-add-percpu_counter_exceeds	2006-05-28 18:47:18.000000000 -0700
+++ 25-akpm/lib/Makefile	2006-05-28 19:22:16.522557968 -0700
@@ -46,6 +46,7 @@ obj-$(CONFIG_TEXTSEARCH) += textsearch.o
 obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
 obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
+obj-$(CONFIG_SMP) += percpu_counter.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff -puN lib/percpu_counter.c~percpu-counters-add-percpu_counter_exceeds lib/percpu_counter.c
--- 25/lib/percpu_counter.c~percpu-counters-add-percpu_counter_exceeds	2006-05-28 18:47:18.000000000 -0700
+++ 25-akpm/lib/percpu_counter.c	2006-05-28 19:23:31.510158120 -0700
@@ -0,0 +1,46 @@
+/*
+ * Fast batching percpu counters.
+ */
+
+#include <linux/percpu_counter.h>
+#include <linux/module.h>
+
+void percpu_counter_mod(struct percpu_counter *fbc, long amount)
+{
+	long count;
+	long *pcount;
+	int cpu = get_cpu();
+
+	pcount = per_cpu_ptr(fbc->counters, cpu);
+	count = *pcount + amount;
+	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
+		spin_lock(&fbc->lock);
+		fbc->count += count;
+		*pcount = 0;
+		spin_unlock(&fbc->lock);
+	} else {
+		*pcount = count;
+	}
+	put_cpu();
+}
+EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result.  This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+	long ret;
+	int cpu;
+
+	spin_lock(&fbc->lock);
+	ret = fbc->count;
+	for_each_possible_cpu(cpu) {
+		long *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	spin_unlock(&fbc->lock);
+	return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
diff -puN mm/swap.c~percpu-counters-add-percpu_counter_exceeds mm/swap.c
--- 25/mm/swap.c~percpu-counters-add-percpu_counter_exceeds	2006-05-28 18:47:18.000000000 -0700
+++ 25-akpm/mm/swap.c	2006-05-28 19:22:22.608632744 -0700
@@ -480,48 +480,6 @@ static int cpu_swap_callback(struct noti
 #endif /* CONFIG_HOTPLUG_CPU */
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SMP
-void percpu_counter_mod(struct percpu_counter *fbc, long amount)
-{
-	long count;
-	long *pcount;
-	int cpu = get_cpu();
-
-	pcount = per_cpu_ptr(fbc->counters, cpu);
-	count = *pcount + amount;
-	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
-		spin_lock(&fbc->lock);
-		fbc->count += count;
-		*pcount = 0;
-		spin_unlock(&fbc->lock);
-	} else {
-		*pcount = count;
-	}
-	put_cpu();
-}
-EXPORT_SYMBOL(percpu_counter_mod);
-
-/*
- * Add up all the per-cpu counts, return the result.  This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-long percpu_counter_sum(struct percpu_counter *fbc)
-{
-	long ret;
-	int cpu;
-
-	spin_lock(&fbc->lock);
-	ret = fbc->count;
-	for_each_possible_cpu(cpu) {
-		long *pcount = per_cpu_ptr(fbc->counters, cpu);
-		ret += *pcount;
-	}
-	spin_unlock(&fbc->lock);
-	return ret < 0 ? 0 : ret;
-}
-EXPORT_SYMBOL(percpu_counter_sum);
-#endif
-
 /*
  * Perform any setup for the swap system
  */
_