From: Ravikiran G Thirumalai

Add percpu_counter_mod_bh for using these counters safely from both
softirq and process context.

Signed-off-by: Pravin B. Shelar
Signed-off-by: Ravikiran G Thirumalai
Signed-off-by: Shai Fultheim
Signed-off-by: Andrew Morton
---

 include/linux/percpu_counter.h |    9 +++++++++
 mm/swap.c                      |   23 +++++++++++++++++++----
 2 files changed, 28 insertions(+), 4 deletions(-)

diff -puN include/linux/percpu_counter.h~per-cpufy-net-proto-structures-add-percpu_counter_modbh include/linux/percpu_counter.h
--- devel/include/linux/percpu_counter.h~per-cpufy-net-proto-structures-add-percpu_counter_modbh	2006-03-16 02:08:41.000000000 -0800
+++ devel-akpm/include/linux/percpu_counter.h	2006-03-16 02:08:41.000000000 -0800
@@ -11,6 +11,7 @@
 #include <linux/smp.h>
 #include <linux/threads.h>
 #include <linux/percpu.h>
+#include <linux/interrupt.h>
 
 #ifdef CONFIG_SMP
 
@@ -40,6 +41,7 @@ static inline void percpu_counter_destro
 
 void percpu_counter_mod(struct percpu_counter *fbc, long amount);
 long percpu_counter_sum(struct percpu_counter *fbc);
+void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount);
 
 static inline long percpu_counter_read(struct percpu_counter *fbc)
 {
@@ -98,6 +100,12 @@ static inline long percpu_counter_sum(st
 	return percpu_counter_read_positive(fbc);
 }
 
+static inline void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount)
+{
+	local_bh_disable();
+	fbc->count += amount;
+	local_bh_enable();
+}
 #endif	/* CONFIG_SMP */
 
 static inline void percpu_counter_inc(struct percpu_counter *fbc)
@@ -110,4 +118,5 @@ static inline void percpu_counter_dec(st
 	percpu_counter_mod(fbc, -1);
 }
 
+
 #endif /* _LINUX_PERCPU_COUNTER_H */
diff -puN mm/swap.c~per-cpufy-net-proto-structures-add-percpu_counter_modbh mm/swap.c
--- devel/mm/swap.c~per-cpufy-net-proto-structures-add-percpu_counter_modbh	2006-03-16 02:08:41.000000000 -0800
+++ devel-akpm/mm/swap.c	2006-03-16 02:08:41.000000000 -0800
@@ -480,11 +480,11 @@ static int cpu_swap_callback(struct noti
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_SMP
-void percpu_counter_mod(struct percpu_counter *fbc, long amount)
+static void __percpu_counter_mod(struct percpu_counter *fbc, long amount)
 {
 	long count;
 	long *pcount;
-	int cpu = get_cpu();
+	int cpu = smp_processor_id();
 
 	pcount = per_cpu_ptr(fbc->counters, cpu);
 	count = *pcount + amount;
@@ -496,9 +496,21 @@ void percpu_counter_mod(struct percpu_co
 	} else {
 		*pcount = count;
 	}
-	put_cpu();
 }
-EXPORT_SYMBOL(percpu_counter_mod);
+
+void percpu_counter_mod(struct percpu_counter *fbc, long amount)
+{
+	preempt_disable();
+	__percpu_counter_mod(fbc, amount);
+	preempt_enable();
+}
+
+void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount)
+{
+	local_bh_disable();
+	__percpu_counter_mod(fbc, amount);
+	local_bh_enable();
+}
 
 /*
  * Add up all the per-cpu counts, return the result. This is a more accurate
@@ -518,6 +530,9 @@ long percpu_counter_sum(struct percpu_co
 	spin_unlock(&fbc->lock);
 	return ret < 0 ? 0 : ret;
 }
+
+EXPORT_SYMBOL(percpu_counter_mod);
+EXPORT_SYMBOL(percpu_counter_mod_bh);
 EXPORT_SYMBOL(percpu_counter_sum);
 #endif
_
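
For reference, a minimal usage sketch of the new interface (the counter and
callers below are hypothetical, not part of this patch).  A counter touched
from both process context and softirq context uses the _bh variant in process
context, so a softirq on the same CPU cannot interleave with the
read-modify-write; in softirq context the plain variant is sufficient:

	#include <linux/percpu_counter.h>

	static struct percpu_counter example_counter;	/* hypothetical */

	void example_setup(void)
	{
		percpu_counter_init(&example_counter);
	}

	/* Process context: block softirqs on this CPU around the update. */
	void example_charge(long amount)
	{
		percpu_counter_mod_bh(&example_counter, amount);
	}

	/* Softirq context: runs with BHs already serialized on this CPU,
	 * so no further protection is needed. */
	void example_uncharge(long amount)
	{
		percpu_counter_mod(&example_counter, -amount);
	}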