From: Ravikiran G Thirumalai

Add percpu_counter_mod_bh(), so that these counters can be used safely
from both softirq and process context.

Signed-off-by: Pravin B. Shelar
Signed-off-by: Ravikiran G Thirumalai
Signed-off-by: Shai Fultheim
Signed-off-by: Andrew Morton
---

 include/linux/percpu_counter.h |    9 +++++++++
 mm/swap.c                      |   20 +++++++++++++++++---
 2 files changed, 26 insertions(+), 3 deletions(-)

diff -puN include/linux/percpu_counter.h~per-cpufy-net-proto-structures-add-percpu_counter_modbh include/linux/percpu_counter.h
--- devel/include/linux/percpu_counter.h~per-cpufy-net-proto-structures-add-percpu_counter_modbh	2006-03-23 03:22:21.000000000 -0800
+++ devel-akpm/include/linux/percpu_counter.h	2006-03-23 03:22:21.000000000 -0800
@@ -11,6 +11,7 @@
 #include <linux/smp.h>
 #include <linux/threads.h>
 #include <linux/percpu.h>
+#include <linux/interrupt.h>
 
 #ifdef CONFIG_SMP
 
@@ -40,6 +41,7 @@ static inline void percpu_counter_destro
 
 void percpu_counter_mod(struct percpu_counter *fbc, long amount);
 long percpu_counter_sum(struct percpu_counter *fbc);
+void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount);
 
 static inline long percpu_counter_read(struct percpu_counter *fbc)
 {
@@ -98,6 +100,12 @@ static inline long percpu_counter_sum(st
 	return percpu_counter_read_positive(fbc);
 }
 
+static inline void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount)
+{
+	local_bh_disable();
+	fbc->count += amount;
+	local_bh_enable();
+}
 #endif	/* CONFIG_SMP */
 
 static inline void percpu_counter_inc(struct percpu_counter *fbc)
@@ -110,4 +118,5 @@ static inline void percpu_counter_dec(st
 	percpu_counter_mod(fbc, -1);
 }
 
+
 #endif /* _LINUX_PERCPU_COUNTER_H */
diff -puN mm/swap.c~per-cpufy-net-proto-structures-add-percpu_counter_modbh mm/swap.c
--- devel/mm/swap.c~per-cpufy-net-proto-structures-add-percpu_counter_modbh	2006-03-23 03:22:21.000000000 -0800
+++ devel-akpm/mm/swap.c	2006-03-23 03:22:23.000000000 -0800
@@ -481,11 +481,11 @@ static int cpu_swap_callback(struct noti
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_SMP
-void percpu_counter_mod(struct percpu_counter *fbc, long amount)
+static void __percpu_counter_mod(struct percpu_counter *fbc, long amount)
 {
 	long count;
 	long *pcount;
-	int cpu = get_cpu();
+	int cpu = smp_processor_id();
 
 	pcount = per_cpu_ptr(fbc->counters, cpu);
 	count = *pcount + amount;
@@ -497,10 +497,24 @@ void percpu_counter_mod(struct percpu_co
 	} else {
 		*pcount = count;
 	}
-	put_cpu();
+}
+
+void percpu_counter_mod(struct percpu_counter *fbc, long amount)
+{
+	preempt_disable();
+	__percpu_counter_mod(fbc, amount);
+	preempt_enable();
 }
 EXPORT_SYMBOL(percpu_counter_mod);
 
+void percpu_counter_mod_bh(struct percpu_counter *fbc, long amount)
+{
+	local_bh_disable();
+	__percpu_counter_mod(fbc, amount);
+	local_bh_enable();
+}
+EXPORT_SYMBOL(percpu_counter_mod_bh);
+
 /*
  * Add up all the per-cpu counts, return the result.  This is a more accurate
  * but much slower version of percpu_counter_read_positive()
_
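
For context, a minimal caller sketch (hypothetical: the nr_widgets
counter and both functions below are invented for illustration and are
not part of this patch).  A counter that is modified from both process
and softirq context needs the _bh variant on the process-context side,
so a softirq firing on the same CPU cannot interrupt a half-completed
read-modify-write of the per-cpu slot; code already running in softirq
context can keep using plain percpu_counter_mod(), which only needs to
disable preemption.

	#include <linux/percpu_counter.h>

	static struct percpu_counter nr_widgets;	/* hypothetical counter */

	/* Process context: a softirq that also touches nr_widgets could
	 * fire on this CPU mid-update, so disable bottom halves around
	 * the modification. */
	void widget_created(void)
	{
		percpu_counter_mod_bh(&nr_widgets, 1);
	}

	/* Softirq context: softirqs do not nest on a CPU, so the plain
	 * preempt-disabling variant is sufficient here. */
	void widget_freed_from_softirq(void)
	{
		percpu_counter_mod(&nr_widgets, -1);
	}

This mirrors the spin_lock() vs spin_lock_bh() convention: the stronger
exclusion is only paid for on the paths that can actually be interrupted
by the softirq updater.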