From 1df3e758c090c21de0e963f69111c4060f1aa5a9 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Thu, 26 Jul 2007 14:04:23 -0700 Subject: [PATCH] Add support for local_cmpxchg and arch specific local sections --- include/asm-generic/local.h | 41 +++++++++++++++++++++++++++++++++++++++-- include/asm-i386/local.h | 19 +++++++++++++++++++ include/asm-x86_64/local.h | 23 +++++++++++++++++++++++ 3 files changed, 81 insertions(+), 2 deletions(-) diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h index 33d7d04..0c966f1 100644 --- a/include/asm-generic/local.h +++ b/include/asm-generic/local.h @@ -46,13 +46,50 @@ typedef struct #define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u)) #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a) -/* Non-atomic variants, ie. preemption disabled and won't be touched - * in interrupt, etc. Some archs can optimize this case well. */ +/* + * Establish a state necessary for __local_xx functions to work. + */ +#define local_begin(flags) local_irq_save(flags) + +/* + * Undo whatever local_begin did + */ +static inline void local_end(unsigned long flags) +{ + local_irq_restore(flags); +} + +/* + * Go from the state necessary for local_xx functions to + * full interrupt disable + */ +#define local_up_irq(flags) do { } while (0) + +/* + * Back from interrupt to local ops + */ +static inline void local_down_irq(unsigned long flags) +{ +} + +/* + * Non-atomic variants, ie. within local_begin() / local_end() or + * preempt_disable / enable() and won't be touched in interrupt, etc. + * Some archs can optimize this case well. 
+ */ #define __local_inc(l) local_set((l), local_read(l) + 1) #define __local_dec(l) local_set((l), local_read(l) - 1) #define __local_add(i,l) local_set((l), local_read(l) + (i)) #define __local_sub(i,l) local_set((l), local_read(l) - (i)) +#define __local_cmpxchg(v, o, n) (*(v) = (n), (o)) +#define __local_xchg(v, n) \ +({ \ + __typeof(*(v)) x = *(v); \ + *(v) = (n); \ + x; \ +}) + /* Use these for per-cpu local_t variables: on some archs they are * much more efficient than these naive implementations. Note they take * a variable (eg. mystruct.foo), not an address. diff --git a/include/asm-i386/local.h b/include/asm-i386/local.h index 6e85975..f38a9a3 100644 --- a/include/asm-i386/local.h +++ b/include/asm-i386/local.h @@ -194,12 +194,31 @@ static __inline__ long local_sub_return(long i, local_t *l) }) #define local_inc_not_zero(l) local_add_unless((l), 1, 0) +#define local_begin(__flags) \ +do { \ + preempt_disable(); \ +} while (0) + +static inline void local_end(unsigned long flags) { + preempt_enable(); +} + +#define local_up_irq(__flags) local_irq_save(__flags) + +static inline void local_down_irq(unsigned long flags) +{ + local_irq_restore(flags); +} + /* On x86, these are no better than the atomic variants. */ #define __local_inc(l) local_inc(l) #define __local_dec(l) local_dec(l) #define __local_add(i,l) local_add((i),(l)) #define __local_sub(i,l) local_sub((i),(l)) +#define __local_cmpxchg cmpxchg_local +#define __local_xchg xchg + /* Use these for per-cpu local_t variables: on some archs they are * much more efficient than these naive implementations. Note they take * a variable, not an address. 
diff --git a/include/asm-x86_64/local.h b/include/asm-x86_64/local.h index e87492b..feb5ea9 100644 --- a/include/asm-x86_64/local.h +++ b/include/asm-x86_64/local.h @@ -9,6 +9,7 @@ typedef struct atomic_long_t a; } local_t; + #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) } #define local_read(l) atomic_long_read(&(l)->a) @@ -181,11 +182,33 @@ static __inline__ long local_sub_return(long i, local_t *l) /* On x86-64 these are better than the atomic variants on SMP kernels because they dont use a lock prefix. */ + +#define local_begin(__flags) \ +do { \ + preempt_disable(); \ +} while (0) + +static inline void local_end(unsigned long flags) +{ + preempt_enable(); +} + +#define local_up_irq(flags) local_irq_save(flags) + +static inline void local_down_irq(unsigned long flags) +{ + local_irq_restore(flags); +} + #define __local_inc(l) local_inc(l) #define __local_dec(l) local_dec(l) #define __local_add(i,l) local_add((i),(l)) #define __local_sub(i,l) local_sub((i),(l)) + +#define __local_cmpxchg cmpxchg_local +#define __local_xchg xchg + + /* Use these for per-cpu local_t variables: on some archs they are * much more efficient than these naive implementations. Note they take * a variable, not an address. -- 1.4.4.4