CPU Ops: x86_64 support

Support fast cpu ops on x86_64 by providing a series of functions that
generate the proper %gs-relative instructions.

Signed-off-by: Christoph Lameter

---
 arch/x86/Kconfig            |    4 
 include/asm-x86/percpu_64.h |  260 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 264 insertions(+)

Index: linux-2.6/arch/x86/Kconfig
===================================================================
--- linux-2.6.orig/arch/x86/Kconfig	2007-11-17 20:24:21.765582177 -0800
+++ linux-2.6/arch/x86/Kconfig	2007-11-17 20:36:17.829907748 -0800
@@ -141,6 +141,10 @@ config GENERIC_PENDING_IRQ
 	depends on GENERIC_HARDIRQS && SMP
 	default y
 
+config FAST_CPU_OPS
+	bool
+	default y
+
 config X86_SMP
 	bool
 	depends on X86_32 && SMP && !X86_VOYAGER
Index: linux-2.6/include/asm-x86/percpu_64.h
===================================================================
--- linux-2.6.orig/include/asm-x86/percpu_64.h	2007-11-17 20:24:23.213831698 -0800
+++ linux-2.6/include/asm-x86/percpu_64.h	2007-11-17 20:38:43.315888946 -0800
@@ -64,4 +64,264 @@ extern void setup_per_cpu_areas(void);
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
 
+static inline unsigned long __cpu_read_gs(volatile void *ptr, int size)
+{
+	unsigned long result;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("mov %%gs:%1, %b0"
+			: "=r"(result)
+			: "m"(*__xg(ptr)));
+		return result;
+	case 2:
+		__asm__ __volatile__("movw %%gs:%1, %w0"
+			: "=r"(result)
+			: "m"(*__xg(ptr)));
+		return result;
+	case 4:
+		__asm__ __volatile__("movl %%gs:%1, %k0"
+			: "=r"(result)
+			: "m"(*__xg(ptr)));
+		return result;
+	case 8:
+		__asm__ __volatile__("movq %%gs:%1, %0"
+			: "=r"(result)
+			: "m"(*__xg(ptr)));
+		return result;
+	}
+	BUG();
+}
+
+#define cpu_read_gs(obj)\
+	((__typeof__(obj))__cpu_read_gs(&(obj), sizeof(obj)))
+
+static inline void __cpu_write_gs(volatile void *ptr,
+		unsigned long data, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("mov %b0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	case 2:
+		__asm__ __volatile__("movw %w0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	case 4:
+		__asm__ __volatile__("movl %k0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	case 8:
+		__asm__ __volatile__("movq %0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	}
+	BUG();
+}
+
+#define cpu_write_gs(obj, value)\
+	__cpu_write_gs(&(obj), (unsigned long)value, sizeof(obj))
+
+static inline void __cpu_add_gs(volatile void *ptr,
+		long data, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("add %b0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	case 2:
+		__asm__ __volatile__("addw %w0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	case 4:
+		__asm__ __volatile__("addl %k0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	case 8:
+		__asm__ __volatile__("addq %0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	}
+	BUG();
+}
+
+#define cpu_add_gs(obj, value)\
+	__cpu_add_gs(&(obj), (unsigned long)value, sizeof(obj))
+
+static inline void __cpu_sub_gs(volatile void *ptr,
+		long data, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("sub %b0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	case 2:
+		__asm__ __volatile__("subw %w0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	case 4:
+		__asm__ __volatile__("subl %k0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	case 8:
+		__asm__ __volatile__("subq %0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	}
+	BUG();
+}
+
+#define cpu_sub_gs(obj, value)\
+	__cpu_sub_gs(&(obj), (unsigned long)value, sizeof(obj))
+
+static inline void __cpu_xchg_gs(volatile void *ptr,
+		long data, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("xchg %b0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	case 2:
+		__asm__ __volatile__("xchgw %w0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	case 4:
+		__asm__ __volatile__("xchgl %k0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	case 8:
+		__asm__ __volatile__("xchgq %0, %%gs:%1"
+			: : "ri"(data), "m"(*__xg(ptr)));
+		return;
+	}
+	BUG();
+}
+
+#define cpu_xchg_gs(obj, value)\
+	__cpu_xchg_gs(&(obj), (unsigned long)value, sizeof(obj))
+
+static inline void __cpu_inc_gs(volatile void *ptr, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("incb %%gs:%0"
+			: : "m"(*__xg(ptr)));
+		return;
+	case 2:
+		__asm__ __volatile__("incw %%gs:%0"
+			: : "m"(*__xg(ptr)));
+		return;
+	case 4:
+		__asm__ __volatile__("incl %%gs:%0"
+			: : "m"(*__xg(ptr)));
+		return;
+	case 8:
+		__asm__ __volatile__("incq %%gs:%0"
+			: : "m"(*__xg(ptr)));
+		return;
+	}
+	BUG();
+}
+
+#define cpu_inc_gs(obj)\
+	__cpu_inc_gs(&(obj), sizeof(obj))
+
+static inline void __cpu_dec_gs(volatile void *ptr, int size)
+{
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("decb %%gs:%0"
+			: : "m"(*__xg(ptr)));
+		return;
+	case 2:
+		__asm__ __volatile__("decw %%gs:%0"
+			: : "m"(*__xg(ptr)));
+		return;
+	case 4:
+		__asm__ __volatile__("decl %%gs:%0"
+			: : "m"(*__xg(ptr)));
+		return;
+	case 8:
+		__asm__ __volatile__("decq %%gs:%0"
+			: : "m"(*__xg(ptr)));
+		return;
+	}
+	BUG();
+}
+
+#define cpu_dec_gs(obj)\
+	__cpu_dec_gs(&(obj), sizeof(obj))
+
+static inline unsigned long __cmpxchg_local_gs(volatile void *ptr,
+		unsigned long old, unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("cmpxchgb %b1, %%gs:%2"
+			: "=a"(prev)
+			: "q"(new), "m"(*__xg(ptr)), "0"(old)
+			: "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__("cmpxchgw %w1, %%gs:%2"
+			: "=a"(prev)
+			: "r"(new), "m"(*__xg(ptr)), "0"(old)
+			: "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__("cmpxchgl %k1, %%gs:%2"
+			: "=a"(prev)
+			: "r"(new), "m"(*__xg(ptr)), "0"(old)
+			: "memory");
+		return prev;
+	case 8:
+		__asm__ __volatile__("cmpxchgq %1, %%gs:%2"
+			: "=a"(prev)
+			: "r"(new), "m"(*__xg(ptr)), "0"(old)
+			: "memory");
+		return prev;
+	}
+	return old;
+}
+
+#define cmpxchg_local_gs(obj, o, n)\
+	((__typeof__(obj))__cmpxchg_local_gs(&(obj),(unsigned long)(o),\
+		(unsigned long)(n),sizeof(obj)))
+
+#define CPU_READ(obj) cpu_read_gs(obj)
+#define CPU_WRITE(obj,val) cpu_write_gs(obj, val)
+#define CPU_ADD(obj,val) cpu_add_gs(obj, val)
+#define CPU_SUB(obj,val) cpu_sub_gs(obj, val)
+#define CPU_INC(obj) cpu_inc_gs(obj)
+#define CPU_DEC(obj) cpu_dec_gs(obj)
+
+#define CPU_XCHG(obj,val) cpu_xchg_gs(obj, val)
+#define CPU_CMPXCHG(obj, old, new) cmpxchg_local_gs(obj, old, new)
+
+/*
+ * All cpu operations are interrupt safe and do not need to disable
+ * preempt. So the other variants all reduce to the same instruction.
+ */
+#define _CPU_READ CPU_READ
+#define _CPU_WRITE CPU_WRITE
+#define _CPU_ADD CPU_ADD
+#define _CPU_SUB CPU_SUB
+#define _CPU_INC CPU_INC
+#define _CPU_DEC CPU_DEC
+#define _CPU_XCHG CPU_XCHG
+#define _CPU_CMPXCHG CPU_CMPXCHG
+
+#define __CPU_READ CPU_READ
+#define __CPU_WRITE CPU_WRITE
+#define __CPU_ADD CPU_ADD
+#define __CPU_SUB CPU_SUB
+#define __CPU_INC CPU_INC
+#define __CPU_DEC CPU_DEC
+#define __CPU_XCHG CPU_XCHG
+#define __CPU_CMPXCHG CPU_CMPXCHG
+
 #endif /* _ASM_X8664_PERCPU_H_ */
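
For illustration only, not part of the patch: a rough sketch of how the
CPU_* operations defined above could be used. The struct, variable and
function names are made up, and it is assumed that the pointers involved
resolve to %gs-relative per-cpu addresses (how such pointers are obtained
is outside the scope of this patch). Because each operation is a single
interrupt-safe instruction, no preempt_disable()/preempt_enable() bracket
is needed around it.

struct local_counters {
	unsigned long events;		/* total events seen on this cpu */
	unsigned long failed;		/* events that could not be handled */
};

/* 'lc' is assumed to point at per-cpu storage reachable via %gs. */
static void account_event(struct local_counters *lc, int ok)
{
	CPU_INC(lc->events);		/* single incq %gs:... */
	if (!ok)
		CPU_ADD(lc->failed, 1);	/* single addq %gs:... */
}

/* Claim a per-cpu flag if it is still clear; local cmpxchg, no lock prefix. */
static int claim_slot(unsigned long *flag)
{
	return CPU_CMPXCHG(*flag, 0, 1) == 0;
}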