From: Ingo Molnar Add the per_cpu_offset() generic method. (used by the lock validator) Signed-off-by: Ingo Molnar Signed-off-by: Arjan van de Ven Cc: Rusty Russell Signed-off-by: Andrew Morton --- include/asm-generic/percpu.h | 2 ++ include/asm-ia64/percpu.h | 1 + include/asm-powerpc/percpu.h | 1 + include/asm-s390/percpu.h | 1 + include/asm-sparc64/percpu.h | 1 + include/asm-x86_64/percpu.h | 2 ++ 6 files changed, 8 insertions(+) diff -puN include/asm-generic/percpu.h~lockdep-add-per_cpu_offset include/asm-generic/percpu.h --- a/include/asm-generic/percpu.h~lockdep-add-per_cpu_offset +++ a/include/asm-generic/percpu.h @@ -7,6 +7,8 @@ extern unsigned long __per_cpu_offset[NR_CPUS]; +#define per_cpu_offset(x) (__per_cpu_offset[x]) + /* Separate out the type, so (int[3], foo) works. */ #define DEFINE_PER_CPU(type, name) \ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name diff -puN include/asm-ia64/percpu.h~lockdep-add-per_cpu_offset include/asm-ia64/percpu.h --- a/include/asm-ia64/percpu.h~lockdep-add-per_cpu_offset +++ a/include/asm-ia64/percpu.h @@ -36,6 +36,7 @@ #ifdef CONFIG_SMP extern unsigned long __per_cpu_offset[NR_CPUS]; +#define per_cpu_offset(x) (__per_cpu_offset[x]) /* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */ DECLARE_PER_CPU(unsigned long, local_per_cpu_offset); diff -puN include/asm-powerpc/percpu.h~lockdep-add-per_cpu_offset include/asm-powerpc/percpu.h --- a/include/asm-powerpc/percpu.h~lockdep-add-per_cpu_offset +++ a/include/asm-powerpc/percpu.h @@ -14,6 +14,7 @@ #define __per_cpu_offset(cpu) (paca[cpu].data_offset) #define __my_cpu_offset() get_paca()->data_offset +#define per_cpu_offset(x) (__per_cpu_offset(x)) /* Separate out the type, so (int[3], foo) works. 
*/ #define DEFINE_PER_CPU(type, name) \ diff -puN include/asm-s390/percpu.h~lockdep-add-per_cpu_offset include/asm-s390/percpu.h --- a/include/asm-s390/percpu.h~lockdep-add-per_cpu_offset +++ a/include/asm-s390/percpu.h @@ -42,6 +42,7 @@ extern unsigned long __per_cpu_offset[NR #define __get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) #define __raw_get_cpu_var(var) __reloc_hide(var,S390_lowcore.percpu_offset) #define per_cpu(var,cpu) __reloc_hide(var,__per_cpu_offset[cpu]) +#define per_cpu_offset(x) (__per_cpu_offset[x]) /* A macro to avoid #include hell... */ #define percpu_modcopy(pcpudst, src, size) \ diff -puN include/asm-sparc64/percpu.h~lockdep-add-per_cpu_offset include/asm-sparc64/percpu.h --- a/include/asm-sparc64/percpu.h~lockdep-add-per_cpu_offset +++ a/include/asm-sparc64/percpu.h @@ -11,6 +11,7 @@ extern unsigned long __per_cpu_base; extern unsigned long __per_cpu_shift; #define __per_cpu_offset(__cpu) \ (__per_cpu_base + ((unsigned long)(__cpu) << __per_cpu_shift)) +#define per_cpu_offset(x) (__per_cpu_offset(x)) /* Separate out the type, so (int[3], foo) works. */ #define DEFINE_PER_CPU(type, name) \ diff -puN include/asm-x86_64/percpu.h~lockdep-add-per_cpu_offset include/asm-x86_64/percpu.h --- a/include/asm-x86_64/percpu.h~lockdep-add-per_cpu_offset +++ a/include/asm-x86_64/percpu.h @@ -14,6 +14,8 @@ #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset) #define __my_cpu_offset() read_pda(data_offset) +#define per_cpu_offset(x) (__per_cpu_offset(x)) + /* Separate out the type, so (int[3], foo) works. */ #define DEFINE_PER_CPU(type, name) \ __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name _