Some cleanup in spinlock.h - Expand macros that are only called once into caller for cleaner code. - Turn all spinlock macros into inlines - Turn all __asm__ __volatile__s into asm volatile The result is much more readable, but shouldn't generate any different code. Signed-off-by: Andi Kleen --- include/asm-x86_64/rwlock.h | 14 ------ include/asm-x86_64/spinlock.h | 96 ++++++++++++++++++++++-------------------- 2 files changed, 51 insertions(+), 59 deletions(-) Index: linux/include/asm-x86_64/rwlock.h =================================================================== --- linux.orig/include/asm-x86_64/rwlock.h +++ linux/include/asm-x86_64/rwlock.h @@ -20,18 +20,4 @@ #define RW_LOCK_BIAS 0x01000000 -#define __build_read_lock(rw) \ - asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" \ - "jns 1f\n" \ - "call __read_lock_failed\n" \ - "1:\n" \ - ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory") - -#define __build_write_lock(rw) \ - asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t" \ - "jz 1f\n" \ - "\tcall __write_lock_failed\n\t" \ - "1:\n" \ - ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory") - #endif Index: linux/include/asm-x86_64/spinlock.h =================================================================== --- linux.orig/include/asm-x86_64/spinlock.h +++ linux/include/asm-x86_64/spinlock.h @@ -16,51 +16,30 @@ * (the type definitions are in asm/spinlock_types.h) */ -#define __raw_spin_is_locked(x) \ - (*(volatile signed int *)(&(x)->slock) <= 0) - -#define __raw_spin_lock_string \ - "\n0:\t" \ - LOCK_PREFIX " ; decl %0\n\t" \ - "jns 2f\n" \ - "1:\n\t" \ - "rep;nop\n\t" \ - "cmpl $0,%0\n\t" \ - "jle 1b\n\t" \ - "jmp 0b\n" \ - "2:\t" - -#define __raw_spin_lock_string_flags \ - "\n1:\t" \ - LOCK_PREFIX "; decb %0\n\t" \ - "js 2f\n\t" \ - LOCK_SECTION_START("") \ - "2:\t" \ - "test $0x200, %1\n\t" \ - "jz 3f\n\t" \ - "sti\n\t" \ - "3:\t" \ - "rep;nop\n\t" \ - "cmpb $0, %0\n\t" \ - "jle 3b\n\t" \ - "cli\n\t" \ - "jmp 1b\n" \ - LOCK_SECTION_END - -#define __raw_spin_unlock_string \ - "movl 
$1,%0" \ - :"=m" (lock->slock) : : "memory" +static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +{ + return *(volatile signed int *)(&lock->slock) <= 0; +} static inline void __raw_spin_lock(raw_spinlock_t *lock) { - asm(__raw_spin_lock_string : "=m" (lock->slock) : : "memory"); + asm volatile( + "\n0:\t" + LOCK_PREFIX " ; decl %0\n\t" + "jns 2f\n" + "1:\n\t" + "rep;nop\n\t" + "cmpl $0,%0\n\t" + "jle 1b\n\t" + "jmp 0b\n" + "2:\t" : "=m" (lock->slock) : : "memory"); } static inline int __raw_spin_trylock(raw_spinlock_t *lock) { int oldval; - __asm__ __volatile__( + asm volatile( "xchgl %0,%1" :"=q" (oldval), "=m" (lock->slock) :"0" (0) : "memory"); @@ -70,16 +49,28 @@ static inline int __raw_spin_trylock(raw static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) { - __asm__ __volatile__( - __raw_spin_lock_string_flags + asm volatile( + "\n1:\t" + LOCK_PREFIX "; decb %0\n\t" + "js 2f\n\t" + LOCK_SECTION_START("") + "2:\t" + "test $0x200, %1\n\t" + "jz 3f\n\t" + "sti\n\t" + "3:\t" + "rep;nop\n\t" + "cmpb $0, %0\n\t" + "jle 3b\n\t" + "cli\n\t" + "jmp 1b\n" + LOCK_SECTION_END :"=m" (lock->slock) : "r" (flags) : "memory"); } static inline void __raw_spin_unlock(raw_spinlock_t *lock) { - __asm__ __volatile__( - __raw_spin_unlock_string - ); + asm volatile("movl $1,%0" :"=m" (lock->slock) : : "memory"); } #define __raw_spin_unlock_wait(lock) \ @@ -99,17 +90,32 @@ static inline void __raw_spin_unlock(raw * with the high bit (sign) being the "contended" bit. 
*/ -#define __raw_read_can_lock(x) ((int)(x)->lock > 0) -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +static inline int __raw_read_can_lock(raw_rwlock_t *rw) +{ + return (int)rw->lock > 0; +} + +static inline int __raw_write_can_lock(raw_rwlock_t *rw) +{ + return rw->lock == RW_LOCK_BIAS; +} static inline void __raw_read_lock(raw_rwlock_t *rw) { - __build_read_lock(rw); + asm volatile(LOCK_PREFIX "subl $1,(%0)\n\t" + "jns 1f\n" + "call __read_lock_failed\n" + "1:\n" + ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory"); } static inline void __raw_write_lock(raw_rwlock_t *rw) { - __build_write_lock(rw); + asm volatile(LOCK_PREFIX "subl %1,(%0)\n\t" + "jz 1f\n" + "\tcall __write_lock_failed\n\t" + "1:\n" + ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory"); } static inline int __raw_read_trylock(raw_rwlock_t *lock)