IA64: implementation of synchronized bit operations

Implement synchronized bit operations and use them to define the regular bit
operations.  Because <asm/bitops.h> now pulls in the new <asm/bitops_mode.h>
header, IA64 code does not need an explicit #include to use them; the
synchronized bit operations are available by default throughout the
architecture-specific code.  A short, illustrative usage sketch follows the
patch.

Signed-off-by: Christoph Lameter

Index: linux-2.6.16-mm2/include/asm-ia64/bitops.h
===================================================================
--- linux-2.6.16-mm2.orig/include/asm-ia64/bitops.h	2006-03-30 21:21:04.000000000 -0800
+++ linux-2.6.16-mm2/include/asm-ia64/bitops.h	2006-03-30 21:21:13.000000000 -0800
@@ -11,6 +11,7 @@
 #include
 #include
+#include <asm/bitops_mode.h>
 #include
 #include
@@ -19,8 +20,6 @@
  * @nr: the bit to set
  * @addr: the address to start counting from
  *
- * This function is atomic and may not be reordered.  See __set_bit()
- * if you do not require the atomic guarantees.
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  *
@@ -32,252 +31,114 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
 static __inline__ void
-set_bit (int nr, volatile void *addr)
+set_bit (int nr, void *addr)
 {
-	__u32 bit, old, new;
-	volatile __u32 *m;
-	CMPXCHG_BUGCHECK_DECL
-
-	m = (volatile __u32 *) addr + (nr >> 5);
-	bit = 1 << (nr & 31);
-	do {
-		CMPXCHG_BUGCHECK(m);
-		old = *m;
-		new = old | bit;
-	} while (cmpxchg_acq(m, old, new) != old);
+	set_bit_mode(nr, addr, BMODE_ATOMIC);
 }
 
 /**
  * __set_bit - Set a bit in memory
  * @nr: the bit to set
  * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
  */
 static __inline__ void
-__set_bit (int nr, volatile void *addr)
+__set_bit (int nr, void *addr)
 {
-	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
+	set_bit_mode(nr, addr, BMODE_NONE);
 }
 
-/*
- * clear_bit() has "acquire" semantics.
- */
 #define smp_mb__before_clear_bit()	smp_mb()
-#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)
+#define smp_mb__after_clear_bit()	smp_mb()
 
 /**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
  * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered.  However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
- * in order to ensure changes are visible on other processors.
  */
 static __inline__ void
-clear_bit (int nr, volatile void *addr)
+clear_bit (int nr, void *addr)
 {
-	__u32 mask, old, new;
-	volatile __u32 *m;
-	CMPXCHG_BUGCHECK_DECL
-
-	m = (volatile __u32 *) addr + (nr >> 5);
-	mask = ~(1 << (nr & 31));
-	do {
-		CMPXCHG_BUGCHECK(m);
-		old = *m;
-		new = old & mask;
-	} while (cmpxchg_acq(m, old, new) != old);
+	clear_bit_mode(nr, addr, BMODE_ATOMIC);
 }
 
-/**
- * __clear_bit - Clears a bit in memory (non-atomic version)
- */
 static __inline__ void
-__clear_bit (int nr, volatile void *addr)
+__clear_bit (int nr, void *addr)
 {
-	volatile __u32 *p = (__u32 *) addr + (nr >> 5);
-	__u32 m = 1 << (nr & 31);
-	*p &= ~m;
+	clear_bit_mode(nr, addr, BMODE_NONE);
 }
 
 /**
  * change_bit - Toggle a bit in memory
  * @nr: Bit to clear
  * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
  */
 static __inline__ void
-change_bit (int nr, volatile void *addr)
+change_bit (int nr, void *addr)
 {
-	__u32 bit, old, new;
-	volatile __u32 *m;
-	CMPXCHG_BUGCHECK_DECL
-
-	m = (volatile __u32 *) addr + (nr >> 5);
-	bit = (1 << (nr & 31));
-	do {
-		CMPXCHG_BUGCHECK(m);
-		old = *m;
-		new = old ^ bit;
-	} while (cmpxchg_acq(m, old, new) != old);
+	change_bit_mode(nr, addr, BMODE_ATOMIC);
 }
 
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic and may be reordered.
- * If it's called on the same region of memory simultaneously, the effect
- * may be that only one operation succeeds.
- */
 static __inline__ void
-__change_bit (int nr, volatile void *addr)
+__change_bit (int nr, void *addr)
 {
-	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
+	change_bit_mode(nr, addr, BMODE_NONE);
 }
 
 /**
  * test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
  */
 static __inline__ int
-test_and_set_bit (int nr, volatile void *addr)
+test_and_set_bit (int nr, void *addr)
 {
-	__u32 bit, old, new;
-	volatile __u32 *m;
-	CMPXCHG_BUGCHECK_DECL
-
-	m = (volatile __u32 *) addr + (nr >> 5);
-	bit = 1 << (nr & 31);
-	do {
-		CMPXCHG_BUGCHECK(m);
-		old = *m;
-		new = old | bit;
-	} while (cmpxchg_acq(m, old, new) != old);
-	return (old & bit) != 0;
+	return test_and_set_bit_mode(nr, addr, BMODE_ATOMIC);
 }
 
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- */
 static __inline__ int
-__test_and_set_bit (int nr, volatile void *addr)
+__test_and_set_bit (int nr, void *addr)
 {
-	__u32 *p = (__u32 *) addr + (nr >> 5);
-	__u32 m = 1 << (nr & 31);
-	int oldbitset = (*p & m) != 0;
-
-	*p |= m;
-	return oldbitset;
+	return test_and_set_bit_mode(nr, addr, BMODE_NONE);
 }
 
 /**
  * test_and_clear_bit - Clear a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
  */
 static __inline__ int
-test_and_clear_bit (int nr, volatile void *addr)
+test_and_clear_bit (int nr, void *addr)
 {
-	__u32 mask, old, new;
-	volatile __u32 *m;
-	CMPXCHG_BUGCHECK_DECL
-
-	m = (volatile __u32 *) addr + (nr >> 5);
-	mask = ~(1 << (nr & 31));
-	do {
-		CMPXCHG_BUGCHECK(m);
-		old = *m;
-		new = old & mask;
-	} while (cmpxchg_acq(m, old, new) != old);
-	return (old & ~mask) != 0;
+	return test_and_clear_bit_mode(nr, addr, BMODE_ATOMIC);
 }
 
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic and can be reordered.
- * If two examples of this operation race, one can appear to succeed
- * but actually fail.  You must protect multiple accesses with a lock.
- */
 static __inline__ int
-__test_and_clear_bit(int nr, volatile void * addr)
+__test_and_clear_bit(int nr, void * addr)
 {
-	__u32 *p = (__u32 *) addr + (nr >> 5);
-	__u32 m = 1 << (nr & 31);
-	int oldbitset = *p & m;
-
-	*p &= ~m;
-	return oldbitset;
+	return test_and_clear_bit_mode(nr, addr, BMODE_NONE);
 }
 
 /**
  * test_and_change_bit - Change a bit and return its old value
  * @nr: Bit to set
  * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
  */
 static __inline__ int
-test_and_change_bit (int nr, volatile void *addr)
+test_and_change_bit (int nr, void *addr)
 {
-	__u32 bit, old, new;
-	volatile __u32 *m;
-	CMPXCHG_BUGCHECK_DECL
-
-	m = (volatile __u32 *) addr + (nr >> 5);
-	bit = (1 << (nr & 31));
-	do {
-		CMPXCHG_BUGCHECK(m);
-		old = *m;
-		new = old ^ bit;
-	} while (cmpxchg_acq(m, old, new) != old);
-	return (old & bit) != 0;
+	return test_and_change_bit_mode(nr, addr, BMODE_ATOMIC);
 }
 
-/*
- * WARNING: non atomic version.
- */
 static __inline__ int
 __test_and_change_bit (int nr, void *addr)
 {
-	__u32 old, bit = (1 << (nr & 31));
-	__u32 *m = (__u32 *) addr + (nr >> 5);
-
-	old = *m;
-	*m = old ^ bit;
-	return (old & bit) != 0;
+	return test_and_change_bit_mode(nr, addr, BMODE_NONE);
 }
 
 static __inline__ int
-test_bit (int nr, const volatile void *addr)
+test_bit (int nr, const void *addr)
 {
-	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
+	return 1 & (((const __u32 *) addr)[nr >> 5] >> (nr & 31));
 }
 
 /**
Index: linux-2.6.16-mm2/include/asm-ia64/bitops_mode.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.16-mm2/include/asm-ia64/bitops_mode.h	2006-03-31 10:39:39.000000000 -0800
@@ -0,0 +1,229 @@
+#ifndef _ASM_IA64_BITOPS_MODE_H
+#define _ASM_IA64_BITOPS_MODE_H
+
+/*
+ * Copyright (C) 2006 Silicon Graphics, Incorporated
+ * Christoph Lameter
+ *
+ * Bit operations with the ability to specify the synchronization mode
+ */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/intrinsics.h>
+
+#define BMODE_NONE	0
+#define BMODE_ATOMIC	1
+#define BMODE_LOCK	2
+#define BMODE_UNLOCK	3
+#define BMODE_BARRIER	4
+
+static __inline__ __u32 cmpxchg_mode(__u32 *m, __u32 old, __u32 new, int mode)
+{
+	__u32 x;
+
+	switch (mode) {
+	case BMODE_ATOMIC :
+	case BMODE_LOCK :
+		return cmpxchg_acq(m, old, new);
+
+	case BMODE_UNLOCK :
+		return cmpxchg_rel(m, old, new);
+
+	case BMODE_BARRIER :
+		x = cmpxchg_rel(m, old, new);
+		ia64_mf();
+		return x;
+	}
+}
+
+
+/**
+ * set_bit_mode - set a bit in memory
+ *
+ * The address must be (at least) "long" aligned.
+ * Note that there are drivers (e.g., eepro100) which use these operations to
+ * operate on hw-defined data-structures, so we can't easily change these
+ * operations to force a bigger alignment.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+static __inline__ void
+set_bit_mode (int nr, void *addr, int mode)
+{
+	__u32 bit, old, new;
+	__u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (__u32 *) addr + (nr >> 5);
+	bit = 1 << (nr & 31);
+
+	if (mode == BMODE_NONE) {
+		*m |= bit;
+		return;
+	}
+
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old | bit;
+	} while (cmpxchg_mode(m, old, new, mode) != old);
+}
+
+/**
+ * clear_bit_mode - Clears a bit in memory
+ */
+static __inline__ void
+clear_bit_mode (int nr, void *addr, int mode)
+{
+	__u32 mask, old, new;
+	__u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (__u32 *) addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+
+	if (mode == BMODE_NONE) {
+		*m &= mask;
+		return;
+	}
+
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old & mask;
+	} while (cmpxchg_mode(m, old, new, mode) != old);
+}
+
+/**
+ * change_bit_mode - Toggle a bit in memory
+ */
+static __inline__ void
+change_bit_mode (int nr, void *addr, int mode)
+{
+	__u32 bit, old, new;
+	__u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (__u32 *) addr + (nr >> 5);
+	bit = (1 << (nr & 31));
+
+	if (mode == BMODE_NONE) {
+		*m ^= bit;
+		return;
+	}
+
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old ^ bit;
+	} while (cmpxchg_mode(m, old, new, mode) != old);
+}
+
+/**
+ * test_and_set_bit_mode - Set a bit and return its old value
+ */
+static __inline__ int
+test_and_set_bit_mode (int nr, void *addr, int mode)
+{
+	__u32 bit, old, new;
+	__u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (__u32 *) addr + (nr >> 5);
+	bit = 1 << (nr & 31);
+
+	if (mode == BMODE_NONE) {
+		int oldbitset = *m & bit;
+		*m |= bit;
+		return oldbitset;
+	}
+
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old | bit;
+	} while (cmpxchg_mode(m, old, new, mode) != old);
+	return (old & bit) != 0;
+}
+
+/**
+ * test_and_clear_bit_mode - Clear a bit and return its old value
+ */
+static __inline__ int
+test_and_clear_bit_mode (int nr, void *addr, int mode)
+{
+	__u32 mask, old, new;
+	__u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (__u32 *) addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+
+	if (mode == BMODE_NONE) {
+		int oldbitset = *m & ~mask;
+		*m &= mask;
+		return oldbitset;
+	}
+
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old & mask;
+	} while (cmpxchg_mode(m, old, new, mode) != old);
+	return (old & ~mask) != 0;
+}
+
+/**
+ * test_and_change_bit_mode - Change a bit and return its old value
+ */
+static __inline__ int
+test_and_change_bit_mode (int nr, void *addr, int mode)
+{
+	__u32 bit, old, new;
+	__u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (__u32 *) addr + (nr >> 5);
+	bit = (1 << (nr & 31));
+
+	if (mode == BMODE_NONE) {
+		old = *m;
+		*m = old ^ bit;
+		return (old & bit) != 0;
+	}
+
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old ^ bit;
+	} while (cmpxchg_mode(m, old, new, mode) != old);
+	return (old & bit) != 0;
+}
+
+/* A set of convenience definitions */
+#define set_bit_lock(a,b)		set_bit_mode(a,b,BMODE_LOCK)
+#define set_bit_unlock(a,b)		set_bit_mode(a,b,BMODE_UNLOCK)
+#define set_bit_barrier(a,b)		set_bit_mode(a,b,BMODE_BARRIER)
+
+#define clear_bit_lock(a,b)		clear_bit_mode(a,b,BMODE_LOCK)
+#define clear_bit_unlock(a,b)		clear_bit_mode(a,b,BMODE_UNLOCK)
+#define clear_bit_barrier(a,b)		clear_bit_mode(a,b,BMODE_BARRIER)
+
+#define change_bit_lock(a,b)		change_bit_mode(a,b,BMODE_LOCK)
+#define change_bit_unlock(a,b)		change_bit_mode(a,b,BMODE_UNLOCK)
+#define change_bit_barrier(a,b)		change_bit_mode(a,b,BMODE_BARRIER)
+
+#define test_and_set_bit_lock(a,b)	test_and_set_bit_mode(a,b,BMODE_LOCK)
+#define test_and_set_bit_unlock(a,b)	test_and_set_bit_mode(a,b,BMODE_UNLOCK)
+#define test_and_set_bit_barrier(a,b)	test_and_set_bit_mode(a,b,BMODE_BARRIER)
+
+#define test_and_clear_bit_lock(a,b)	test_and_clear_bit_mode(a,b,BMODE_LOCK)
+#define test_and_clear_bit_unlock(a,b)	test_and_clear_bit_mode(a,b,BMODE_UNLOCK)
+#define test_and_clear_bit_barrier(a,b)	test_and_clear_bit_mode(a,b,BMODE_BARRIER)
+
+#define test_and_change_bit_lock(a,b)	test_and_change_bit_mode(a,b,BMODE_LOCK)
+#define test_and_change_bit_unlock(a,b)	test_and_change_bit_mode(a,b,BMODE_UNLOCK)
+#define test_and_change_bit_barrier(a,b)	test_and_change_bit_mode(a,b,BMODE_BARRIER)
+
+#endif /* _ASM_IA64_BITOPS_MODE_H */
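
For reference, here is a minimal usage sketch of the lock/unlock convenience
macros, as mentioned in the description above.  It is illustrative only and
not part of the patch; MY_LOCK_BIT, my_flags and my_critical_section are
hypothetical names, and the usual kernel headers (linux/bitops.h plus
asm/processor.h for cpu_relax()) are assumed.

	/* Illustrative sketch only -- not part of the patch. */
	#include <linux/bitops.h>

	#define MY_LOCK_BIT	0		/* hypothetical lock bit */

	static unsigned long my_flags;		/* hypothetical flag word */

	static void my_critical_section(void)
	{
		/* BMODE_LOCK maps to cmpxchg_acq(), so a successful
		 * test-and-set has acquire semantics.
		 */
		while (test_and_set_bit_lock(MY_LOCK_BIT, &my_flags))
			cpu_relax();

		/* ... critical section ... */

		/* BMODE_UNLOCK maps to cmpxchg_rel(), so the clear has
		 * release semantics and no separate barrier call is needed
		 * around it for this purpose.
		 */
		clear_bit_unlock(MY_LOCK_BIT, &my_flags);
	}

The ordering here comes from the bit operation itself, which is the point of
the new modes: the caller picks acquire, release, or fence behaviour instead
of wrapping a plain atomic bit operation with explicit barriers.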
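Existing callers are unaffected, since set_bit() and friends now simply expand
to the BMODE_ATOMIC variants, and code that wants a trailing full fence can
pass BMODE_BARRIER directly (cmpxchg_rel() followed by ia64_mf()).  Another
hedged sketch, using a hypothetical my_status word:

	/* Illustrative sketch only -- not part of the patch. */
	static __u32 my_status;			/* hypothetical status word */

	static void my_update_status(void)
	{
		/* Ordinary atomic set, exactly as before this patch. */
		set_bit(1, &my_status);

		/* Clear followed by a full memory fence via BMODE_BARRIER. */
		clear_bit_mode(1, &my_status, BMODE_BARRIER);
	}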