Convert bit_spin_lock to new locking bitops. Slub can use the non-atomic
store version to clear (I think?)

Signed-off-by: Nick Piggin

---
 include/linux/bit_spinlock.h |   26 ++++++++++++++++++++++----
 mm/slub.c                    |    2 +-
 2 files changed, 23 insertions(+), 5 deletions(-)

Index: linux-2.6/include/linux/bit_spinlock.h
===================================================================
--- linux-2.6.orig/include/linux/bit_spinlock.h
+++ linux-2.6/include/linux/bit_spinlock.h
@@ -18,7 +18,7 @@ static inline void bit_spin_lock(int bit
 	 */
 	preempt_disable();
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	while (test_and_set_bit(bitnum, addr)) {
+	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
 		while (test_bit(bitnum, addr)) {
 			preempt_enable();
 			cpu_relax();
@@ -36,7 +36,7 @@ static inline int bit_spin_trylock(int b
 {
 	preempt_disable();
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	if (test_and_set_bit(bitnum, addr)) {
+	if (unlikely(test_and_set_bit_lock(bitnum, addr))) {
 		preempt_enable();
 		return 0;
 	}
@@ -50,10 +50,28 @@ static inline int bit_spin_trylock(int b
  */
 static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
 {
+#ifdef CONFIG_DEBUG_SPINLOCK
+	BUG_ON(!test_bit(bitnum, addr));
+#endif
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	clear_bit_unlock(bitnum, addr);
+#endif
+	preempt_enable();
+	__release(bitlock);
+}
+
+/*
+ * bit-based spin_unlock()
+ * non-atomic version, which can be used eg. if the bit lock itself is
+ * protecting the rest of the flags in the word.
+ */
+static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
 	BUG_ON(!test_bit(bitnum, addr));
-	smp_mb__before_clear_bit();
-	clear_bit(bitnum, addr);
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__clear_bit_unlock(bitnum, addr);
 #endif
 	preempt_enable();
 	__release(bitlock);

Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c
+++ linux-2.6/mm/slub.c
@@ -1124,7 +1124,7 @@ static __always_inline void slab_lock(st
 
 static __always_inline void slab_unlock(struct page *page)
 {
-	bit_spin_unlock(PG_locked, &page->flags);
+	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
 static __always_inline int slab_trylock(struct page *page)
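
For reference, the acquire/release contract the new bitops rely on can be
sketched with plain C11 atomics in user space. This sketch is not part of
the patch; the helper names bit_lock(), bit_unlock() and
bit_unlock_nonatomic() are invented here to mirror test_and_set_bit_lock(),
clear_bit_unlock() and __clear_bit_unlock(). The store-based unlock is only
safe when the lock bit protects the other flags in the word, as the comment
added above spells out.

/*
 * Illustrative user-space model only -- not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

#define LOCK_BIT 0UL

/* Acquire: spin until the atomic fetch-or observes the bit clear. */
static void bit_lock(atomic_ulong *word, unsigned long bit)
{
	unsigned long mask = 1UL << bit;

	while (atomic_fetch_or_explicit(word, mask, memory_order_acquire) & mask)
		;	/* another owner still holds the bit */
}

/* Release: atomic clear of the bit (clear_bit_unlock analogue). */
static void bit_unlock(atomic_ulong *word, unsigned long bit)
{
	atomic_fetch_and_explicit(word, ~(1UL << bit), memory_order_release);
}

/*
 * Store-based release (__clear_bit_unlock analogue): a plain
 * read-modify-write ending in a release store is only correct when
 * nothing else can change other bits in the word while the lock is held.
 */
static void bit_unlock_nonatomic(atomic_ulong *word, unsigned long bit)
{
	unsigned long v = atomic_load_explicit(word, memory_order_relaxed);

	atomic_store_explicit(word, v & ~(1UL << bit), memory_order_release);
}

int main(void)
{
	atomic_ulong flags = 0;

	bit_lock(&flags, LOCK_BIT);
	/* critical section: other flag bits in 'flags' must stay stable */
	bit_unlock_nonatomic(&flags, LOCK_BIT);

	bit_lock(&flags, LOCK_BIT);
	bit_unlock(&flags, LOCK_BIT);

	printf("flags = %lu\n", (unsigned long)atomic_load(&flags));
	return 0;
}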