From: Ingo Molnar

Teach special (recursive) locking code to the lock validator.  Has no
effect on non-lockdep kernels.

Signed-off-by: Ingo Molnar
Signed-off-by: Arjan van de Ven
Signed-off-by: Andrew Morton
---

 kernel/futex.c |   44 ++++++++++++++++++++++++++------------------
 1 file changed, 26 insertions(+), 18 deletions(-)

diff -puN kernel/futex.c~lock-validator-special-locking-futex kernel/futex.c
--- devel/kernel/futex.c~lock-validator-special-locking-futex	2006-05-29 18:13:14.000000000 -0700
+++ devel-akpm/kernel/futex.c	2006-05-29 18:13:14.000000000 -0700
@@ -604,6 +604,22 @@ static int unlock_futex_pi(u32 __user *u
 }
 
 /*
+ * Express the locking dependencies for lockdep:
+ */
+static inline void
+double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
+{
+	if (hb1 <= hb2) {
+		spin_lock(&hb1->lock);
+		if (hb1 < hb2)
+			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
+	} else { /* hb1 > hb2 */
+		spin_lock(&hb2->lock);
+		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
+	}
+}
+
+/*
  * Wake up all waiters hashed on the physical page that is mapped
  * to this virtual address:
  */
@@ -669,19 +685,15 @@ retryfull:
 	hb2 = hash_futex(&key2);
 
 retry:
-	if (hb1 < hb2)
-		spin_lock(&hb1->lock);
-	spin_lock(&hb2->lock);
-	if (hb1 > hb2)
-		spin_lock(&hb1->lock);
+	double_lock_hb(hb1, hb2);
 
 	op_ret = futex_atomic_op_inuser(op, uaddr2);
 	if (unlikely(op_ret < 0)) {
 		u32 dummy;
 
-		spin_unlock(&hb1->lock);
+		spin_unlock_non_nested(&hb1->lock);
 		if (hb1 != hb2)
-			spin_unlock(&hb2->lock);
+			spin_unlock_non_nested(&hb2->lock);
 
 #ifndef CONFIG_MMU
 		/*
@@ -748,9 +760,9 @@ retry:
 		ret += op_ret;
 	}
 
-	spin_unlock(&hb1->lock);
+	spin_unlock_non_nested(&hb1->lock);
 	if (hb1 != hb2)
-		spin_unlock(&hb2->lock);
+		spin_unlock_non_nested(&hb2->lock);
 out:
 	up_read(&current->mm->mmap_sem);
 	return ret;
@@ -782,11 +794,7 @@ static int futex_requeue(u32 __user *uad
 	hb1 = hash_futex(&key1);
 	hb2 = hash_futex(&key2);
 
-	if (hb1 < hb2)
-		spin_lock(&hb1->lock);
-	spin_lock(&hb2->lock);
-	if (hb1 > hb2)
-		spin_lock(&hb1->lock);
+	double_lock_hb(hb1, hb2);
 
 	if (likely(cmpval != NULL)) {
 		u32 curval;
@@ -794,9 +802,9 @@ static int futex_requeue(u32 __user *uad
 		ret = get_futex_value_locked(&curval, uaddr1);
 
 		if (unlikely(ret)) {
-			spin_unlock(&hb1->lock);
+			spin_unlock_non_nested(&hb1->lock);
 			if (hb1 != hb2)
-				spin_unlock(&hb2->lock);
+				spin_unlock_non_nested(&hb2->lock);
 
 			/*
 			 * If we would have faulted, release mmap_sem, fault
@@ -842,9 +850,9 @@ static int futex_requeue(u32 __user *uad
 	}
 
 out_unlock:
-	spin_unlock(&hb1->lock);
+	spin_unlock_non_nested(&hb1->lock);
 	if (hb1 != hb2)
-		spin_unlock(&hb2->lock);
+		spin_unlock_non_nested(&hb2->lock);
 
 	/* drop_key_refs() must be called outside the spinlocks. */
 	while (--drop_count >= 0)
_
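
A note for readers following the lockdep series (not part of the patch
above): the deadlock-avoidance half of double_lock_hb() (always take the
lower-addressed bucket lock first; when both pointers hash to the same
bucket, take the lock only once) is ordinary lock ordering and can be
demonstrated outside the kernel.  The sketch below is a minimal userspace
approximation: pthread mutexes stand in for the hash-bucket spinlocks,
the names (bucket, double_lock, double_unlock) are made up for
illustration, and there is no userspace counterpart of the
spin_lock_nested()/SINGLE_DEPTH_NESTING annotation, whose job is to tell
the validator that the second acquisition within the same lock class is
deliberate nesting rather than a self-deadlock.

	/*
	 * Userspace sketch only; hypothetical names, pthread mutexes
	 * standing in for the kernel's hash-bucket spinlocks.
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct bucket {
		pthread_mutex_t lock;
	};

	/*
	 * Take the lower-addressed lock first so that two threads
	 * locking the same pair (in either argument order) cannot
	 * deadlock.  When both pointers alias the same bucket, take
	 * the lock only once.
	 */
	static void double_lock(struct bucket *b1, struct bucket *b2)
	{
		if (b1 <= b2) {
			pthread_mutex_lock(&b1->lock);
			if (b1 < b2)
				pthread_mutex_lock(&b2->lock);
		} else { /* b1 > b2 */
			pthread_mutex_lock(&b2->lock);
			pthread_mutex_lock(&b1->lock);
		}
	}

	static void double_unlock(struct bucket *b1, struct bucket *b2)
	{
		pthread_mutex_unlock(&b1->lock);
		if (b1 != b2)
			pthread_mutex_unlock(&b2->lock);
	}

	int main(void)
	{
		struct bucket a = { PTHREAD_MUTEX_INITIALIZER };
		struct bucket b = { PTHREAD_MUTEX_INITIALIZER };

		double_lock(&a, &b);	/* same order as double_lock(&b, &a) */
		double_unlock(&a, &b);
		printf("locked and unlocked pair without deadlock\n");
		return 0;
	}

Built with cc -pthread, the helper acquires any pair in one global
(address) order regardless of the caller's argument order, which is the
property that keeps two concurrent wake/requeue operations on the same
pair of buckets from deadlocking against each other.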