From: Ingo Molnar

Teach special (recursive) locking code to the lock validator.  Has no
effect on non-lockdep kernels.

(Includes workaround for sk_receive_queue.lock, which is currently treated
globally by the lock validator, but which will be switched to
per-address-family locking rules.)

Signed-off-by: Ingo Molnar
Signed-off-by: Arjan van de Ven
Cc: "David S. Miller"
Signed-off-by: Andrew Morton
---

 include/net/af_unix.h |    3 +++
 net/unix/af_unix.c    |   10 +++++-----
 net/unix/garbage.c    |    8 ++++----
 3 files changed, 12 insertions(+), 9 deletions(-)

diff -puN include/net/af_unix.h~lock-validator-special-locking-af_unix include/net/af_unix.h
--- devel/include/net/af_unix.h~lock-validator-special-locking-af_unix 2006-05-29 18:13:25.000000000 -0700
+++ devel-akpm/include/net/af_unix.h 2006-05-29 18:13:25.000000000 -0700
@@ -61,6 +61,9 @@ struct unix_skb_parms {
 #define unix_state_rlock(s)    spin_lock(&unix_sk(s)->lock)
 #define unix_state_runlock(s)  spin_unlock(&unix_sk(s)->lock)
 #define unix_state_wlock(s)    spin_lock(&unix_sk(s)->lock)
+#define unix_state_wlock_nested(s) \
+                               spin_lock_nested(&unix_sk(s)->lock, \
+                               SINGLE_DEPTH_NESTING)
 #define unix_state_wunlock(s)  spin_unlock(&unix_sk(s)->lock)
 
 #ifdef __KERNEL__
diff -puN net/unix/af_unix.c~lock-validator-special-locking-af_unix net/unix/af_unix.c
--- devel/net/unix/af_unix.c~lock-validator-special-locking-af_unix 2006-05-29 18:13:25.000000000 -0700
+++ devel-akpm/net/unix/af_unix.c 2006-05-29 18:13:25.000000000 -0700
@@ -1022,7 +1022,7 @@ restart:
                 goto out_unlock;
         }
 
-        unix_state_wlock(sk);
+        unix_state_wlock_nested(sk);
 
         if (sk->sk_state != st) {
                 unix_state_wunlock(sk);
@@ -1073,12 +1073,12 @@ restart:
         unix_state_wunlock(sk);
 
         /* take ten and and send info to listening sock */
-        spin_lock(&other->sk_receive_queue.lock);
+        spin_lock_bh(&other->sk_receive_queue.lock);
         __skb_queue_tail(&other->sk_receive_queue, skb);
         /* Undo artificially decreased inflight after embrion
          * is installed to listening socket. */
         atomic_inc(&newu->inflight);
-        spin_unlock(&other->sk_receive_queue.lock);
+        spin_unlock_bh(&other->sk_receive_queue.lock);
         unix_state_runlock(other);
         other->sk_data_ready(other, 0);
         sock_put(other);
@@ -1843,7 +1843,7 @@ static int unix_ioctl(struct socket *soc
                         break;
                 }
 
-                spin_lock(&sk->sk_receive_queue.lock);
+                spin_lock_bh(&sk->sk_receive_queue.lock);
                 if (sk->sk_type == SOCK_STREAM ||
                     sk->sk_type == SOCK_SEQPACKET) {
                         skb_queue_walk(&sk->sk_receive_queue, skb)
@@ -1853,7 +1853,7 @@ static int unix_ioctl(struct socket *soc
                         if (skb)
                                 amount=skb->len;
                 }
-                spin_unlock(&sk->sk_receive_queue.lock);
+                spin_unlock_bh(&sk->sk_receive_queue.lock);
                 err = put_user(amount, (int __user *)arg);
                 break;
         }
diff -puN net/unix/garbage.c~lock-validator-special-locking-af_unix net/unix/garbage.c
--- devel/net/unix/garbage.c~lock-validator-special-locking-af_unix 2006-05-29 18:13:25.000000000 -0700
+++ devel-akpm/net/unix/garbage.c 2006-05-29 18:13:25.000000000 -0700
@@ -235,7 +235,7 @@ void unix_gc(void)
                 struct sock *x = pop_stack();
                 struct sock *sk;
 
-                spin_lock(&x->sk_receive_queue.lock);
+                spin_lock_bh(&x->sk_receive_queue.lock);
                 skb = skb_peek(&x->sk_receive_queue);
 
                 /*
@@ -270,7 +270,7 @@ void unix_gc(void)
                                 maybe_unmark_and_push(skb->sk);
                         skb=skb->next;
                 }
-                spin_unlock(&x->sk_receive_queue.lock);
+                spin_unlock_bh(&x->sk_receive_queue.lock);
                 sock_put(x);
         }
 
@@ -283,7 +283,7 @@ void unix_gc(void)
                 if (u->gc_tree == GC_ORPHAN) {
                         struct sk_buff *nextsk;
 
-                        spin_lock(&s->sk_receive_queue.lock);
+                        spin_lock_bh(&s->sk_receive_queue.lock);
                         skb = skb_peek(&s->sk_receive_queue);
                         while (skb &&
                                skb != (struct sk_buff *)&s->sk_receive_queue) {
@@ -298,7 +298,7 @@ void unix_gc(void)
                                 }
                                 skb = nextsk;
                         }
-                        spin_unlock(&s->sk_receive_queue.lock);
+                        spin_unlock_bh(&s->sk_receive_queue.lock);
                 }
                 u->gc_tree = GC_ORPHAN;
         }
_
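
As background for the annotation used above: spin_lock_nested() tells the
lock validator that taking a second lock of the same lock class is
deliberate nesting (here, the state locks of the two unix sockets involved
in a connect) rather than a potential self-deadlock.  Below is a minimal
sketch of that pattern only; the helper names are hypothetical and are not
part of this patch or of the kernel API.

/* Sketch of the spin_lock_nested()/SINGLE_DEPTH_NESTING pattern the patch
 * relies on.  two_unix_state_lock()/two_unix_state_unlock() are made-up
 * helpers for illustration, not kernel functions. */
#include <linux/spinlock.h>
#include <net/sock.h>
#include <net/af_unix.h>

static void two_unix_state_lock(struct sock *outer, struct sock *inner)
{
        /* The first lock of the class is taken the normal way. */
        spin_lock(&unix_sk(outer)->lock);
        /* The second lock belongs to the same lock class; mark it as one
         * level of intended nesting so lockdep does not report it as a
         * recursive deadlock. */
        spin_lock_nested(&unix_sk(inner)->lock, SINGLE_DEPTH_NESTING);
}

static void two_unix_state_unlock(struct sock *outer, struct sock *inner)
{
        spin_unlock(&unix_sk(inner)->lock);
        spin_unlock(&unix_sk(outer)->lock);
}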