From: Ingo Molnar

Teach special (recursive) locking code to the lock validator.  Also splits
af_unix's sk_receive_queue.lock class from the other networking skb-queue
locks.  Has no effect on non-lockdep kernels.

Signed-off-by: Ingo Molnar
Signed-off-by: Arjan van de Ven
Cc: "David S. Miller"
Signed-off-by: Andrew Morton
---

 include/net/af_unix.h |    3 +++
 net/unix/af_unix.c    |   12 +++++++++++-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff -puN include/net/af_unix.h~lockdep-annotate-af_unix-locking include/net/af_unix.h
--- a/include/net/af_unix.h~lockdep-annotate-af_unix-locking
+++ a/include/net/af_unix.h
@@ -61,6 +61,9 @@ struct unix_skb_parms {
 #define unix_state_rlock(s)	spin_lock(&unix_sk(s)->lock)
 #define unix_state_runlock(s)	spin_unlock(&unix_sk(s)->lock)
 #define unix_state_wlock(s)	spin_lock(&unix_sk(s)->lock)
+#define unix_state_wlock_nested(s) \
+				spin_lock_nested(&unix_sk(s)->lock, \
+				SINGLE_DEPTH_NESTING)
 #define unix_state_wunlock(s)	spin_unlock(&unix_sk(s)->lock)
 
 #ifdef __KERNEL__
diff -puN net/unix/af_unix.c~lockdep-annotate-af_unix-locking net/unix/af_unix.c
--- a/net/unix/af_unix.c~lockdep-annotate-af_unix-locking
+++ a/net/unix/af_unix.c
@@ -542,6 +542,14 @@ static struct proto unix_proto = {
 	.obj_size = sizeof(struct unix_sock),
 };
 
+/*
+ * AF_UNIX sockets do not interact with hardware, hence they
+ * dont trigger interrupts - so it's safe for them to have
+ * bh-unsafe locking for their sk_receive_queue.lock. Split off
+ * this special lock-class by reinitializing the spinlock key:
+ */
+static struct lock_class_key af_unix_sk_receive_queue_lock_key;
+
 static struct sock * unix_create1(struct socket *sock)
 {
 	struct sock *sk = NULL;
@@ -557,6 +565,8 @@ static struct sock * unix_create1(struct
 	atomic_inc(&unix_nr_socks);
 
 	sock_init_data(sock,sk);
+	lockdep_set_class(&sk->sk_receive_queue.lock,
+			  &af_unix_sk_receive_queue_lock_key);
 
 	sk->sk_write_space	= unix_write_space;
 	sk->sk_max_ack_backlog	= sysctl_unix_max_dgram_qlen;
@@ -1022,7 +1032,7 @@ restart:
 		goto out_unlock;
 	}
 
-	unix_state_wlock(sk);
+	unix_state_wlock_nested(sk);
 
 	if (sk->sk_state != st) {
 		unix_state_wunlock(sk);
_
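
For readers less familiar with the two lockdep idioms the patch relies on,
here is a minimal standalone sketch (not part of the patch) of the same
pattern on a hypothetical "foo" object: a private lock_class_key to split a
lock off into its own class, and spin_lock_nested() to tell the validator
that taking two locks of the same class is intentional single-depth
nesting.  The names struct foo, foo_init() and foo_connect() are purely
illustrative.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct foo {
	spinlock_t lock;
};

/* One key per distinct lock class lockdep should track separately. */
static struct lock_class_key foo_lock_key;

static void foo_init(struct foo *f)
{
	spin_lock_init(&f->lock);
	/* Move this lock out of the default class for its type. */
	lockdep_set_class(&f->lock, &foo_lock_key);
}

/*
 * Take two locks of the same class without triggering a false
 * "possible recursive locking" report: the second acquisition is
 * annotated as one level of intentional nesting.
 */
static void foo_connect(struct foo *a, struct foo *b)
{
	spin_lock(&a->lock);
	spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	/* ... link a and b ... */
	spin_unlock(&b->lock);
	spin_unlock(&a->lock);
}

This mirrors what the patch does: unix_create1() re-keys each socket's
sk_receive_queue.lock, and unix_stream_connect()'s second unix_state
lock is taken via unix_state_wlock_nested().  On non-lockdep kernels
both annotations compile away.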