From: Ingo Molnar

Teach special (multi-initialized) locking code to the lock validator.
Has no effect on non-lockdep kernels.

Signed-off-by: Ingo Molnar
Signed-off-by: Arjan van de Ven
Cc: "David S. Miller"
Signed-off-by: Andrew Morton
---

 include/linux/skbuff.h |    7 +------
 net/core/skbuff.c      |    8 ++++++++
 2 files changed, 9 insertions(+), 6 deletions(-)

diff -puN include/linux/skbuff.h~lock-validator-special-locking-skb_queue_head_init include/linux/skbuff.h
--- devel/include/linux/skbuff.h~lock-validator-special-locking-skb_queue_head_init	2006-05-29 18:13:20.000000000 -0700
+++ devel-akpm/include/linux/skbuff.h	2006-05-29 18:13:20.000000000 -0700
@@ -588,12 +588,7 @@ static inline __u32 skb_queue_len(const
 	return list_->qlen;
 }
 
-static inline void skb_queue_head_init(struct sk_buff_head *list)
-{
-	spin_lock_init(&list->lock);
-	list->prev = list->next = (struct sk_buff *)list;
-	list->qlen = 0;
-}
+extern void skb_queue_head_init(struct sk_buff_head *list);
 
 /*
  *	Insert an sk_buff at the start of a list.
diff -puN net/core/skbuff.c~lock-validator-special-locking-skb_queue_head_init net/core/skbuff.c
--- devel/net/core/skbuff.c~lock-validator-special-locking-skb_queue_head_init	2006-05-29 18:13:20.000000000 -0700
+++ devel-akpm/net/core/skbuff.c	2006-05-29 18:13:20.000000000 -0700
@@ -71,6 +71,14 @@
 static kmem_cache_t *skbuff_head_cache __read_mostly;
 static kmem_cache_t *skbuff_fclone_cache __read_mostly;
 
+void skb_queue_head_init(struct sk_buff_head *list)
+{
+	spin_lock_init(&list->lock);
+	list->prev = list->next = (struct sk_buff *)list;
+	list->qlen = 0;
+}
+EXPORT_SYMBOL(skb_queue_head_init);
+
 /*
  *	Keep out-of-line to prevent kernel bloat.
  *	__builtin_return_address is not used because it is not always
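
Why uninlining helps the validator (a simplified picture): with lockdep
enabled, spin_lock_init() creates a static lock_class_key at each textual
call site, and locks are grouped into classes by that key.  As a static
inline in skbuff.h, skb_queue_head_init() replicated its spin_lock_init()
site into every file that used it, so logically identical skb queue locks
could land in many classes; a single out-of-line definition gives all
sk_buff_head locks one init site and hence one class.  A minimal userspace
sketch of that mechanism, not kernel code (demo_lock_init() and
queue_init() are made-up stand-ins, not kernel API):

	#include <stdio.h>

	/* Toy stand-in for the kernel's lock_class_key; illustration only. */
	struct lock_class_key { char dummy; };

	/*
	 * Simplified model of a lockdep-style spin_lock_init(): each textual
	 * expansion gets its own static key, and the key's address is what
	 * identifies the lock class.
	 */
	#define demo_lock_init(tag)					\
	do {								\
		static struct lock_class_key __key;			\
		printf("%-8s key=%p\n", (tag), (void *)&__key);		\
	} while (0)

	/* Out-of-line helper: one init site, so one key (one class) for
	 * every queue initialized through it. */
	void queue_init(const char *tag)
	{
		demo_lock_init(tag);
	}

	int main(void)
	{
		queue_init("queueA");		/* same key address ...	  */
		queue_init("queueB");		/* ... as this call	  */
		demo_lock_init("siteA");	/* distinct key per site, */
		demo_lock_init("siteB");	/* like the old inline	  */
		return 0;
	}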