From: Ingo Molnar My __acquire_lock() cleanup introduced a locking bug: on SMP systems we'd release a non-owned graph lock. Fix this by moving the graph unlock back, and by leaving the max_lockdep_depth variable update possibly racy. (we don't care, it's just statistics) Also add some minimal debugging code to graph_unlock()/graph_lock(), which caught this locking bug. Signed-off-by: Ingo Molnar Cc: Jarek Poplawski Signed-off-by: Andrew Morton --- kernel/lockdep.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff -puN kernel/lockdep.c~lockdep-more-unlock-on-error-fixes-fix kernel/lockdep.c --- a/kernel/lockdep.c~lockdep-more-unlock-on-error-fixes-fix +++ a/kernel/lockdep.c @@ -70,6 +70,9 @@ static int graph_lock(void) static inline int graph_unlock(void) { + if (debug_locks && !__raw_spin_is_locked(&lockdep_lock)) + return DEBUG_LOCKS_WARN_ON(1); + __raw_spin_unlock(&lockdep_lock); return 0; } @@ -712,6 +715,9 @@ find_usage_backwards(struct lock_class * struct lock_list *entry; int ret; + if (!__raw_spin_is_locked(&lockdep_lock)) + return DEBUG_LOCKS_WARN_ON(1); + if (depth > max_recursion_depth) max_recursion_depth = depth; if (depth >= RECURSION_LIMIT) @@ -2208,6 +2214,7 @@ out_calc_hash: if (!chain_head && ret != 2) if (!check_prevs_add(curr, hlock)) return 0; + graph_unlock(); } else /* after lookup_chain_cache(): */ if (unlikely(!debug_locks)) @@ -2216,7 +2223,7 @@ out_calc_hash: curr->lockdep_depth++; check_chain_key(curr); if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { - debug_locks_off_graph_unlock(); + debug_locks_off(); printk("BUG: MAX_LOCK_DEPTH too low!\n"); printk("turning off the locking correctness validator.\n"); return 0; @@ -2225,7 +2232,6 @@ out_calc_hash: if (unlikely(curr->lockdep_depth > max_lockdep_depth)) max_lockdep_depth = curr->lockdep_depth; - graph_unlock(); return 1; } _