From: Alexander Zarochentsev

Lock-validator-friendly locking of a new atom in atom_begin_and_assign_to_txnh()
and locking of two atoms.

Signed-off-by: Alexander Zarochentsev
Cc: Hans Reiser
Signed-off-by: Andrew Morton
---

 fs/reiser4/txnmgr.c |   20 +++++++++++---------
 fs/reiser4/txnmgr.h |   15 +++++++++++++++
 2 files changed, 26 insertions(+), 9 deletions(-)

diff -puN fs/reiser4/txnmgr.c~reiser4-decribe-new-atom-locking-and-nested-atom-locks-to-lock-validator fs/reiser4/txnmgr.c
--- a/fs/reiser4/txnmgr.c~reiser4-decribe-new-atom-locking-and-nested-atom-locks-to-lock-validator
+++ a/fs/reiser4/txnmgr.c
@@ -397,7 +397,7 @@ static void atom_init(txn_atom * atom)
 	INIT_LIST_HEAD(ATOM_OVRWR_LIST(atom));
 	INIT_LIST_HEAD(ATOM_WB_LIST(atom));
 	INIT_LIST_HEAD(&atom->inodes);
-	spin_lock_init(&atom->alock);
+	spin_lock_init(&(atom->alock));
 	/* list of transaction handles */
 	INIT_LIST_HEAD(&atom->txnh_list);
 	/* link to transaction manager's list of atoms */
@@ -732,10 +732,12 @@ static int atom_begin_and_assign_to_txnh
 	assert("jmacd-17", atom_isclean(atom));
 
 	/*
-	 * do not use spin_lock_atom because we have broken lock ordering here
-	 * which is ok, as long as @atom is new and inaccessible for others.
+	 * lock ordering is broken here. It is ok, as long as @atom is new
+	 * and inaccessible for others. We can't use spin_lock_atom or
+	 * spin_lock(&atom->alock) because they care about locking
+	 * dependencies. spin_trylock_lock doesn't.
 	 */
-	spin_lock(&(atom->alock));
+	check_me("", spin_trylock_atom(atom));
 
 	/* add atom to the end of transaction manager's list of atoms */
 	list_add_tail(&atom->atom_link, &mgr->atoms_list);
@@ -751,7 +753,7 @@ static int atom_begin_and_assign_to_txnh
 	atom->super = reiser4_get_current_sb();
 	capture_assign_txnh_nolock(atom, txnh);
 
-	spin_unlock(&(atom->alock));
+	spin_unlock_atom(atom);
 	spin_unlock_txnh(txnh);
 
 	return -E_REPEAT;
@@ -2112,11 +2114,11 @@ static void fuse_not_fused_lock_owners(t
 			atomic_inc(&atomf->refcount);
 			spin_unlock_txnh(ctx->trans);
 			if (atomf > atomh) {
-				spin_lock_atom(atomf);
+				spin_lock_atom_nested(atomf);
 			} else {
 				spin_unlock_atom(atomh);
 				spin_lock_atom(atomf);
-				spin_lock_atom(atomh);
+				spin_lock_atom_nested(atomh);
 			}
 			if (atomh == atomf || !atom_isopen(atomh) || !atom_isopen(atomf)) {
 				release_two_atoms(atomf, atomh);
@@ -2794,10 +2796,10 @@ static void lock_two_atoms(txn_atom * on
 	/* lock the atom with lesser address first */
 	if (one < two) {
 		spin_lock_atom(one);
-		spin_lock_atom(two);
+		spin_lock_atom_nested(two);
 	} else {
 		spin_lock_atom(two);
-		spin_lock_atom(one);
+		spin_lock_atom_nested(one);
 	}
 }
 
diff -puN fs/reiser4/txnmgr.h~reiser4-decribe-new-atom-locking-and-nested-atom-locks-to-lock-validator fs/reiser4/txnmgr.h
--- a/fs/reiser4/txnmgr.h~reiser4-decribe-new-atom-locking-and-nested-atom-locks-to-lock-validator
+++ a/fs/reiser4/txnmgr.h
@@ -503,6 +503,7 @@ static inline void spin_lock_atom(txn_at
 {
 	/* check that spinlocks of lower priorities are not held */
 	assert("", (LOCK_CNT_NIL(spin_locked_txnh) &&
+		    LOCK_CNT_NIL(spin_locked_atom) &&
 		    LOCK_CNT_NIL(spin_locked_jnode) &&
 		    LOCK_CNT_NIL(spin_locked_zlock) &&
 		    LOCK_CNT_NIL(rw_locked_dk) &&
@@ -514,6 +515,20 @@ static inline void spin_lock_atom(txn_at
 	LOCK_CNT_INC(spin_locked);
 }
 
+static inline void spin_lock_atom_nested(txn_atom *atom)
+{
+	assert("", (LOCK_CNT_NIL(spin_locked_txnh) &&
+		    LOCK_CNT_NIL(spin_locked_jnode) &&
+		    LOCK_CNT_NIL(spin_locked_zlock) &&
+		    LOCK_CNT_NIL(rw_locked_dk) &&
+		    LOCK_CNT_NIL(rw_locked_tree)));
+
+	spin_lock_nested(&(atom->alock), SINGLE_DEPTH_NESTING);
+
+	LOCK_CNT_INC(spin_locked_atom);
+	LOCK_CNT_INC(spin_locked);
+}
+
 static inline int spin_trylock_atom(txn_atom *atom)
 {
 	if (spin_trylock(&(atom->alock))) {
_
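
For context, the pattern the patch relies on is the standard lockdep nesting
annotation: when two locks of the same lock class must be held at once, take
them in a fixed (e.g. address) order and acquire the second one with
spin_lock_nested()/SINGLE_DEPTH_NESTING, so the lock validator treats it as a
separate subclass instead of reporting a recursive-lock deadlock.  Trylock
acquisitions do not record new lock-order dependencies, which is why the
new-atom path above can use spin_trylock_atom() despite its unusual ordering.
Below is a minimal generic sketch of the two-lock pattern; the struct and
function names are illustrative only and are not part of reiser4.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* illustrative object type, not a reiser4 structure */
struct obj {
	spinlock_t lock;
};

/*
 * Take both locks in address order; annotate the second acquisition so
 * that lockdep does not flag same-class nesting as a self-deadlock.
 */
static void obj_lock_pair(struct obj *a, struct obj *b)
{
	if (a < b) {
		spin_lock(&a->lock);
		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&b->lock);
		spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
	}
}

static void obj_unlock_pair(struct obj *a, struct obj *b)
{
	spin_unlock(&a->lock);
	spin_unlock(&b->lock);
}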