always lock the root (oldest) anon_vma

From: Rik van Riel

Always (and only) lock the root (oldest) anon_vma whenever we do
something in an anon_vma.  The recently introduced anon_vma scalability
is due to the rmap code scanning only the VMAs that need to be scanned.
Many common operations still took the anon_vma lock on the root
anon_vma, so always taking that lock is not expected to introduce any
scalability issues.

However, always taking the same lock does mean we only need to take one
lock, which means rmap_walk on pages from any anon_vma attached to the
vma cannot run during an munmap, expand_stack or other operation that
needs to exclude rmap_walk and similar functions.

Also add the proper locking to vma_adjust:

- conditionally take the anon_vma lock in vma_adjust, as introduced in
  commit 252c5f94d944487e9f50ece7942b0fbf659c5c31 (with a proper comment)

Signed-off-by: Rik van Riel

---

* * *

fix vma_adjust lock inversion

From: Andrea Arcangeli

From code review I found a potential lock-inversion deadlock in the
newly introduced anon-vma-root locking: vma_link and other places always
take i_mmap_lock before the anon_vma lock, so vma_adjust has to do it
too.  This should fix it.

Signed-off-by: Andrea Arcangeli

---

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -104,24 +104,24 @@ static inline void vma_lock_anon_vma(str
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
-		spin_lock(&anon_vma->lock);
+		spin_lock(&anon_vma->root->lock);
 }
 
 static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	if (anon_vma)
-		spin_unlock(&anon_vma->lock);
+		spin_unlock(&anon_vma->root->lock);
 }
 
 static inline void anon_vma_lock(struct anon_vma *anon_vma)
 {
-	spin_lock(&anon_vma->lock);
+	spin_lock(&anon_vma->root->lock);
 }
 
 static inline void anon_vma_unlock(struct anon_vma *anon_vma)
 {
-	spin_unlock(&anon_vma->lock);
+	spin_unlock(&anon_vma->root->lock);
 }
 
 /*
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -325,7 +325,7 @@ static void drop_anon_vma(struct rmap_it
 {
 	struct anon_vma *anon_vma = rmap_item->anon_vma;
 
-	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
+	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) {
 		int empty = list_empty(&anon_vma->head);
 		anon_vma_unlock(anon_vma);
 		if (empty)
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -506,6 +506,7 @@ int vma_adjust(struct vm_area_struct *vm
 	struct vm_area_struct *importer = NULL;
 	struct address_space *mapping = NULL;
 	struct prio_tree_root *root = NULL;
+	struct anon_vma *anon_vma = NULL;
 	struct file *file = vma->vm_file;
 	long adjust_next = 0;
 	int remove_next = 0;
@@ -578,6 +579,17 @@ again: remove_next = 1 + (end > next->
 		}
 	}
 
+	/*
+	 * When changing only vma->vm_end, we don't really need the anon_vma
+	 * lock. This is a fairly rare case by itself, but the anon_vma
+	 * lock may be shared between many sibling processes. Skipping
+	 * the lock for brk adjustments makes a difference sometimes.
+	 */
+	if (vma->anon_vma && (insert || importer || start != vma->vm_start)) {
+		anon_vma = vma->anon_vma;
+		anon_vma_lock(anon_vma);
+	}
+
 	if (root) {
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_remove(vma, root);
@@ -617,6 +629,8 @@ again: remove_next = 1 + (end > next->
 		__insert_vm_struct(mm, insert);
 	}
 
+	if (anon_vma)
+		anon_vma_unlock(anon_vma);
 	if (mapping)
 		spin_unlock(&mapping->i_mmap_lock);
 
@@ -2471,7 +2485,7 @@ static void vm_lock_anon_vma(struct mm_s
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
+		spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem);
 		/*
 		 * We can safely modify head.next after taking the
 		 * anon_vma->lock. If some other vma in this mm shares
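
For illustration, here is a minimal userspace sketch of the locking rule
the rmap.h hunks implement.  The struct and function names mirror the
patch, but the types are toys, not the kernel's, and pthread mutexes
stand in for the anon_vma spinlock: every lock operation on any anon_vma
in a chain is redirected to the root's lock, which is why holding that
one lock excludes rmap_walk on pages from every sibling anon_vma.

#include <pthread.h>
#include <stdio.h>

/* Toy stand-in for the kernel's struct anon_vma. */
struct anon_vma {
	struct anon_vma *root;		/* oldest anon_vma in the chain */
	pthread_mutex_t lock;		/* only root->lock is ever taken */
};

static void anon_vma_lock(struct anon_vma *anon_vma)
{
	pthread_mutex_lock(&anon_vma->root->lock);
}

static void anon_vma_unlock(struct anon_vma *anon_vma)
{
	pthread_mutex_unlock(&anon_vma->root->lock);
}

int main(void)
{
	struct anon_vma root  = { &root, PTHREAD_MUTEX_INITIALIZER };
	struct anon_vma child = { &root, PTHREAD_MUTEX_INITIALIZER };

	/*
	 * Locking the child really takes root.lock, so a scan of any
	 * anon_vma in the chain is excluded while e.g. an munmap of a
	 * sibling holds the (same) root lock: trylock on root fails.
	 */
	anon_vma_lock(&child);
	printf("root lock busy: %d\n",
	       pthread_mutex_trylock(&root.lock) != 0);	/* prints 1 */
	anon_vma_unlock(&child);
	return 0;
}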
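
The lock-inversion fixup applies the standard rule that two locks must
always be taken in one global order.  A sketch of that pattern, again
with toy pthread locks (the names refer to the kernel objects but these
are not kernel types):

#include <pthread.h>

static pthread_mutex_t i_mmap_lock        = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t anon_vma_root_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Every path that needs both locks takes i_mmap_lock first and the
 * anon_vma root lock second -- the order vma_link already uses and
 * that vma_adjust now follows.
 */
static void lock_both(void)
{
	pthread_mutex_lock(&i_mmap_lock);
	pthread_mutex_lock(&anon_vma_root_lock);
}

static void unlock_both(void)
{
	pthread_mutex_unlock(&anon_vma_root_lock);
	pthread_mutex_unlock(&i_mmap_lock);
}

/*
 * The bug pattern the fixup removes: one thread holding the anon_vma
 * root lock while waiting for i_mmap_lock, while another holds
 * i_mmap_lock and waits for the root lock -- neither can make
 * progress.  A single global order makes that interleaving impossible.
 */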