Subject: mm_take_all_locks-rwsem

From: Andrea Arcangeli

This converts mm_take_all_locks() to use rw_semaphores, now that
i_mmap_lock and anon_vma->lock have been converted to i_mmap_sem and
anon_vma->sem.

Signed-off-by: Andrea Arcangeli
---

diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -134,7 +134,7 @@ struct mmu_notifier_ops {
  * Therefore notifier chains can only be traversed when either
  *
  * 1. mmap_sem is held.
- * 2. One of the reverse map locks is held (i_mmap_lock or anon_vma->lock).
+ * 2. One of the reverse map locks is held (i_mmap_sem or anon_vma->sem).
  * 3. No other concurrent thread can access the list (release)
  */
 struct mmu_notifier {
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2266,15 +2266,15 @@ static void vm_lock_anon_vma(struct anon
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		spin_lock(&anon_vma->lock);
+		down_write(&anon_vma->sem);
 		/*
 		 * We can safely modify head.next after taking the
-		 * anon_vma->lock. If some other vma in this mm shares
+		 * anon_vma->sem. If some other vma in this mm shares
 		 * the same anon_vma we won't take it again.
 		 *
 		 * No need of atomic instructions here, head.next
 		 * can't change from under us thanks to the
-		 * anon_vma->lock.
+		 * anon_vma->sem.
 		 */
 		if (__test_and_set_bit(0, (unsigned long *)
 				       &anon_vma->head.next))
@@ -2296,7 +2296,7 @@ static void vm_lock_mapping(struct addre
 		 */
 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
 			BUG();
-		spin_lock(&mapping->i_mmap_lock);
+		down_write(&mapping->i_mmap_sem);
 	}
 }
@@ -2323,7 +2323,7 @@ static void vm_lock_mapping(struct addre
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
+ * taking i_mmap_sem or anon_vma->sem outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
@@ -2371,12 +2371,12 @@ static void vm_unlock_anon_vma(struct an
 		 *
 		 * No need of atomic instructions here, head.next
 		 * can't change from under us until we release the
-		 * anon_vma->lock.
+		 * anon_vma->sem.
 		 */
 		if (!__test_and_clear_bit(0, (unsigned long *)
 					  &anon_vma->head.next))
			BUG();
-		spin_unlock(&anon_vma->lock);
+		up_write(&anon_vma->sem);
 	}
 }
@@ -2387,7 +2387,7 @@ static void vm_unlock_mapping(struct add
 		 * AS_MM_ALL_LOCKS can't change to 0 from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		spin_unlock(&mapping->i_mmap_lock);
+		up_write(&mapping->i_mmap_sem);
 		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
 					&mapping->flags))
 			BUG();
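
For context, a minimal sketch (not part of this patch) of the caller
pattern mm_take_all_locks() serves, modeled on its in-tree user
mmu_notifier_register(). The helper name example_freeze_mm() is
hypothetical, and the return convention of mm_take_all_locks() (0 on
success, -EINTR if a signal interrupts the lock phase, with any
semaphores taken so far already dropped) is assumed from the mainline
implementation:

	/*
	 * Hypothetical caller, for illustration only.  mmap_sem must be
	 * write-held around the whole sequence; mm_take_all_locks() then
	 * write-locks every i_mmap_sem and anon_vma->sem in this mm (in
	 * any order, serialized by mm_all_locks_mutex).
	 */
	static int example_freeze_mm(struct mm_struct *mm)
	{
		int ret;

		down_write(&mm->mmap_sem);
		ret = mm_take_all_locks(mm);
		if (ret)
			goto out;

		/*
		 * No rmap walk can run here: all reverse map semaphores
		 * are held for write, so per-mm structures reachable
		 * from the rmap side (e.g. the mmu_notifier list) can
		 * be modified safely.
		 */

		mm_drop_all_locks(mm);
	out:
		up_write(&mm->mmap_sem);
		return ret;
	}

The random-order acquisition is deadlock-free only because, as the
comment in the mm/mmap.c hunk above explains, code taking i_mmap_sem
or anon_vma->sem outside mmap_sem never holds more than one of them
at a time.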