Subject: mm_lock-rwsem

Convert mm_lock to use semaphores after the i_mmap_lock and anon_vma lock
conversion.

Signed-off-by: Andrea Arcangeli

diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1062,10 +1062,10 @@
  * mm_lock and mm_unlock are expensive operations that may take a long time.
  */
 struct mm_lock_data {
-	spinlock_t **i_mmap_locks;
-	spinlock_t **anon_vma_locks;
-	size_t nr_i_mmap_locks;
-	size_t nr_anon_vma_locks;
+	struct rw_semaphore **i_mmap_sems;
+	struct rw_semaphore **anon_vma_sems;
+	size_t nr_i_mmap_sems;
+	size_t nr_anon_vma_sems;
 };
 extern int mm_lock(struct mm_struct *mm, struct mm_lock_data *data);
 extern void mm_unlock(struct mm_struct *mm, struct mm_lock_data *data);
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2242,8 +2242,8 @@
 
 static int mm_lock_cmp(const void *a, const void *b)
 {
-	unsigned long _a = (unsigned long)*(spinlock_t **)a;
-	unsigned long _b = (unsigned long)*(spinlock_t **)b;
+	unsigned long _a = (unsigned long)*(struct rw_semaphore **)a;
+	unsigned long _b = (unsigned long)*(struct rw_semaphore **)b;
 
 	cond_resched();
 	if (_a < _b)
@@ -2253,7 +2253,7 @@
 	return 0;
 }
 
-static unsigned long mm_lock_sort(struct mm_struct *mm, spinlock_t **locks,
+static unsigned long mm_lock_sort(struct mm_struct *mm, struct rw_semaphore **sems,
 				  int anon)
 {
 	struct vm_area_struct *vma;
@@ -2262,59 +2262,59 @@
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (anon) {
 			if (vma->anon_vma)
-				locks[i++] = &vma->anon_vma->lock;
+				sems[i++] = &vma->anon_vma->sem;
 		} else {
 			if (vma->vm_file && vma->vm_file->f_mapping)
-				locks[i++] = &vma->vm_file->f_mapping->i_mmap_lock;
+				sems[i++] = &vma->vm_file->f_mapping->i_mmap_sem;
 		}
 	}
 
 	if (!i)
 		goto out;
 
-	sort(locks, i, sizeof(spinlock_t *), mm_lock_cmp, NULL);
+	sort(sems, i, sizeof(struct rw_semaphore *), mm_lock_cmp, NULL);
 
 out:
 	return i;
 }
 
 static inline unsigned long mm_lock_sort_anon_vma(struct mm_struct *mm,
-						  spinlock_t **locks)
+						  struct rw_semaphore **sems)
 {
-	return mm_lock_sort(mm, locks, 1);
+	return mm_lock_sort(mm, sems, 1);
 }
 
 static inline unsigned long mm_lock_sort_i_mmap(struct mm_struct *mm,
-						spinlock_t **locks)
+						struct rw_semaphore **sems)
 {
-	return mm_lock_sort(mm, locks, 0);
+	return mm_lock_sort(mm, sems, 0);
 }
 
-static void mm_lock_unlock(spinlock_t **locks, size_t nr, int lock)
+static void mm_lock_unlock(struct rw_semaphore **sems, size_t nr, int lock)
 {
-	spinlock_t *last = NULL;
+	struct rw_semaphore *last = NULL;
 	size_t i;
 
 	for (i = 0; i < nr; i++)
 		/* Multiple vmas may use the same lock. */
-		if (locks[i] != last) {
-			BUG_ON((unsigned long) last > (unsigned long) locks[i]);
-			last = locks[i];
+		if (sems[i] != last) {
+			BUG_ON((unsigned long) last > (unsigned long) sems[i]);
+			last = sems[i];
 			if (lock)
-				spin_lock(last);
+				down_write(last);
 			else
-				spin_unlock(last);
+				up_write(last);
 		}
 }
 
-static inline void __mm_lock(spinlock_t **locks, size_t nr)
+static inline void __mm_lock(struct rw_semaphore **sems, size_t nr)
 {
-	mm_lock_unlock(locks, nr, 1);
+	mm_lock_unlock(sems, nr, 1);
 }
 
-static inline void __mm_unlock(spinlock_t **locks, size_t nr)
+static inline void __mm_unlock(struct rw_semaphore **sems, size_t nr)
 {
-	mm_lock_unlock(locks, nr, 0);
+	mm_lock_unlock(sems, nr, 0);
 }
 
 /*
@@ -2326,57 +2326,57 @@
  */
 int mm_lock(struct mm_struct *mm, struct mm_lock_data *data)
 {
-	spinlock_t **anon_vma_locks, **i_mmap_locks;
+	struct rw_semaphore **anon_vma_sems, **i_mmap_sems;
 
 	down_write(&mm->mmap_sem);
 	if (mm->map_count) {
-		anon_vma_locks = vmalloc(sizeof(spinlock_t *) * mm->map_count);
-		if (unlikely(!anon_vma_locks)) {
+		anon_vma_sems = vmalloc(sizeof(struct rw_semaphore *) * mm->map_count);
+		if (unlikely(!anon_vma_sems)) {
 			up_write(&mm->mmap_sem);
 			return -ENOMEM;
 		}
 
-		i_mmap_locks = vmalloc(sizeof(spinlock_t *) * mm->map_count);
-		if (unlikely(!i_mmap_locks)) {
+		i_mmap_sems = vmalloc(sizeof(struct rw_semaphore *) * mm->map_count);
+		if (unlikely(!i_mmap_sems)) {
 			up_write(&mm->mmap_sem);
-			vfree(anon_vma_locks);
+			vfree(anon_vma_sems);
 			return -ENOMEM;
 		}
 
-		data->nr_anon_vma_locks = mm_lock_sort_anon_vma(mm, anon_vma_locks);
-		data->nr_i_mmap_locks = mm_lock_sort_i_mmap(mm, i_mmap_locks);
+		data->nr_anon_vma_sems = mm_lock_sort_anon_vma(mm, anon_vma_sems);
+		data->nr_i_mmap_sems = mm_lock_sort_i_mmap(mm, i_mmap_sems);
 
-		if (data->nr_anon_vma_locks) {
-			__mm_lock(anon_vma_locks, data->nr_anon_vma_locks);
-			data->anon_vma_locks = anon_vma_locks;
+		if (data->nr_anon_vma_sems) {
+			__mm_lock(anon_vma_sems, data->nr_anon_vma_sems);
+			data->anon_vma_sems = anon_vma_sems;
 		} else
-			vfree(anon_vma_locks);
+			vfree(anon_vma_sems);
 
-		if (data->nr_i_mmap_locks) {
-			__mm_lock(i_mmap_locks, data->nr_i_mmap_locks);
-			data->i_mmap_locks = i_mmap_locks;
+		if (data->nr_i_mmap_sems) {
+			__mm_lock(i_mmap_sems, data->nr_i_mmap_sems);
+			data->i_mmap_sems = i_mmap_sems;
 		} else
-			vfree(i_mmap_locks);
+			vfree(i_mmap_sems);
 	}
 	return 0;
 }
 
-static void mm_unlock_vfree(spinlock_t **locks, size_t nr)
+static void mm_unlock_vfree(struct rw_semaphore **sems, size_t nr)
 {
-	__mm_unlock(locks, nr);
-	vfree(locks);
+	__mm_unlock(sems, nr);
+	vfree(sems);
 }
 
 /* avoid memory allocations for mm_unlock to prevent deadlock */
 void mm_unlock(struct mm_struct *mm, struct mm_lock_data *data)
 {
 	if (mm->map_count) {
-		if (data->nr_anon_vma_locks)
-			mm_unlock_vfree(data->anon_vma_locks,
-					data->nr_anon_vma_locks);
-		if (data->nr_i_mmap_locks)
-			mm_unlock_vfree(data->i_mmap_locks,
-					data->nr_i_mmap_locks);
+		if (data->nr_anon_vma_sems)
+			mm_unlock_vfree(data->anon_vma_sems,
+					data->nr_anon_vma_sems);
+		if (data->nr_i_mmap_sems)
+			mm_unlock_vfree(data->i_mmap_sems,
+					data->nr_i_mmap_sems);
 	}
 	up_write(&mm->mmap_sem);
 }
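For readers following the series, the deadlock-avoidance scheme that mm_lock()
keeps unchanged across this conversion may be worth spelling out: collect every
i_mmap/anon_vma lock referenced by the mm, sort the array by lock address, then
write-lock each distinct lock once in ascending order. Because every mm_lock()
caller acquires in the same global (address) order, two tasks locking
overlapping sets of mappings cannot wait on each other in a cycle, and the
"last" check lets many vmas share one lock without locking it twice. Below is a
minimal userspace sketch of that pattern, with pthread rwlocks standing in for
struct rw_semaphore; it is illustrative only, not part of the patch, and all
names in it are made up.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Analogue of mm_lock_cmp(): order locks by their address. */
static int lock_cmp(const void *a, const void *b)
{
	unsigned long _a = (unsigned long)*(pthread_rwlock_t **)a;
	unsigned long _b = (unsigned long)*(pthread_rwlock_t **)b;

	return (_a > _b) - (_a < _b);
}

/*
 * Analogue of mm_lock_unlock(): walk the sorted array and write-lock
 * (or unlock) each distinct lock exactly once, in ascending order.
 */
static void lock_unlock_sorted(pthread_rwlock_t **locks, size_t nr, int lock)
{
	pthread_rwlock_t *last = NULL;
	size_t i;

	for (i = 0; i < nr; i++)
		/* Multiple "vmas" may reference the same lock. */
		if (locks[i] != last) {
			/* Stand-in for the BUG_ON: array must be sorted. */
			assert((unsigned long)last <= (unsigned long)locks[i]);
			last = locks[i];
			if (lock)
				pthread_rwlock_wrlock(last);
			else
				pthread_rwlock_unlock(last);
		}
}

int main(void)
{
	pthread_rwlock_t a = PTHREAD_RWLOCK_INITIALIZER;
	pthread_rwlock_t b = PTHREAD_RWLOCK_INITIALIZER;
	/* A "vma list" where two entries share lock b, as when the
	   same file is mapped twice. */
	pthread_rwlock_t *locks[] = { &b, &a, &b };
	size_t nr = sizeof(locks) / sizeof(locks[0]);

	qsort(locks, nr, sizeof(pthread_rwlock_t *), lock_cmp);
	lock_unlock_sorted(locks, nr, 1);	/* lock phase */
	lock_unlock_sorted(locks, nr, 0);	/* unlock phase */
	printf("locked and unlocked %zu entries, no deadlock\n", nr);
	return 0;
}

This also shows why mm_unlock() hands the sorted arrays back through struct
mm_lock_data rather than rebuilding them: as the comment above mm_unlock()
says, the unlock path must not allocate memory, presumably because an
allocation there could block on the very locks still being held, so mm_lock()
sets aside everything the unlock needs up front.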