From: Balbir Singh The memrlimit patches did not account for move_vma() since we account for address space usage in do_mremap(). The code flow actually increments total_vm twice (once in do_mremap() and once in move_vma()), the excess is removed in remove_vma_list() via do_munmap(). Since we did not do the duplicate accounting, the code was seeing the extra uncharge, causing our accounting to break. This patch fixes the problem. Signed-off-by: Balbir Singh Cc: Pavel Emelyanov Cc: Sudhir Kumar Cc: YAMAMOTO Takashi Cc: Paul Menage Cc: Li Zefan Cc: Balbir Singh Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: Vivek Goyal Signed-off-by: Andrew Morton --- mm/mremap.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff -puN mm/mremap.c~memrlimit-setup-the-memrlimit-controller-memrlimit-correct-mremap-and-move_vma-accounting mm/mremap.c --- a/mm/mremap.c~memrlimit-setup-the-memrlimit-controller-memrlimit-correct-mremap-and-move_vma-accounting +++ a/mm/mremap.c @@ -174,10 +174,15 @@ static unsigned long move_vma(struct vm_ if (mm->map_count >= sysctl_max_map_count - 3) return -ENOMEM; + if (memrlimit_cgroup_charge_as(mm, new_len >> PAGE_SHIFT)) + return -ENOMEM; + new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff); - if (!new_vma) + if (!new_vma) { + memrlimit_cgroup_uncharge_as(mm, new_len >> PAGE_SHIFT); return -ENOMEM; + } moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len); if (moved_len < old_len) { @@ -381,6 +386,8 @@ unsigned long do_mremap(unsigned long ad } } + memrlimit_cgroup_uncharge_as(mm, (new_len - old_len) >> PAGE_SHIFT); + /* * We weren't able to just expand or shrink the area, * we need to create a new one and move it.. _