From: David Rientjes

Flush the entire user address space from the TLB once, after walking every
VMA on the mm's VMA list, when clearing reference bits with
/proc/pid/clear_refs.  It's more efficient than flushing each page
individually depending on pte_young().

Cc: Paul Mundt
Cc: Christoph Lameter
Signed-off-by: David Rientjes
Signed-off-by: Andrew Morton
---

 fs/proc/task_mmu.c |    2 +-
 1 files changed, 1 insertion(+), 1 deletion(-)

diff -puN fs/proc/task_mmu.c~smaps-add-clear_refs-file-to-clear-reference-fix-fix fs/proc/task_mmu.c
--- a/fs/proc/task_mmu.c~smaps-add-clear_refs-file-to-clear-reference-fix-fix
+++ a/fs/proc/task_mmu.c
@@ -276,7 +276,6 @@ static void clear_refs_one_pmd(struct vm
 		if (pte_young(ptent)) {
 			ptent = pte_mkold(ptent);
 			set_pte_at(vma->vm_mm, addr, pte, ptent);
-			flush_tlb_page(vma, addr);
 		}
 		ClearPageReferenced(page);
 	}
@@ -358,6 +357,7 @@ void clear_refs_smap(struct mm_struct *m
 	for (vma = mm->mmap; vma; vma = vma->vm_next)
 		if (vma->vm_mm && !is_vm_hugetlb_page(vma))
 			for_each_pmd(vma, clear_refs_one_pmd, NULL);
+	flush_tlb_mm(mm);
 	up_read(&mm->mmap_sem);
 }
 
_