From: Matt Mackall

This function caused me some confusion.  In keeping with the precedent
in mm/memory.c, I've used pte_range instead of pmd.  While I was at it,
I changed for_each_ to walk_ as we have another precedent of using
for_each as a macro that can drive a for loop.

[It'd be nice to give sensible names to these data structures some day.
For instance, the thing pointed at by a pmd_t is not a "page middle
directory" but a "page table entry...table"?  Ideally, we'd make a
consistent distinction between "e"ntries and "d"irectories in the
naming.  So a PGD would contain PGEs that pointed to... that pointed to
PTDs that contained PTEs.]

Signed-off-by: Matt Mackall
Cc: David Rientjes
Signed-off-by: Andrew Morton
---

 fs/proc/task_mmu.c |   47 ++++++++++++++++++++++++++++-------------------
 1 files changed, 28 insertions(+), 19 deletions(-)

diff -puN fs/proc/task_mmu.c~smaps-add-clear_refs-file-to-clear-reference-cleanup fs/proc/task_mmu.c
--- a/fs/proc/task_mmu.c~smaps-add-clear_refs-file-to-clear-reference-cleanup
+++ a/fs/proc/task_mmu.c
@@ -214,9 +214,9 @@ static int show_map(struct seq_file *m,
 	return show_map_internal(m, v, NULL);
 }
 
-static void smaps_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-			  unsigned long addr, unsigned long end,
-			  void *private)
+static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+			    unsigned long addr, unsigned long end,
+			    void *private)
 {
 	struct mem_size_stats *mss = private;
 	pte_t *pte, ptent;
@@ -254,9 +254,9 @@ static void smaps_one_pmd(struct vm_area
 	cond_resched();
 }
 
-static void clear_refs_one_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-			       unsigned long addr, unsigned long end,
-			       void *private)
+static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+				 unsigned long addr, unsigned long end,
+				 void *private)
 {
 	pte_t *pte, ptent;
 	spinlock_t *ptl;
@@ -283,8 +283,8 @@ static void clear_refs_one_pmd(struct vm
 	cond_resched();
 }
 
-static inline void for_each_pmd_in_pud(struct pmd_walker *walker, pud_t *pud,
-				       unsigned long addr, unsigned long end)
+static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
+				  unsigned long addr, unsigned long end)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -298,8 +298,8 @@ static inline void for_each_pmd_in_pud(s
 	}
 }
 
-static inline void for_each_pud_in_pgd(struct pmd_walker *walker, pgd_t *pgd,
-				       unsigned long addr, unsigned long end)
+static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
+				  unsigned long addr, unsigned long end)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -309,15 +309,24 @@ static inline void for_each_pud_in_pgd(s
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		for_each_pmd_in_pud(walker, pud, addr, next);
+		walk_pmd_range(walker, pud, addr, next);
 	}
 }
 
-static inline void for_each_pmd(struct vm_area_struct *vma,
-				void (*action)(struct vm_area_struct *, pmd_t *,
-					       unsigned long, unsigned long,
-					       void *),
-				void *private)
+/*
+ * walk_page_range - walk the page tables of a VMA with a callback
+ * @vma - VMA to walk
+ * @action - callback invoked for every bottom-level (PTE) page table
+ * @private - private data passed to the callback function
+ *
+ * Recursively walk the page table for the memory area in a VMA, calling
+ * a callback for every bottom-level (PTE) page table.
+ */
+static inline void walk_page_range(struct vm_area_struct *vma,
+				   void (*action)(struct vm_area_struct *,
+						  pmd_t *, unsigned long,
+						  unsigned long, void *),
+				   void *private)
 {
 	unsigned long addr = vma->vm_start;
 	unsigned long end = vma->vm_end;
@@ -334,7 +343,7 @@ static inline void for_each_pmd(struct v
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		for_each_pud_in_pgd(&walker, pgd, addr, next);
+		walk_pud_range(&walker, pgd, addr, next);
 	}
 }
 
@@ -345,7 +354,7 @@ static int show_smap(struct seq_file *m,
 	memset(&mss, 0, sizeof mss);
 
 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-		for_each_pmd(vma, smaps_one_pmd, &mss);
+		walk_page_range(vma, smaps_pte_range, &mss);
 	return show_map_internal(m, v, &mss);
 }
 
@@ -356,7 +365,7 @@ void clear_refs_smap(struct mm_struct *m
 	down_read(&mm->mmap_sem);
 	for (vma = mm->mmap; vma; vma = vma->vm_next)
 		if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-			for_each_pmd(vma, clear_refs_one_pmd, NULL);
+			walk_page_range(vma, clear_refs_pte_range, NULL);
 	flush_tlb_mm(mm);
 	up_read(&mm->mmap_sem);
 }
_
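
[A note for readers not steeped in mm/ naming: below is a minimal,
self-contained userspace sketch of the convention the rename leans on.
for_each_* names a macro that drives a for loop at the call site, while
walk_* names a function that does the iteration itself and hands each
bottom-level range to a callback, the way walk_page_range() above hands
PTE ranges to smaps_pte_range() and clear_refs_pte_range().  Everything
in the sketch (struct top, struct mid, walk_table, sum_leaves, the
two-level table) is invented for illustration; it is not kernel code
and the table is only a stand-in for the pgd/pud/pmd/pte hierarchy.

#include <stdio.h>
#include <stddef.h>

#define NR_MID	4
#define NR_LEAF	4

struct mid { int leaf[NR_LEAF]; };		/* stand-in for a pte table */
struct top { struct mid *mid[NR_MID]; };	/* stand-in for a pgd */

/* for_each_* precedent: a macro the caller uses to drive a for loop,
 * skipping empty slots much like pud_none_or_clear_bad() does. */
#define for_each_mid(t, i) \
	for ((i) = 0; (i) < NR_MID; (i)++) \
		if ((t)->mid[i])

/* walk_* precedent: a function that iterates internally and invokes a
 * callback with private data, in the role of walk_page_range(). */
static void walk_table(struct top *t,
		       void (*action)(int *leaf, size_t n, void *private),
		       void *private)
{
	int i;

	for_each_mid(t, i)
		action(t->mid[i]->leaf, NR_LEAF, private);
}

/* Callback in the role of smaps_pte_range(): accumulates into *private
 * the way the smaps walker accumulates into struct mem_size_stats. */
static void sum_leaves(int *leaf, size_t n, void *private)
{
	long *sum = private;
	size_t j;

	for (j = 0; j < n; j++)
		*sum += leaf[j];
}

int main(void)
{
	struct mid a = { { 1, 2, 3, 4 } };
	struct top t = { { &a, NULL, &a, NULL } };
	long sum = 0;

	walk_table(&t, sum_leaves, &sum);
	printf("%ld\n", sum);	/* two populated slots: prints 20 */
	return 0;
}

The callback-plus-private-pointer shape is what lets one walker serve
both show_smap() and clear_refs_smap() in the patch above.]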