From: Matt Mackall

This makes the walker more generic.

Signed-off-by: Matt Mackall
Cc: Jeremy Fitzhardinge
Cc: David Rientjes
Signed-off-by: Andrew Morton
---

 fs/proc/task_mmu.c |   19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff -puN fs/proc/task_mmu.c~maps2-remove-vma-from-args-in-the-page-walker fs/proc/task_mmu.c
--- a/fs/proc/task_mmu.c~maps2-remove-vma-from-args-in-the-page-walker
+++ a/fs/proc/task_mmu.c
@@ -313,25 +313,26 @@ static void walk_pud_range(pgd_t *pgd, u
 }
 
 /*
- * walk_page_range - walk the page tables of a VMA with a callback
- * @vma - VMA to walk
+ * walk_page_range - walk a memory map's page tables with a callback
+ * @mm - memory map to walk
+ * @addr - starting address
+ * @end - ending address
  * @action - callback invoked for every bottom-level (PTE) page table
  * @private - private data passed to the callback function
  *
  * Recursively walk the page table for the memory area in a VMA, calling
  * a callback for every bottom-level (PTE) page table.
  */
-static void walk_page_range(struct vm_area_struct *vma,
+static void walk_page_range(struct mm_struct *mm,
+			    unsigned long addr, unsigned long end,
 			    void (*action)(pmd_t *, unsigned long,
 					   unsigned long, void *),
 			    void *private)
 {
-	unsigned long addr = vma->vm_start;
-	unsigned long end = vma->vm_end;
 	pgd_t *pgd;
 	unsigned long next;
 
-	for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
+	for (pgd = pgd_offset(mm, addr); addr != end;
 	     pgd++, addr = next) {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
@@ -348,7 +349,8 @@ static int show_smap(struct seq_file *m,
 	memset(&mss, 0, sizeof mss);
 	mss.vma = vma;
 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-		walk_page_range(vma, smaps_pte_range, &mss);
+		walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
+				smaps_pte_range, &mss);
 	return show_map_internal(m, v, &mss);
 }
 
@@ -359,7 +361,8 @@ void clear_refs_smap(struct mm_struct *m
 	down_read(&mm->mmap_sem);
 	for (vma = mm->mmap; vma; vma = vma->vm_next)
 		if (vma->vm_mm && !is_vm_hugetlb_page(vma))
-			walk_page_range(vma, clear_refs_pte_range, vma);
+			walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
+					clear_refs_pte_range, vma);
	flush_tlb_mm(mm);
 	up_read(&mm->mmap_sem);
 }
_
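
[Illustrative aside, not part of the patch: with the VMA gone from the
walker's argument list, callers are no longer tied to whole-VMA walks.
A hypothetical call site in fs/proc/task_mmu.c could pass any mm and any
address range to an existing callback; the one-page range below is made
up for the example:

	/* Sketch only: walk just the first page of a mapping. */
	walk_page_range(vma->vm_mm, vma->vm_start,
			vma->vm_start + PAGE_SIZE,
			smaps_pte_range, &mss);

The patch itself only converts the two existing whole-VMA callers.]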