From: Andrew Morton

Fix this patch for pagemap-pass-mm-into-pagewalkers.patch.

Cc: KOSAKI Motohiro
Cc: Lee Schermerhorn
Cc: Nick Piggin
Cc: Rik van Riel
Cc: Dave Hansen
Cc: Matt Mackall
Signed-off-by: Andrew Morton
---

 mm/mlock.c |   25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff -puN mm/mlock.c~mlock-mlocked-pages-are-unevictable-fix mm/mlock.c
--- a/mm/mlock.c~mlock-mlocked-pages-are-unevictable-fix
+++ a/mm/mlock.c
@@ -202,9 +202,9 @@ struct munlock_page_walk {
  * munlock normal pages for present ptes
  */
 static int __munlock_pte_handler(pte_t *ptep, unsigned long addr,
-			unsigned long end, void *private)
+			unsigned long end, struct mm_walk *walk)
 {
-	struct munlock_page_walk *mpw = private;
+	struct munlock_page_walk *mpw = walk->private;
 	swp_entry_t entry;
 	struct page *page;
 	pte_t pte;
@@ -245,18 +245,14 @@ out:
  * Save pmd for pte handler for waiting on migration entries
  */
 static int __munlock_pmd_handler(pmd_t *pmd, unsigned long addr,
-			unsigned long end, void *private)
+			unsigned long end, struct mm_walk *walk)
 {
-	struct munlock_page_walk *mpw = private;
+	struct munlock_page_walk *mpw = walk->private;
 	mpw->pmd = pmd;
 	return 0;
 }
 
-static struct mm_walk munlock_page_walk = {
-	.pmd_entry = __munlock_pmd_handler,
-	.pte_entry = __munlock_pte_handler,
-};
 
 /*
  * munlock a range of pages in the vma using standard page table walk.
@@ -266,8 +262,14 @@ static struct mm_walk munlock_page_walk
 static void __munlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end)
 {
-	struct mm_struct *mm = vma->vm_mm;
-	struct munlock_page_walk mpw;
+	struct munlock_page_walk mpw = {
+		.vma = vma,
+	};
+	struct mm_walk munlock_page_walk = {
+		.pmd_entry = __munlock_pmd_handler,
+		.pte_entry = __munlock_pte_handler,
+		.private = &mpw,
+	};
 
 	VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
 	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
@@ -275,8 +277,7 @@ static void __munlock_vma_pages_range(st
 	VM_BUG_ON(end > vma->vm_end);
 
 	lru_add_drain_all();		/* push cached pages to LRU */
-	mpw.vma = vma;
-	walk_page_range(mm, start, end, &munlock_page_walk, &mpw);
+	walk_page_range(start, end, &munlock_page_walk);
 
 	lru_add_drain_all();		/* to update stats */
 }
_