From: Christoph Lameter

Consolidate code to add an anonymous page in memory.c

There are two locations in which we add anonymous pages.  Both implement the
same logic.  Create a new function add_anon_page() to have a common code path.

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
---

 mm/memory.c |   23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff -puN mm/memory.c~consolidate-new-anonymous-page-code-paths mm/memory.c
--- a/mm/memory.c~consolidate-new-anonymous-page-code-paths
+++ a/mm/memory.c
@@ -900,6 +900,17 @@ unsigned long zap_page_range(struct vm_a
 }
 
 /*
+ * Add a new anonymous page
+ */
+static void add_anon_page(struct vm_area_struct *vma, struct page *page,
+	unsigned long address)
+{
+	inc_mm_counter(vma->vm_mm, anon_rss);
+	lru_cache_add_active(page);
+	page_add_new_anon_rmap(page, vma, address);
+}
+
+/*
  * Do a quick page-table lookup for a single page.
  */
 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
@@ -2148,9 +2159,7 @@ static int do_anonymous_page(struct mm_s
 		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 		if (!pte_none(*page_table))
 			goto release;
-		inc_mm_counter(mm, anon_rss);
-		lru_cache_add_active(page);
-		page_add_new_anon_rmap(page, vma, address);
+		add_anon_page(vma, page, address);
 	} else {
 		/* Map the ZERO_PAGE - vm_page_prot is readonly */
 		page = ZERO_PAGE(address);
@@ -2294,11 +2303,9 @@ retry:
 	if (write_access)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 	set_pte_at(mm, address, page_table, entry);
-	if (anon) {
-		inc_mm_counter(mm, anon_rss);
-		lru_cache_add_active(new_page);
-		page_add_new_anon_rmap(new_page, vma, address);
-	} else {
+	if (anon)
+		add_anon_page(vma, new_page, address);
+	else {
 		inc_mm_counter(mm, file_rss);
 		page_add_file_rmap(new_page);
 		if (write_access) {
_