Subject: skip transhuge pages in ksm for now

From: Andrea Arcangeli

Skip transhuge pages in ksm for now.

Signed-off-by: Andrea Arcangeli
Reviewed-by: Rik van Riel
---

diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -449,7 +449,7 @@ static struct page *get_mergeable_page(s
 	page = follow_page(vma, addr, FOLL_GET);
 	if (!page)
 		goto out;
-	if (PageAnon(page)) {
+	if (PageAnon(page) && !PageTransCompound(page)) {
 		flush_anon_page(vma, page, addr);
 		flush_dcache_page(page);
 	} else {
@@ -1294,7 +1294,19 @@ next_mm:
 			if (ksm_test_exit(mm))
 				break;
 			*page = follow_page(vma, ksm_scan.address, FOLL_GET);
-			if (*page && PageAnon(*page)) {
+			if (!*page) {
+				ksm_scan.address += PAGE_SIZE;
+				cond_resched();
+				continue;
+			}
+			if (PageTransCompound(*page)) {
+				put_page(*page);
+				ksm_scan.address &= HPAGE_PMD_MASK;
+				ksm_scan.address += HPAGE_PMD_SIZE;
+				cond_resched();
+				continue;
+			}
+			if (PageAnon(*page)) {
 				flush_anon_page(vma, *page, ksm_scan.address);
 				flush_dcache_page(*page);
 				rmap_item = get_next_rmap_item(slot,
@@ -1308,8 +1320,7 @@ next_mm:
 				up_read(&mm->mmap_sem);
 				return rmap_item;
 			}
-			if (*page)
-				put_page(*page);
+			put_page(*page);
 			ksm_scan.address += PAGE_SIZE;
 			cond_resched();
 		}
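
Note (not part of the patch): the second hunk advances the scan address past an entire transparent hugepage in one step instead of one base page at a time. Below is a minimal user-space sketch of that address arithmetic; the constants are x86-64 assumptions (4 KiB base pages, 2 MiB PMD-sized huge pages) that merely mirror the kernel's PAGE_SIZE, HPAGE_PMD_SIZE and HPAGE_PMD_MASK and are not taken from the patch itself.

/*
 * Sketch of the transhuge skip logic in scan_get_next_rmap_item(), using
 * assumed x86-64 values for the kernel constants.
 */
#include <stdio.h>

#define PAGE_SIZE	(1UL << 12)		/* assumed: 4 KiB base page */
#define HPAGE_PMD_SIZE	(1UL << 21)		/* assumed: 2 MiB hugepage */
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

int main(void)
{
	/* Hypothetical scan address in the middle of a 2 MiB huge page. */
	unsigned long addr = 0x7f0000234000UL;

	/* Ordinary page: advance one base page. */
	unsigned long next_small = addr + PAGE_SIZE;

	/*
	 * Transhuge page: round down to the start of the huge page, then
	 * jump past it, so the whole 2 MiB region is skipped at once.
	 */
	unsigned long next_huge = (addr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE;

	printf("addr            = %#lx\n", addr);
	printf("next (small)    = %#lx\n", next_small);
	printf("next (transhuge)= %#lx\n", next_huge);
	return 0;
}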