Always use CLFLUSH instead of WBINVD for change_page_attr

CLFLUSH is a lot faster than WBINVD, so avoid WBINVD if at all possible.

Signed-off-by: Andi Kleen
---
 arch/x86_64/mm/pageattr.c |    8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

Index: linux/arch/x86_64/mm/pageattr.c
===================================================================
--- linux.orig/arch/x86_64/mm/pageattr.c
+++ linux/arch/x86_64/mm/pageattr.c
@@ -64,8 +64,9 @@ static struct page *split_large_page(uns
 
 static void flush_kernel_map(void *address)
 {
-	if (0 && address && cpu_has_clflush) {
-		/* is this worth it? */
+	/* When clflush is available always use it because it is
+	   much cheaper than WBINVD */
+	if (cpu_has_clflush) {
 		int i;
 		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
 			asm volatile("clflush (%0)" :: "r" (address + i));
@@ -77,7 +78,6 @@ static void flush_kernel_map(void *addre
 	__flush_tlb_all();
 }
 
-
 static inline void flush_map(unsigned long address)
 {
 	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
@@ -213,7 +213,7 @@ void global_flush_tlb(void)
 	dpage = xchg(&deferred_pages, NULL);
 	up_read(&init_mm.mmap_sem);
 
-	flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
+	flush_map((unsigned long)page_address(dpage));
 	while (dpage) {
 		struct page *tmp = dpage;
 		dpage = (struct page *)dpage->lru.next;
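
For reference, here is a minimal user-space sketch of the same per-cache-line
CLFLUSH pattern. It is not part of the patch: flush_page, CACHE_LINE and the
64-byte line size are illustrative assumptions, whereas the kernel code above
uses boot_cpu_data.x86_clflush_size for the stride.

/*
 * Illustrative sketch of flushing a page one cache line at a time with
 * CLFLUSH instead of invalidating the whole cache with WBINVD.
 * Build (x86_64, SSE2 is baseline): cc -O2 clflush_demo.c
 */
#include <stdlib.h>
#include <emmintrin.h>		/* _mm_clflush(), _mm_mfence() */

#define PAGE_SIZE	4096
#define CACHE_LINE	64	/* assumed line size; CPUID reports the real one */

static void flush_page(const void *address)
{
	int i;

	/* Flush one cache line at a time rather than the whole cache. */
	for (i = 0; i < PAGE_SIZE; i += CACHE_LINE)
		_mm_clflush((const char *)address + i);
	_mm_mfence();		/* order the flushes against later accesses */
}

int main(void)
{
	void *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!page)
		return 1;
	flush_page(page);
	free(page);
	return 0;
}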