From: "Keshavamurthy, Anil S" Introduce the size param for clflush_cache_range(). Signed-off-by: Anil S Keshavamurthy Cc: Andi Kleen Cc: Peter Zijlstra Cc: Muli Ben-Yehuda Cc: "Siddha, Suresh B" Cc: Arjan van de Ven Cc: Ashok Raj Cc: "David S. Miller" Cc: Christoph Lameter Cc: Greg KH Signed-off-by: Andrew Morton --- arch/x86_64/mm/pageattr.c | 6 +++--- include/asm-x86_64/cacheflush.h | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff -puN arch/x86_64/mm/pageattr.c~intel-iommu-clflush_cache_range-now-takes-size-param arch/x86_64/mm/pageattr.c --- a/arch/x86_64/mm/pageattr.c~intel-iommu-clflush_cache_range-now-takes-size-param +++ a/arch/x86_64/mm/pageattr.c @@ -61,10 +61,10 @@ static struct page *split_large_page(uns return base; } -static void cache_flush_page(void *adr) +void clflush_cache_range(void *adr, int size) { int i; - for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) + for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size) asm volatile("clflush (%0)" :: "r" (adr + i)); } @@ -79,7 +79,7 @@ static void flush_kernel_map(void *arg) wbinvd(); else list_for_each_entry(pg, l, lru) { void *adr = page_address(pg); - cache_flush_page(adr); + clflush_cache_range(adr, PAGE_SIZE); } __flush_tlb_all(); } diff -puN include/asm-x86_64/cacheflush.h~intel-iommu-clflush_cache_range-now-takes-size-param include/asm-x86_64/cacheflush.h --- a/include/asm-x86_64/cacheflush.h~intel-iommu-clflush_cache_range-now-takes-size-param +++ a/include/asm-x86_64/cacheflush.h @@ -27,6 +27,7 @@ void global_flush_tlb(void); int change_page_attr(struct page *page, int numpages, pgprot_t prot); int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot); +void clflush_cache_range(void *addr, int size); #ifdef CONFIG_DEBUG_RODATA void mark_rodata_ro(void); _