From: Jan Beulich

Since the fourth PDPT entry cannot be shared under Xen,
vmalloc_sync_all() must iterate over pmd-s rather than pgd-s here.
Luckily, the code isn't used for native PAE (SHARED_KERNEL_PMD is 1)
and the change is benign to non-PAE.

Also do a little more cleanup in that function.

Signed-off-by: Jan Beulich
Cc: Jeremy Fitzhardinge
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: "H. Peter Anvin"
Signed-off-by: Andrew Morton
---

 arch/x86/mm/fault.c |   14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff -puN arch/x86/mm/fault.c~x86-adjust-vmalloc_sync_all-for-xen-2nd-try arch/x86/mm/fault.c
--- a/arch/x86/mm/fault.c~x86-adjust-vmalloc_sync_all-for-xen-2nd-try
+++ a/arch/x86/mm/fault.c
@@ -935,15 +935,15 @@ LIST_HEAD(pgd_list);
 
 void vmalloc_sync_all(void)
 {
-#ifdef CONFIG_X86_32
-	unsigned long start = VMALLOC_START & PGDIR_MASK;
 	unsigned long address;
 
+#ifdef CONFIG_X86_32
 	if (SHARED_KERNEL_PMD)
 		return;
 
-	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
+	for (address = VMALLOC_START & PMD_MASK;
+	     address >= TASK_SIZE && address < FIXADDR_TOP;
+	     address += PMD_SIZE) {
 		unsigned long flags;
 		struct page *page;
 
@@ -956,10 +956,8 @@ void vmalloc_sync_all(void)
 		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #else /* CONFIG_X86_64 */
-	unsigned long start = VMALLOC_START & PGDIR_MASK;
-	unsigned long address;
-
-	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
+	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
+	     address += PGDIR_SIZE) {
 		const pgd_t *pgd_ref = pgd_offset_k(address);
 		unsigned long flags;
 		struct page *page;
_
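
For readers who want the resulting 32-bit loop shape in one piece, here is a
minimal sketch of what the patched path does: walk the vmalloc range at PMD
granularity (stopping at FIXADDR_TOP) and propagate each kernel mapping into
every pgd on pgd_list. It assumes the pgd_list/pgd_lock bookkeeping visible in
the hunk context and a per-pgd helper along the lines of the kernel's
vmalloc_sync_one(); treat it as an illustration, not the patched function
verbatim.

/*
 * Illustrative sketch only (not part of the patch).  Intended to read in
 * the context of arch/x86/mm/fault.c; vmalloc_sync_one() is assumed to
 * copy the kernel pmd entry covering 'address' into the given pgd and
 * return NULL once there is nothing further to sync.
 */
static void example_vmalloc_sync_all_32(void)
{
	unsigned long address;

	/* Native PAE shares the kernel PMDs, so there is nothing to sync. */
	if (SHARED_KERNEL_PMD)
		return;

	/* PMD-granular walk, as the patch switches to under Xen-style PAE. */
	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		unsigned long flags;
		struct page *page;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			/* Propagate this PMD into every tracked pgd. */
			if (!vmalloc_sync_one(page_address(page), address))
				break;
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}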