From: Hugh Dickins

My latency breaking in copy_pte_range didn't work as intended: instead of
checking at regularish intervals, after the first interval it checked every
time around the loop, too impatient to be preempted.  Fix that.

Signed-off-by: Hugh Dickins
Signed-off-by: Andrew Morton
---

 mm/memory.c |   14 ++++++++------
 1 files changed, 8 insertions(+), 6 deletions(-)

diff -puN mm/memory.c~mm-copy_pte_range-progress-fix mm/memory.c
--- devel/mm/memory.c~mm-copy_pte_range-progress-fix	2005-09-25 15:30:55.000000000 -0700
+++ devel-akpm/mm/memory.c	2005-09-25 15:30:55.000000000 -0700
@@ -410,7 +410,7 @@ static int copy_pte_range(struct mm_stru
 {
 	pte_t *src_pte, *dst_pte;
 	unsigned long vm_flags = vma->vm_flags;
-	int progress;
+	int progress = 0;
 
 again:
 	dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
@@ -418,17 +418,19 @@ again:
 		return -ENOMEM;
 	src_pte = pte_offset_map_nested(src_pmd, addr);
 
-	progress = 0;
 	spin_lock(&src_mm->page_table_lock);
 	do {
 		/*
 		 * We are holding two locks at this point - either of them
 		 * could generate latencies in another task on another CPU.
 		 */
-		if (progress >= 32 && (need_resched() ||
-		    need_lockbreak(&src_mm->page_table_lock) ||
-		    need_lockbreak(&dst_mm->page_table_lock)))
-			break;
+		if (progress >= 32) {
+			progress = 0;
+			if (need_resched() ||
+			    need_lockbreak(&src_mm->page_table_lock) ||
+			    need_lockbreak(&dst_mm->page_table_lock))
+				break;
+		}
 		if (pte_none(*src_pte)) {
 			progress++;
 			continue;
_
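[Editor's note: the essence of the fix is re-arming the interval counter
each time the threshold fires; without the "progress = 0" reset inside the
branch, once progress first reaches 32 the expensive check runs on every
iteration of the loop rather than once per 32.  Below is a minimal
userspace sketch of that pattern, not part of the patch: process_item()
and NITEMS are hypothetical stand-ins for the pte-copy work, and
sched_yield() stands in for the need_resched()/need_lockbreak() checks.]

#include <sched.h>
#include <stdio.h>

#define NITEMS 1000		/* hypothetical amount of work */

static void process_item(int i)
{
	(void)i;		/* stand-in for copying one pte */
}

int main(void)
{
	int progress = 0;
	int i;

	for (i = 0; i < NITEMS; i++) {
		if (progress >= 32) {
			progress = 0;	/* the fix: re-arm the interval */
			sched_yield();	/* stand-in for the resched/lockbreak check */
		}
		process_item(i);
		progress++;
	}
	printf("processed %d items\n", NITEMS);
	return 0;
}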