--- a/linux/linux/mm/mmap.c	2005-02-23 11:42:51.000000000 -0800
+++ b/linux/linux/mm/mmap.c	2005-02-22 13:49:49.000000000 -0800
@@ -1030,14 +1030,21 @@
 	vm_validate_enough("entering expand_stack");
 
 	/*
-	 * vma->vm_start/vm_end cannot change under us because the caller is required
-	 * to hold the mmap_sem in write mode. We need to get the spinlock only
-	 * before relocating the vma range ourself.
+	 * vma->vm_start/vm_end cannot change under us because the caller
+	 * is required to hold the mmap_sem in read mode. We need the
+	 * page_table_lock to serialize against concurrent expand_stacks.
 	 */
 	spin_lock(&vma->vm_mm->page_table_lock);
 	address += 4 + PAGE_SIZE - 1;
 	address &= PAGE_MASK;
+
+	/* already expanded while we were spinning? */
+	if (vma->vm_start <= address) {
+		spin_unlock(&vma->vm_mm->page_table_lock);
+		return 0;
+	}
+
 	grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
 	/* Overcommit.. */
@@ -1081,12 +1088,19 @@
 	vm_validate_enough("entering expand_stack");
 
 	/*
-	 * vma->vm_start/vm_end cannot change under us because the caller is required
-	 * to hold the mmap_sem in write mode. We need to get the spinlock only
-	 * before relocating the vma range ourself.
+	 * vma->vm_start/vm_end cannot change under us because the caller
+	 * is required to hold the mmap_sem in read mode. We need the
+	 * page_table_lock to serialize against concurrent expand_stacks.
 	 */
 	address &= PAGE_MASK;
 	spin_lock(&vma->vm_mm->page_table_lock);
+
+	/* already expanded while we were spinning? */
+	if (vma->vm_start <= address) {
+		spin_unlock(&vma->vm_mm->page_table_lock);
+		return 0;
+	}
+
 	grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
 	/* Overcommit.. */