From: Hugh Dickins <hugh@veritas.com>

mremap's move_vma VM_LOCKED case was still wrong.

If the do_munmap unmaps a part of new_vma, then its vm_start and vm_end
from before cannot both be the right addresses for the make_pages_present
range, and make_pages_present may BUG() there.

We need [new_addr, new_addr+new_len) to be locked down; but
move_page_tables already transferred the locked pages [new_addr,
new_addr+old_len), and they're either held in a VM_LOCKED vma throughout,
or temporarily in no vma: in neither case can they be swapped out, so no need to
run over that range again.



 mm/mremap.c |   16 +++++-----------
 1 files changed, 5 insertions(+), 11 deletions(-)

diff -puN mm/mremap.c~move_vma-VM_LOCKED-fix mm/mremap.c
--- 25/mm/mremap.c~move_vma-VM_LOCKED-fix	2003-06-26 17:35:21.000000000 -0700
+++ 25-akpm/mm/mremap.c	2003-06-26 17:35:21.000000000 -0700
@@ -244,9 +244,7 @@ static unsigned long move_vma(struct vm_
 	}
 
 	if (!move_page_tables(vma, new_addr, addr, old_len)) {
-		unsigned long must_fault_in;
-		unsigned long fault_in_start;
-		unsigned long fault_in_end;
+		unsigned long vm_locked = vma->vm_flags & VM_LOCKED;
 
 		if (allocated_vma) {
 			*new_vma = *vma;
@@ -272,14 +270,8 @@ static unsigned long move_vma(struct vm_
 		} else
 			vma = NULL;		/* nothing more to do */
 
-		must_fault_in = new_vma->vm_flags & VM_LOCKED;
-		fault_in_start = new_vma->vm_start;
-		fault_in_end = new_vma->vm_end;
-
 		do_munmap(current->mm, addr, old_len);
 
-		/* new_vma could have been invalidated by do_munmap */
-
 		/* Restore VM_ACCOUNT if one or two pieces of vma left */
 		if (vma) {
 			vma->vm_flags |= VM_ACCOUNT;
@@ -288,9 +280,11 @@ static unsigned long move_vma(struct vm_
 		}
 
 		current->mm->total_vm += new_len >> PAGE_SHIFT;
-		if (must_fault_in) {
+		if (vm_locked) {
 			current->mm->locked_vm += new_len >> PAGE_SHIFT;
-			make_pages_present(fault_in_start, fault_in_end);
+			if (new_len > old_len)
+				make_pages_present(new_addr + old_len,
+						   new_addr + new_len);
 		}
 		return new_addr;
 	}

_