- We cannot hold i_shared_lock across move_one_page(): move_one_page()
  may need to perform __GFP_WAIT allocations of pagetable pages, and such
  allocations can sleep, which is not permitted while holding a spinlock.

- Move the cond_resched() out of the -EAGAIN path so that we test it once
  per page, rather than only when move_one_page() returns -EAGAIN.  The
  resulting loop shape is sketched below.
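
As a rough illustration, the per-page loop after this change looks like the
sketch below.  This is condensed from the patched function in the diff that
follows (declarations and the flush_cache_range() call are elided, so it is
not compilable on its own):

	for (offset = 0; offset < len; offset += PAGE_SIZE) {
		int ret;

		ret = move_one_page(vma, old_addr + offset, new_addr + offset);
		if (ret == -EAGAIN) {
			/* COW the page, then step back so we retry it */
			ret = make_page_exclusive(vma, old_addr + offset);
			offset -= PAGE_SIZE;
			(*cows)++;
		}
		if (ret)
			break;
		/* no spinlock is held here, so rescheduling is safe */
		cond_resched();
	}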



---

 25-akpm/mm/mremap.c |   21 +--------------------
 1 files changed, 1 insertion(+), 20 deletions(-)

diff -puN mm/mremap.c~i_shared_lock-mremap-fix mm/mremap.c
--- 25/mm/mremap.c~i_shared_lock-mremap-fix	Wed Apr 21 15:23:57 2004
+++ 25-akpm/mm/mremap.c	Wed Apr 21 15:24:40 2004
@@ -143,22 +143,10 @@ static int move_page_tables(struct vm_ar
 		unsigned long new_addr, unsigned long old_addr,
 		unsigned long len, int *cows)
 {
-	struct address_space *mapping = NULL;
 	unsigned long offset;
 
 	flush_cache_range(vma, old_addr, old_addr + len);
 
-	if (vma->vm_file) {
-		/*
-		 * Subtle point from Rajesh Venkatasubramanian: before
-		 * moving file-based ptes, we must lock vmtruncate out,
-		 * since it might clean the dst vma before the src vma,
-		 * and we propagate stale pages into the dst afterward.
-		 */
-		mapping = vma->vm_file->f_mapping;
-		spin_lock(&mapping->i_shared_lock);
-	}
-
 	/*
 	 * This is not the clever way to do this, but we're taking the
 	 * easy way out on the assumption that most remappings will be
@@ -175,21 +163,14 @@ static int move_page_tables(struct vm_ar
 		 * brought back in (if it's still shared by then).
 		 */
 		if (ret == -EAGAIN) {
-			if (mapping)
-				spin_unlock(&mapping->i_shared_lock);
-			cond_resched();
 			ret = make_page_exclusive(vma, old_addr+offset);
-			if (mapping)
-				spin_lock(&mapping->i_shared_lock);
 			offset -= PAGE_SIZE;
 			(*cows)++;
 		}
 		if (ret)
 			break;
+		cond_resched();
 	}
-
-	if (mapping)
-		spin_unlock(&mapping->i_shared_lock);
 	return offset;
 }
 

_