From: Hugh Dickins <hugh@veritas.com>

objrmap's page_convert_anon tries to allocate pte_chains, so it must allow
for failure (even a GFP_KERNEL allocation can fail when the task has been
OOM-killed); also move its prototype to join the rest of the rmap.c
prototypes in swap.h.



 25-akpm/include/linux/rmap-locking.h |    2 --
 25-akpm/include/linux/swap.h         |    2 ++
 25-akpm/mm/fremap.c                  |   12 ++++++------
 25-akpm/mm/rmap.c                    |   14 ++++++++++++--
 4 files changed, 20 insertions(+), 10 deletions(-)

diff -puN include/linux/rmap-locking.h~hugh-04-page_convert_anon-ENOMEM include/linux/rmap-locking.h
--- 25/include/linux/rmap-locking.h~hugh-04-page_convert_anon-ENOMEM	Tue Mar 25 18:34:47 2003
+++ 25-akpm/include/linux/rmap-locking.h	Tue Mar 25 18:34:47 2003
@@ -45,5 +45,3 @@ static inline void pte_chain_free(struct
 	if (pte_chain)
 		__pte_chain_free(pte_chain);
 }
-
-void page_convert_anon(struct page *page);
diff -puN include/linux/swap.h~hugh-04-page_convert_anon-ENOMEM include/linux/swap.h
--- 25/include/linux/swap.h~hugh-04-page_convert_anon-ENOMEM	Tue Mar 25 18:34:47 2003
+++ 25-akpm/include/linux/swap.h	Tue Mar 25 18:34:47 2003
@@ -178,6 +178,8 @@ struct pte_chain *FASTCALL(page_add_rmap
 void FASTCALL(page_remove_rmap(struct page *, pte_t *));
 int FASTCALL(try_to_unmap(struct page *));
 
+int page_convert_anon(struct page *);
+
 /* linux/mm/shmem.c */
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
 #else
diff -puN mm/fremap.c~hugh-04-page_convert_anon-ENOMEM mm/fremap.c
--- 25/mm/fremap.c~hugh-04-page_convert_anon-ENOMEM	Tue Mar 25 18:34:47 2003
+++ 25-akpm/mm/fremap.c	Tue Mar 25 18:34:47 2003
@@ -72,8 +72,10 @@ int install_page(struct mm_struct *mm, s
 	pgidx = (addr - vma->vm_start) >> PAGE_SHIFT;
 	pgidx += vma->vm_pgoff;
 	pgidx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
-	if (!PageAnon(page) && (page->index != pgidx))
-		page_convert_anon(page);
+	if (!PageAnon(page) && (page->index != pgidx)) {
+		if (page_convert_anon(page) < 0)
+			goto err_free;
+	}
 
 	pgd = pgd_offset(mm, addr);
 	spin_lock(&mm->page_table_lock);
@@ -97,12 +99,10 @@ int install_page(struct mm_struct *mm, s
 	if (flush)
 		flush_tlb_page(vma, addr);
 
-	spin_unlock(&mm->page_table_lock);
-	pte_chain_free(pte_chain);
-	return 0;
-
+	err = 0;
 err_unlock:
 	spin_unlock(&mm->page_table_lock);
+err_free:
 	pte_chain_free(pte_chain);
 err:
 	return err;
diff -puN mm/rmap.c~hugh-04-page_convert_anon-ENOMEM mm/rmap.c
--- 25/mm/rmap.c~hugh-04-page_convert_anon-ENOMEM	Tue Mar 25 18:34:47 2003
+++ 25-akpm/mm/rmap.c	Tue Mar 25 18:34:47 2003
@@ -775,7 +775,7 @@ out:
  * of pte_chain structures to ensure that it can complete without releasing
  * the lock.
  */
-void page_convert_anon(struct page *page)
+int page_convert_anon(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 	struct vm_area_struct *vma;
@@ -784,6 +784,7 @@ void page_convert_anon(struct page *page
 	pte_addr_t pte_paddr;
 	int mapcount;
 	int index = 0;
+	int err = 0;
 
 	if (PageAnon(page))
 		goto out;
@@ -796,6 +797,15 @@ retry:
 	if (mapcount > 1) {
 		for (; index < mapcount; index += NRPTE) {
 			ptec = pte_chain_alloc(GFP_KERNEL);
+			if (!ptec) {
+				while (pte_chain) {
+					ptec = pte_chain->next;
+					pte_chain_free(pte_chain);
+					pte_chain = ptec;
+				}
+				err = -ENOMEM;
+				goto out;
+			}
 			ptec->next = pte_chain;
 			pte_chain = ptec;
 		}
@@ -868,7 +878,7 @@ out_unlock:
 	pte_chain_unlock(page);
 	up(&mapping->i_shared_sem);
 out:
-	return;
+	return err;
 }
 
 /**

_