diff -urN vm-ref/arch/alpha/kernel/traps.c vm-rest/arch/alpha/kernel/traps.c
--- vm-ref/arch/alpha/kernel/traps.c	Tue Jan 22 18:55:42 2002
+++ vm-rest/arch/alpha/kernel/traps.c	Fri Mar 29 21:29:38 2002
@@ -167,6 +167,11 @@
 	dik_show_trace(sp);
 }
 
+void dump_stack(void)
+{
+	show_stack(NULL);
+}
+
 void
 die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
 {
diff -urN vm-ref/fs/buffer.c vm-rest/fs/buffer.c
--- vm-ref/fs/buffer.c	Fri Mar 29 21:29:21 2002
+++ vm-rest/fs/buffer.c	Fri Mar 29 21:29:38 2002
@@ -47,7 +47,6 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/completion.h>
-#include <linux/compiler.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -2986,16 +2985,6 @@
 
 	complete((struct completion *)startup);
 
-	/*
-	 * FIXME: The ndirty logic here is wrong.  It's supposed to
-	 * send bdflush back to sleep after writing ndirty buffers.
-	 * In fact, the test is wrong so bdflush will in fact
-	 * sleep when bdflush_stop() returns true.
-	 *
-	 * FIXME: If it proves useful to implement ndirty properly,
-	 * then perhaps the value of ndirty should be scaled by the
-	 * amount of memory in the machine.
-	 */
 	for (;;) {
 		int ndirty = bdf_prm.b_un.ndirty;
 
diff -urN vm-ref/include/linux/kernel.h vm-rest/include/linux/kernel.h
--- vm-ref/include/linux/kernel.h	Fri Mar 29 21:29:21 2002
+++ vm-rest/include/linux/kernel.h	Fri Mar 29 21:29:38 2002
@@ -11,6 +11,7 @@
 #include <linux/linkage.h>
 #include <linux/stddef.h>
 #include <linux/types.h>
+#include <linux/compiler.h>
 
 /* Optimization barrier */
 /* The "volatile" is due to gcc bugs */
diff -urN vm-ref/include/linux/mm.h vm-rest/include/linux/mm.h
--- vm-ref/include/linux/mm.h	Fri Mar 29 21:29:21 2002
+++ vm-rest/include/linux/mm.h	Fri Mar 29 21:29:38 2002
@@ -168,9 +168,8 @@
 	 * we can simply calculate the virtual address. On machines with
 	 * highmem some memory is mapped into kernel virtual memory
 	 * dynamically, so we need a place to store that address.
-	 * Note that this field could be 16 bits on x86 ... ;)
 	 *
-	 * Architectures with slow multiplication can define
+	 * Architectures with a slow ALU can define
 	 * WANT_PAGE_VIRTUAL in asm/page.h
 	 */
 #if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
@@ -311,6 +310,7 @@
 #define TryLockPage(page)	test_and_set_bit(PG_locked, &(page)->flags)
 #define PageChecked(page)	test_bit(PG_checked, &(page)->flags)
 #define SetPageChecked(page)	set_bit(PG_checked, &(page)->flags)
+
 #define PageLaunder(page)	test_bit(PG_launder, &(page)->flags)
 #define SetPageLaunder(page)	set_bit(PG_launder, &(page)->flags)
 #define ClearPageLaunder(page)	clear_bit(PG_launder, &(page)->flags)
@@ -348,24 +348,18 @@
 	do {						\
 		(page)->virtual = (address);		\
 	} while(0)
-
-#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
-#define set_page_address(page, address)  do { } while(0)
-#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
-
-/*
- * Permanent address of a page. Obviously must never be
- * called on a highmem page.
- */
-#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
-
 #define page_address(page) ((page)->virtual)
 
 #else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
 
+#define set_page_address(page, address)  do { } while(0)
+#ifdef CONFIG_DISCONTIGMEM
 #define page_address(page)						\
 	__va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT)	\
 			+ page_zone(page)->zone_start_paddr)
+#else
+#define page_address(page) __va((page - mem_map) << PAGE_SHIFT)
+#endif
 
 #endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
 
@@ -461,6 +455,8 @@
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr),0)
 
+extern int start_aggressive_readahead(unsigned int);
+
 extern void show_free_areas(void);
 extern void show_free_areas_node(pg_data_t *pgdat);
 
@@ -525,8 +521,8 @@
 	return page_count(page) - !!page->buffers == 1;
 }
 
-extern int can_share_swap_page(struct page *);
-extern int remove_exclusive_swap_page(struct page *);
+extern int FASTCALL(make_exclusive_page(struct page *, int));
+extern int FASTCALL(remove_exclusive_swap_page(struct page *));
 
 extern void __free_pte(pte_t);
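[For the flat (non-DISCONTIGMEM, non-highmem) case the reworked page_address() above is pure pointer arithmetic on mem_map. The toy userspace model below just reproduces that arithmetic; the 4K page size, the i386-style PAGE_OFFSET and the stub struct page are illustrative assumptions, not taken from this patch.]

/* Toy model of the flat-mem_map page_address() arithmetic (illustrative
 * values: 4K pages, i386-style 3G direct mapping; struct page is a stub). */
#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGE_OFFSET  0xC0000000UL
#define __va(paddr)  ((unsigned long)(paddr) + PAGE_OFFSET)

struct page { unsigned long flags; };

static struct page mem_map[1024];        /* toy flat mem_map */

static unsigned long page_address(struct page *page)
{
	/* page number within mem_map, shifted up to a physical address,
	 * then moved into the kernel direct mapping. */
	return __va((unsigned long)(page - mem_map) << PAGE_SHIFT);
}

int main(void)
{
	printf("page 5 maps to virtual address 0x%lx\n",
	       page_address(&mem_map[5]));       /* prints 0xc0005000 */
	return 0;
}
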
 
diff -urN vm-ref/include/linux/mmzone.h vm-rest/include/linux/mmzone.h
--- vm-ref/include/linux/mmzone.h	Fri Mar 29 21:29:21 2002
+++ vm-rest/include/linux/mmzone.h	Fri Mar 29 21:29:38 2002
@@ -19,6 +19,11 @@
 #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
 #endif
 
+#define ZONE_DMA		0
+#define ZONE_NORMAL		1
+#define ZONE_HIGHMEM		2
+#define MAX_NR_ZONES		3
+
 typedef struct free_area_struct {
 	struct list_head	free_list;
 	unsigned long		*map;
@@ -26,6 +31,10 @@
 
 struct pglist_data;
 
+typedef struct zone_watermarks_s {
+	unsigned long min, low, high;
+} zone_watermarks_t;
+
 /*
  * On machines where it is needed (eg PCs) we divide physical memory
  * into multiple physical zones. On a PC we have 3 zones:
@@ -40,7 +49,15 @@
 	 */
 	spinlock_t		lock;
 	unsigned long		free_pages;
-	unsigned long		pages_min, pages_low, pages_high;
+
+	/*
+	 * We don't know whether the memory we are about to allocate will be
+	 * freeable or will eventually be released, so to avoid completely
+	 * wasting several GB of RAM we must reserve some of the lower zone
+	 * memory (otherwise we risk running OOM on the lower zones even
+	 * though there is plenty of freeable RAM in the higher zones).
+	 */
+	zone_watermarks_t	watermarks[MAX_NR_ZONES];
 
 	/*
 	 * The below fields are protected by different locks (or by
@@ -60,35 +77,6 @@
 	free_area_t		free_area[MAX_ORDER];
 
 	/*
-	 * wait_table		-- the array holding the hash table
-	 * wait_table_size	-- the size of the hash table array
-	 * wait_table_shift	-- wait_table_size
-	 * 				== BITS_PER_LONG (1 << wait_table_bits)
-	 *
-	 * The purpose of all these is to keep track of the people
-	 * waiting for a page to become available and make them
-	 * runnable again when possible. The trouble is that this
-	 * consumes a lot of space, especially when so few things
-	 * wait on pages at a given time. So instead of using
-	 * per-page waitqueues, we use a waitqueue hash table.
-	 *
-	 * The bucket discipline is to sleep on the same queue when
-	 * colliding and wake all in that wait queue when removing.
-	 * When something wakes, it must check to be sure its page is
-	 * truly available, a la thundering herd. The cost of a
-	 * collision is great, but given the expected load of the
-	 * table, they should be so rare as to be outweighed by the
-	 * benefits from the saved space.
-	 *
-	 * __wait_on_page() and unlock_page() in mm/filemap.c, are the
-	 * primary users of these fields, and in mm/page_alloc.c
-	 * free_area_init_core() performs the initialization of them.
-	 */
-	wait_queue_head_t	* wait_table;
-	unsigned long		wait_table_size;
-	unsigned long		wait_table_shift;
-
-	/*
 	 * Discontig memory support fields.
 	 */
 	struct pglist_data	*zone_pgdat;
@@ -101,13 +89,9 @@
 	 */
 	char			*name;
 	unsigned long		size;
+	unsigned long		realsize;
 } zone_t;
 
-#define ZONE_DMA		0
-#define ZONE_NORMAL		1
-#define ZONE_HIGHMEM		2
-#define MAX_NR_ZONES		3
-
 /*
  * One allocation request operates on a zonelist. A zonelist
  * is a list of zones, the first one is the 'goal' of the
@@ -125,6 +109,32 @@
 
 #define GFP_ZONEMASK	0x0f
 
+typedef struct wait_table_s {
+	/*
+	 * The purpose of all these is to keep track of the people
+	 * waiting for a page to become available and make them
+	 * runnable again when possible. The trouble is that this
+	 * consumes a lot of space, especially when so few things
+	 * wait on pages at a given time. So instead of using
+	 * per-page waitqueues, we use a waitqueue hash table.
+	 *
+	 * The bucket discipline is to sleep on the same queue when
+	 * colliding and wake all in that wait queue when removing.
+	 * When something wakes, it must check to be sure its page is
+	 * truly available, a la thundering herd. The cost of a
+	 * collision is great, but given the expected load of the
+	 * table, they should be so rare as to be outweighed by the
+	 * benefits from the saved space.
+	 *
+	 * __wait_on_page() and unlock_page() in mm/filemap.c, are the
+	 * primary users of these fields, and in mm/page_alloc.c
+	 * free_area_init_core() performs the initialization of them.
+	 */
+	wait_queue_head_t	* head;
+	unsigned long		shift;
+	unsigned long		size;
+} wait_table_t;
+
 /*
  * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
  * (mostly NUMA machines?) to denote a higher-level memory zone than the
@@ -148,14 +158,15 @@
 	unsigned long node_start_mapnr;
 	unsigned long node_size;
 	int node_id;
+	wait_table_t wait_table;
 	struct pglist_data *node_next;
 } pg_data_t;
 
 extern int numnodes;
 extern pg_data_t *pgdat_list;
 
-#define memclass(pgzone, classzone)	(((pgzone)->zone_pgdat == (classzone)->zone_pgdat) \
-			&& ((pgzone) <= (classzone)))
+#define zone_idx(zone)			((zone) - (zone)->zone_pgdat->node_zones)
+#define memclass(pgzone, classzone)	(zone_idx(pgzone) <= zone_idx(classzone))
 
 /*
  * The following two are not meant for general usage. They are here as
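[To illustrate the per-classzone watermarks and the simplified memclass() above, here is a toy single-node userspace model; the free-page and watermark numbers are invented. A normal-zone page passes the check for a NORMAL classzone but fails it for a HIGHMEM classzone, because the HIGHMEM classzone index selects the larger, reserved watermark.]

/* Toy single-node model of zone_idx()/memclass() and the per-classzone
 * watermarks; all page counts below are invented for illustration. */
#include <stdio.h>

enum { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

typedef struct { unsigned long min, low, high; } zone_watermarks_t;

typedef struct {
	unsigned long free_pages;
	zone_watermarks_t watermarks[MAX_NR_ZONES];
} zone_t;

static zone_t node_zones[MAX_NR_ZONES];

#define zone_idx(zone)            ((zone) - node_zones)
#define memclass(pgzone, czone)   (zone_idx(pgzone) <= zone_idx(czone))

int main(void)
{
	zone_t *normal = &node_zones[ZONE_NORMAL];
	zone_t *highmem = &node_zones[ZONE_HIGHMEM];

	normal->free_pages = 500;
	normal->watermarks[ZONE_NORMAL].low  = 100;  /* own classzone */
	normal->watermarks[ZONE_HIGHMEM].low = 600;  /* + lower_zone_reserve */

	/* A HIGHMEM-classzone allocation falling back on the normal zone is
	 * checked against the bigger, reserved watermark and is refused: */
	printf("normal zone usable for NORMAL  classzone: %d\n",
	       normal->free_pages > normal->watermarks[zone_idx(normal)].low);
	printf("normal zone usable for HIGHMEM classzone: %d\n",
	       normal->free_pages > normal->watermarks[zone_idx(highmem)].low);
	printf("memclass(normal, highmem) = %ld\n", (long)memclass(normal, highmem));
	return 0;
}
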
diff -urN vm-ref/include/linux/pagemap.h vm-rest/include/linux/pagemap.h
--- vm-ref/include/linux/pagemap.h	Fri Mar 29 21:29:21 2002
+++ vm-rest/include/linux/pagemap.h	Fri Mar 29 21:29:38 2002
@@ -97,8 +97,6 @@
 		___wait_on_page(page);
 }
 
-extern void wake_up_page(struct page *);
-
 extern struct page * grab_cache_page (struct address_space *, unsigned long);
 extern struct page * grab_cache_page_nowait (struct address_space *, unsigned long);
 
diff -urN vm-ref/include/linux/sched.h vm-rest/include/linux/sched.h
--- vm-ref/include/linux/sched.h	Fri Mar 29 21:29:21 2002
+++ vm-rest/include/linux/sched.h	Fri Mar 29 21:29:38 2002
@@ -282,13 +282,9 @@
 
 struct zone_struct;
 
-/*
- * Used when a task if trying to free some pages for its own
- * use - to prevent other tasks/CPUs from stealing the just-freed
- * pages.
- */
-struct local_page {
-	struct page *page;
+struct local_pages {
+	struct list_head list;
+	unsigned int order, nr;
 	struct zone_struct * classzone;
 };
 
@@ -337,7 +333,7 @@
 
 	struct task_struct *next_task, *prev_task;
 	struct mm_struct *active_mm;
-	struct local_page local_page;
+	struct local_pages local_pages;
 
 /* task state */
 	struct linux_binfmt *binfmt;
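[The struct local_pages above replaces the single local_page slot with a per-task LIFO list. A minimal userspace sketch of the idea follows: __free_pages_ok() parks freed pages on the task-local list (remembering the order in page->index) and balance_classzone() later picks the first entry of the wanted order. All types here are simplified stand-ins, not kernel code.]

/* Toy model of the per-task local_pages mechanism: park freed pages on a
 * LIFO list keyed by allocation order, then pick one of the right order. */
#include <stdio.h>
#include <stddef.h>

struct page { unsigned long index; struct page *next; };   /* index = order */

struct local_pages { struct page *list; unsigned int order, nr; };

/* Park a freed page on the task-local list (models the local_freelist path
 * in __free_pages_ok()). */
static void local_free(struct local_pages *lp, struct page *page, unsigned int order)
{
	page->index = order;
	page->next = lp->list;        /* LIFO: newest first */
	lp->list = page;
	lp->nr++;
}

/* Pick the most recently freed page of the wanted order, if any (models the
 * search loop in balance_classzone()). */
static struct page *local_pick(struct local_pages *lp)
{
	for (struct page **pp = &lp->list; *pp; pp = &(*pp)->next) {
		if ((*pp)->index == lp->order) {
			struct page *page = *pp;
			*pp = page->next;
			lp->nr--;
			return page;
		}
	}
	return NULL;
}

int main(void)
{
	struct local_pages lp = { .list = NULL, .order = 1, .nr = 0 };
	struct page pages[3];
	local_free(&lp, &pages[0], 0);
	local_free(&lp, &pages[1], 1);
	local_free(&lp, &pages[2], 1);
	struct page *got = local_pick(&lp);
	printf("picked order-1 page #%ld, %u pages left on the local list\n",
	       got ? (long)(got - pages) : -1L, lp.nr);
	return 0;
}
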
diff -urN vm-ref/include/linux/sysctl.h vm-rest/include/linux/sysctl.h
--- vm-ref/include/linux/sysctl.h	Fri Mar 29 21:29:21 2002
+++ vm-rest/include/linux/sysctl.h	Fri Mar 29 21:29:38 2002
@@ -143,12 +143,13 @@
 	VM_MAX_MAP_COUNT=11,	/* int: Maximum number of active map areas */
 	VM_MIN_READAHEAD=12,    /* Min file readahead */
 	VM_MAX_READAHEAD=13,    /* Max file readahead */
-	VM_VFS_SCAN_RATIO=14,	/* part of the inactive vfs lists to scan */
-	VM_LRU_BALANCE_RATIO=15,/* balance active and inactive caches */
-	VM_PASSES=16,		/* number of vm passes before failing */
-	VM_GFP_DEBUG=17,	/* debug GFP failures */
-	VM_CACHE_SCAN_RATIO=18,	/* part of the inactive cache list to scan */
-	VM_MAPPED_RATIO=19,	/* amount of unfreeable pages that triggers swapout */
+	VM_HEAP_STACK_GAP=14,	/* int: page gap between heap and stack */
+	VM_VFS_SCAN_RATIO=15,	/* part of the inactive vfs lists to scan */
+	VM_LRU_BALANCE_RATIO=16,/* balance active and inactive caches */
+	VM_PASSES=17,		/* number of vm passes before failing */
+	VM_GFP_DEBUG=18,	/* debug GFP failures */
+	VM_CACHE_SCAN_RATIO=19,	/* part of the inactive cache list to scan */
+	VM_MAPPED_RATIO=20,	/* amount of unfreeable pages that triggers swapout */
 };
 
 
diff -urN vm-ref/kernel/fork.c vm-rest/kernel/fork.c
--- vm-ref/kernel/fork.c	Fri Mar 29 21:29:21 2002
+++ vm-rest/kernel/fork.c	Fri Mar 29 21:29:38 2002
@@ -659,7 +659,7 @@
 	p->lock_depth = -1;		/* -1 = no lock */
 	p->start_time = jiffies;
 
-	p->local_page.page = NULL;
+	INIT_LIST_HEAD(&p->local_pages.list);
 
 	retval = -ENOMEM;
 	/* copy all the process information */
diff -urN vm-ref/kernel/ksyms.c vm-rest/kernel/ksyms.c
--- vm-ref/kernel/ksyms.c	Fri Mar 29 21:29:21 2002
+++ vm-rest/kernel/ksyms.c	Fri Mar 29 21:29:38 2002
@@ -90,6 +90,7 @@
 EXPORT_SYMBOL(exit_sighand);
 
 /* internal kernel memory management */
+EXPORT_SYMBOL(start_aggressive_readahead);
 EXPORT_SYMBOL(_alloc_pages);
 EXPORT_SYMBOL(__alloc_pages);
 EXPORT_SYMBOL(alloc_pages_node);
diff -urN vm-ref/mm/filemap.c vm-rest/mm/filemap.c
--- vm-ref/mm/filemap.c	Fri Mar 29 21:29:21 2002
+++ vm-rest/mm/filemap.c	Fri Mar 29 21:29:38 2002
@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/iobuf.h>
-#include <linux/compiler.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -745,25 +744,14 @@
 	return 0;
 }
 
-/*
- * Knuth recommends primes in approximately golden ratio to the maximum
- * integer representable by a machine word for multiplicative hashing.
- * Chuck Lever verified the effectiveness of this technique:
- * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
- *
- * These primes are chosen to be bit-sparse, that is operations on
- * them can use shifts and additions instead of multiplications for
- * machines where multiplications are slow.
- */
-#if BITS_PER_LONG == 32
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e370001UL
-#elif BITS_PER_LONG == 64
-/*  2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
-#else
-#error Define GOLDEN_RATIO_PRIME for your wordsize.
-#endif
+static inline wait_queue_head_t * wait_table_hashfn(struct page * page, wait_table_t * wait_table)
+{
+#define i (((unsigned long) page)/(sizeof(struct page) & ~ (sizeof(struct page) - 1)))
+#define s(x) ((x)+((x)>>wait_table->shift))
+	return wait_table->head + (s(i) & (wait_table->size-1));
+#undef i
+#undef s
+}
 
 /*
  * In order to wait for pages to become available there must be
@@ -775,35 +763,10 @@
  * at a cost of "thundering herd" phenomena during rare hash
  * collisions.
  */
-static inline wait_queue_head_t *page_waitqueue(struct page *page)
+static inline wait_queue_head_t * page_waitqueue(struct page *page)
 {
-	const zone_t *zone = page_zone(page);
-	wait_queue_head_t *wait = zone->wait_table;
-	unsigned long hash = (unsigned long)page;
-
-#if BITS_PER_LONG == 64
-	/*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
-	unsigned long n = hash;
-	n <<= 18;
-	hash -= n;
-	n <<= 33;
-	hash -= n;
-	n <<= 3;
-	hash += n;
-	n <<= 3;
-	hash -= n;
-	n <<= 4;
-	hash += n;
-	n <<= 2;
-	hash += n;
-#else
-	/* On some cpus multiply is faster, on others gcc will do shifts */
-	hash *= GOLDEN_RATIO_PRIME;
-#endif
-
-	hash >>= zone->wait_table_shift;
-
-	return &wait[hash];
+	pg_data_t * pgdat = page_zone(page)->zone_pgdat;
+	return wait_table_hashfn(page, &pgdat->wait_table);
 }
 
 /* 
@@ -843,7 +806,7 @@
 		BUG();
 	smp_mb__after_clear_bit(); 
 	if (waitqueue_active(waitqueue))
-		wake_up_all(waitqueue);
+		wake_up(waitqueue);
 }
 
 /*
@@ -856,7 +819,7 @@
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
 
-	add_wait_queue_exclusive(waitqueue, &wait);
+	add_wait_queue(waitqueue, &wait);
 	for (;;) {
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		if (PageLocked(page)) {
@@ -869,12 +832,6 @@
 	__set_task_state(tsk, TASK_RUNNING);
 	remove_wait_queue(waitqueue, &wait);
 }
-
-void wake_up_page(struct page *page)
-{
-	wake_up(page_waitqueue(page));
-}
-EXPORT_SYMBOL(wake_up_page);
 
 /*
  * Get an exclusive lock on the page, optimistically
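[The macro-heavy wait_table_hashfn() above may be easier to follow as a plain function. The sketch below restates it in userspace terms; the stub struct page and the table parameters are made-up stand-ins, not kernel values.]

/* Plain-function restatement of wait_table_hashfn(): divide the page pointer
 * by the largest power of two that divides sizeof(struct page), fold the high
 * bits down by `shift', and mask with the (power of two) table size. */
#include <stdio.h>

struct page { unsigned long flags, index; void *mapping; };   /* stub */

typedef struct { unsigned long shift, size; } wait_table_t;   /* head omitted */

static unsigned long wait_table_bucket(const struct page *page,
				       const wait_table_t *wt)
{
	unsigned long align = sizeof(struct page) & ~(sizeof(struct page) - 1);
	unsigned long i = (unsigned long)page / align;   /* the `i' macro */
	unsigned long s = i + (i >> wt->shift);          /* the `s(x)' macro */
	return s & (wt->size - 1);
}

int main(void)
{
	wait_table_t wt = { .shift = 8, .size = 256 };
	struct page pages[4];
	for (int k = 0; k < 4; k++)
		printf("&pages[%d] hashes to bucket %lu\n",
		       k, wait_table_bucket(&pages[k], &wt));
	return 0;
}
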
diff -urN vm-ref/mm/memory.c vm-rest/mm/memory.c
--- vm-ref/mm/memory.c	Fri Mar 29 13:35:50 2002
+++ vm-rest/mm/memory.c	Fri Mar 29 21:29:38 2002
@@ -952,15 +952,11 @@
 	if (!VALID_PAGE(old_page))
 		goto bad_wp_page;
 
-	if (!TryLockPage(old_page)) {
-		int reuse = can_share_swap_page(old_page);
-		unlock_page(old_page);
-		if (reuse) {
-			flush_cache_page(vma, address);
-			establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
-			spin_unlock(&mm->page_table_lock);
-			return 1;	/* Minor fault */
-		}
+	if (make_exclusive_page(old_page, 1)) {	
+		flush_cache_page(vma, address);
+		establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
+		spin_unlock(&mm->page_table_lock);
+		return 1;	/* Minor fault */
 	}
 
 	/*
@@ -978,6 +974,19 @@
 	 * Re-check the pte - we dropped the lock
 	 */
 	spin_lock(&mm->page_table_lock);
+	/*
+	 * Keep the page pinned until we return runnable, to prevent
+	 * another thread from skipping the break_cow path, so we are
+	 * sure the pte_same check below also implies that the
+	 * _contents_ of the old_page did not change under us (not
+	 * only that the pagetable is the same).
+	 *
+	 * Since we hold the page_table_lock here, if the pte is the
+	 * same it means we are still holding an additional reference
+	 * on the old_page, so we can safely page_cache_release(old_page)
+	 * before the "pte_same == true" path.
+	 */
+	page_cache_release(old_page);
 	if (pte_same(*page_table, pte)) {
 		if (PageReserved(old_page))
 			++mm->rss;
@@ -989,7 +998,6 @@
 	}
 	spin_unlock(&mm->page_table_lock);
 	page_cache_release(new_page);
-	page_cache_release(old_page);
 	return 1;	/* Minor fault */
 
 bad_wp_page:
@@ -1142,9 +1150,8 @@
 		ret = 2;
 	}
 
-	mark_page_accessed(page);
-
-	lock_page(page);
+	if (!Page_Uptodate(page))
+		wait_on_page(page);
 
 	/*
 	 * Back out if somebody else faulted in this pte while we
@@ -1153,7 +1160,6 @@
 	spin_lock(&mm->page_table_lock);
 	if (!pte_same(*page_table, orig_pte)) {
 		spin_unlock(&mm->page_table_lock);
-		unlock_page(page);
 		page_cache_release(page);
 		return 1;
 	}
@@ -1161,14 +1167,15 @@
 	/* The page isn't present yet, go ahead with the fault. */
 		
 	swap_free(entry);
-	if (vm_swap_full())
-		remove_exclusive_swap_page(page);
-
 	mm->rss++;
 	pte = mk_pte(page, vma->vm_page_prot);
-	if (write_access && can_share_swap_page(page))
-		pte = pte_mkdirty(pte_mkwrite(pte));
-	unlock_page(page);
+	if (make_exclusive_page(page, write_access)) {
+		if (write_access)
+			pte = pte_mkdirty(pte);
+		if (vma->vm_flags & VM_WRITE)
+			pte = pte_mkwrite(pte);
+	}
+	mark_page_accessed(page);
 
 	flush_page_to_ram(page);
 	flush_icache_page(vma, page);
@@ -1206,15 +1213,14 @@
 
 		spin_lock(&mm->page_table_lock);
 		if (!pte_none(*page_table)) {
-			page_cache_release(page);
 			spin_unlock(&mm->page_table_lock);
+			page_cache_release(page);
 			return 1;
 		}
 		mm->rss++;
 		flush_page_to_ram(page);
 		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 		lru_cache_add(page);
-		mark_page_accessed(page);
 	}
 
 	set_pte(page_table, entry);
@@ -1293,9 +1299,9 @@
 			entry = pte_mkwrite(pte_mkdirty(entry));
 		set_pte(page_table, entry);
 	} else {
+		spin_unlock(&mm->page_table_lock);
 		/* One of our sibling threads was faster, back out. */
 		page_cache_release(new_page);
-		spin_unlock(&mm->page_table_lock);
 		return 1;
 	}
 
diff -urN vm-ref/mm/page_alloc.c vm-rest/mm/page_alloc.c
--- vm-ref/mm/page_alloc.c	Fri Mar 29 21:29:21 2002
+++ vm-rest/mm/page_alloc.c	Fri Mar 29 21:29:41 2002
@@ -20,7 +20,6 @@
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
 #include <linux/slab.h>
-#include <linux/compiler.h>
 #include <linux/module.h>
 
 int nr_swap_pages;
@@ -38,6 +37,7 @@
 static int zone_balance_ratio[MAX_NR_ZONES] __initdata = { 128, 128, 128, };
 static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, };
 static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, };
+static int lower_zone_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 };
 
 int vm_gfp_debug = 0;
 
@@ -124,7 +124,7 @@
 		BUG();
 	page->flags &= ~((1<<PG_referenced) | (1<<PG_dirty));
 
-	if (order == 0 && current->flags & PF_FREE_PAGES)
+	if (current->flags & PF_FREE_PAGES)
 		goto local_freelist;
  back_local_freelist:
 
@@ -177,12 +177,14 @@
 	return;
 
  local_freelist:
-	if ((current->local_page.page) ||
-	    !memclass(page_zone(page), current->local_page.classzone) ||
+	if ((current->local_pages.nr && !current->local_pages.order) ||
+	    !memclass(page_zone(page), current->local_pages.classzone) ||
 	    in_interrupt())
 		goto back_local_freelist;
 
-	current->local_page.page = page;
+	list_add(&page->list, &current->local_pages.list);
+	page->index = order;
+	current->local_pages.nr++;
 }
 
 #define MARK_USED(index, order, area) \
@@ -267,75 +269,101 @@
 static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask, unsigned int order, int * freed)
 {
 	struct page * page = NULL;
-	int __freed = 0;
+	int __freed;
 
 	if (in_interrupt())
 		BUG();
 
-	if (current->local_page.page)
-		BUG();
-	current->local_page.classzone = classzone;
+	current->local_pages.order = order;
+	current->local_pages.classzone = classzone;
 	current->flags |= PF_MEMALLOC | PF_FREE_PAGES;
 
 	__freed = try_to_free_pages(classzone, gfp_mask, order);
 
 	current->flags &= ~(PF_MEMALLOC | PF_FREE_PAGES);
 
-	if (current->local_page.page) {
-		page = current->local_page.page;
-		current->local_page.page = NULL;
-
-		if (order != 0) {
-			/* The local page won't suit */
-			__free_pages_ok(page, 0);
-			page = NULL;
-			goto out;
+	if (current->local_pages.nr) {
+		struct list_head * entry, * local_pages;
+		struct page * tmp;
+		int nr_pages;
+
+		local_pages = &current->local_pages.list;
+
+		if (likely(__freed)) {
+			/* pick from the last inserted so we're lifo */
+			entry = local_pages->next;
+			do {
+				tmp = list_entry(entry, struct page, list);
+				if (!memclass(page_zone(tmp), classzone))
+					BUG();
+				if (tmp->index == order) {
+					list_del(entry);
+					current->local_pages.nr--;
+					set_page_count(tmp, 1);
+					page = tmp;
+
+					if (page->buffers)
+						BUG();
+					if (page->mapping)
+						BUG();
+					if (!VALID_PAGE(page))
+						BUG();
+					if (PageSwapCache(page))
+						BUG();
+					if (PageLocked(page))
+						BUG();
+					if (PageLRU(page))
+						BUG();
+					if (PageActive(page))
+						BUG();
+					if (PageDirty(page))
+						BUG();
+
+					break;
+				}
+			} while ((entry = entry->next) != local_pages);
 		}
-		if (!memclass(page_zone(page), classzone))
-			BUG();
-		set_page_count(page, 1);
-		if (page->buffers)
-			BUG();
-		if (page->mapping)
-			BUG();
-		if (!VALID_PAGE(page))
-			BUG();
-		if (PageSwapCache(page))
-			BUG();
-		if (PageLocked(page))
-			BUG();
-		if (PageLRU(page))
-			BUG();
-		if (PageActive(page))
-			BUG();
-		if (PageDirty(page))
-			BUG();
+
+		nr_pages = current->local_pages.nr;
+		/* free in reverse order so that the global order will be lifo */
+		while ((entry = local_pages->prev) != local_pages) {
+			list_del(entry);
+			tmp = list_entry(entry, struct page, list);
+			__free_pages_ok(tmp, tmp->index);
+			if (!nr_pages--)
+				BUG();
+		}
+		current->local_pages.nr = 0;
 	}
-out:
 	*freed = __freed;
 	return page;
 }
 
+static inline unsigned long zone_free_pages(zone_t * zone, unsigned int order)
+{
+	long free = zone->free_pages - (1UL << order);
+	return free >= 0 ? free : 0;
+}
+
 /*
  * This is the 'heart' of the zoned buddy allocator:
  */
 struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist)
 {
-	unsigned long min;
 	zone_t **zone, * classzone;
 	struct page * page;
-	int freed;
+	int freed, class_idx;
 
 	zone = zonelist->zones;
 	classzone = *zone;
-	min = 1UL << order;
+	class_idx = zone_idx(classzone);
+
 	for (;;) {
 		zone_t *z = *(zone++);
 		if (!z)
 			break;
 
-		min += z->pages_low;
-		if (z->free_pages > min) {
+		if (zone_free_pages(z, order) > z->watermarks[class_idx].low) {
 			page = rmqueue(z, order);
 			if (page)
 				return page;
@@ -348,18 +376,16 @@
 		wake_up_interruptible(&kswapd_wait);
 
 	zone = zonelist->zones;
-	min = 1UL << order;
 	for (;;) {
-		unsigned long local_min;
+		unsigned long min;
 		zone_t *z = *(zone++);
 		if (!z)
 			break;
 
-		local_min = z->pages_min;
+		min = z->watermarks[class_idx].min;
 		if (!(gfp_mask & __GFP_WAIT))
-			local_min >>= 2;
-		min += local_min;
-		if (z->free_pages > min) {
+			min >>= 2;
+		if (zone_free_pages(z, order) > min) {
 			page = rmqueue(z, order);
 			if (page)
 				return page;
@@ -368,8 +394,7 @@
 
 	/* here we're in the low on memory slow path */
 
-rebalance:
-	if (current->flags & PF_MEMALLOC) {
+	if (current->flags & PF_MEMALLOC && !in_interrupt()) {
 		zone = zonelist->zones;
 		for (;;) {
 			zone_t *z = *(zone++);
@@ -385,36 +410,51 @@
 
 	/* Atomic allocations - we can't balance anything */
 	if (!(gfp_mask & __GFP_WAIT))
-		return NULL;
+		goto out;
 
+ rebalance:
 	page = balance_classzone(classzone, gfp_mask, order, &freed);
 	if (page)
 		return page;
 
 	zone = zonelist->zones;
-	min = 1UL << order;
-	for (;;) {
-		zone_t *z = *(zone++);
-		if (!z)
-			break;
+	if (likely(freed)) {
+		for (;;) {
+			zone_t *z = *(zone++);
+			if (!z)
+				break;
 
-		min += z->pages_min;
-		if (z->free_pages > min) {
-			page = rmqueue(z, order);
-			if (page)
-				return page;
+			if (zone_free_pages(z, order) > z->watermarks[class_idx].min) {
+				page = rmqueue(z, order);
+				if (page)
+					return page;
+			}
 		}
-	}
+		goto rebalance;
+	} else {
+		/* 
+		 * Check whether some other task was killed in the meantime;
+		 * in that case we can let the allocation succeed.
+		 */
+		for (;;) {
+			zone_t *z = *(zone++);
+			if (!z)
+				break;
 
-	/* Don't let big-order allocations loop */
-	if (order > 3)
-		return NULL;
+			if (zone_free_pages(z, order) > z->watermarks[class_idx].high) {
+				page = rmqueue(z, order);
+				if (page)
+					return page;
+			}
+		}
+	}
 
-	/* Yield for kswapd, and try again */
-	current->policy |= SCHED_YIELD;
-	__set_current_state(TASK_RUNNING);
-	schedule();
-	goto rebalance;
+ out:
+	printk(KERN_NOTICE "__alloc_pages: %u-order allocation failed (gfp=0x%x/%i)\n",
+	       order, gfp_mask, !!(current->flags & PF_MEMALLOC));
+	if (unlikely(vm_gfp_debug))
+		show_stack(NULL);
+	return NULL;
 }
 
 /*
@@ -480,18 +520,25 @@
 {
 	pg_data_t *pgdat = pgdat_list;
 	unsigned int sum = 0;
+	zonelist_t *zonelist;
+	zone_t **zonep, *zone;
 
 	do {
-		zonelist_t *zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
-		zone_t **zonep = zonelist->zones;
-		zone_t *zone;
+		int class_idx;
+		zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
+		zonep = zonelist->zones;
+		zone = *zonep;
+		class_idx = zone_idx(zone);
 
-		for (zone = *zonep++; zone; zone = *zonep++) {
-			unsigned long size = zone->size;
-			unsigned long high = zone->pages_high;
-			if (size > high)
-				sum += size - high;
-		}
+		sum += zone->nr_cache_pages;
+		do {
+			long free = zone->free_pages - zone->watermarks[class_idx].high;
+			zonep++;
+			zone = *zonep;
+			if (free <= 0)
+				continue;
+			sum += free;
+		} while (zone);
 
 		pgdat = pgdat->node_next;
 	} while (pgdat);
@@ -513,6 +560,41 @@
 }
 #endif
 
+/*
+ * A non-zero return value means there is plenty of RAM truly "free"
+ * (note: not just sitting in cache!), so the caller knows it can
+ * allocate some memory to do some more aggressive (and possibly
+ * wasteful) readahead. The state of the memory should be rechecked
+ * after every few pages allocated while doing this aggressive
+ * readahead.
+ *
+ * The gfp_mask parameter specifies which kind of memory the
+ * readahead data would be allocated from.
+ */
+int start_aggressive_readahead(unsigned int gfp_mask)
+{
+	pg_data_t *pgdat = pgdat_list;
+	zonelist_t *zonelist;
+	zone_t **zonep, *zone;
+	int ret = 0;
+
+	do {
+		int class_idx;
+		zonelist = pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK);
+		zonep = zonelist->zones;
+		zone = *(zonep++);
+		class_idx = zone_idx(zone);
+
+		for (; zone; zone = *(zonep++))
+			if (zone->free_pages > zone->watermarks[class_idx].high * 2)
+				ret = 1;
+
+		pgdat = pgdat->node_next;
+	} while (pgdat);
+
+	return ret;
+}
+
 int try_to_free_pages_nozone(unsigned int gfp_mask)
 {
 	pg_data_t *pgdat = pgdat_list;
@@ -558,13 +640,9 @@
 		zone_t *zone;
 		for (zone = tmpdat->node_zones;
 			       	zone < tmpdat->node_zones + MAX_NR_ZONES; zone++)
-			printk("Zone:%s freepages:%6lukB min:%6lukB low:%6lukB " 
-				       "high:%6lukB\n", 
+			printk("Zone:%s freepages:%6lukB\n", 
 					zone->name,
-					K(zone->free_pages),
-					K(zone->pages_min),
-					K(zone->pages_low),
-					K(zone->pages_high));
+					K(zone->free_pages));
 			
 		tmpdat = tmpdat->node_next;
 	}
@@ -671,33 +749,45 @@
  */
 #define PAGES_PER_WAITQUEUE	256
 
-static inline unsigned long wait_table_size(unsigned long pages)
+static inline unsigned long wait_table_size(unsigned long pages, unsigned long * shift)
 {
 	unsigned long size = 1;
+	unsigned long __shift = 0;
 
 	pages /= PAGES_PER_WAITQUEUE;
 
-	while (size < pages)
+	while (size < pages) {
 		size <<= 1;
+		__shift++;
+	}
 
 	/*
-	 * Once we have dozens or even hundreds of threads sleeping
-	 * on IO we've got bigger problems than wait queue collision.
-	 * Limit the size of the wait table to a reasonable size.
+	 * The usage pattern of the queues depends mostly on the I/O,
+	 * not so much on the RAM size of the machine, so make sure the
+	 * array is large enough on low-memory nodes too.
 	 */
-	size = min(size, 4096UL);
+	size = max(size, 256UL);
+	*shift = max(__shift, 8UL);
 
 	return size;
 }
 
 /*
- * This is an integer logarithm so that shifts can be used later
- * to extract the more random high bits from the multiplicative
- * hash function before the remainder is taken.
+ * The per-node waitqueue mechanism uses hashed waitqueues
+ * per zone.
  */
-static inline unsigned long wait_table_bits(unsigned long size)
+static inline void wait_table_init(pg_data_t *pgdat)
 {
-	return ffz(~size);
+	unsigned long shift, size, i;
+
+	size = wait_table_size(pgdat->node_size, &shift);
+
+	pgdat->wait_table.size = size;
+	pgdat->wait_table.shift = shift;
+	pgdat->wait_table.head = (wait_queue_head_t *) alloc_bootmem_node(pgdat, size * sizeof(wait_queue_head_t));
+
+	for(i = 0; i < size; i++)
+		init_waitqueue_head(pgdat->wait_table.head + i);
 }
 
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
@@ -751,11 +841,14 @@
 	pgdat->node_start_mapnr = (lmem_map - mem_map);
 	pgdat->nr_zones = 0;
 
+	wait_table_init(pgdat);
+
 	offset = lmem_map - mem_map;	
 	for (j = 0; j < MAX_NR_ZONES; j++) {
 		zone_t *zone = pgdat->node_zones + j;
 		unsigned long mask;
 		unsigned long size, realsize;
+		int idx;
 
 		zone_table[nid * MAX_NR_ZONES + j] = zone;
 		realsize = size = zones_size[j];
@@ -764,28 +857,16 @@
 
 		printk("zone(%lu): %lu pages.\n", j, size);
 		zone->size = size;
+		zone->realsize = realsize;
 		zone->name = zone_names[j];
 		zone->lock = SPIN_LOCK_UNLOCKED;
 		zone->zone_pgdat = pgdat;
 		zone->free_pages = 0;
 		zone->need_balance = 0;
+		zone->nr_active_pages = zone->nr_inactive_pages = 0;
 		if (!size)
 			continue;
 
-		/*
-		 * The per-page waitqueue mechanism uses hashed waitqueues
-		 * per zone.
-		 */
-		zone->wait_table_size = wait_table_size(size);
-		zone->wait_table_shift =
-			BITS_PER_LONG - wait_table_bits(zone->wait_table_size);
-		zone->wait_table = (wait_queue_head_t *)
-			alloc_bootmem_node(pgdat, zone->wait_table_size
-						* sizeof(wait_queue_head_t));
-
-		for(i = 0; i < zone->wait_table_size; ++i)
-			init_waitqueue_head(zone->wait_table + i);
-
 		pgdat->nr_zones = j+1;
 
 		mask = (realsize / zone_balance_ratio[j]);
@@ -793,9 +874,29 @@
 			mask = zone_balance_min[j];
 		else if (mask > zone_balance_max[j])
 			mask = zone_balance_max[j];
-		zone->pages_min = mask;
-		zone->pages_low = mask*2;
-		zone->pages_high = mask*3;
+		zone->watermarks[j].min = mask;
+		zone->watermarks[j].low = mask*2;
+		zone->watermarks[j].high = mask*3;
+		/* now set the watermarks of the lower zones in the "j" classzone */
+		for (idx = j-1; idx >= 0; idx--) {
+			zone_t * lower_zone = pgdat->node_zones + idx;
+			unsigned long lower_zone_reserve;
+			if (!lower_zone->size)
+				continue;
+
+			mask = lower_zone->watermarks[idx].min;
+			lower_zone->watermarks[j].min = mask;
+			lower_zone->watermarks[j].low = mask*2;
+			lower_zone->watermarks[j].high = mask*3;
+
+			/* now the trickier part */
+			lower_zone_reserve = realsize / lower_zone_reserve_ratio[idx];
+			lower_zone->watermarks[j].min += lower_zone_reserve;
+			lower_zone->watermarks[j].low += lower_zone_reserve;
+			lower_zone->watermarks[j].high += lower_zone_reserve;
+
+			realsize += lower_zone->realsize;
+		}
 
 		zone->zone_mem_map = mem_map + offset;
 		zone->zone_start_mapnr = offset;
@@ -879,3 +980,16 @@
 }
 
 __setup("memfrac=", setup_mem_frac);
+
+static int __init setup_lower_zone_reserve(char *str)
+{
+	int j = 0;
+
+	while (get_option(&str, &lower_zone_reserve_ratio[j++]) == 2);
+	printk("setup_lower_zone_reserve: ");
+	for (j = 0; j < MAX_NR_ZONES-1; j++) printk("%d  ", lower_zone_reserve_ratio[j]);
+	printk("\n");
+	return 1;
+}
+
+__setup("lower_zone_reserve=", setup_lower_zone_reserve);
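[To see what the classzone watermark setup above actually computes, here is a small userspace model re-running the loop with the zone_balance_ratio (128), the 20..255 clamps and the default lower_zone_reserve_ratio = { 256, 32 } from this patch; the zone sizes (roughly 16MB DMA, 880MB normal, 1GB highmem) are assumptions for illustration. With these numbers the normal zone keeps about 8192 extra pages (32MB) free for HIGHMEM-classzone allocations.]

/* Userspace model of the classzone watermark initialisation above.  The
 * ratios and clamps come from the patch; the zone sizes are assumptions. */
#include <stdio.h>

enum { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

static const char *names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
static unsigned long zone_realsize[MAX_NR_ZONES] = { 4096, 225280, 262144 };
static int lower_zone_reserve_ratio[MAX_NR_ZONES - 1] = { 256, 32 };
static unsigned long wmark_min[MAX_NR_ZONES][MAX_NR_ZONES]; /* [zone][classzone] */

int main(void)
{
	for (int j = 0; j < MAX_NR_ZONES; j++) {
		unsigned long mask = zone_realsize[j] / 128;   /* zone_balance_ratio */
		if (mask < 20)
			mask = 20;                             /* zone_balance_min */
		else if (mask > 255)
			mask = 255;                            /* zone_balance_max */
		wmark_min[j][j] = mask;

		/* accumulate the higher zones' size while walking down, like the
		 * `realsize += lower_zone->realsize' statement in the patch */
		unsigned long rsize = zone_realsize[j];
		for (int idx = j - 1; idx >= 0; idx--) {
			unsigned long reserve = rsize / lower_zone_reserve_ratio[idx];
			wmark_min[idx][j] = wmark_min[idx][idx] + reserve;
			rsize += zone_realsize[idx];
		}
	}

	for (int z = 0; z < MAX_NR_ZONES; z++)
		for (int c = z; c < MAX_NR_ZONES; c++)
			printf("%-7s zone, %-7s classzone: min watermark %5lu pages\n",
			       names[z], names[c], wmark_min[z][c]);
	return 0;
}
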
diff -urN vm-ref/mm/slab.c vm-rest/mm/slab.c
--- vm-ref/mm/slab.c	Fri Mar 29 21:29:21 2002
+++ vm-rest/mm/slab.c	Fri Mar 29 21:29:38 2002
@@ -72,7 +72,6 @@
 #include	<linux/slab.h>
 #include	<linux/interrupt.h>
 #include	<linux/init.h>
-#include	<linux/compiler.h>
 #include	<asm/uaccess.h>
 
 /*
diff -urN vm-ref/mm/swapfile.c vm-rest/mm/swapfile.c
--- vm-ref/mm/swapfile.c	Mon Feb 25 22:05:09 2002
+++ vm-rest/mm/swapfile.c	Fri Mar 29 21:29:38 2002
@@ -14,7 +14,6 @@
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
 #include <linux/shm.h>
-#include <linux/compiler.h>
 
 #include <asm/pgtable.h>
 
@@ -227,6 +226,7 @@
  * Check if we're the only user of a swap page,
  * when the page is locked.
  */
+static int FASTCALL(exclusive_swap_page(struct page *page));
 static int exclusive_swap_page(struct page *page)
 {
 	int retval = 0;
@@ -240,12 +240,13 @@
 		if (p->swap_map[SWP_OFFSET(entry)] == 1) {
 			/* Recheck the page count with the pagecache lock held.. */
 			spin_lock(&pagecache_lock);
-			if (page_count(page) - !!page->buffers == 2)
+			if (PageSwapCache(page) && page_count(page) - !!page->buffers == 2)
 				retval = 1;
 			spin_unlock(&pagecache_lock);
 		}
 		swap_info_put(p);
 	}
+
 	return retval;
 }
 
@@ -257,21 +258,42 @@
  * work, but we opportunistically check whether
  * we need to get all the locks first..
  */
-int can_share_swap_page(struct page *page)
+int make_exclusive_page(struct page *page, int write)
 {
 	int retval = 0;
 
-	if (!PageLocked(page))
-		BUG();
 	switch (page_count(page)) {
 	case 3:
 		if (!page->buffers)
 			break;
 		/* Fallthrough */
 	case 2:
+		/* racy fastpath check */
 		if (!PageSwapCache(page))
 			break;
-		retval = exclusive_swap_page(page);
+
+		if ((!write && !vm_swap_full()) || TryLockPage(page)) {
+			/*
+			 * Don't remove the page from the swapcache if:
+			 * - it was a read fault and...
+			 * - the swap isn't full
+			 * or if
+			 * - we failed acquiring the page lock
+			 *
+			 * NOTE: if we failed to acquire the lock we cannot remove
+			 * the page from the swapcache, but we can still safely take
+			 * over the page if it's exclusive; see the swapcache check in
+			 * the innermost critical section of exclusive_swap_page().
+			 */
+			retval = exclusive_swap_page(page);
+		} else {
+			/*
+			 * Here we've the page lock acquired and we're asked
+			 * to try to drop this page from the swapcache.
+			 */
+			retval = remove_exclusive_swap_page(page);
+			unlock_page(page);
+		}
 		break;
 	case 1:
 		if (PageReserved(page))
@@ -300,7 +322,7 @@
 
 	entry.val = page->index;
 	p = swap_info_get(entry);
-	if (!p)
+	if (unlikely(!p))
 		return 0;
 
 	/* Is the only swap cache user the cache itself? */
@@ -309,7 +331,11 @@
 		/* Recheck the page count with the pagecache lock held.. */
 		spin_lock(&pagecache_lock);
 		if (page_count(page) - !!page->buffers == 2) {
+			if (page->buffers && !try_to_free_buffers(page, 0))
+				/* an anonymous page cannot have page->buffers set */
+				BUG();
 			__delete_from_swap_cache(page);
+			swap_entry_free(p, SWP_OFFSET(entry));
 			SetPageDirty(page);
 			retval = 1;
 		}
@@ -317,11 +343,8 @@
 	}
 	swap_info_put(p);
 
-	if (retval) {
-		block_flushpage(page, 0);
-		swap_free(entry);
+	if (retval)
 		page_cache_release(page);
-	}
 
 	return retval;
 }
@@ -343,11 +366,7 @@
 	}
 	if (page) {
 		page_cache_get(page);
-		/* Only cache user (+us), or swap space full? Free it! */
-		if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
-			delete_from_swap_cache(page);
-			SetPageDirty(page);
-		}
+		remove_exclusive_swap_page(page);
 		UnlockPage(page);
 		page_cache_release(page);
 	}
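[For the page_count()==2/3 swapcache case, the branch structure of make_exclusive_page() above boils down to three outcomes. The sketch below is a userspace decision model with the kernel predicates (PageSwapCache, vm_swap_full, TryLockPage) reduced to plain flags; it only documents which path is taken, not the real locking.]

/* Userspace decision model for make_exclusive_page(), illustration only. */
#include <stdio.h>

static const char *make_exclusive_path(int in_swapcache, int write_fault,
					int swap_full, int lock_acquired)
{
	if (!in_swapcache)
		return "not a swapcache page: nothing to make exclusive here";
	if ((!write_fault && !swap_full) || !lock_acquired)
		/* keep the swapcache entry, only test exclusivity */
		return "exclusive_swap_page(): test only, page stays in swapcache";
	/* we hold the page lock: also try to drop the swapcache entry */
	return "remove_exclusive_swap_page(), then unlock_page()";
}

int main(void)
{
	printf("read fault, swap not full : %s\n", make_exclusive_path(1, 0, 0, 1));
	printf("write fault, lock acquired: %s\n", make_exclusive_path(1, 1, 0, 1));
	printf("write fault, lock missed  : %s\n", make_exclusive_path(1, 1, 0, 0));
	return 0;
}
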
diff -urN vm-ref/mm/vmscan.c vm-rest/mm/vmscan.c
--- vm-ref/mm/vmscan.c	Fri Mar 29 21:29:21 2002
+++ vm-rest/mm/vmscan.c	Fri Mar 29 21:29:38 2002
@@ -20,7 +20,6 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/file.h>
-#include <linux/compiler.h>
 
 #include <asm/pgalloc.h>
 
@@ -282,6 +281,7 @@
 {
 	unsigned long address;
 	struct vm_area_struct* vma;
+	int tlb_flush = 0;
 
 	/*
 	 * Find the proper vm-area after freezing the vma chain 
@@ -296,6 +296,7 @@
 	}
 	vma = find_vma(mm, address);
 	if (vma) {
+		tlb_flush = 1;
 		if (address < vma->vm_start)
 			address = vma->vm_start;
 
@@ -314,6 +315,8 @@
 
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
+	if (tlb_flush)
+		flush_tlb_mm(mm);
 	return count;
 }
 
@@ -733,11 +736,12 @@
 
 static int check_classzone_need_balance(zone_t * classzone)
 {
-	zone_t * first_classzone;
+	zone_t * first_zone;
+	int class_idx = zone_idx(classzone);
 
-	first_classzone = classzone->zone_pgdat->node_zones;
-	while (classzone >= first_classzone) {
-		if (classzone->free_pages > classzone->pages_high)
+	first_zone = classzone->zone_pgdat->node_zones;
+	while (classzone >= first_zone) {
+		if (classzone->free_pages > classzone->watermarks[class_idx].high)
 			return 0;
 		classzone--;
 	}
@@ -753,12 +757,12 @@
 		zone = pgdat->node_zones + i;
 		if (unlikely(current->need_resched))
 			schedule();
-		if (!zone->need_balance)
+		if (!zone->need_balance || !zone->size)
 			continue;
 		if (!try_to_free_pages(zone, GFP_KSWAPD, 0)) {
 			zone->need_balance = 0;
 			__set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(HZ);
+			schedule_timeout(HZ*5);
 			continue;
 		}
 		if (check_classzone_need_balance(zone))
@@ -791,7 +795,7 @@
 
 	for (i = pgdat->nr_zones-1; i >= 0; i--) {
 		zone = pgdat->node_zones + i;
-		if (!zone->need_balance)
+		if (!zone->need_balance || !zone->size)
 			continue;
 		return 0;
 	}