diff -urN test.00/mm/filemap.c test.05/mm/filemap.c
--- test.00/mm/filemap.c	Thu Feb  7 17:26:59 2002
+++ test.05/mm/filemap.c	Thu Feb  7 17:53:04 2002
@@ -30,6 +30,8 @@
 #include <asm/mman.h>
 
 #include <linux/highmem.h>
+#include <linux/worktodo.h>
+#include <linux/iobuf.h>
 
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
@@ -1389,7 +1391,7 @@
 			if (TryLockPage(page)) {
 				if (Page_Uptodate(page))
 					goto page_ok;
-				printk("page_not_up_to_date: -EAGAIN\n");
+				pr_debug("page_not_up_to_date: -EAGAIN\n");
 				desc->error = -EWOULDBLOCKIO;
 				page_cache_release(page);
 				break;
@@ -3127,3 +3129,616 @@
 		panic("Failed to allocate page hash table\n");
 	memset((void *)page_hash_table, 0, PAGE_HASH_SIZE * sizeof(struct page *));
 }
+
+/* address_space_map
+ *	Maps a series of pages from the page cache into the given array.
+ *	Pages already present are returned with an extra reference held;
+ *	missing pages are allocated, added to the page cache (and hence
+ *	returned locked) and also recorded in new_pages.  Returns the
+ *	number of pages placed in pages[], or -ENOMEM if none could be
+ *	obtained.
+ */
+static int address_space_map(struct address_space *as, unsigned long index,
+		int nr, struct page **pages,
+		int *nr_newp, struct page **new_pages)
+{
+	struct page *cached_page = NULL;
+	int nr_new = 0;
+	int ret;
+
+	if (unlikely(nr <= 0)) {
+		*nr_newp = nr_new;
+		return 0;
+	}
+
+	ret = 0;
+
+	spin_lock(&pagecache_lock);
+
+	while (nr > 0) {
+		struct page **hash = page_hash(as, index);
+		struct page *page;
+
+		page = __find_page_nolock(as, index, *hash);
+		if (page) {
+			page_cache_get(page);
+got_page:
+			pages[ret++] = page;
+			index++;
+			nr--;
+			continue;
+		}
+
+		if (cached_page) {
+			__add_to_page_cache(cached_page, as, index, hash);
+			nr_new++;
+			*new_pages++ = page = cached_page;
+			cached_page = NULL;
+			goto got_page;
+		}
+		spin_unlock(&pagecache_lock);
+
+		cached_page = page_cache_alloc(as);
+		if (!cached_page)
+			goto out;
+
+		/* Okay, we now have an allocated page.  Retry
+		 * the search and add. */
+		spin_lock(&pagecache_lock);
+	}
+
+	spin_unlock(&pagecache_lock);
+
+out:
+	if (cached_page)
+		page_cache_release(cached_page);
+
+	*nr_newp = nr_new;
+	return ret ? ret : -ENOMEM;
+}
+
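+/*
+ * struct iodesc carries the state of one in-flight kvec read or write
+ * while it is driven, possibly across several blocking points, by the
+ * worktodo (wtd) machinery.  It is allocated in generic_file_rw_kvec()
+ * and freed by __iodesc_free() once the request completes.
+ */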
+struct iodesc {
+	struct worktodo	wtd;
+
+	struct page	*good_page;	/* page being locked / brought uptodate */
+	int		good_idx;	/* reads: pages below this are uptodate */
+	int		err;
+	int		did_read;
+	int		rw;
+
+	struct page	**pages;
+	struct page	**new_pages;
+	struct page	**cur_pagep;
+	int		nr_pages;
+	int		nr_new_pages;
+
+	struct address_space *as;
+	struct file	*file;
+	kvec_cb_t	cb;
+
+	size_t		size;
+	unsigned long	transferred;
+	unsigned	offset;
+	struct kveclet	*veclet;
+
+	struct kvec_dst	src;
+
+	int		sync;
+
+#define READDESC_NR_DEF	3
+	struct page *def_pages[READDESC_NR_DEF];
+	struct page *def_new_pages[READDESC_NR_DEF];
+};
+
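+/* Drop the references taken on the page cache pages (unlocking them
+ * first if requested), free any heap-allocated page arrays and the
+ * iodesc itself, then run the caller's completion callback with either
+ * the number of bytes transferred or the error code.
+ */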
+static void __iodesc_free(struct iodesc *io, int unlock)
+{
+	kvec_cb_t cb;
+	ssize_t res;
+
+	if (unlock) {
+		unsigned i;
+		for (i=0; i<io->nr_pages; i++) {
+			struct page *page = io->pages[i];
+			UnlockPage(page);
+			page_cache_release(page);
+		}
+	} else {
+		unsigned i;
+		for (i=0; i<io->nr_pages; i++)
+			page_cache_release(io->pages[i]);
+	}
+
+	if (io->new_pages != io->def_new_pages)
+		kfree(io->new_pages);
+	if (io->pages != io->def_pages)
+		kfree(io->pages);
+
+	cb = io->cb;
+	res = io->transferred ? io->transferred : io->err;
+	kfree(io);
+
+	cb.fn(cb.data, cb.vec, res);
+}
+
+/* By the time this function is called, all of the pages prior to
+ * the current good_idx have been released appropriately.  The remaining
+ * duties are to release any remaining pages and to honour O_SYNC.
+ */
+static void __iodesc_finish_write(struct iodesc *io)
+{
+	pr_debug("__iodesc_finish_write(%p)\n", io);
+
+	__iodesc_free(io, WRITE == io->rw);
+}
+
+/* This is mostly ripped from generic_file_write */
+static int __iodesc_write_page(struct iodesc *io, struct page *page)
+{
+	char *kaddr = kmap(page);
+	unsigned long bytes;
+	unsigned long offset;
+	long status;
+	int done = 0;
+
+	offset = io->offset;
+	kaddr += offset;
+
+	bytes = PAGE_CACHE_SIZE - offset;
+	if (io->size < bytes)
+		bytes = io->size;
+
+	pr_debug("__iodesc_write_page(%p (%lu), %lu %lu)\n",
+		 page, page->index, offset, bytes);
+
+	io->err = io->as->a_ops->prepare_write(io->file, page,
+						offset, offset + bytes);
+	if (unlikely(io->err)) {
+		printk("prepare_write: %d\n", io->err);
+		kunmap(page);
+		return 1;
+	}
+
+	kvec_dst_map(&io->src);
+	memcpy_from_kvec_dst(kaddr, &io->src, bytes);
+	kvec_dst_unmap(&io->src);	/* commit_write may block */
+
+	flush_dcache_page(page);
+	status = io->as->a_ops->commit_write(io->file, page,
+						offset, offset+bytes);
+
+	/* We don't handle short writes */
+	if (status > 0 && status != bytes)
+		done = 1;
+
+	if (!status)
+		status = bytes;
+	else
+		printk("commit_write: %ld\n", status);
+
+	if (likely(status > 0)) {
+		io->transferred += status;
+		io->size -= status;
+		io->offset = (offset + status) & (PAGE_CACHE_SIZE - 1);
+
+		if (io->offset)
+			done = 1;
+	} else {
+		io->err = status;
+		done = 1;
+	}
+
+	kunmap(page);
+	return done;
+}
+
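+/* O_SYNC completion for writes: walk the buffers of each remaining page
+ * and use the worktodo callback to wait for any buffer that is still
+ * locked (wtd_wait_on_buffer() returning zero appears to mean the wait
+ * was queued and this function will be re-entered later).  A buffer
+ * that was submitted but did not come back uptodate fails the whole
+ * request with -EIO.
+ */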
+void __iodesc_sync_wait_page(void *data)
+{
+	struct iodesc *io = data;
+
+	do {
+		struct buffer_head *bh, *head = io->pages[io->good_idx]->buffers;
+
+		if (!head)
+			continue;
+
+		bh = head;
+		do {
+			if (buffer_locked(bh)) {
+				pr_debug("waiting on bh=%p io=%p\n", bh, io);
+				if (!wtd_wait_on_buffer(&io->wtd, bh))
+					return;
+			}
+			if (buffer_req(bh) && !buffer_uptodate(bh)) {
+				pr_debug("io err bh=%p (%p)\n", bh, io);
+				io->err = -EIO;
+				break;
+			}
+		} while ((bh = bh->b_this_page) != head);
+	} while (!io->err && ++io->good_idx < io->nr_pages);
+
+	pr_debug("finish_write(%p)\n", io);
+	__iodesc_finish_write(io);
+}
+
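+/* Copy the caller's data into the locked page cache pages via
+ * prepare_write/commit_write.  i_sem, taken at submission time in
+ * generic_file_rw_kvec(), is released here.  For O_SYNC files the
+ * dirty pages are then written out and waited on before completion.
+ */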
+static void __iodesc_do_write(void *data)
+{
+	struct iodesc *io = data;
+	unsigned i;
+
+	up(&io->file->f_dentry->d_inode->i_sem);
+
+	for (i=0; i<io->nr_pages; i++) {
+		if (__iodesc_write_page(io, io->pages[i]))
+			break;
+	}
+
+	if (io->sync) {
+		io->good_idx = 0;
+
+		pr_debug("writing out pages(%p)\n", io);
+		for (i=0; i<io->nr_pages; i++) {
+			if (io->pages[i]->buffers)
+				writeout_one_page(io->pages[i]);
+		}
+
+		pr_debug("calling __iodesc_sync_wait_page(%p)\n", io);
+		wtd_set_action(&io->wtd, __iodesc_sync_wait_page, io);
+		__iodesc_sync_wait_page(io);
+		return;
+	}
+
+	__iodesc_finish_write(io);
+}
+
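+/* Lock each target page before the data is copied in.  Pages that
+ * address_space_map() freshly inserted (tracked via cur_pagep) come
+ * back already locked, so only pre-existing pages go through
+ * wtd_lock_page(), which defers the rest of the walk when the lock is
+ * not immediately available.
+ */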
+static void __iodesc_write_lock_next_page(void *data)
+{
+	struct iodesc *io = data;
+	pr_debug("__iodesc_write_lock_next_page(%p)\n", io);
+
+	while (io->good_idx < io->nr_pages) {
+		io->good_page = io->pages[io->good_idx++];
+		if (io->good_page == *io->cur_pagep)
+			io->cur_pagep++;
+		else {
+			if (!wtd_lock_page(&io->wtd, io->good_page))
+				return;
+		}
+	}
+
+	/* XXX: would calling __iodesc_do_write(io) directly be faster here? */
+	wtd_set_action(&io->wtd, __iodesc_do_write, io);
+	wtd_queue(&io->wtd);
+}
+
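+/* Write-side setup: clear the suid/sgid bits, update the timestamps,
+ * note whether O_SYNC semantics are required, and kick off the page
+ * locking state machine above.
+ */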
+static void __generic_file_write_iodesc(struct iodesc *io)
+{
+	struct inode *inode = io->file->f_dentry->d_inode;
+	time_t now = CURRENT_TIME;
+
+	remove_suid(inode);
+	if (inode->i_ctime != now || inode->i_mtime != now) {
+		inode->i_ctime = inode->i_mtime = now;
+		mark_inode_dirty_sync(inode);
+	}
+
+	wtd_set_action(&io->wtd, __iodesc_write_lock_next_page, io);
+	io->sync = !!(io->file->f_flags & O_SYNC);
+	io->good_idx = 0;
+	io->cur_pagep = io->new_pages;
+	__iodesc_write_lock_next_page(io);
+}
+
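+/* Copy the uptodate portion of the page cache pages (everything below
+ * good_idx) into the caller's kvec destination pages, advancing source
+ * and destination a page at a time, then tear the request down.
+ */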
+static void __iodesc_read_finish(struct iodesc *io)
+{
+	struct page **src_pagep;
+	char *dst_addr, *src_addr;
+	int src_off;
+	size_t size;
+	size_t valid;
+
+	struct kveclet *veclet = io->veclet;
+	struct page *dst_page = veclet->page;
+	int dst_len = veclet->length;
+	int dst_off = veclet->offset;
+
+
+	pr_debug("__iodesc_read_finish: good_idx = %d\n", io->good_idx);
+	if (io->good_idx <= 0)
+		goto no_data;
+
+	size = io->size;
+	src_off = io->offset;
+	src_pagep = io->pages;
+	src_addr = kmap(*src_pagep);
+
+	valid = (size_t)io->good_idx << PAGE_CACHE_SHIFT;
+	valid -= src_off;
+	pr_debug("size=%Zd valid=%Zd src_off=%d\n", size, valid, src_off);
+
+	if (valid < size)
+		size = valid;
+
+	dst_addr = kmap(veclet->page);
+
+	while (size > 0) {
+		int this = PAGE_CACHE_SIZE - src_off;
+		if ((PAGE_SIZE - dst_off) < this)
+			this = PAGE_SIZE - dst_off;
+		if (size < this)
+			this = size;
+		pr_debug("this=%d src_off=%d dst_off=%d dst_len=%d\n",
+			this, src_off, dst_off, dst_len);
+		memcpy(dst_addr + dst_off, src_addr + src_off, this);
+
+		src_off += this;
+		dst_off += this;
+		dst_len -= this;
+		size -= this;
+		io->transferred += this;
+		pr_debug("read_finish: this=%d transferred=%lu\n",
+			 this, io->transferred);
+
+		if (size <= 0)
+			break;
+
+		if (dst_len <= 0) {
+			kunmap(dst_page);
+			veclet++;
+			dst_page = veclet->page;
+			dst_off = veclet->offset;
+			dst_len = veclet->length;
+			dst_addr = kmap(dst_page);
+		}
+
+		if (src_off >= PAGE_SIZE) { /* FIXME: PAGE_CACHE_SIZE */
+			kunmap(*src_pagep);
+			pr_debug("page(%lu)->count = %d\n",
+				 (*src_pagep)->index,
+				 atomic_read(&(*src_pagep)->count));
+			src_pagep++;
+			src_addr = kmap(*src_pagep);
+			src_off = 0;
+		}
+	}
+	kunmap(dst_page);
+	kunmap(*src_pagep);
+no_data:
+	__iodesc_free(io, 0);
+}
+
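+/* Worktodo continuation for reads: starting at good_idx, skip pages
+ * that are already uptodate, issue a single readpage() retry for a page
+ * that is not, and give up with -EIO if the retry does not bring the
+ * page uptodate either.
+ */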
+static void __iodesc_make_uptodate(void *data)
+{
+	struct iodesc *io = data;
+	struct page *page = io->good_page;
+	int locked = 1;
+
+	pr_debug("__iodesc_make_uptodate: io=%p index=%lu\n", io, page->index);
+again:
+	while (Page_Uptodate(page)) {
+		pr_debug("page index %lu uptodate\n", page->index);
+		if (locked) {
+			UnlockPage(page);
+			locked = 0;
+		}
+		io->did_read = 0;
+		io->good_idx++;
+		if (io->good_idx >= io->nr_pages) {
+			__iodesc_read_finish(io);
+			return;
+		}
+		page = io->good_page = io->pages[io->good_idx];
+		pr_debug("__iodesc_make_uptodate: index=%lu\n", page->index);
+	}
+
+	if (!locked) {
+		if (!wtd_lock_page(&io->wtd, page))
+			return;
+		locked = 1;
+	}
+
+	if (!io->did_read) {
+		/* We haven't tried reading this page before, give it a go. */
+		pr_debug("attempting to read %lu\n", page->index);
+		io->did_read = 1;
+		locked = 0;
+		io->err = page->mapping->a_ops->readpage(io->file, page);
+		if (!io->err) {
+			if (Page_Uptodate(page))
+				goto again;
+			if (wtd_lock_page(&io->wtd, page)) {
+				locked = 1;
+				goto again;
+			}
+			return;
+		}
+	}
+
+	if (locked)
+		UnlockPage(page);
+
+	/* We've already tried reading this page.  Set err to -EIO and quit. */
+	if (!io->err)
+		io->err = -EIO;
+	__iodesc_read_finish(io);
+}
+
+static void __wtdgeneric_file_read_iodesc(void *data);
+
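+/* Read-side engine.  When called with mayblock == 0 (directly from the
+ * submission path) any step that might sleep is bounced to a worktodo
+ * context, which re-enters this function with mayblock == 1.  Otherwise
+ * start readpage() on the freshly added pages, opportunistically read
+ * any other page that is not uptodate, and then either finish at once
+ * (everything uptodate) or wait on the first non-uptodate page via
+ * __iodesc_make_uptodate().
+ */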
+static void __generic_file_read_iodesc(struct iodesc *io, int mayblock)
+{
+	int (*readpage)(struct file *, struct page *);
+	int i;
+
+	wtd_set_action(&io->wtd, __iodesc_make_uptodate, io);
+	readpage = io->as->a_ops->readpage;
+	for (i=0; i<io->nr_new_pages; i++) {
+		int ret;
+		if (!mayblock) {
+			static int zoo;
+			if (zoo++ < 5)
+				printk("read sleep\n");
+			wtd_set_action(&io->wtd, __wtdgeneric_file_read_iodesc, io);
+			wtd_queue(&io->wtd);
+			return;
+		}
+		ret = readpage(io->file, io->new_pages[i]);
+		if (ret)
+			printk(KERN_DEBUG "__generic_file_read_iodesc: readpage(%lu) = %d\n", io->new_pages[i]->index, ret);
+	}
+
+	for (i=0; i<io->nr_pages; i++) {
+		struct page *page = io->pages[i];
+		if (Page_Uptodate(page)) {
+			pr_debug("__generic_file_read_iodesc: %lu is uptodate\n", page->index);
+			continue;
+		}
+
+		if (!mayblock) {
+			static int zoo;
+			if (zoo++ < 5)
+				printk("read sleep\n");
+			wtd_set_action(&io->wtd, __wtdgeneric_file_read_iodesc, io);
+			wtd_queue(&io->wtd);
+			return;
+		}
+		if (!TryLockPage(page)) {
+			int ret = readpage(io->file, page);
+			if (ret)
+				printk(KERN_DEBUG "__generic_file_read_iodesc: readpage(%lu): %d\n", page->index, ret);
+		}
+
+		if (!Page_Uptodate(page) && io->good_idx == -1) {
+			pr_debug("first good_idx=%d (%lu)\n", i, page->index);
+			io->good_idx = i;
+			io->good_page = page;
+		}
+	}
+
+	/* Whee, all the pages are uptodate! */
+	if (!io->good_page) {
+		static int zoo;
+		if (!mayblock && zoo++ < 5)
+			printk("all uptodate\n");
+		pr_debug("all pages uptodate!\n");
+		io->good_idx = io->nr_pages;
+		__iodesc_read_finish(io);
+		return;
+	}
+
+	pr_debug("locking good_page\n");
+	if (wtd_lock_page(&io->wtd, io->good_page))
+		__iodesc_make_uptodate(io);
+	return;
+}
+
+static void __wtdgeneric_file_read_iodesc(void *data)
+{
+	struct iodesc *io = data;
+	__generic_file_read_iodesc(io, 1);
+}
+
+static int generic_file_rw_kvec(struct file *file, int rw, kvec_cb_t cb,
+			 size_t size, loff_t pos);
+
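+/* Entry points for kvec-based asynchronous file I/O.  As an
+ * illustration only (this assumes the rest of the patch series adds
+ * kvec_read/kvec_write hooks to struct file_operations; the hook and
+ * structure names below are assumptions, not defined in this file),
+ * a filesystem doing page cache I/O might hook them up as:
+ *
+ *	static struct file_operations foofs_file_operations = {
+ *		read:		generic_file_read,
+ *		write:		generic_file_write,
+ *		kvec_read:	generic_file_kvec_read,
+ *		kvec_write:	generic_file_kvec_write,
+ *	};
+ */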
+int generic_file_kvec_read(struct file *file, kvec_cb_t cb, size_t size, loff_t pos)
+{
+	return generic_file_rw_kvec(file, READ, cb, size, pos);
+}
+
+int generic_file_kvec_write(struct file *file, kvec_cb_t cb, size_t size, loff_t pos)
+{
+	return generic_file_rw_kvec(file, WRITE, cb, size, pos);
+}
+
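+/* Common submission path for both directions: clamp a read against
+ * i_size, allocate and initialise the iodesc, map/populate the page
+ * cache over the affected range, and hand off to the read or write
+ * engine.  Completion is reported through the kvec callback cb.
+ */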
+static int generic_file_rw_kvec(struct file *file, int rw, kvec_cb_t cb,
+			 size_t size, loff_t pos)
+{
+	struct inode *inode = file->f_dentry->d_inode;
+	struct address_space *as = inode->i_mapping;
+	unsigned long index;
+	unsigned long eindex;
+	unsigned long nr_pages;
+	struct iodesc *io = NULL;
+	int ret;
+
+	ret = -EINVAL;
+	if (rw != READ && rw != WRITE)
+		goto out;
+
+	ret = -ENOMEM;
+	io = kmalloc(sizeof(*io), GFP_KERNEL);
+	if (!io)
+		goto out;
+
+	memset(io, 0, sizeof(*io));
+	io->size = size;
+
+	if (READ == rw) {
+		pr_debug("pos=%Ld i_size=%Ld\n", pos, inode->i_size);
+
+		if (pos > inode->i_size)
+			size = 0;
+		else if ((pos + size) > inode->i_size)
+			size = inode->i_size - pos;
+
+		if (io->size < size)
+			size = io->size;
+		else if (size < io->size)
+			io->size = size;
+
+		pr_debug("io->size=%Zd size=%Zd\n", io->size, size);
+	}
+
+	index = pos >> PAGE_CACHE_SHIFT;
+	eindex = (pos + size - 1) >> PAGE_CACHE_SHIFT;
+	nr_pages = eindex - index + 1;
+
+	pr_debug("nr_pages: %lu\n", nr_pages);
+
+	io->good_idx = -1;
+	io->good_page = NULL;
+	io->did_read = 0;
+	io->err = 0;
+	io->rw = rw;
+	io->as = as;
+	io->offset = (unsigned long)pos & (PAGE_CACHE_SIZE - 1);
+	io->file = file;
+	io->cb = cb;
+	kvec_dst_init(&io->src, KM_USER0);
+	kvec_dst_set(&io->src, cb.vec->veclet);
+	io->veclet = cb.vec->veclet;
+	if (nr_pages < READDESC_NR_DEF) {
+		io->pages = io->def_pages;
+		io->new_pages = io->def_new_pages;
+	} else {
+		io->pages = kmalloc(sizeof(*io->pages) * (nr_pages + 1), GFP_KERNEL);
+		if (!io->pages)
+			goto out_io;
+
+		io->new_pages = kmalloc(sizeof(*io->new_pages) * (nr_pages + 1), GFP_KERNEL);
+		if (!io->new_pages)
+			goto out_pages;
+	}
+
+	/* FIXME: make the down a WTD_op */
+	if (rw == WRITE)
+		down(&io->file->f_dentry->d_inode->i_sem);
+
+	ret = address_space_map(as, index, nr_pages, io->pages,
+			&io->nr_new_pages, io->new_pages);
+	pr_debug("as_map: %d (%d new)\n", ret, io->nr_new_pages);
+	if (ret <= 0) {
+		/* Failure path: drop i_sem here; the success path releases
+		 * it in __iodesc_do_write(). */
+		if (rw == WRITE)
+			up(&inode->i_sem);
+		goto out_new_pages;
+	}
+
+	io->nr_pages = ret;
+	io->pages[io->nr_pages] = NULL;
+	io->new_pages[io->nr_new_pages] = NULL;
+
+	if (rw == READ)
+		__generic_file_read_iodesc(io, 0);
+	else if (rw == WRITE)
+		__generic_file_write_iodesc(io);
+
+	return 0;
+
+out_new_pages:
+	if (io->new_pages != io->def_new_pages)
+		kfree(io->new_pages);
+out_pages:
+	if (io->pages != io->def_pages)
+		kfree(io->pages);
+out_io:
+	kfree(io);
+out:
+	if (!ret)
+		cb.fn(cb.data, cb.vec, ret);
+	return ret;
+}