Convert mapping->page_lock from an rwlock into a spinlock.  Spinlocks don't need a bus-locked operation on unlock, and are faster.
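
(Illustration only: a userspace sketch with C11 atomics, not the kernel's
asm-i386 lock code.  Releasing an rwlock must atomically drop a shared
reader count, so its unlock is a locked read-modify-write; a spinlock is
owned by exactly one CPU, so its unlock can be a plain store.)

	/* Toy sketch -- writer side of the rwlock omitted. */
	#include <stdatomic.h>

	typedef struct { atomic_int owned; } toy_spinlock_t;
	typedef struct { atomic_int readers; } toy_rwlock_t;

	static void toy_spin_lock(toy_spinlock_t *l)
	{
		int expect = 0;
		/* spin until the flag becomes ours */
		while (!atomic_compare_exchange_weak(&l->owned, &expect, 1))
			expect = 0;
	}

	static void toy_spin_unlock(toy_spinlock_t *l)
	{
		/* only the owner writes here: a plain release store suffices */
		atomic_store_explicit(&l->owned, 0, memory_order_release);
	}

	static void toy_read_lock(toy_rwlock_t *l)
	{
		atomic_fetch_add(&l->readers, 1);	/* locked RMW */
	}

	static void toy_read_unlock(toy_rwlock_t *l)
	{
		/* other readers race with us, so dropping the shared
		 * count needs an atomic (bus-locked) RMW as well */
		atomic_fetch_sub(&l->readers, 1);
	}

	int main(void)
	{
		toy_spinlock_t sl = { 0 };
		toy_rwlock_t rw = { 0 };

		toy_spin_lock(&sl);
		toy_spin_unlock(&sl);
		toy_read_lock(&rw);
		toy_read_unlock(&rw);
		return 0;
	}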

On a P4, time to write a 4M file with 4M one-byte write()s:

Before:
	0.72s user 5.47s system 99% cpu 6.227 total
	0.76s user 5.40s system 100% cpu 6.154 total
	0.77s user 5.38s system 100% cpu 6.146 total

After:
	1.09s user 4.92s system 99% cpu 6.014 total
	0.74s user 5.28s system 99% cpu 6.023 total
	1.03s user 4.97s system 100% cpu 5.991 total
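
The test amounts to something like the following (a sketch, not the harness
actually used; the output file name and reading "4M" as 4*1024*1024 are
assumptions), built with gcc and run under time(1), e.g. "time ./a.out":

	/* Sketch of the 4M one-byte-write() test -- not the original harness. */
	#include <fcntl.h>
	#include <stdlib.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		char byte = 0;
		long i;
		int fd = open("testfile", O_WRONLY | O_CREAT | O_TRUNC, 0644);

		if (fd < 0)
			exit(1);
		for (i = 0; i < 4 * 1024 * 1024; i++)	/* assumes 4M == 2^22 */
			if (write(fd, &byte, 1) != 1)
				exit(1);
		close(fd);
		return 0;
	}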



 fs/fs-writeback.c   |    4 ++--
 fs/inode.c          |    2 +-
 fs/mpage.c          |    8 ++++----
 include/linux/fs.h  |    2 +-
 mm/filemap.c        |   42 +++++++++++++++++++++---------------------
 mm/page-writeback.c |   14 +++++++-------
 mm/readahead.c      |    8 ++++----
 mm/swap_state.c     |   22 +++++++++++-----------
 mm/swapfile.c       |    8 ++++----
 mm/truncate.c       |    6 +++---
 mm/vmscan.c         |   14 +++++++-------
 11 files changed, 65 insertions(+), 65 deletions(-)

diff -puN fs/fs-writeback.c~page-lock-is-spin_lock fs/fs-writeback.c
--- 25/fs/fs-writeback.c~page-lock-is-spin_lock	2003-04-08 03:16:30.000000000 -0700
+++ 25-akpm/fs/fs-writeback.c	2003-04-08 03:16:30.000000000 -0700
@@ -149,10 +149,10 @@ __sync_single_inode(struct inode *inode,
 	 * read speculatively by this cpu before &= ~I_DIRTY  -- mikulas
 	 */
 
-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	if (wait || !wbc->for_kupdate || list_empty(&mapping->io_pages))
 		list_splice_init(&mapping->dirty_pages, &mapping->io_pages);
-	write_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	spin_unlock(&inode_lock);
 
 	do_writepages(mapping, wbc);
diff -puN fs/inode.c~page-lock-is-spin_lock fs/inode.c
--- 25/fs/inode.c~page-lock-is-spin_lock	2003-04-08 03:16:30.000000000 -0700
+++ 25-akpm/fs/inode.c	2003-04-08 03:16:30.000000000 -0700
@@ -181,7 +181,7 @@ void inode_init_once(struct inode *inode
 	INIT_LIST_HEAD(&inode->i_devices);
 	sema_init(&inode->i_sem, 1);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
-	rwlock_init(&inode->i_data.page_lock);
+	spin_lock_init(&inode->i_data.page_lock);
 	init_MUTEX(&inode->i_data.i_shared_sem);
 	INIT_LIST_HEAD(&inode->i_data.private_list);
 	spin_lock_init(&inode->i_data.private_lock);
diff -puN fs/mpage.c~page-lock-is-spin_lock fs/mpage.c
--- 25/fs/mpage.c~page-lock-is-spin_lock	2003-04-08 03:16:30.000000000 -0700
+++ 25-akpm/fs/mpage.c	2003-04-08 03:16:30.000000000 -0700
@@ -627,7 +627,7 @@ mpage_writepages(struct address_space *m
 		writepage = mapping->a_ops->writepage;
 
 	pagevec_init(&pvec, 0);
-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	while (!list_empty(&mapping->io_pages) && !done) {
 		struct page *page = list_entry(mapping->io_pages.prev,
 					struct page, list);
@@ -647,7 +647,7 @@ mpage_writepages(struct address_space *m
 		list_add(&page->list, &mapping->locked_pages);
 
 		page_cache_get(page);
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 
 		/*
 		 * At this point we hold neither mapping->page_lock nor
@@ -679,12 +679,12 @@ mpage_writepages(struct address_space *m
 			unlock_page(page);
 		}
 		page_cache_release(page);
-		write_lock(&mapping->page_lock);
+		spin_lock(&mapping->page_lock);
 	}
 	/*
 	 * Leave any remaining dirty pages on ->io_pages
 	 */
-	write_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	if (bio)
 		mpage_bio_submit(WRITE, bio);
 	return ret;
diff -puN include/linux/fs.h~page-lock-is-spin_lock include/linux/fs.h
--- 25/include/linux/fs.h~page-lock-is-spin_lock	2003-04-08 03:16:30.000000000 -0700
+++ 25-akpm/include/linux/fs.h	2003-04-08 03:16:30.000000000 -0700
@@ -313,7 +313,7 @@ struct backing_dev_info;
 struct address_space {
 	struct inode		*host;		/* owner: inode, block_device */
 	struct radix_tree_root	page_tree;	/* radix tree of all pages */
-	rwlock_t		page_lock;	/* and rwlock protecting it */
+	spinlock_t		page_lock;	/* and spinlock protecting it */
 	struct list_head	clean_pages;	/* list of clean pages */
 	struct list_head	dirty_pages;	/* list of dirty pages */
 	struct list_head	locked_pages;	/* list of locked pages */
diff -puN mm/filemap.c~page-lock-is-spin_lock mm/filemap.c
--- 25/mm/filemap.c~page-lock-is-spin_lock	2003-04-08 03:16:30.000000000 -0700
+++ 25-akpm/mm/filemap.c	2003-04-08 03:16:30.000000000 -0700
@@ -99,9 +99,9 @@ void remove_from_page_cache(struct page 
 	if (unlikely(!PageLocked(page)))
 		PAGE_BUG(page);
 
-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	__remove_from_page_cache(page);
-	write_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 }
 
 static inline int sync_page(struct page *page)
@@ -133,9 +133,9 @@ static int __filemap_fdatawrite(struct a
 	if (mapping->backing_dev_info->memory_backed)
 		return 0;
 
-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	list_splice_init(&mapping->dirty_pages, &mapping->io_pages);
-	write_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	ret = do_writepages(mapping, &wbc);
 	return ret;
 }
@@ -166,7 +166,7 @@ int filemap_fdatawait(struct address_spa
 
 restart:
 	progress = 0;
-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
         while (!list_empty(&mapping->locked_pages)) {
 		struct page *page;
 
@@ -180,7 +180,7 @@ restart:
 		if (!PageWriteback(page)) {
 			if (++progress > 32) {
 				if (need_resched()) {
-					write_unlock(&mapping->page_lock);
+					spin_unlock(&mapping->page_lock);
 					__cond_resched();
 					goto restart;
 				}
@@ -190,16 +190,16 @@ restart:
 
 		progress = 0;
 		page_cache_get(page);
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 
 		wait_on_page_writeback(page);
 		if (PageError(page))
 			ret = -EIO;
 
 		page_cache_release(page);
-		write_lock(&mapping->page_lock);
+		spin_lock(&mapping->page_lock);
 	}
-	write_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	return ret;
 }
 
@@ -227,7 +227,7 @@ int add_to_page_cache(struct page *page,
 
 	if (error == 0) {
 		page_cache_get(page);
-		write_lock(&mapping->page_lock);
+		spin_lock(&mapping->page_lock);
 		error = radix_tree_insert(&mapping->page_tree, offset, page);
 		if (!error) {
 			SetPageLocked(page);
@@ -235,7 +235,7 @@ int add_to_page_cache(struct page *page,
 		} else {
 			page_cache_release(page);
 		}
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		radix_tree_preload_end();
 	}
 	return error;
@@ -364,11 +364,11 @@ struct page * find_get_page(struct addre
 	 * We scan the hash list read-only. Addition to and removal from
 	 * the hash-list needs a held write-lock.
 	 */
-	read_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	page = radix_tree_lookup(&mapping->page_tree, offset);
 	if (page)
 		page_cache_get(page);
-	read_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	return page;
 }
 
@@ -379,11 +379,11 @@ struct page *find_trylock_page(struct ad
 {
 	struct page *page;
 
-	read_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	page = radix_tree_lookup(&mapping->page_tree, offset);
 	if (page && TestSetPageLocked(page))
 		page = NULL;
-	read_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	return page;
 }
 
@@ -403,15 +403,15 @@ struct page *find_lock_page(struct addre
 {
 	struct page *page;
 
-	read_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 repeat:
 	page = radix_tree_lookup(&mapping->page_tree, offset);
 	if (page) {
 		page_cache_get(page);
 		if (TestSetPageLocked(page)) {
-			read_unlock(&mapping->page_lock);
+			spin_unlock(&mapping->page_lock);
 			lock_page(page);
-			read_lock(&mapping->page_lock);
+			spin_lock(&mapping->page_lock);
 
 			/* Has the page been truncated while we slept? */
 			if (page->mapping != mapping || page->index != offset) {
@@ -421,7 +421,7 @@ repeat:
 			}
 		}
 	}
-	read_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	return page;
 }
 
@@ -491,12 +491,12 @@ unsigned int find_get_pages(struct addre
 	unsigned int i;
 	unsigned int ret;
 
-	read_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	ret = radix_tree_gang_lookup(&mapping->page_tree,
 				(void **)pages, start, nr_pages);
 	for (i = 0; i < ret; i++)
 		page_cache_get(pages[i]);
-	read_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	return ret;
 }
 
diff -puN mm/page-writeback.c~page-lock-is-spin_lock mm/page-writeback.c
--- 25/mm/page-writeback.c~page-lock-is-spin_lock	2003-04-08 03:16:30.000000000 -0700
+++ 25-akpm/mm/page-writeback.c	2003-04-08 03:16:30.000000000 -0700
@@ -425,12 +425,12 @@ int write_one_page(struct page *page, in
 	if (wait && PageWriteback(page))
 		wait_on_page_writeback(page);
 
-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	list_del(&page->list);
 	if (test_clear_page_dirty(page)) {
 		list_add(&page->list, &mapping->locked_pages);
 		page_cache_get(page);
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		ret = mapping->a_ops->writepage(page, &wbc);
 		if (ret == 0 && wait) {
 			wait_on_page_writeback(page);
@@ -440,7 +440,7 @@ int write_one_page(struct page *page, in
 		page_cache_release(page);
 	} else {
 		list_add(&page->list, &mapping->clean_pages);
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		unlock_page(page);
 	}
 	return ret;
@@ -513,14 +513,14 @@ int __set_page_dirty_buffers(struct page
 	spin_unlock(&mapping->private_lock);
 
 	if (!TestSetPageDirty(page)) {
-		write_lock(&mapping->page_lock);
+		spin_lock(&mapping->page_lock);
 		if (page->mapping) {	/* Race with truncate? */
 			if (!mapping->backing_dev_info->memory_backed)
 				inc_page_state(nr_dirty);
 			list_del(&page->list);
 			list_add(&page->list, &mapping->dirty_pages);
 		}
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	}
 	
@@ -550,7 +550,7 @@ int __set_page_dirty_nobuffers(struct pa
 		struct address_space *mapping = page->mapping;
 
 		if (mapping) {
-			write_lock(&mapping->page_lock);
+			spin_lock(&mapping->page_lock);
 			if (page->mapping) {	/* Race with truncate? */
 				BUG_ON(page->mapping != mapping);
 				if (!mapping->backing_dev_info->memory_backed)
@@ -558,7 +558,7 @@ int __set_page_dirty_nobuffers(struct pa
 				list_del(&page->list);
 				list_add(&page->list, &mapping->dirty_pages);
 			}
-			write_unlock(&mapping->page_lock);
+			spin_unlock(&mapping->page_lock);
 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 		}
 	}
diff -puN mm/readahead.c~page-lock-is-spin_lock mm/readahead.c
--- 25/mm/readahead.c~page-lock-is-spin_lock	2003-04-08 03:16:30.000000000 -0700
+++ 25-akpm/mm/readahead.c	2003-04-08 03:16:30.000000000 -0700
@@ -217,7 +217,7 @@ __do_page_cache_readahead(struct address
 	/*
 	 * Preallocate as many pages as we will need.
 	 */
-	read_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
 		unsigned long page_offset = offset + page_idx;
 		
@@ -228,16 +228,16 @@ __do_page_cache_readahead(struct address
 		if (page)
 			continue;
 
-		read_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		page = page_cache_alloc_cold(mapping);
-		read_lock(&mapping->page_lock);
+		spin_lock(&mapping->page_lock);
 		if (!page)
 			break;
 		page->index = page_offset;
 		list_add(&page->list, &page_pool);
 		ret++;
 	}
-	read_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 
 	/*
 	 * Now start the IO.  We ignore I/O errors - if the page is not
diff -puN mm/swapfile.c~page-lock-is-spin_lock mm/swapfile.c
--- 25/mm/swapfile.c~page-lock-is-spin_lock	2003-04-08 03:16:30.000000000 -0700
+++ 25-akpm/mm/swapfile.c	2003-04-08 03:16:30.000000000 -0700
@@ -248,10 +248,10 @@ static int exclusive_swap_page(struct pa
 		/* Is the only swap cache user the cache itself? */
 		if (p->swap_map[swp_offset(entry)] == 1) {
 			/* Recheck the page count with the pagecache lock held.. */
-			read_lock(&swapper_space.page_lock);
+			spin_lock(&swapper_space.page_lock);
 			if (page_count(page) - !!PagePrivate(page) == 2)
 				retval = 1;
-			read_unlock(&swapper_space.page_lock);
+			spin_unlock(&swapper_space.page_lock);
 		}
 		swap_info_put(p);
 	}
@@ -319,13 +319,13 @@ int remove_exclusive_swap_page(struct pa
 	retval = 0;
 	if (p->swap_map[swp_offset(entry)] == 1) {
 		/* Recheck the page count with the pagecache lock held.. */
-		write_lock(&swapper_space.page_lock);
+		spin_lock(&swapper_space.page_lock);
 		if ((page_count(page) == 2) && !PageWriteback(page)) {
 			__delete_from_swap_cache(page);
 			SetPageDirty(page);
 			retval = 1;
 		}
-		write_unlock(&swapper_space.page_lock);
+		spin_unlock(&swapper_space.page_lock);
 	}
 	swap_info_put(p);
 
diff -puN mm/swap_state.c~page-lock-is-spin_lock mm/swap_state.c
--- 25/mm/swap_state.c~page-lock-is-spin_lock	2003-04-08 03:16:30.000000000 -0700
+++ 25-akpm/mm/swap_state.c	2003-04-08 03:16:30.000000000 -0700
@@ -34,7 +34,7 @@ extern struct address_space_operations s
 
 struct address_space swapper_space = {
 	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC),
-	.page_lock	= RW_LOCK_UNLOCKED,
+	.page_lock	= SPIN_LOCK_UNLOCKED,
 	.clean_pages	= LIST_HEAD_INIT(swapper_space.clean_pages),
 	.dirty_pages	= LIST_HEAD_INIT(swapper_space.dirty_pages),
 	.io_pages	= LIST_HEAD_INIT(swapper_space.io_pages),
@@ -191,9 +191,9 @@ void delete_from_swap_cache(struct page 
   
 	entry.val = page->index;
 
-	write_lock(&swapper_space.page_lock);
+	spin_lock(&swapper_space.page_lock);
 	__delete_from_swap_cache(page);
-	write_unlock(&swapper_space.page_lock);
+	spin_unlock(&swapper_space.page_lock);
 
 	swap_free(entry);
 	page_cache_release(page);
@@ -204,8 +204,8 @@ int move_to_swap_cache(struct page *page
 	struct address_space *mapping = page->mapping;
 	int err;
 
-	write_lock(&swapper_space.page_lock);
-	write_lock(&mapping->page_lock);
+	spin_lock(&swapper_space.page_lock);
+	spin_lock(&mapping->page_lock);
 
 	err = radix_tree_insert(&swapper_space.page_tree, entry.val, page);
 	if (!err) {
@@ -213,8 +213,8 @@ int move_to_swap_cache(struct page *page
 		___add_to_page_cache(page, &swapper_space, entry.val);
 	}
 
-	write_unlock(&mapping->page_lock);
-	write_unlock(&swapper_space.page_lock);
+	spin_unlock(&mapping->page_lock);
+	spin_unlock(&swapper_space.page_lock);
 
 	if (!err) {
 		if (!swap_duplicate(entry))
@@ -240,8 +240,8 @@ int move_from_swap_cache(struct page *pa
 
 	entry.val = page->index;
 
-	write_lock(&swapper_space.page_lock);
-	write_lock(&mapping->page_lock);
+	spin_lock(&swapper_space.page_lock);
+	spin_lock(&mapping->page_lock);
 
 	err = radix_tree_insert(&mapping->page_tree, index, page);
 	if (!err) {
@@ -249,8 +249,8 @@ int move_from_swap_cache(struct page *pa
 		___add_to_page_cache(page, mapping, index);
 	}
 
-	write_unlock(&mapping->page_lock);
-	write_unlock(&swapper_space.page_lock);
+	spin_unlock(&mapping->page_lock);
+	spin_unlock(&swapper_space.page_lock);
 
 	if (!err) {
 		swap_free(entry);
diff -puN mm/truncate.c~page-lock-is-spin_lock mm/truncate.c
--- 25/mm/truncate.c~page-lock-is-spin_lock	2003-04-08 03:16:30.000000000 -0700
+++ 25-akpm/mm/truncate.c	2003-04-08 03:16:30.000000000 -0700
@@ -73,13 +73,13 @@ invalidate_complete_page(struct address_
 	if (PagePrivate(page) && !try_to_release_page(page, 0))
 		return 0;
 
-	write_lock(&mapping->page_lock);
+	spin_lock(&mapping->page_lock);
 	if (PageDirty(page)) {
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		return 0;
 	}
 	__remove_from_page_cache(page);
-	write_unlock(&mapping->page_lock);
+	spin_unlock(&mapping->page_lock);
 	ClearPageUptodate(page);
 	page_cache_release(page);	/* pagecache ref */
 	return 1;
diff -puN mm/vmscan.c~page-lock-is-spin_lock mm/vmscan.c
--- 25/mm/vmscan.c~page-lock-is-spin_lock	2003-04-08 03:16:30.000000000 -0700
+++ 25-akpm/mm/vmscan.c	2003-04-08 03:16:30.000000000 -0700
@@ -325,7 +325,7 @@ shrink_list(struct list_head *page_list,
 				goto keep_locked;
 			if (!may_write_to_queue(mapping->backing_dev_info))
 				goto keep_locked;
-			write_lock(&mapping->page_lock);
+			spin_lock(&mapping->page_lock);
 			if (test_clear_page_dirty(page)) {
 				int res;
 				struct writeback_control wbc = {
@@ -336,7 +336,7 @@ shrink_list(struct list_head *page_list,
 				};
 
 				list_move(&page->list, &mapping->locked_pages);
-				write_unlock(&mapping->page_lock);
+				spin_unlock(&mapping->page_lock);
 
 				SetPageReclaim(page);
 				res = mapping->a_ops->writepage(page, &wbc);
@@ -351,7 +351,7 @@ shrink_list(struct list_head *page_list,
 				}
 				goto keep;
 			}
-			write_unlock(&mapping->page_lock);
+			spin_unlock(&mapping->page_lock);
 		}
 
 		/*
@@ -385,7 +385,7 @@ shrink_list(struct list_head *page_list,
 		if (!mapping)
 			goto keep_locked;	/* truncate got there first */
 
-		write_lock(&mapping->page_lock);
+		spin_lock(&mapping->page_lock);
 
 		/*
 		 * The non-racy check for busy page.  It is critical to check
@@ -393,7 +393,7 @@ shrink_list(struct list_head *page_list,
 		 * not in use by anybody. 	(pagecache + us == 2)
 		 */
 		if (page_count(page) != 2 || PageDirty(page)) {
-			write_unlock(&mapping->page_lock);
+			spin_unlock(&mapping->page_lock);
 			goto keep_locked;
 		}
 
@@ -401,7 +401,7 @@ shrink_list(struct list_head *page_list,
 		if (PageSwapCache(page)) {
 			swp_entry_t swap = { .val = page->index };
 			__delete_from_swap_cache(page);
-			write_unlock(&mapping->page_lock);
+			spin_unlock(&mapping->page_lock);
 			swap_free(swap);
 			__put_page(page);	/* The pagecache ref */
 			goto free_it;
@@ -409,7 +409,7 @@ shrink_list(struct list_head *page_list,
 #endif /* CONFIG_SWAP */
 
 		__remove_from_page_cache(page);
-		write_unlock(&mapping->page_lock);
+		spin_unlock(&mapping->page_lock);
 		__put_page(page);
 
 free_it:

_