Remove lock_journal(): the filesystem-wide sleeping lock (journal->j_sem) is no
longer needed now that j_state_lock and the other fine-grained locks cover all
of its remaining uses.


 25-akpm/fs/jbd/checkpoint.c  |    5 --
 25-akpm/fs/jbd/commit.c      |   30 +----------------
 25-akpm/fs/jbd/journal.c     |   26 --------------
 25-akpm/fs/jbd/transaction.c |   75 ++++++++++---------------------------------
 25-akpm/include/linux/jbd.h  |   30 +----------------
 5 files changed, 26 insertions(+), 140 deletions(-)

diff -puN fs/jbd/checkpoint.c~jbd-400-remove-lock_journal fs/jbd/checkpoint.c
--- 25/fs/jbd/checkpoint.c~jbd-400-remove-lock_journal	Thu Jun  5 15:14:31 2003
+++ 25-akpm/fs/jbd/checkpoint.c	Thu Jun  5 15:14:31 2003
@@ -83,10 +83,8 @@ void __log_wait_for_space(journal_t *jou
 	while (__log_space_left(journal) < nblocks) {
 		if (journal->j_flags & JFS_ABORT)
 			return;
-		unlock_journal(journal);
 		spin_unlock(&journal->j_state_lock);
 		down(&journal->j_checkpoint_sem);
-		lock_journal(journal);
 		
 		/*
 		 * Test again, another process may have checkpointed while we
@@ -148,7 +146,6 @@ static int __cleanup_transaction(journal
 		if (buffer_locked(bh)) {
 			atomic_inc(&bh->b_count);
 			spin_unlock(&journal->j_list_lock);
-			unlock_journal(journal);
 			wait_on_buffer(bh);
 			/* the journal_head may have gone by now */
 			BUFFER_TRACE(bh, "brelse");
@@ -171,7 +168,6 @@ static int __cleanup_transaction(journal
 			spin_unlock(&journal->j_list_lock);
 			jbd_unlock_bh_state(bh);
 			log_start_commit(journal, transaction);
-			unlock_journal(journal);
 			log_wait_commit(journal, tid);
 			goto out_return_1;
 		}
@@ -201,7 +197,6 @@ static int __cleanup_transaction(journal
 
 	return ret;
 out_return_1:
-	lock_journal(journal);
 	spin_lock(&journal->j_list_lock);
 	return 1;
 }
diff -puN fs/jbd/commit.c~jbd-400-remove-lock_journal fs/jbd/commit.c
--- 25/fs/jbd/commit.c~jbd-400-remove-lock_journal	Thu Jun  5 15:14:31 2003
+++ 25-akpm/fs/jbd/commit.c	Thu Jun  5 15:14:31 2003
@@ -62,8 +62,6 @@ void journal_commit_transaction(journal_
 	 * all outstanding updates to complete.
 	 */
 
-	lock_journal(journal);
-
 #ifdef COMMIT_STATS
 	spin_lock(&journal->j_list_lock);
 	summarise_journal_usage(journal);
@@ -89,9 +87,7 @@ void journal_commit_transaction(journal_
 					TASK_UNINTERRUPTIBLE);
 		if (commit_transaction->t_updates) {
 			spin_unlock(&commit_transaction->t_handle_lock);
-			unlock_journal(journal);
 			schedule();
-			lock_journal(journal);
 			spin_lock(&commit_transaction->t_handle_lock);
 		}
 		finish_wait(&journal->j_wait_updates, &wait);
@@ -240,12 +236,10 @@ write_out_data_locked:
 	if (bufs || need_resched()) {
 		jbd_debug(2, "submit %d writes\n", bufs);
 		spin_unlock(&journal->j_list_lock);
-		unlock_journal(journal);
 		if (bufs)
 			ll_rw_block(WRITE, bufs, wbuf);
 		cond_resched();
 		journal_brelse_array(wbuf, bufs);
-		lock_journal(journal);
 		spin_lock(&journal->j_list_lock);
 		if (bufs)
 			goto write_out_data_locked;
@@ -265,13 +259,11 @@ write_out_data_locked:
 		if (buffer_locked(bh)) {
 			get_bh(bh);
 			spin_unlock(&journal->j_list_lock);
-			unlock_journal(journal);
 			wait_on_buffer(bh);
 			if (unlikely(!buffer_uptodate(bh)))
 				err = -EIO;
 			put_bh(bh);
 			/* the journal_head may have been removed now */
-			lock_journal(journal);
 			goto write_out_data;
 		} else if (buffer_dirty(bh)) {
 			goto write_out_data_locked;
@@ -434,8 +426,7 @@ sync_datalist_empty:
 			tag->t_flags |= htonl(JFS_FLAG_LAST_TAG);
 
 start_journal_io:
-			unlock_journal(journal);
-			for (i=0; i<bufs; i++) {
+			for (i = 0; i < bufs; i++) {
 				struct buffer_head *bh = wbuf[i];
 				set_buffer_locked(bh);
 				clear_buffer_dirty(bh);
@@ -444,7 +435,6 @@ start_journal_io:
 				submit_bh(WRITE, bh);
 			}
 			cond_resched();
-			lock_journal(journal);
 
 			/* Force a new descriptor to be generated next
                            time round the loop. */
@@ -477,11 +467,9 @@ wait_for_iobuf:
 		jh = commit_transaction->t_iobuf_list->b_tprev;
 		bh = jh2bh(jh);
 		if (buffer_locked(bh)) {
-			unlock_journal(journal);
 			wait_on_buffer(bh);
 			if (unlikely(!buffer_uptodate(bh)))
 				err = -EIO;
-			lock_journal(journal);
 			goto wait_for_iobuf;
 		}
 
@@ -539,11 +527,9 @@ wait_for_iobuf:
 		jh = commit_transaction->t_log_list->b_tprev;
 		bh = jh2bh(jh);
 		if (buffer_locked(bh)) {
-			unlock_journal(journal);
 			wait_on_buffer(bh);
 			if (unlikely(!buffer_uptodate(bh)))
 				err = -EIO;
-			lock_journal(journal);
 			goto wait_for_ctlbuf;
 		}
 
@@ -558,10 +544,8 @@ wait_for_iobuf:
 
 	jbd_debug(3, "JBD: commit phase 6\n");
 
-	if (is_journal_aborted(journal)) {
-		unlock_journal(journal);
+	if (is_journal_aborted(journal))
 		goto skip_commit;
-	}
 
 	/* Done it all: now write the commit record.  We should have
 	 * cleaned up our previous buffers by now, so if we are in abort
@@ -571,7 +555,6 @@ wait_for_iobuf:
 	descriptor = journal_get_descriptor_buffer(journal);
 	if (!descriptor) {
 		__journal_abort_hard(journal);
-		unlock_journal(journal);
 		goto skip_commit;
 	}
 
@@ -584,7 +567,6 @@ wait_for_iobuf:
 		tmp->h_sequence = htonl(commit_transaction->t_tid);
 	}
 
-	unlock_journal(journal);
 	JBUFFER_TRACE(descriptor, "write commit block");
 	{
 		struct buffer_head *bh = jh2bh(descriptor);
@@ -603,11 +585,8 @@ wait_for_iobuf:
 
 skip_commit: /* The journal should be unlocked by now. */
 
-	if (err) {
-		lock_journal(journal);
+	if (err)
 		__journal_abort_hard(journal);
-		unlock_journal(journal);
-	}
 	
 	/*
 	 * Call any callbacks that had been registered for handles in this
@@ -633,8 +612,6 @@ skip_commit: /* The journal should be un
 	}
 	spin_unlock(&commit_transaction->t_jcb_lock);
 
-	lock_journal(journal);
-
 	jbd_debug(3, "JBD: commit phase 7\n");
 
 	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
@@ -758,6 +735,5 @@ skip_commit: /* The journal should be un
 	jbd_debug(1, "JBD: commit %d complete, head %d\n",
 		  journal->j_commit_sequence, journal->j_tail_sequence);
 
-	unlock_journal(journal);
 	wake_up(&journal->j_wait_done_commit);
 }
diff -puN fs/jbd/journal.c~jbd-400-remove-lock_journal fs/jbd/journal.c
--- 25/fs/jbd/journal.c~jbd-400-remove-lock_journal	Thu Jun  5 15:14:31 2003
+++ 25-akpm/fs/jbd/journal.c	Thu Jun  5 15:14:31 2003
@@ -499,7 +499,6 @@ int log_wait_commit(journal_t *journal, 
 	int err = 0;
 
 #ifdef CONFIG_JBD_DEBUG
-	lock_journal(journal);
 	spin_lock(&journal->j_state_lock);
 	if (!tid_geq(journal->j_commit_request, tid)) {
 		printk(KERN_EMERG
@@ -507,7 +506,6 @@ int log_wait_commit(journal_t *journal, 
 		       __FUNCTION__, journal->j_commit_request, tid);
 	}
 	spin_unlock(&journal->j_state_lock);
-	unlock_journal(journal);
 #endif
 	spin_lock(&journal->j_state_lock);
 	while (tid_gt(tid, journal->j_commit_sequence)) {
@@ -632,7 +630,6 @@ static journal_t * journal_init_common (
 	init_waitqueue_head(&journal->j_wait_updates);
 	init_MUTEX(&journal->j_barrier);
 	init_MUTEX(&journal->j_checkpoint_sem);
-	init_MUTEX(&journal->j_sem);
 	spin_lock_init(&journal->j_revoke_lock);
 	spin_lock_init(&journal->j_list_lock);
 	spin_lock_init(&journal->j_state_lock);
@@ -787,11 +784,7 @@ static int journal_reset(journal_t *jour
 
 	/* Add the dynamic fields and write it to disk. */
 	journal_update_superblock(journal, 1);
-
-	lock_journal(journal);
 	journal_start_thread(journal);
-	unlock_journal(journal);
-
 	return 0;
 }
 
@@ -1062,7 +1055,6 @@ void journal_destroy(journal_t *journal)
 		journal_commit_transaction(journal);
 
 	/* Force any old transactions to disk */
-	lock_journal(journal);
 
 	/* Totally anal locking here... */
 	spin_lock(&journal->j_list_lock);
@@ -1089,8 +1081,6 @@ void journal_destroy(journal_t *journal)
 		iput(journal->j_inode);
 	if (journal->j_revoke)
 		journal_destroy_revoke(journal);
-
-	unlock_journal(journal);
 	kfree(journal);
 }
 
@@ -1275,7 +1265,6 @@ int journal_flush(journal_t *journal)
 	}
 
 	/* ...and flush everything in the log out to disk. */
-	lock_journal(journal);
 	spin_lock(&journal->j_list_lock);
 	while (!err && journal->j_checkpoint_transactions != NULL) {
 		spin_unlock(&journal->j_list_lock);
@@ -1304,8 +1293,6 @@ int journal_flush(journal_t *journal)
 	J_ASSERT(journal->j_head == journal->j_tail);
 	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
 	spin_unlock(&journal->j_state_lock);
-	unlock_journal(journal);
-	
 	return err;
 }
 
@@ -1460,11 +1447,9 @@ void __journal_abort_soft (journal_t *jo
  * 
  */
 
-void journal_abort (journal_t *journal, int errno)
+void journal_abort(journal_t *journal, int errno)
 {
-	lock_journal(journal);
 	__journal_abort_soft(journal, errno);
-	unlock_journal(journal);
 }
 
 /** 
@@ -1482,19 +1467,15 @@ int journal_errno(journal_t *journal)
 {
 	int err;
 
-	lock_journal(journal);
 	spin_lock(&journal->j_state_lock);
 	if (journal->j_flags & JFS_ABORT)
 		err = -EROFS;
 	else
 		err = journal->j_errno;
 	spin_unlock(&journal->j_state_lock);
-	unlock_journal(journal);
 	return err;
 }
 
-
-
 /** 
  * int journal_clear_err () - clears the journal's error state
  *
@@ -1505,18 +1486,15 @@ int journal_clear_err(journal_t *journal
 {
 	int err = 0;
 
-	lock_journal(journal);
 	spin_lock(&journal->j_state_lock);
 	if (journal->j_flags & JFS_ABORT)
 		err = -EROFS;
 	else
 		journal->j_errno = 0;
 	spin_unlock(&journal->j_state_lock);
-	unlock_journal(journal);
 	return err;
 }
 
-
 /** 
  * void journal_ack_err() - Ack journal err.
  *
@@ -1525,12 +1503,10 @@ int journal_clear_err(journal_t *journal
  */
 void journal_ack_err(journal_t *journal)
 {
-	lock_journal(journal);
 	spin_lock(&journal->j_state_lock);
 	if (journal->j_errno)
 		journal->j_flags |= JFS_ACK_ERR;
 	spin_unlock(&journal->j_state_lock);
-	unlock_journal(journal);
 }
 
 int journal_blocks_per_page(struct inode *inode)
diff -puN fs/jbd/transaction.c~jbd-400-remove-lock_journal fs/jbd/transaction.c
--- 25/fs/jbd/transaction.c~jbd-400-remove-lock_journal	Thu Jun  5 15:14:31 2003
+++ 25-akpm/fs/jbd/transaction.c	Thu Jun  5 15:14:31 2003
@@ -87,20 +87,24 @@ static int start_this_handle(journal_t *
 	int needed;
 	int nblocks = handle->h_buffer_credits;
 	transaction_t *new_transaction = NULL;
+	int ret;
 
 	if (nblocks > journal->j_max_transaction_buffers) {
 		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
 		       current->comm, nblocks,
 		       journal->j_max_transaction_buffers);
-		return -ENOSPC;
+		ret = -ENOSPC;
+		goto out;
 	}
 
 alloc_transaction:
 	if (!journal->j_running_transaction) {
 		new_transaction = jbd_kmalloc(sizeof(*new_transaction),
 						GFP_NOFS);
-		if (!new_transaction)
-			return -ENOMEM;
+		if (!new_transaction) {
+			ret = -ENOMEM;
+			goto out;
+		}
 		memset(new_transaction, 0, sizeof(*new_transaction));
 	}
 
@@ -108,8 +112,6 @@ alloc_transaction:
 
 repeat:
 
-	lock_journal(journal);
-
 	/*
 	 * We need to hold j_state_lock until t_updates has been incremented,
 	 * for proper journal barrier handling
@@ -118,32 +120,27 @@ repeat:
 	if (is_journal_aborted(journal) ||
 	    (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
 		spin_unlock(&journal->j_state_lock);
-		unlock_journal(journal);
-		return -EROFS; 
+		ret = -EROFS; 
+		goto out;
 	}
 
 	/* Wait on the journal's transaction barrier if necessary */
 	if (journal->j_barrier_count) {
 		spin_unlock(&journal->j_state_lock);
-		unlock_journal(journal);
 		wait_event(journal->j_wait_transaction_locked,
 				journal->j_barrier_count == 0);
 		goto repeat;
 	}
 	
-repeat_locked:
 	if (!journal->j_running_transaction) {
 		if (!new_transaction) {
 			spin_unlock(&journal->j_state_lock);
-			unlock_journal(journal);
 			goto alloc_transaction;
 		}
 		get_transaction(journal, new_transaction);
+		new_transaction = NULL;
 	}
 
-	/* @@@ Error? */
-	J_ASSERT(journal->j_running_transaction);
-	
 	transaction = journal->j_running_transaction;
 
 	/*
@@ -152,7 +149,6 @@ repeat_locked:
 	 */
 	if (transaction->t_state == T_LOCKED) {
 		spin_unlock(&journal->j_state_lock);
-		unlock_journal(journal);
 		jbd_debug(3, "Handle %p stalling...\n", handle);
 		wait_event(journal->j_wait_transaction_locked,
 				transaction->t_state != T_LOCKED);
@@ -181,12 +177,9 @@ repeat_locked:
 		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
 				TASK_UNINTERRUPTIBLE);
 		log_start_commit(journal, transaction);
-		unlock_journal(journal);
 		schedule();
 		finish_wait(&journal->j_wait_transaction_locked, &wait);
-		lock_journal(journal);
-		spin_lock(&journal->j_state_lock);
-		goto repeat_locked;
+		goto repeat;
 	}
 
 	/* 
@@ -223,7 +216,8 @@ repeat_locked:
 		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
 		spin_unlock(&transaction->t_handle_lock);
 		__log_wait_for_space(journal, needed);
-		goto repeat_locked;
+		spin_unlock(&journal->j_state_lock);
+		goto repeat;
 	}
 
 	/* OK, account for the buffers that this operation expects to
@@ -238,7 +232,9 @@ repeat_locked:
 		  __log_space_left(journal));
 	spin_unlock(&transaction->t_handle_lock);
 	spin_unlock(&journal->j_state_lock);
-	unlock_journal(journal);
+out:
+	if (new_transaction)
+		kfree(new_transaction);
 	return 0;
 }
 
@@ -326,8 +322,6 @@ int journal_extend(handle_t *handle, int
 	int result;
 	int wanted;
 
-	lock_journal(journal);
-
 	result = -EIO;
 	if (is_handle_aborted(handle))
 		goto error_out;
@@ -367,7 +361,6 @@ unlock:
 	spin_unlock(&transaction->t_handle_lock);
 error_out:
 	spin_unlock(&journal->j_state_lock);
-	unlock_journal(journal);
 	return result;
 }
 
@@ -436,8 +429,6 @@ void journal_lock_updates(journal_t *jou
 {
 	DEFINE_WAIT(wait);
 
-	lock_journal(journal);
-
 	spin_lock(&journal->j_state_lock);
 	++journal->j_barrier_count;
 
@@ -457,14 +448,11 @@ void journal_lock_updates(journal_t *jou
 				TASK_UNINTERRUPTIBLE);
 		spin_unlock(&transaction->t_handle_lock);
 		spin_unlock(&journal->j_state_lock);
-		unlock_journal(journal);
 		schedule();
 		finish_wait(&journal->j_wait_updates, &wait);
-		lock_journal(journal);
 		spin_lock(&journal->j_state_lock);
 	}
 	spin_unlock(&journal->j_state_lock);
-	unlock_journal(journal);
 
 	/*
 	 * We have now established a barrier against other normal updates, but
@@ -485,8 +473,6 @@ void journal_lock_updates(journal_t *jou
  */
 void journal_unlock_updates (journal_t *journal)
 {
-	lock_journal(journal);
-
 	J_ASSERT(journal->j_barrier_count != 0);
 
 	up(&journal->j_barrier);
@@ -494,7 +480,6 @@ void journal_unlock_updates (journal_t *
 	--journal->j_barrier_count;
 	spin_unlock(&journal->j_state_lock);
 	wake_up(&journal->j_wait_transaction_locked);
-	unlock_journal(journal);
 }
 
 /*
@@ -645,11 +630,9 @@ repeat:
 			JBUFFER_TRACE(jh, "on shadow: sleep");
 			spin_unlock(&journal->j_list_lock);
 			jbd_unlock_bh_state(bh);
-			unlock_journal(journal);
 			/* commit wakes up all shadow buffers after IO */
 			wqh = bh_waitq_head(jh2bh(jh));
 			wait_event(*wqh, (jh->b_jlist != BJ_Shadow));
-			lock_journal(journal);
 			goto repeat;
 		}
 			
@@ -752,18 +735,14 @@ out_unlocked:
 
 int journal_get_write_access (handle_t *handle, struct buffer_head *bh) 
 {
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
 	struct journal_head *jh = journal_add_journal_head(bh);
 	int rc;
 
 	/* We do not want to get caught playing with fields which the
 	 * log thread also manipulates.  Make sure that the buffer
 	 * completes any outstanding IO before proceeding. */
-	lock_journal(journal);
 	rc = do_get_write_access(handle, jh, 0);
 	journal_put_journal_head(jh);
-	unlock_journal(journal);
 	return rc;
 }
 
@@ -795,7 +774,6 @@ int journal_get_create_access(handle_t *
 	int err;
 	
 	jbd_debug(5, "journal_head %p\n", jh);
-	lock_journal(journal);
 	err = -EROFS;
 	if (is_handle_aborted(handle))
 		goto out;
@@ -844,7 +822,6 @@ int journal_get_create_access(handle_t *
 	journal_cancel_revoke(handle, jh);
 	journal_put_journal_head(jh);
 out:
-	unlock_journal(journal);
 	return err;
 }
 
@@ -876,13 +853,11 @@ out:
  */
 int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 {
-	journal_t *journal = handle->h_transaction->t_journal;
 	int err;
 	struct journal_head *jh = journal_add_journal_head(bh);
 	char *committed_data = NULL;
 
 	JBUFFER_TRACE(jh, "entry");
-	lock_journal(journal);
 
 	/* Do this first --- it can drop the journal lock, so we want to
 	 * make sure that obtaining the committed_data is done
@@ -919,7 +894,6 @@ repeat:
 	jbd_unlock_bh_state(bh);
 out:
 	journal_put_journal_head(jh);
-	unlock_journal(journal);
 	if (committed_data)
 		kfree(committed_data);
 	return err;
@@ -938,8 +912,7 @@ out:
  * Returns error number or 0 on success.
  *
  * journal_dirty_data() can be called via page_launder->ext3_writepage
- * by kswapd.  So it cannot block.  Happily, there's nothing here
- * which needs lock_journal if `async' is set.
+ * by kswapd.
  */
 int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
 {
@@ -1113,9 +1086,8 @@ int journal_dirty_metadata(handle_t *han
 
 	jbd_debug(5, "journal_head %p\n", jh);
 	JBUFFER_TRACE(jh, "entry");
-	lock_journal(journal);
 	if (is_handle_aborted(handle))
-		goto out_unlock;
+		goto out;
 	
 	jbd_lock_bh_state(bh);
 	spin_lock(&journal->j_list_lock);
@@ -1151,9 +1123,8 @@ int journal_dirty_metadata(handle_t *han
 done_locked:
 	spin_unlock(&journal->j_list_lock);
 	jbd_unlock_bh_state(bh);
+out:
 	JBUFFER_TRACE(jh, "exit");
-out_unlock:
-	unlock_journal(journal);
 	return 0;
 }
 
@@ -1171,7 +1142,6 @@ void journal_release_buffer(handle_t *ha
 	journal_t *journal = transaction->t_journal;
 	struct journal_head *jh = bh2jh(bh);
 
-	lock_journal(journal);
 	JBUFFER_TRACE(jh, "entry");
 
 	/* If the buffer is reserved but not modified by this
@@ -1190,7 +1160,6 @@ void journal_release_buffer(handle_t *ha
 	jbd_unlock_bh_state(bh);
 
 	JBUFFER_TRACE(jh, "exit");
-	unlock_journal(journal);
 }
 
 /** 
@@ -1218,7 +1187,6 @@ void journal_forget(handle_t *handle, st
 
 	BUFFER_TRACE(bh, "entry");
 
-	lock_journal(journal);
 	jbd_lock_bh_state(bh);
 	spin_lock(&journal->j_list_lock);
 
@@ -1261,7 +1229,6 @@ void journal_forget(handle_t *handle, st
 			if (!buffer_jbd(bh)) {
 				spin_unlock(&journal->j_list_lock);
 				jbd_unlock_bh_state(bh);
-				unlock_journal(journal);
 				__bforget(bh);
 				return;
 			}
@@ -1285,7 +1252,6 @@ void journal_forget(handle_t *handle, st
 not_jbd:
 	spin_unlock(&journal->j_list_lock);
 	jbd_unlock_bh_state(bh);
-	unlock_journal(journal);
 	__brelse(bh);
 	return;
 }
@@ -1896,7 +1862,6 @@ int journal_invalidatepage(journal_t *jo
 	/* We will potentially be playing with lists other than just the
 	 * data lists (especially for journaled data mode), so be
 	 * cautious in our locking. */
-	lock_journal(journal);
 
 	head = bh = page_buffers(page);
 	do {
@@ -1915,8 +1880,6 @@ int journal_invalidatepage(journal_t *jo
 
 	} while (bh != head);
 
-	unlock_journal(journal);
-
 	if (!offset) {
 		if (!may_free || !try_to_free_buffers(page))
 			return 0;
diff -puN include/linux/jbd.h~jbd-400-remove-lock_journal include/linux/jbd.h
--- 25/include/linux/jbd.h~jbd-400-remove-lock_journal	Thu Jun  5 15:14:31 2003
+++ 25-akpm/include/linux/jbd.h	Thu Jun  5 15:14:31 2003
@@ -593,7 +593,6 @@ struct transaction_s 
  * @j_wait_commit: Wait queue to trigger commit
  * @j_wait_updates: Wait queue to wait for updates to complete
  * @j_checkpoint_sem: Semaphore for locking against concurrent checkpoints
- * @j_sem: The main journal lock, used by lock_journal() 
  * @j_head: Journal head - identifies the first unused block in the journal
  * @j_tail: Journal tail - identifies the oldest still-used block in the
  *  journal.
@@ -700,9 +699,6 @@ struct journal_s
 	/* Semaphore for locking against concurrent checkpoints */
 	struct semaphore 	j_checkpoint_sem;
 
-	/* The main journal lock, used by lock_journal() */
-	struct semaphore	j_sem;
-
 	/*
 	 * Journal head: identifies the first unused block in the journal.
 	 * [j_state_lock]
@@ -867,34 +863,14 @@ extern void		__wait_on_journal (journal_
 /*
  * Journal locking.
  *
- * We need to lock the journal during transaction state changes so that
- * nobody ever tries to take a handle on the running transaction while
- * we are in the middle of moving it to the commit phase.  
+ * We need to lock the journal during transaction state changes so that nobody
+ * ever tries to take a handle on the running transaction while we are in the
+ * middle of moving it to the commit phase.  j_state_lock does this.
  *
  * Note that the locking is completely interrupt unsafe.  We never touch
  * journal structures from interrupts.
- *
- * In 2.2, the BKL was required for lock_journal.  This is no longer
- * the case.
  */
 
-static inline void lock_journal(journal_t *journal)
-{
-	down(&journal->j_sem);
-}
-
-/* This returns zero if we acquired the semaphore */
-static inline int try_lock_journal(journal_t * journal)
-{
-	return down_trylock(&journal->j_sem);
-}
-
-static inline void unlock_journal(journal_t * journal)
-{
-	up(&journal->j_sem);
-}
-
-
 static inline handle_t *journal_current_handle(void)
 {
 	return current->journal_info;

_