From: Nick Piggin <piggin@cyberone.com.au>

This patch imports one of Jens' hash improvements from the deadline I/O
scheduler, and reduces the per-queue merge hash memory usage from 8KB to 512B.
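
The hash table is an array of struct list_head, so shrinking as_hash_shift
from 10 to 6 cuts it from 1024 buckets to 64.  A rough sketch of the saving,
assuming 32-bit pointers (struct list_head is two pointers):

	old: (1 << 10) * sizeof(struct list_head) = 1024 * 8 = 8192 bytes
	new: (1 <<  6) * sizeof(struct list_head) =   64 * 8 =  512 bytes

Fewer buckets mean longer chains, so the patch also adds as_hot_arq_hash(),
which moves a request to the front of its hash chain after a successful
merge, keeping recently-merged (hot) entries cheap to find.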



 drivers/block/as-iosched.c |   18 +++++++++++++++++-
 1 files changed, 17 insertions(+), 1 deletion(-)

diff -puN drivers/block/as-iosched.c~as-small-hashes drivers/block/as-iosched.c
--- 25/drivers/block/as-iosched.c~as-small-hashes	2003-05-08 00:31:23.000000000 -0700
+++ 25-akpm/drivers/block/as-iosched.c	2003-05-08 00:31:23.000000000 -0700
@@ -295,7 +295,7 @@ swap_as_io_context(struct as_io_context 
 /*
  * the back merge hash support functions
  */
-static const int as_hash_shift = 10;
+static const int as_hash_shift = 6;
 #define AS_HASH_BLOCK(sec)	((sec) >> 3)
 #define AS_HASH_FN(sec)		(hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
 #define AS_HASH_ENTRIES		(1 << as_hash_shift)
@@ -331,6 +331,20 @@ static void as_add_arq_hash(struct as_da
 	list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
 }
 
+/*
+ * move hot entry to front of chain
+ */
+static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
+{
+	struct request *rq = arq->request;
+	struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
+
+	if (ON_HASH(arq) && arq->hash.prev != head) {
+		list_del(&arq->hash);
+		list_add(&arq->hash, head);
+	}
+}
+
 static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
 {
 	struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
@@ -1388,6 +1402,8 @@ as_merge(request_queue_t *q, struct list
 out:
 	q->last_merge = &__rq->queuelist;
 out_insert:
+	if (ret)
+		as_hot_arq_hash(ad, RQ_DATA(__rq));
 	*insert = &__rq->queuelist;
 	return ret;
 }

_