diff -purN -X /home/mbligh/.diff.exclude 501-ppc64-pci/fs/proc/array.c 520-queuestat/fs/proc/array.c
--- 501-ppc64-pci/fs/proc/array.c	2003-06-05 14:38:39.000000000 -0700
+++ 520-queuestat/fs/proc/array.c	2003-07-28 19:01:30.000000000 -0700
@@ -336,7 +336,7 @@ int proc_pid_stat(struct task_struct *ta
 	read_unlock(&tasklist_lock);
 	res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \
 %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld %llu %lu %ld %lu %lu %lu %lu %lu \
-%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n",
+%lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu %lu %lu %lu\n",
 		task->pid,
 		task->comm,
 		state,
@@ -382,7 +382,10 @@ int proc_pid_stat(struct task_struct *ta
 		task->exit_signal,
 		task_cpu(task),
 		task->rt_priority,
-		task->policy);
+		task->policy,
+		jiffies_to_clock_t(task->sched_info.inter_arrival_time),
+		jiffies_to_clock_t(task->sched_info.service_time),
+		jiffies_to_clock_t(task->sched_info.response_time));
 	if(mm)
 		mmput(mm);
 	return res;
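
The array.c hunk above appends three values to the end of /proc/<pid>/stat: the task's running averages for inter-arrival time, service time and response time, converted with jiffies_to_clock_t like the other time fields. A minimal userspace sketch of picking them up follows; it is not part of the patch, the helper name read_queue_stats is made up for illustration, and it simply takes the last three whitespace-separated tokens of the line so the (comm) field cannot confuse it.

/* illustrative only: read the three trailing queue-statistics fields
 * that the hunk above appends to /proc/<pid>/stat */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

static int read_queue_stats(pid_t pid, unsigned long out[3])
{
	char path[64], buf[1024], *p;
	FILE *f;
	int i;

	snprintf(path, sizeof(path), "/proc/%d/stat", (int) pid);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);

	/* walk backwards over the last three space-separated tokens:
	 * inter_arrival_time, service_time, response_time */
	p = buf + strlen(buf);
	for (i = 2; i >= 0; i--) {
		while (p > buf && (p[-1] == ' ' || p[-1] == '\n'))
			p--;
		while (p > buf && p[-1] != ' ')
			p--;
		out[i] = strtoul(p, NULL, 10);
	}
	return 0;
}
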
diff -purN -X /home/mbligh/.diff.exclude 501-ppc64-pci/fs/proc/proc_misc.c 520-queuestat/fs/proc/proc_misc.c
--- 501-ppc64-pci/fs/proc/proc_misc.c	2003-07-28 18:58:37.000000000 -0700
+++ 520-queuestat/fs/proc/proc_misc.c	2003-07-28 19:01:30.000000000 -0700
@@ -464,14 +464,20 @@ static int kstat_read_proc(char *page, c
 		jiffies_to_clock_t(idle),
 		jiffies_to_clock_t(iowait));
 	for (i = 0 ; i < NR_CPUS; i++){
-		if (!cpu_online(i)) continue;
-		len += sprintf(page + len, "cpu%d %u %u %u %u %u\n",
+		struct sched_info info;
+		if (!cpu_online(i))
+			continue;
+		cpu_sched_info(&info, i);
+		len += sprintf(page + len, "cpu%d %u %u %u %u %u %u %u %u\n",
 			i,
 			jiffies_to_clock_t(kstat_cpu(i).cpustat.user),
 			jiffies_to_clock_t(kstat_cpu(i).cpustat.nice),
 			jiffies_to_clock_t(kstat_cpu(i).cpustat.system),
 			jiffies_to_clock_t(kstat_cpu(i).cpustat.idle),
-			jiffies_to_clock_t(kstat_cpu(i).cpustat.iowait));
+			jiffies_to_clock_t(kstat_cpu(i).cpustat.iowait),
+			(uint) jiffies_to_clock_t(info.inter_arrival_time),
+			(uint) jiffies_to_clock_t(info.service_time),
+			(uint) jiffies_to_clock_t(info.response_time));
 	}
 	len += sprintf(page + len, "intr %u", sum);
 
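Each online cpu's line in /proc/stat likewise grows from five to eight numeric columns, the last three being the runqueue's inter-arrival, service and response time averages pulled out via cpu_sched_info(). A hedged sketch of parsing one such line is below; parse_cpu_line is an illustrative name rather than kernel code, and the digit check skips the aggregate "cpu " summary line, whose format the patch leaves alone.

/* illustrative only: parse one extended "cpuN ..." line of /proc/stat */
#include <stdio.h>
#include <string.h>
#include <ctype.h>

static int parse_cpu_line(const char *line, unsigned int avg[3])
{
	unsigned int cpu, user, nice, sys, idle, iowait;

	/* the aggregate "cpu " summary line is unchanged by the patch */
	if (strncmp(line, "cpu", 3) != 0 || !isdigit((unsigned char) line[3]))
		return -1;
	if (sscanf(line, "cpu%u %u %u %u %u %u %u %u %u",
		   &cpu, &user, &nice, &sys, &idle, &iowait,
		   &avg[0], &avg[1], &avg[2]) != 9)
		return -1;
	/* avg[0..2] = inter_arrival_time, service_time, response_time */
	return 0;
}
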
diff -purN -X /home/mbligh/.diff.exclude 501-ppc64-pci/include/linux/sched.h 520-queuestat/include/linux/sched.h
--- 501-ppc64-pci/include/linux/sched.h	2003-07-28 18:58:44.000000000 -0700
+++ 520-queuestat/include/linux/sched.h	2003-07-28 19:01:30.000000000 -0700
@@ -95,6 +95,9 @@ extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_iowait(void);
 
+struct sched_info;
+extern void cpu_sched_info(struct sched_info *, int);
+
 #include <linux/time.h>
 #include <linux/param.h>
 #include <linux/resource.h>
@@ -327,6 +330,13 @@ struct k_itimer {
 	struct sigqueue *sigq;		/* signal queue entry. */
 };
 
+struct sched_info {
+	/* running averages */
+	unsigned long response_time, inter_arrival_time, service_time;
+
+	/* timestamps */
+	unsigned long last_arrival, began_service;
+};
 
 struct io_context;			/* See blkdev.h */
 void exit_io_context(void);
@@ -351,6 +361,8 @@ struct task_struct {
 	unsigned long cpus_allowed;
 	unsigned int time_slice, first_time_slice;
 
+	struct sched_info sched_info;
+
 	struct list_head tasks;
 	struct list_head ptrace_children;
 	struct list_head ptrace_list;
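
Read together with the helpers added in kernel/sched.c below, the struct sched_info fields describe each task (and each runqueue) as a simple queueing system; roughly:

 prev arrival       arrival (last_arrival)     got cpu (began_service)   left cpu
 -----|------------------------|------------------------|--------------------|--> time
      |<- inter_arrival_time ->|<--- response_time ---->|<-- service_time -->|

All three durations are folded into running averages with RUNNING_AVG() rather than stored raw; the two timestamps only remember the most recent arrival and dispatch.
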
diff -purN -X /home/mbligh/.diff.exclude 501-ppc64-pci/kernel/sched.c 520-queuestat/kernel/sched.c
--- 501-ppc64-pci/kernel/sched.c	2003-07-28 18:58:52.000000000 -0700
+++ 520-queuestat/kernel/sched.c	2003-07-28 19:01:30.000000000 -0700
@@ -59,6 +59,11 @@
 #define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
 #define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
 
+/* the running averages are kept scaled by FIXED_1 (fixed point) so small values don't get rounded down to zero prematurely */
+#define RAVG_WEIGHT		128
+#define RAVG_FACTOR		(RAVG_WEIGHT*FIXED_1)
+#define RUNNING_AVG(x,y)	(((RAVG_WEIGHT-1)*(x)+RAVG_FACTOR*(y))/RAVG_WEIGHT)
+
 /*
  * These are the 'tuning knobs' of the scheduler:
  *
@@ -184,6 +189,8 @@ struct runqueue {
 	struct list_head migration_queue;
 
 	atomic_t nr_iowait;
+
+	struct sched_info info;
 };
 
 static DEFINE_PER_CPU(struct runqueue, runqueues);
@@ -273,6 +280,74 @@ static inline void task_rq_unlock(runque
 	spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
+static inline void sched_info_arrive(task_t *t)
+{
+	unsigned long now  = jiffies;
+	unsigned long diff = now - t->sched_info.last_arrival;
+	struct runqueue *rq = task_rq(t);
+
+	t->sched_info.inter_arrival_time =
+		RUNNING_AVG(t->sched_info.inter_arrival_time, diff);
+	t->sched_info.last_arrival = now;
+
+	if (!rq)
+		return;
+	diff = now - rq->info.last_arrival;
+	rq->info.inter_arrival_time =
+		RUNNING_AVG(rq->info.inter_arrival_time, diff);
+	rq->info.last_arrival = now;
+}
+
+/* task is leaving the cpu: fold its latest stint into the service-time averages */
+static inline void sched_info_depart(task_t *t)
+{
+	struct runqueue *rq = task_rq(t);
+	unsigned long diff, now = jiffies;
+
+	diff = now - t->sched_info.began_service;
+	t->sched_info.service_time =
+		RUNNING_AVG(t->sched_info.service_time, diff);
+
+	if (!rq)
+		return;
+	diff = now - rq->info.began_service;
+	rq->info.service_time =
+		RUNNING_AVG(rq->info.service_time, diff);
+}
+
+static inline void sched_info_switch(task_t *prev, task_t *next)
+{
+	struct runqueue *rq = task_rq(prev);
+	unsigned long diff, now = jiffies;
+
+	/* prev now departs the cpu */
+	sched_info_depart(prev);
+
+	/* prev is still runnable, i.e. an involuntary switch: it re-arrives on the runqueue */
+	if (prev->state == TASK_RUNNING)
+		sched_info_arrive(prev);
+
+	diff = now - next->sched_info.last_arrival;
+	next->sched_info.response_time =
+		RUNNING_AVG(next->sched_info.response_time, diff);
+	next->sched_info.began_service = now;
+
+	if (!rq)
+		return;
+	/* yes, reusing next's response-time sample for the runqueue is valid */
+	rq->info.response_time =
+		RUNNING_AVG(rq->info.response_time, diff);
+	rq->info.began_service = now;
+
+	if (prev->state != TASK_RUNNING)
+		return;
+	/* prev just re-arrived: fold the gap since the rq's last arrival into its inter-arrival average */
+	diff = now - rq->info.last_arrival;
+	rq->info.inter_arrival_time =
+		RUNNING_AVG(rq->info.inter_arrival_time, diff);
+	rq->info.last_arrival = now;
+}
+
 /*
  * rq_lock - lock a given runqueue and disable interrupts.
  */
@@ -505,15 +580,18 @@ repeat_lock_task:
 				(p->cpus_allowed & (1UL << smp_processor_id())))) {
 
 				set_task_cpu(p, smp_processor_id());
+				sched_info_arrive(p);
 				task_rq_unlock(rq, &flags);
 				goto repeat_lock_task;
 			}
 			if (old_state == TASK_UNINTERRUPTIBLE)
 				rq->nr_uninterruptible--;
-			if (sync)
+			if (sync) {
+				sched_info_arrive(p);
 				__activate_task(p, rq);
-			else {
+			} else {
 				activate_task(p, rq);
+				sched_info_arrive(p);
 				if (p->prio < rq->curr->prio)
 					resched_task(rq->curr);
 			}
@@ -567,6 +645,7 @@ void wake_up_forked_process(task_t * p)
 	p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100;
 	p->prio = effective_prio(p);
 	set_task_cpu(p, smp_processor_id());
+	sched_info_arrive(p);
 
 	if (unlikely(!current->array))
 		__activate_task(p, rq);
@@ -728,6 +807,11 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
+void cpu_sched_info(struct sched_info *info, int cpu)
+{
+	memcpy(info, &cpu_rq(cpu)->info, sizeof(struct sched_info));
+}
+
 /*
  * double_rq_lock - safely lock two runqueues
  *
@@ -1417,6 +1501,7 @@ switch_tasks:
 
 	if (likely(prev != next)) {
 		rq->nr_switches++;
+		sched_info_switch(prev, next);
 		rq->curr = next;
 
 		prepare_arch_switch(rq, next);
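
For a feel of the RUNNING_AVG() arithmetic added at the top of kernel/sched.c, the standalone sketch below iterates the macro outside the kernel. FIXED_1 is redefined here with the 2.6-era value from include/linux/sched.h (1 << 11); the rest is copied from the patch. With a constant input the stored average settles just below RAVG_FACTOR times that input, and after a burst it decays by 127/128 per update instead of being truncated straight to zero, which is what the FIXED_1 scaling buys.

/* standalone illustration only -- not kernel code */
#include <stdio.h>

#define FIXED_1			(1 << 11)	/* 2.6-era value from linux/sched.h */
#define RAVG_WEIGHT		128
#define RAVG_FACTOR		(RAVG_WEIGHT*FIXED_1)
#define RUNNING_AVG(x,y)	(((RAVG_WEIGHT-1)*(x)+RAVG_FACTOR*(y))/RAVG_WEIGHT)

int main(void)
{
	unsigned long avg = 0;
	int i;

	/* constant 5-jiffy samples: the average converges to a fixed point
	 * just below RAVG_FACTOR * 5 = 1310720 */
	for (i = 0; i < 2000; i++)
		avg = RUNNING_AVG(avg, 5);
	printf("steady state for constant 5: %lu\n", avg);

	/* one 100-jiffy burst followed by zeros: the burst fades by
	 * 127/128 per update rather than vanishing on the first division */
	avg = RUNNING_AVG(0UL, 100);
	for (i = 0; i < 500; i++)
		avg = RUNNING_AVG(avg, 0);
	printf("100-jiffy burst after 500 idle updates: %lu\n", avg);
	return 0;
}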