diff -urpN -X /home/fletch/.diff.exclude 250-lkcd_fixups/fs/proc/proc_misc.c 260-percpu_loadavg/fs/proc/proc_misc.c
--- 250-lkcd_fixups/fs/proc/proc_misc.c	Sun Apr 20 21:11:58 2003
+++ 260-percpu_loadavg/fs/proc/proc_misc.c	Sun Apr 20 22:14:44 2003
@@ -134,6 +134,37 @@ static struct vmalloc_info get_vmalloc_i
 	return vmi;
 }
 
+static int real_loadavg_read_proc(char *page, char **start, off_t off,
+				 int count, int *eof, void *data)
+{
+	int a, b, c, cpu;
+	int len;
+
+	a = tasks_running[0] + (FIXED_1/200);
+	b = tasks_running[1] + (FIXED_1/200);
+	c = tasks_running[2] + (FIXED_1/200);
+	len = sprintf(page,"Domain    load1    load5   load15  nr_run/nr_thrd\n");
+	len += sprintf(page+len,"SYSTEM %5d.%02d %5d.%02d %5d.%02d %7ld/%7d\n",
+		LOAD_INT(a), LOAD_FRAC(a),
+		LOAD_INT(b), LOAD_FRAC(b),
+		LOAD_INT(c), LOAD_FRAC(c),
+		nr_running(), nr_threads);
+	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+		if (!cpu_online(cpu))
+			continue;
+		a = cpu_tasks_running[0][cpu] + (FIXED_1/200);
+		b = cpu_tasks_running[1][cpu] + (FIXED_1/200);
+		c = cpu_tasks_running[2][cpu] + (FIXED_1/200);
+		len += sprintf(page+len, "%5d  %5d.%02d %5d.%02d %5d.%02d %7ld/%7d\n",
+			cpu,
+			LOAD_INT(a), LOAD_FRAC(a),
+			LOAD_INT(b), LOAD_FRAC(b),
+			LOAD_INT(c), LOAD_FRAC(c),
+			nr_running_cpu(cpu), nr_threads);
+	}
+	return proc_calc_metrics(page, start, off, count, eof, len);
+}
+
 static int uptime_read_proc(char *page, char **start, off_t off,
 				 int count, int *eof, void *data)
 {
@@ -675,6 +706,7 @@ void __init proc_misc_init(void)
 		int (*read_proc)(char*,char**,off_t,int,int*,void*);
 	} *p, simple_ones[] = {
 		{"loadavg",     loadavg_read_proc},
+		{"real_loadavg",real_loadavg_read_proc},
 		{"uptime",	uptime_read_proc},
 		{"meminfo",	meminfo_read_proc},
 		{"version",	version_read_proc},
diff -urpN -X /home/fletch/.diff.exclude 250-lkcd_fixups/include/linux/sched.h 260-percpu_loadavg/include/linux/sched.h
--- 250-lkcd_fixups/include/linux/sched.h	Sun Apr 20 21:28:58 2003
+++ 260-percpu_loadavg/include/linux/sched.h	Sun Apr 20 22:14:44 2003
@@ -69,7 +69,9 @@ struct exec_domain;
  *    the EXP_n values would be 1981, 2034 and 2043 if still using only
  *    11 bit fractions.
  */
-extern unsigned long avenrun[];		/* Load averages */
+extern unsigned long avenrun[];				/* Load averages */
+extern unsigned long tasks_running[3];			/* Real load averages */
+extern unsigned long cpu_tasks_running[3][NR_CPUS];	/* Real load averages per cpu */
 
 #define FSHIFT		11		/* nr of bits of precision */
 #define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
@@ -91,6 +93,7 @@ extern int last_pid;
 DECLARE_PER_CPU(unsigned long, process_counts);
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
+extern unsigned long nr_running_cpu(int i);
 extern unsigned long nr_uninterruptible(void);
 extern unsigned long nr_iowait(void);
 
diff -urpN -X /home/fletch/.diff.exclude 250-lkcd_fixups/kernel/sched.c 260-percpu_loadavg/kernel/sched.c
--- 250-lkcd_fixups/kernel/sched.c	Sun Apr 20 22:07:31 2003
+++ 260-percpu_loadavg/kernel/sched.c	Sun Apr 20 22:14:44 2003
@@ -815,6 +815,11 @@ unsigned long nr_running(void)
 	return sum;
 }
 
+unsigned long nr_running_cpu(int cpu)
+{
+	return cpu_rq(cpu)->nr_running;
+}
+
 unsigned long nr_uninterruptible(void)
 {
 	unsigned long i, sum = 0;
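Aside, not part of the patch: nr_running_cpu() is an unlocked read of a single
runqueue's counter. Summed over the online CPUs it should roughly reproduce
nr_running(), though the two can disagree transiently; the helper name below is
hypothetical and purely illustrative.

	static unsigned long nr_running_summed(void)
	{
		unsigned long sum = 0;
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
			if (!cpu_online(cpu))
				continue;
			sum += nr_running_cpu(cpu);	/* unlocked per-runqueue snapshot */
		}
		return sum;
	}
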
diff -urpN -X /home/fletch/.diff.exclude 250-lkcd_fixups/kernel/timer.c 260-percpu_loadavg/kernel/timer.c
--- 250-lkcd_fixups/kernel/timer.c	Sun Apr 20 19:35:08 2003
+++ 260-percpu_loadavg/kernel/timer.c	Sun Apr 20 22:14:44 2003
@@ -737,6 +737,8 @@ static unsigned long count_active_tasks(
  * Requires xtime_lock to access.
  */
 unsigned long avenrun[3];
+unsigned long tasks_running[3];
+unsigned long cpu_tasks_running[3][NR_CPUS];
 
 /*
  * calc_load - given tick count, update the avenrun load estimates.
@@ -744,8 +746,9 @@ unsigned long avenrun[3];
  */
 static inline void calc_load(unsigned long ticks)
 {
-	unsigned long active_tasks; /* fixed-point */
+	unsigned long active_tasks, running_tasks; /* fixed-point */
 	static int count = LOAD_FREQ;
+	int cpu;
 
 	count -= ticks;
 	if (count < 0) {
@@ -754,6 +757,18 @@ static inline void calc_load(unsigned lo
 		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
 		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
 		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
+		running_tasks = nr_running() * FIXED_1;
+		CALC_LOAD(tasks_running[0], EXP_1,  running_tasks);
+		CALC_LOAD(tasks_running[1], EXP_5,  running_tasks);
+		CALC_LOAD(tasks_running[2], EXP_15, running_tasks);
+		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+			if (!cpu_online(cpu))
+				continue;
+			running_tasks = nr_running_cpu(cpu) * FIXED_1;
+			CALC_LOAD(cpu_tasks_running[0][cpu], EXP_1,  running_tasks);
+			CALC_LOAD(cpu_tasks_running[1][cpu], EXP_5,  running_tasks);
+			CALC_LOAD(cpu_tasks_running[2][cpu], EXP_15, running_tasks);
+		}
 	}
 }
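
Illustration only, not part of the patch: the per-CPU arrays are decayed with
the same fixed-point CALC_LOAD() scheme as avenrun[]. The sketch below redoes
that arithmetic in userspace with the usual constants from
include/linux/sched.h; the single-expression CALC_LOAD is a compact equivalent
of the kernel's multi-statement macro.

	#include <stdio.h>

	#define FSHIFT	11			/* bits of fixed-point precision */
	#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point */
	#define EXP_1	1884			/* 1/exp(5sec/1min), fixed point */

	#define CALC_LOAD(load, exp, n) \
		(load) = ((load) * (exp) + (n) * (FIXED_1 - (exp))) >> FSHIFT

	#define LOAD_INT(x)  ((x) >> FSHIFT)
	#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

	int main(void)
	{
		unsigned long load = 0;		/* e.g. tasks_running[0] */
		int sample;

		/* Feed "3 runnable tasks" for one minute of 5-second samples. */
		for (sample = 0; sample < 12; sample++)
			CALC_LOAD(load, EXP_1, 3UL * FIXED_1);

		printf("1-minute running-task average: %lu.%02lu\n",
		       LOAD_INT(load), LOAD_FRAC(load));
		return 0;
	}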