From: Nick Piggin <piggin@cyberone.com.au>

Con's smtnice patch added an extra cpu field to struct runqueue.  I don't
think it is really needed: schedule() can compute the CPU once with
smp_processor_id() and pass it down to wake_sleeping_dependent() and
dependent_sleeper() explicitly, which is what this patch does.
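
To illustrate the shape of the change, here is a minimal user-space C
sketch of the same pattern (the struct and function names below are
made up for illustration, not the kernel's): compute the index once in
the caller and thread it through as a parameter, instead of caching it
in the per-CPU structure.

	#include <stdio.h>

	struct rq_like {
		int nr_running;
		/* no cached cpu field; callers pass the CPU explicitly */
	};

	/* stand-in for smp_processor_id(): which CPU are we on? */
	static int current_cpu(void)
	{
		return 0;
	}

	/* callee takes the CPU as an argument instead of rq->cpu */
	static void dependent_work(int cpu, struct rq_like *rq)
	{
		printf("cpu %d: nr_running=%d\n", cpu, rq->nr_running);
	}

	int main(void)
	{
		struct rq_like rq = { .nr_running = 1 };
		int cpu = current_cpu();	/* computed once */

		dependent_work(cpu, &rq);	/* threaded through */
		return 0;
	}

The win is the same as in the patch below: the struct loses a field
that every caller could already compute for itself.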


---

 25-akpm/kernel/sched.c |   31 +++++++++++++++----------------
 1 files changed, 15 insertions(+), 16 deletions(-)

diff -puN kernel/sched.c~sched-no-cpu-in-rq kernel/sched.c
--- 25/kernel/sched.c~sched-no-cpu-in-rq	Fri Mar 12 14:11:31 2004
+++ 25-akpm/kernel/sched.c	Fri Mar 12 14:11:31 2004
@@ -216,7 +216,6 @@ struct runqueue {
 	struct mm_struct *prev_mm;
 	prio_array_t *active, *expired, arrays[2];
 	int best_expired_prio;
-	int cpu;
 	atomic_t nr_iowait;
 
 	/* For active balancing */
@@ -1913,10 +1912,10 @@ out:
 }
 
 #ifdef CONFIG_SCHED_SMT
-static inline void wake_sleeping_dependent(runqueue_t *rq)
+static inline void wake_sleeping_dependent(int cpu, runqueue_t *rq)
 {
-	int i, this_cpu = rq->cpu;
-	struct sched_domain *sd = cpu_sched_domain(this_cpu);
+	int i;
+	struct sched_domain *sd = cpu_sched_domain(cpu);
 	cpumask_t sibling_map;
 
 	if (!(sd->flags & SD_FLAG_SHARE_CPUPOWER)) {
@@ -1925,7 +1924,7 @@ static inline void wake_sleeping_depende
 	}
 
 	cpus_and(sibling_map, sd->span, cpu_online_map);
-	cpu_clear(this_cpu, sibling_map);
+	cpu_clear(cpu, sibling_map);
 	for_each_cpu_mask(i, sibling_map) {
 		runqueue_t *smt_rq;
 
@@ -1940,10 +1939,10 @@ static inline void wake_sleeping_depende
 	}
 }
 
-static inline int dependent_sleeper(runqueue_t *rq, task_t *p)
+static inline int dependent_sleeper(int cpu, runqueue_t *rq, task_t *p)
 {
-	int ret = 0, i, this_cpu = rq->cpu;
-	struct sched_domain *sd = cpu_sched_domain(this_cpu);
+	int ret = 0, i;
+	struct sched_domain *sd = cpu_sched_domain(cpu);
 	cpumask_t sibling_map;
 
 	if (!(sd->flags & SD_FLAG_SHARE_CPUPOWER)) {
@@ -1952,7 +1951,7 @@ static inline int dependent_sleeper(runq
 	}
 
 	cpus_and(sibling_map, sd->span, cpu_online_map);
-	cpu_clear(this_cpu, sibling_map);
+	cpu_clear(cpu, sibling_map);
 	for_each_cpu_mask(i, sibling_map) {
 		runqueue_t *smt_rq;
 		task_t *smt_curr;
@@ -1987,11 +1986,11 @@ static inline int dependent_sleeper(runq
 	return ret;
 }
 #else
-static inline void wake_sleeping_dependent(runqueue_t *rq)
+static inline void wake_sleeping_dependent(int cpu, runqueue_t *rq)
 {
 }
 
-static inline int dependent_sleeper(runqueue_t *rq, task_t *p)
+static inline int dependent_sleeper(int cpu, runqueue_t *rq, task_t *p)
 {
 	return 0;
 }
@@ -2011,7 +2010,7 @@ asmlinkage void schedule(void)
 	struct list_head *queue;
 	unsigned long long now;
 	unsigned long run_time;
-	int idx;
+	int cpu, idx;
 
 	/*
 	 * Test if we are atomic.  Since do_exit() needs to call into
@@ -2061,14 +2060,15 @@ need_resched:
 			deactivate_task(prev, rq);
 	}
 
+	cpu = smp_processor_id();
 	if (unlikely(!rq->nr_running)) {
 #ifdef CONFIG_SMP
-		idle_balance(smp_processor_id(), rq);
+		idle_balance(cpu, rq);
 #endif
 		if (!rq->nr_running) {
 			next = rq->idle;
 			rq->expired_timestamp = 0;
-			wake_sleeping_dependent(rq);
+			wake_sleeping_dependent(cpu, rq);
 			goto switch_tasks;
 		}
 	}
@@ -2089,7 +2089,7 @@ need_resched:
 	queue = array->queue + idx;
 	next = list_entry(queue->next, task_t, run_list);
 
-	if (dependent_sleeper(rq, next)) {
+	if (dependent_sleeper(cpu, rq, next)) {
 		next = rq->idle;
 		goto switch_tasks;
 	}
@@ -3522,7 +3522,6 @@ void __init sched_init(void)
 #endif
 
 		rq = cpu_rq(i);
-		rq->cpu = i;
 		rq->active = rq->arrays;
 		rq->expired = rq->arrays + 1;
 		rq->best_expired_prio = MAX_PRIO;

_