From: David Mosberger <davidm@napali.hpl.hp.com>

Care needs to be taken when taking the address of a CPU-local
variable, because otherwise things may break when comparing addresses
on a platform which uses virtual remapping to implement such
variables.  In particular, it's almost always unsafe to use the
address of a per-CPU variable which contains a "struct list_head",
because the list-manipulation routines compare against the list-head
address to detect the end of the list etc.

The patch below makes 2.5.74+ work on ia64 by reverting to the old
definition of this_rq().  Ditto for kernel/timer.c.



 kernel/sched.c |    2 +-
 kernel/timer.c |    8 ++++----
 2 files changed, 5 insertions(+), 5 deletions(-)

diff -puN kernel/sched.c~ia64-percpu-revert kernel/sched.c
--- 25/kernel/sched.c~ia64-percpu-revert	2003-07-09 16:13:04.000000000 -0700
+++ 25-akpm/kernel/sched.c	2003-07-09 16:13:04.000000000 -0700
@@ -176,7 +176,7 @@ struct runqueue {
 static DEFINE_PER_CPU(struct runqueue, runqueues);
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-#define this_rq()		(&__get_cpu_var(runqueues))
+#define this_rq()		(cpu_rq(smp_processor_id())) /* not __get_cpu_var(runqueues)! */
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define rt_task(p)		((p)->prio < MAX_RT_PRIO)
diff -puN kernel/timer.c~ia64-percpu-revert kernel/timer.c
--- 25/kernel/timer.c~ia64-percpu-revert	2003-07-09 16:13:04.000000000 -0700
+++ 25-akpm/kernel/timer.c	2003-07-09 16:13:04.000000000 -0700
@@ -160,7 +160,7 @@ static void internal_add_timer(tvec_base
  */
 void add_timer(struct timer_list *timer)
 {
-	tvec_base_t *base = &get_cpu_var(tvec_bases);
+	tvec_base_t *base = &per_cpu(tvec_bases, get_cpu());
   	unsigned long flags;
   
   	BUG_ON(timer_pending(timer) || !timer->function);
@@ -171,7 +171,7 @@ void add_timer(struct timer_list *timer)
 	internal_add_timer(base, timer);
 	timer->base = base;
 	spin_unlock_irqrestore(&base->lock, flags);
-	put_cpu_var(tvec_bases);
+	put_cpu();
 }
 
 /***
@@ -234,7 +234,7 @@ int mod_timer(struct timer_list *timer, 
 		return 1;
 
 	spin_lock_irqsave(&timer->lock, flags);
-	new_base = &__get_cpu_var(tvec_bases);
+	new_base = &per_cpu(tvec_bases, smp_processor_id());
 repeat:
 	old_base = timer->base;
 
@@ -792,7 +792,7 @@ seqlock_t xtime_lock __cacheline_aligned
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-	tvec_base_t *base = &__get_cpu_var(tvec_bases);
+	tvec_base_t *base = &per_cpu(tvec_bases, smp_processor_id());
 
 	if (time_after_eq(jiffies, base->timer_jiffies))
 		__run_timers(base);

_