/*
 * Smp support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/residual.h>
#include <asm/time.h>
#include <asm/thread_info.h>
#include <asm/tlbflush.h>
#include <asm/xmon.h>
#include <asm/machdep.h>

volatile int smp_commenced;
int smp_tb_synchronized;		/* set when all CPUs share a synchronized timebase */
struct cpuinfo_PPC cpu_data[NR_CPUS];
atomic_t ipi_recv;			/* total IPIs received, exported via /proc -- see smp_message_recv() */
atomic_t ipi_sent;			/* total IPIs sent -- see smp_message_pass() */
cpumask_t cpu_online_map;
cpumask_t cpu_possible_map;
int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;	/* thread_info the next secondary starts on; consumed by head_*.S */
static struct task_struct *idle_tasks[NR_CPUS];

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* all cpu mappings are 1-1 -- Cort */
volatile unsigned long cpu_callin_map[NR_CPUS];

int start_secondary(void *);
void smp_call_function_interrupt(void);
static int __smp_call_function(void (*func) (void *info), void *info,
			       int wait, int target);

/* Low level assembly function used to backup CPU 0 state */
extern void __save_cpu_setup(void);

/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
 *
 * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
 * in /proc/interrupts will be wrong!!! --Troy */
#define PPC_MSG_CALL_FUNCTION	0
#define PPC_MSG_RESCHEDULE	1
#define PPC_MSG_INVALIDATE_TLB	2
#define PPC_MSG_XMON_BREAK	3

/*
 * Send IPI message `msg' (one of PPC_MSG_*) to `target', which is either a
 * CPU number or one of the MSG_ALL/MSG_ALL_BUT_SELF broadcast selectors.
 * Silently does nothing when no platform smp_ops have been registered
 * (i.e. on a machine without SMP support).
 */
static inline void
smp_message_pass(int target, int msg)
{
	if (smp_ops) {
		atomic_inc(&ipi_sent);
		smp_ops->message_pass(target, msg);
	}
}

/*
 * Common functions
 */
/*
 * IPI demultiplexer, called from the platform's interrupt handler on the
 * receiving CPU.  Dispatches on the PPC_MSG_* number delivered with the IPI.
 */
void smp_message_recv(int msg)
{
	atomic_inc(&ipi_recv);

	switch( msg ) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		set_need_resched();
		break;
	case PPC_MSG_INVALIDATE_TLB:
		_tlbia();
		break;
#ifdef CONFIG_XMON
	case PPC_MSG_XMON_BREAK:
		xmon(get_irq_regs());
		break;
#endif /* CONFIG_XMON */
	default:
		/* unknown message number -- log it rather than crash */
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}

/*
 * 750's don't broadcast tlb invalidates so
 * we have to emulate that behavior.
 * -- Cort
 */
/*
 * NOTE(review): the `cpu' argument is unused -- the invalidate is always
 * broadcast to all other CPUs.  PVR_VER() == 8 identifies the 750 family.
 */
void smp_send_tlb_invalidate(int cpu)
{
	if ( PVR_VER(mfspr(SPRN_PVR)) == 8 )
		smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_INVALIDATE_TLB);
}

/*
 * Kick `cpu' so it runs through the scheduler.
 */
void smp_send_reschedule(int cpu)
{
	/*
	 * This is only used if `cpu' is running an idle task,
	 * so it will reschedule itself anyway...
	 *
	 * This isn't the case anymore since the other CPU could be
	 * sleeping and won't reschedule until the next interrupt (such
	 * as the timer), so we send the IPI unconditionally.
	 * -- Cort
	 */
	smp_message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_XMON
/* Drop `cpu' into the xmon kernel debugger via IPI. */
void smp_send_xmon_break(int cpu)
{
	smp_message_pass(cpu, PPC_MSG_XMON_BREAK);
}
#endif /* CONFIG_XMON */

/* Cross-call target for smp_send_stop(): park this CPU forever. */
static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	while (1)
		;
}

/* Halt all other CPUs (used on panic/reboot paths). */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 * Stolen from the i386 version.
 */
static DEFINE_SPINLOCK(call_lock);

/*
 * Shared descriptor for a cross-call in flight.  Published (under
 * call_lock) by __smp_call_function() and read lock-free by the
 * receiving CPUs in smp_call_function_interrupt().
 */
static struct call_data_struct {
	void (*func) (void *info);	/* function to run on each target CPU */
	void *info;			/* opaque argument passed to func */
	atomic_t started;		/* CPUs that have picked up func/info */
	atomic_t finished;		/* CPUs that have completed func (only if wait) */
	int wait;			/* caller blocks until all CPUs finish */
} *call_data;

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
	/* nothing to do on a uniprocessor (or before secondaries come up) */
	if (num_online_cpus() <= 1)
		return 0;
	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());
	return __smp_call_function(func, info, wait, MSG_ALL_BUT_SELF);
}

/*
 * Worker for smp_call_function(): publish the call descriptor under
 * call_lock, send the IPI to `target' (a CPU number or MSG_ALL /
 * MSG_ALL_BUT_SELF), then spin -- bounded by ~1s of udelay(1) polls --
 * until the expected number of CPUs have started (and, if `wait',
 * finished) the function.  Returns 0 on success, -1 on timeout.
 */
static int __smp_call_function(void (*func) (void *info), void *info,
			       int wait, int target)
{
	struct call_data_struct data;
	int ret = -1;
	int timeout;
	int ncpus = 1;	/* default: targeting exactly one specific CPU */

	if (target == MSG_ALL_BUT_SELF)
		ncpus = num_online_cpus() - 1;
	else if (target == MSG_ALL)
		ncpus = num_online_cpus();

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	/* call_lock serializes cross-calls and protects call_data */
	spin_lock(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_message_pass(target, PPC_MSG_CALL_FUNCTION);

	/* Wait for response */
	timeout = 1000000;
	while (atomic_read(&data.started) != ncpus) {
		if (--timeout == 0) {
			printk("smp_call_function on cpu %d: other cpus not responding (%d)\n",
			       smp_processor_id(), atomic_read(&data.started));
			goto out;
		}
		barrier();
		udelay(1);
	}

	if (wait) {
		timeout = 1000000;
		while (atomic_read(&data.finished) != ncpus) {
			if (--timeout == 0) {
				printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n",
				       smp_processor_id(), atomic_read(&data.finished), atomic_read(&data.started));
				goto out;
			}
			barrier();
			udelay(1);
		}
	}
	ret = 0;

 out:
	spin_unlock(&call_lock);
	return ret;
}

/*
 * Receiving side of a PPC_MSG_CALL_FUNCTION IPI: snapshot the shared
 * call_data, acknowledge via `started', run the function, and (if the
 * initiator is waiting) signal completion via `finished'.
 */
void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of
	 * scope unless wait==1
	 */
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}

/* Record per-CPU info (PVR, bogomips) for CPU `id' in cpu_data[]. */
static void __devinit smp_store_cpu_info(int id)
{
	struct cpuinfo_PPC *c = &cpu_data[id];

	/* assume bogomips are same for everything */
	c->loops_per_jiffy = loops_per_jiffy;
	c->pvr = mfspr(SPRN_PVR);
}

/*
 * Probe the platform for secondary CPUs, mark them possible, and fork an
 * idle task for each.  Called once from init on the boot CPU; bails out
 * early (leaving only the boot CPU) when the platform has no smp_ops.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int num_cpus, i, cpu;
	struct task_struct *p;

	/* Fixup boot cpu */
	smp_store_cpu_info(smp_processor_id());
	cpu_callin_map[smp_processor_id()] = 1;

	if (smp_ops == NULL) {
		printk("SMP not supported on this machine.\n");
		return;
	}

	/* Probe platform for CPUs: always linear. */
	num_cpus = smp_ops->probe();

	/* with a single CPU there is nothing to synchronize the timebase with */
	if (num_cpus < 2)
		smp_tb_synchronized = 1;

	for (i = 0; i < num_cpus; ++i)
		cpu_set(i, cpu_possible_map);

	/* Backup CPU 0 state */
	__save_cpu_setup();

	for_each_possible_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/* create a process for the processor */
		p = fork_idle(cpu);
		if (IS_ERR(p))
			panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
		task_thread_info(p)->cpu = cpu;
		idle_tasks[cpu] = p;
	}
}

/* Mark the boot CPU online and possible before any secondaries start. */
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_possible_map);
}

/* Profiling multiplier is not adjustable on ppc; accept and ignore. */
int __init setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

/* Processor coming up starts here */
/*
 * Entry point for a secondary CPU (invoked from the platform boot
 * trampoline).  Adopts init_mm, reports in via cpu_callin_map so
 * __cpu_up() can see us, syncs the timebase with the boot CPU, marks
 * itself online, and drops into the idle loop.  Never returns.
 */
int __devinit start_secondary(void *unused)
{
	int cpu;

	/* run on the kernel's address space; pin init_mm */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);	/* arm the decrementer for timer ticks */
	preempt_disable();
	cpu_callin_map[cpu] = 1;	/* tell __cpu_up() we made it */

	printk("CPU %d done callin...\n", cpu);
	smp_ops->setup_cpu(cpu);
	printk("CPU %d done setup...\n", cpu);
	smp_ops->take_timebase();	/* pairs with give_timebase() in __cpu_up() */
	printk("CPU %d done timebase take...\n", cpu);

	/*
	 * Take call_lock so we don't appear in cpu_online_map in the
	 * middle of a smp_call_function() that didn't target us.
	 */
	spin_lock(&call_lock);
	cpu_set(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	local_irq_enable();

	cpu_idle();
	return 0;
}

/*
 * Bring up secondary CPU `cpu': hand it its idle thread via secondary_ti,
 * kick it, poll cpu_callin_map for up to ~100ms, hand over the timebase,
 * and wait for the CPU to put itself in cpu_online_map.
 * Returns 0 on success or -ENOENT if the CPU never called in.
 */
int __cpu_up(unsigned int cpu)
{
	char buf[32];
	int c;

	secondary_ti = task_thread_info(idle_tasks[cpu]);
	mb();	/* ensure secondary_ti is visible before the CPU is kicked */

	/*
	 * There was a cache flush loop here to flush the cache
	 * to memory for the first 8MB of RAM.  The cache flush
	 * has been pushed into the kick_cpu function for those
	 * platforms that need it.
	 */

	/* wake up cpu */
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	for (c = 1000; c && !cpu_callin_map[cpu]; c--)
		udelay(100);

	if (!cpu_callin_map[cpu]) {
		sprintf(buf, "didn't find cpu %u", cpu);
		if (ppc_md.progress) ppc_md.progress(buf, 0x360+cpu);
		printk("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	sprintf(buf, "found cpu %u", cpu);
	if (ppc_md.progress) ppc_md.progress(buf, 0x350+cpu);
	printk("Processor %d found.\n", cpu);

	/* pairs with take_timebase() in start_secondary() */
	smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/* Final per-platform fixup for the boot CPU once all CPUs are up. */
void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops->setup_cpu(0);
}