/* sun4m_smp.c: Sparc SUN4M SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/head.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/profile.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/irq_regs.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cpudata.h>

#define IRQ_RESCHEDULE		13
#define IRQ_STOP_CPU		14
#define IRQ_CROSS_CALL		15

extern ctxd_t *srmmu_ctx_table_phys;

extern void calibrate_delay(void);

extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned char boot_cpu_id;

extern cpumask_t smp_commenced_mask;

extern int __smp4m_processor_id(void);

/*#define SMP_DEBUG*/

#ifdef SMP_DEBUG
#define SMP_PRINTK(x)	printk x
#else
#define SMP_PRINTK(x)
#endif

static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
{
	__asm__ __volatile__("swap [%1], %0\n\t" :
			     "=&r" (val), "=&r" (ptr) :
			     "0" (val), "1" (ptr));
	return val;
}

static void smp_setup_percpu_timer(void);
extern void cpu_probe(void);

void __cpuinit smp4m_callin(void)
{
	int cpuid = hard_smp_processor_id();

	local_flush_cache_all();
	local_flush_tlb_all();

	/* Get our local ticker going. */
	smp_setup_percpu_timer();

	calibrate_delay();
	smp_store_cpu_info(cpuid);

	local_flush_cache_all();
	local_flush_tlb_all();

	/*
	 * Unblock the master CPU _only_ when the scheduler state of all
	 * secondary CPUs is up-to-date, so that after SMP initialization
	 * the master can safely call into the scheduler code.
	 */
	/* Allow master to continue. */
	swap(&cpu_callin_map[cpuid], 1);

	local_flush_cache_all();
	local_flush_tlb_all();

	cpu_probe();

	/* Fix idle thread fields. */
	__asm__ __volatile__("ld [%0], %%g6\n\t"
			     : : "r" (&current_set[cpuid])
			     : "memory" /* paranoid */);

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		mb();

	local_irq_enable();

	cpu_set(cpuid, cpu_online_map);
}

/*
 * Cycle through the processors asking the PROM to start each one.
 */

extern struct linux_prom_registers smp_penguin_ctable;
extern unsigned long trapbase_cpu1[];
extern unsigned long trapbase_cpu2[];
extern unsigned long trapbase_cpu3[];

void __init smp4m_boot_cpus(void)
{
	smp_setup_percpu_timer();
	local_flush_cache_all();
}

int __cpuinit smp4m_boot_one_cpu(int i)
{
	extern unsigned long sun4m_cpu_startup;
	unsigned long *entry = &sun4m_cpu_startup;
	struct task_struct *p;
	int timeout;
	int cpu_node;

	cpu_find_by_mid(i, &cpu_node);

	/* Cook up an idler for this guy. */
	p = fork_idle(i);
	current_set[i] = task_thread_info(p);
	/* See trampoline.S for details... */
	entry += ((i - 1) * 3);

	/*
	 * Initialize the contexts table.  Since the call to prom_startcpu()
	 * trashes the structure, we need to re-initialize it for each cpu.
	 */
	smp_penguin_ctable.which_io = 0;
	smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
	smp_penguin_ctable.reg_size = 0;

	/* whirrr, whirrr, whirrrrrrrrr... */
	printk("Starting CPU %d at %p\n", i, entry);
	local_flush_cache_all();
	prom_startcpu(cpu_node,
		      &smp_penguin_ctable, 0, (char *)entry);

	/* wheee... it's going... */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_callin_map[i])
			break;
		udelay(200);
	}

	if (!(cpu_callin_map[i])) {
		printk("Processor %d is stuck.\n", i);
		return -ENODEV;
	}

	local_flush_cache_all();
	return 0;
}

void __init smp4m_smp_done(void)
{
	int i, first;
	int *prev;

	/* Set up the cpu list for irq rotation. */
	first = 0;
	prev = &first;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i)) {
			*prev = i;
			prev = &cpu_data(i).next;
		}
	}
	*prev = first;
	local_flush_cache_all();

	/* Free unneeded trap tables */
	if (!cpu_isset(1, cpu_present_map)) {
		ClearPageReserved(virt_to_page(trapbase_cpu1));
		init_page_count(virt_to_page(trapbase_cpu1));
		free_page((unsigned long)trapbase_cpu1);
		totalram_pages++;
		num_physpages++;
	}
	if (!cpu_isset(2, cpu_present_map)) {
		ClearPageReserved(virt_to_page(trapbase_cpu2));
		init_page_count(virt_to_page(trapbase_cpu2));
		free_page((unsigned long)trapbase_cpu2);
		totalram_pages++;
		num_physpages++;
	}
	if (!cpu_isset(3, cpu_present_map)) {
		ClearPageReserved(virt_to_page(trapbase_cpu3));
		init_page_count(virt_to_page(trapbase_cpu3));
		free_page((unsigned long)trapbase_cpu3);
		totalram_pages++;
		num_physpages++;
	}

	/* Ok, they are spinning and ready to go. */
}

void smp4m_irq_rotate(int cpu)
{
	int next = cpu_data(cpu).next;

	if (next != cpu)
		set_irq_udt(next);
}

/* Cross calls, in order to work efficiently and atomically, do all
 * the message passing work themselves; only stopcpu and reschedule
 * messages come through here.
 */
void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
{
	static unsigned long smp_cpu_in_msg[NR_CPUS];
	cpumask_t mask;
	int me = smp_processor_id();
	int irq, i;

	if (msg == MSG_RESCHEDULE) {
		irq = IRQ_RESCHEDULE;

		if (smp_cpu_in_msg[me])
			return;
	} else if (msg == MSG_STOP_CPU) {
		irq = IRQ_STOP_CPU;
	} else {
		goto barf;
	}

	smp_cpu_in_msg[me]++;
	if (target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
		mask = cpu_online_map;
		if (target == MSG_ALL_BUT_SELF)
			cpu_clear(me, mask);
		/* sun4m has at most four cpus (SUN4M_NCPUS). */
		for (i = 0; i < 4; i++) {
			if (cpu_isset(i, mask))
				set_cpu_int(i, irq);
		}
	} else {
		set_cpu_int(target, irq);
	}
	smp_cpu_in_msg[me]--;

	return;
barf:
	printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me);
	panic("Bogon SMP message pass.");
}

static struct smp_funcall {
	smpfunc_t func;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;
	unsigned long processors_in[SUN4M_NCPUS];  /* Set when ipi entered. */
	unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
} ccall_info;

static DEFINE_SPINLOCK(cross_call_lock);

/* Cross calls must be serialized, at least currently. */
void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
		      unsigned long arg3, unsigned long arg4,
		      unsigned long arg5)
{
	register int ncpus = SUN4M_NCPUS;
	unsigned long flags;

	spin_lock_irqsave(&cross_call_lock, flags);

	/* Init function glue. */
	ccall_info.func = func;
	ccall_info.arg1 = arg1;
	ccall_info.arg2 = arg2;
	ccall_info.arg3 = arg3;
	ccall_info.arg4 = arg4;
	ccall_info.arg5 = arg5;

	/* Init receive/complete mapping, plus fire the IPI's off. */
	{
		cpumask_t mask = cpu_online_map;
		register int i;

		cpu_clear(smp_processor_id(), mask);
		for (i = 0; i < ncpus; i++) {
			if (cpu_isset(i, mask)) {
				ccall_info.processors_in[i] = 0;
				ccall_info.processors_out[i] = 0;
				set_cpu_int(i, IRQ_CROSS_CALL);
			} else {
				ccall_info.processors_in[i] = 1;
				ccall_info.processors_out[i] = 1;
			}
		}
	}

	{
		register int i;

		i = 0;
		do {
			while (!ccall_info.processors_in[i])
				barrier();
		} while (++i < ncpus);

		i = 0;
		do {
			while (!ccall_info.processors_out[i])
				barrier();
		} while (++i < ncpus);
	}

	spin_unlock_irqrestore(&cross_call_lock, flags);
}

/* Running cross calls. */
void smp4m_cross_call_irq(void)
{
	int i = smp_processor_id();

	ccall_info.processors_in[i] = 1;
	ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
			ccall_info.arg4, ccall_info.arg5);
	ccall_info.processors_out[i] = 1;
}

void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();

	old_regs = set_irq_regs(regs);

	clear_profile_irq(cpu);

	profile_tick(CPU_PROFILING);

	if (!--prof_counter(cpu)) {
		int user = user_mode(regs);

		irq_enter();
		update_process_times(user);
		irq_exit();

		prof_counter(cpu) = prof_multiplier(cpu);
	}
	set_irq_regs(old_regs);
}

extern unsigned int lvl14_resolution;

/* Called both from the boot cpu setup path and from each secondary
 * cpu's __cpuinit callin path, so it must not be marked __init.
 */
static void __cpuinit smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();

	prof_counter(cpu) = prof_multiplier(cpu) = 1;
	load_profile_irq(cpu, lvl14_resolution);

	if (cpu == boot_cpu_id)
		enable_pil_irq(14);
}

/* Patch the btfixup call sites with instructions that derive the cpu
 * id directly from the trap base register (%tbr).
 */
void __init smp4m_blackbox_id(unsigned *addr)
{
	int rd = *addr & 0x3e000000;
	int rs1 = rd >> 11;

	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
	addr[1] = 0x8130200c | rd | rs1;	/* srl reg, 0xc, reg */
	addr[2] = 0x80082003 | rd | rs1;	/* and reg, 3, reg */
}

void __init smp4m_blackbox_current(unsigned *addr)
{
	int rd = *addr & 0x3e000000;
	int rs1 = rd >> 11;

	addr[0] = 0x81580000 | rd;		/* rd %tbr, reg */
	addr[2] = 0x8130200a | rd | rs1;	/* srl reg, 0xa, reg */
	addr[4] = 0x8008200c | rd | rs1;	/* and reg, 0xc, reg */
}

void __init sun4m_init_smp(void)
{
	BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
	BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
	BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id,
			BTFIXUPCALL_NORM);
}
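
/* Illustrative sketch only, kept out of the build with #if 0: how a
 * caller would typically reach smp4m_cross_call() through the generic
 * smp_cross_call() entry point registered in sun4m_init_smp() above.
 * Both functions below are hypothetical examples and are not part of
 * this file's interface.
 */
#if 0
static void example_remote_func(unsigned long a1, unsigned long a2,
				unsigned long a3, unsigned long a4,
				unsigned long a5)
{
	/* Runs on each of the other online cpus, invoked from the
	 * IRQ_CROSS_CALL handler (smp4m_cross_call_irq), so it runs
	 * in interrupt context and must not sleep.
	 */
}

static void example_caller(void)
{
	/* Serialized by cross_call_lock; the calling cpu spins until
	 * every target cpu has both entered (processors_in) and left
	 * (processors_out) the handler.
	 */
	smp_cross_call(example_remote_func, 0, 0, 0, 0, 0);
}
#endif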