mp_machdep.c revision 223485
/*-
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/mp_machdep.c 223485 2011-06-23 22:21:28Z nwhitehorn $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/md_var.h>
#include <machine/smp.h>

#include "pic_if.h"

extern struct pcpu __pcpu[MAXCPU];

volatile static int ap_awake;
volatile static u_int ap_letgo;
volatile static u_quad_t ap_timebase;
static u_int ipi_msg_cnt[32];
static struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

void
machdep_ap_bootstrap(void)
{
        /* Set up important bits on the CPU (HID registers, etc.) */
        cpudep_ap_setup();

        /* Set PIR */
        PCPU_SET(pir, mfspr(SPR_PIR));
        PCPU_SET(awake, 1);
        __asm __volatile("msync; isync");

        while (ap_letgo == 0)
                ;

        /* Initialize DEC and TB, sync with the BSP values */
#ifdef __powerpc64__
        /* Writing to the time base register is hypervisor-privileged */
        if (mfmsr() & PSL_HV)
                mttb(ap_timebase);
#else
        mttb(ap_timebase);
#endif
        decr_ap_init();

        /* Serialize console output and AP count increment */
        mtx_lock_spin(&ap_boot_mtx);
        ap_awake++;
        printf("SMP: AP CPU #%d launched\n", PCPU_GET(cpuid));
        mtx_unlock_spin(&ap_boot_mtx);

        /* Start per-CPU event timers. */
        cpu_initclocks_ap();

        /* Announce ourselves awake, and enter the scheduler */
        sched_throw(NULL);
}
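
/*
 * A rough schematic of the startup handshake between the BSP side
 * (cpu_mp_unleash() below) and the AP side (machdep_ap_bootstrap()
 * above); all names refer to code in this file:
 *
 *      BSP                                   AP
 *      ---                                   --
 *      platform_smp_start_cpu(pc)     --->   cpudep_ap_setup()
 *      poll pc->pc_awake                     PCPU_SET(awake, 1); msync/isync
 *      ap_timebase = mftb() + 10             spin while ap_letgo == 0
 *      atomic_store_rel_int(&ap_letgo, 1)    mttb(ap_timebase), if permitted
 *      mttb(ap_timebase)                     decr_ap_init(); ap_awake++
 *      spin while ap_awake < smp_cpus        sched_throw(NULL)
 */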

void
cpu_mp_setmaxid(void)
{
        struct cpuref cpuref;
        int error;

        mp_ncpus = 0;
        error = platform_smp_first_cpu(&cpuref);
        while (!error) {
                mp_ncpus++;
                error = platform_smp_next_cpu(&cpuref);
        }
        /* Sanity. */
        if (mp_ncpus == 0)
                mp_ncpus = 1;

        /*
         * Set the largest cpuid we're going to use. This is necessary
         * for VM initialization.
         */
        mp_maxid = min(mp_ncpus, MAXCPU) - 1;
}

int
cpu_mp_probe(void)
{

        /*
         * We're not going to enable SMP if there's only 1 processor.
         */
        return (mp_ncpus > 1);
}

void
cpu_mp_start(void)
{
        struct cpuref bsp, cpu;
        struct pcpu *pc;
        int error;

        error = platform_smp_get_bsp(&bsp);
        KASSERT(error == 0, ("Don't know BSP"));
        KASSERT(bsp.cr_cpuid == 0, ("%s: cpuid != 0", __func__));

        error = platform_smp_first_cpu(&cpu);
        while (!error) {
                if (cpu.cr_cpuid >= MAXCPU) {
                        printf("SMP: cpu%d: skipped -- ID out of range\n",
                            cpu.cr_cpuid);
                        goto next;
                }
                if (CPU_ISSET(cpu.cr_cpuid, &all_cpus)) {
                        printf("SMP: cpu%d: skipped - duplicate ID\n",
                            cpu.cr_cpuid);
                        goto next;
                }
                if (cpu.cr_cpuid != bsp.cr_cpuid) {
                        void *dpcpu;

                        pc = &__pcpu[cpu.cr_cpuid];
                        dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE);
                        pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
                        dpcpu_init(dpcpu, cpu.cr_cpuid);
                } else {
                        pc = pcpup;
                        pc->pc_cpuid = bsp.cr_cpuid;
                        pc->pc_bsp = 1;
                }
                CPU_SETOF(pc->pc_cpuid, &pc->pc_cpumask);
                pc->pc_hwref = cpu.cr_hwref;
                CPU_OR(&all_cpus, &pc->pc_cpumask);
next:
                error = platform_smp_next_cpu(&cpu);
        }
}

void
cpu_mp_announce(void)
{
        struct pcpu *pc;
        int i;

        for (i = 0; i <= mp_maxid; i++) {
                pc = pcpu_find(i);
                if (pc == NULL)
                        continue;
                printf("cpu%d: dev=%x", i, (int)pc->pc_hwref);
                if (pc->pc_bsp)
                        printf(" (BSP)");
                printf("\n");
        }
}

static void
cpu_mp_unleash(void *dummy)
{
        struct pcpu *pc;
        int cpus, timeout;

        if (mp_ncpus <= 1)
                return;

        mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

        cpus = 0;
        smp_cpus = 0;
        STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                cpus++;
                pc->pc_other_cpus = all_cpus;
                CPU_NAND(&pc->pc_other_cpus, &pc->pc_cpumask);
                if (!pc->pc_bsp) {
                        if (bootverbose)
                                printf("Waking up CPU %d (dev=%x)\n",
                                    pc->pc_cpuid, (int)pc->pc_hwref);

                        platform_smp_start_cpu(pc);

                        timeout = 2000; /* wait 2sec for the AP */
                        while (!pc->pc_awake && --timeout > 0)
                                DELAY(1000);

                } else {
                        PCPU_SET(pir, mfspr(SPR_PIR));
                        pc->pc_awake = 1;
                }
                if (pc->pc_awake) {
                        if (bootverbose)
                                printf("Adding CPU %d, pir=%x, awake=%x\n",
                                    pc->pc_cpuid, pc->pc_pir, pc->pc_awake);
                        smp_cpus++;
                } else
                        CPU_SET(pc->pc_cpuid, &stopped_cpus);
        }

        ap_awake = 1;

        /* Provide our current DEC and TB values for APs */
        ap_timebase = mftb() + 10;
        __asm __volatile("msync; isync");

        /* Let APs continue */
        atomic_store_rel_int(&ap_letgo, 1);

#ifdef __powerpc64__
        /* Writing to the time base register is hypervisor-privileged */
        if (mfmsr() & PSL_HV)
                mttb(ap_timebase);
#else
        mttb(ap_timebase);
#endif

        while (ap_awake < smp_cpus)
                ;

        if (smp_cpus != cpus || cpus != mp_ncpus) {
                printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
                    mp_ncpus, cpus, smp_cpus);
        }

        /* Let the APs get into the scheduler */
        DELAY(10000);

        smp_active = 1;
        smp_started = 1;
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
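
/*
 * IPI delivery below is a two-step protocol: the sender sets the message
 * bit in the target's pc_ipimask and then pokes the PIC (ipi_send()),
 * and the target's interrupt filter drains the entire mask in one pass
 * (powerpc_ipi_handler()).  A consequence, sketched here with
 * illustrative values, is that identical messages posted before the
 * target gets to run coalesce into a single bit:
 *
 *      atomic_set_32(&pc->pc_ipimask, 1 << IPI_PREEMPT);
 *      atomic_set_32(&pc->pc_ipimask, 1 << IPI_PREEMPT);
 *
 * leaves one IPI_PREEMPT bit set, so the handler runs that case once,
 * no matter how many times the message was posted.
 */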

int
powerpc_ipi_handler(void *arg)
{
        cpuset_t self;
        uint32_t ipimask;
        int msg;

        CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr());

        ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
        if (ipimask == 0)
                return (FILTER_STRAY);
        while ((msg = ffs(ipimask) - 1) != -1) {
                ipimask &= ~(1u << msg);
                ipi_msg_cnt[msg]++;
                switch (msg) {
                case IPI_AST:
                        CTR1(KTR_SMP, "%s: IPI_AST", __func__);
                        break;
                case IPI_PREEMPT:
                        CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
                        sched_preempt(curthread);
                        break;
                case IPI_RENDEZVOUS:
                        CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__);
                        smp_rendezvous_action();
                        break;
                case IPI_STOP:

                        /*
                         * IPI_STOP_HARD is mapped to IPI_STOP so it is not
                         * necessary to add a separate case in the switch.
                         */
                        CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)",
                            __func__);
                        savectx(&stoppcbs[PCPU_GET(cpuid)]);
                        self = PCPU_GET(cpumask);
                        savectx(PCPU_GET(curpcb));
                        CPU_OR_ATOMIC(&stopped_cpus, &self);
                        while (!CPU_OVERLAP(&started_cpus, &self))
                                cpu_spinwait();
                        CPU_NAND_ATOMIC(&started_cpus, &self);
                        CPU_NAND_ATOMIC(&stopped_cpus, &self);
                        CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
                        break;
                case IPI_HARDCLOCK:
                        CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
                        hardclockintr();
                        break;
                }
        }

        return (FILTER_HANDLED);
}

static void
ipi_send(struct pcpu *pc, int ipi)
{

        CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__,
            pc, pc->pc_cpuid, ipi);

        atomic_set_32(&pc->pc_ipimask, (1 << ipi));
        PIC_IPI(root_pic, pc->pc_cpuid);

        CTR1(KTR_SMP, "%s: sent", __func__);
}

/* Send an IPI to a set of cpus. */
void
ipi_selected(cpuset_t cpus, int ipi)
{
        struct pcpu *pc;

        STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if (CPU_OVERLAP(&cpus, &pc->pc_cpumask))
                        ipi_send(pc, ipi);
        }
}

/* Send an IPI to a specific CPU. */
void
ipi_cpu(int cpu, u_int ipi)
{

        ipi_send(cpuid_to_pcpu[cpu], ipi);
}

/* Send an IPI to all CPUs EXCEPT myself. */
void
ipi_all_but_self(int ipi)
{
        struct pcpu *pc;

        STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
                if (pc != pcpup)
                        ipi_send(pc, ipi);
        }
}
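
/*
 * Usage sketch for the senders above.  The caller below is hypothetical
 * (example_kick() does not exist in the tree); it only illustrates how
 * the three entry points are meant to be driven:
 *
 *      static void
 *      example_kick(void)
 *      {
 *              cpuset_t set;
 *
 *              ipi_cpu(1, IPI_PREEMPT);           single CPU by ID
 *
 *              CPU_ZERO(&set);
 *              CPU_SET(2, &set);
 *              CPU_SET(3, &set);
 *              ipi_selected(set, IPI_AST);        every CPU in a set
 *
 *              ipi_all_but_self(IPI_RENDEZVOUS);  everyone but this CPU
 *      }
 */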