/*-
 * Copyright (c) 2008 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/powerpc/mp_machdep.c 293636 2016-01-10 16:42:14Z nwhitehorn $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr_machdep.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/md_var.h>
#include <machine/setjmp.h>
#include <machine/smp.h>

#include "pic_if.h"

extern struct pcpu __pcpu[MAXCPU];

volatile static int ap_awake;
volatile static u_int ap_letgo;
volatile static u_quad_t ap_timebase;
static u_int ipi_msg_cnt[32];
static struct mtx ap_boot_mtx;
struct pcb stoppcbs[MAXCPU];

void
machdep_ap_bootstrap(void)
{

	/* Set PIR */
	PCPU_SET(pir, mfspr(SPR_PIR));
	PCPU_SET(awake, 1);
	__asm __volatile("msync; isync");

	while (ap_letgo == 0)
		;

	/* Initialize DEC and TB, sync with the BSP values */
#ifdef __powerpc64__
	/* Writing to the time base register is hypervisor-privileged */
	if (mfmsr() & PSL_HV)
		mttb(ap_timebase);
#else
	mttb(ap_timebase);
#endif
	decr_ap_init();

	/* Give platform code a chance to do anything necessary */
	platform_smp_ap_init();

	/* Serialize console output and AP count increment */
	mtx_lock_spin(&ap_boot_mtx);
	ap_awake++;
	printf("SMP: AP CPU #%d launched\n", PCPU_GET(cpuid));
	mtx_unlock_spin(&ap_boot_mtx);

	/* Start per-CPU event timers. */
	cpu_initclocks_ap();

	/* Announce ourselves awake, and enter the scheduler */
	sched_throw(NULL);
}
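
/*
 * AP bringup handshake (machdep_ap_bootstrap() above, cpu_mp_unleash()
 * below):
 *
 * 1. The BSP wakes each AP with platform_smp_start_cpu() and polls the
 *    AP's pc_awake flag for up to two seconds.
 * 2. The AP enters machdep_ap_bootstrap(), records its PIR, sets
 *    pc_awake, and spins on ap_letgo.
 * 3. The BSP publishes ap_timebase and releases the APs with
 *    atomic_store_rel_int(&ap_letgo, 1); the BSP and APs then all load
 *    the shared timebase value (on powerpc64 only when PSL_HV is set,
 *    because the time base is hypervisor-privileged).
 * 4. Each AP bumps ap_awake under ap_boot_mtx; the BSP spins until
 *    ap_awake reaches smp_cpus before declaring SMP started.
 */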

void
cpu_mp_setmaxid(void)
{
	struct cpuref cpuref;
	int error;

	mp_ncpus = 0;
	error = platform_smp_first_cpu(&cpuref);
	while (!error) {
		mp_ncpus++;
		error = platform_smp_next_cpu(&cpuref);
	}
	/* Sanity. */
	if (mp_ncpus == 0)
		mp_ncpus = 1;

	/*
	 * Set the largest cpuid we're going to use. This is necessary
	 * for VM initialization.
	 */
	mp_maxid = min(mp_ncpus, MAXCPU) - 1;
}

int
cpu_mp_probe(void)
{

	/*
	 * We're not going to enable SMP if there's only 1 processor.
	 */
	return (mp_ncpus > 1);
}

void
cpu_mp_start(void)
{
	struct cpuref bsp, cpu;
	struct pcpu *pc;
	int error;

	error = platform_smp_get_bsp(&bsp);
	KASSERT(error == 0, ("Don't know BSP"));
	KASSERT(bsp.cr_cpuid == 0, ("%s: cpuid != 0", __func__));

	error = platform_smp_first_cpu(&cpu);
	while (!error) {
		if (cpu.cr_cpuid >= MAXCPU) {
			printf("SMP: cpu%d: skipped -- ID out of range\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (CPU_ISSET(cpu.cr_cpuid, &all_cpus)) {
			printf("SMP: cpu%d: skipped - duplicate ID\n",
			    cpu.cr_cpuid);
			goto next;
		}
		if (cpu.cr_cpuid != bsp.cr_cpuid) {
			void *dpcpu;

			pc = &__pcpu[cpu.cr_cpuid];
			dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
			    M_WAITOK | M_ZERO);
			pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
			dpcpu_init(dpcpu, cpu.cr_cpuid);
		} else {
			pc = pcpup;
			pc->pc_cpuid = bsp.cr_cpuid;
			pc->pc_bsp = 1;
		}
		pc->pc_hwref = cpu.cr_hwref;
		CPU_SET(pc->pc_cpuid, &all_cpus);
next:
		error = platform_smp_next_cpu(&cpu);
	}
}

void
cpu_mp_announce(void)
{
	struct pcpu *pc;
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		pc = pcpu_find(i);
		if (pc == NULL)
			continue;
		printf("cpu%d: dev=%x", i, (int)pc->pc_hwref);
		if (pc->pc_bsp)
			printf(" (BSP)");
		printf("\n");
	}
}

static void
cpu_mp_unleash(void *dummy)
{
	struct pcpu *pc;
	int cpus, timeout;

	if (mp_ncpus <= 1)
		return;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	cpus = 0;
	smp_cpus = 0;
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		cpus++;
		if (!pc->pc_bsp) {
			if (bootverbose)
				printf("Waking up CPU %d (dev=%x)\n",
				    pc->pc_cpuid, (int)pc->pc_hwref);

			platform_smp_start_cpu(pc);

			timeout = 2000;	/* wait 2sec for the AP */
			while (!pc->pc_awake && --timeout > 0)
				DELAY(1000);

		} else {
			PCPU_SET(pir, mfspr(SPR_PIR));
			pc->pc_awake = 1;
		}
		if (pc->pc_awake) {
			if (bootverbose)
				printf("Adding CPU %d, pir=%x, awake=%x\n",
				    pc->pc_cpuid, pc->pc_pir, pc->pc_awake);
			smp_cpus++;
		} else
			CPU_SET(pc->pc_cpuid, &stopped_cpus);
	}

	ap_awake = 1;

	/* Provide our current DEC and TB values for APs */
	ap_timebase = mftb() + 10;
	__asm __volatile("msync; isync");

	/* Let APs continue */
	atomic_store_rel_int(&ap_letgo, 1);

#ifdef __powerpc64__
	/* Writing to the time base register is hypervisor-privileged */
	if (mfmsr() & PSL_HV)
		mttb(ap_timebase);
#else
	mttb(ap_timebase);
#endif

	while (ap_awake < smp_cpus)
		;

	if (smp_cpus != cpus || cpus != mp_ncpus) {
		printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n",
		    mp_ncpus, cpus, smp_cpus);
	}

	/* Let the APs get into the scheduler */
	DELAY(10000);

	/* XXX Atomic set operation? */
	smp_started = 1;
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
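
/*
 * Inter-processor interrupt dispatch.
 *
 * A sender sets the message bit in the target CPU's pc_ipimask and
 * raises a single interrupt through the root PIC, so messages posted
 * before the target services the interrupt are coalesced.  The filter
 * below atomically reads and clears the mask and then acts on every
 * message bit that was set.
 */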

int
powerpc_ipi_handler(void *arg)
{
	u_int cpuid;
	uint32_t ipimask;
	int msg;

	CTR2(KTR_SMP, "%s: MSR 0x%08x", __func__, mfmsr());

	ipimask = atomic_readandclear_32(&(pcpup->pc_ipimask));
	if (ipimask == 0)
		return (FILTER_STRAY);
	while ((msg = ffs(ipimask) - 1) != -1) {
		ipimask &= ~(1u << msg);
		ipi_msg_cnt[msg]++;
		switch (msg) {
		case IPI_AST:
			CTR1(KTR_SMP, "%s: IPI_AST", __func__);
			break;
		case IPI_PREEMPT:
			CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__);
			sched_preempt(curthread);
			break;
		case IPI_RENDEZVOUS:
			CTR1(KTR_SMP, "%s: IPI_RENDEZVOUS", __func__);
			smp_rendezvous_action();
			break;
		case IPI_STOP:

			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add such case in the switch.
			 */
			CTR1(KTR_SMP, "%s: IPI_STOP or IPI_STOP_HARD (stop)",
			    __func__);
			cpuid = PCPU_GET(cpuid);
			savectx(&stoppcbs[cpuid]);
			savectx(PCPU_GET(curpcb));
			CPU_SET_ATOMIC(cpuid, &stopped_cpus);
			while (!CPU_ISSET(cpuid, &started_cpus))
				cpu_spinwait();
			CPU_CLR_ATOMIC(cpuid, &stopped_cpus);
			CPU_CLR_ATOMIC(cpuid, &started_cpus);
			CTR1(KTR_SMP, "%s: IPI_STOP (restart)", __func__);
			break;
		case IPI_HARDCLOCK:
			CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
			hardclockintr();
			break;
		}
	}

	return (FILTER_HANDLED);
}

static void
ipi_send(struct pcpu *pc, int ipi)
{

	CTR4(KTR_SMP, "%s: pc=%p, targetcpu=%d, IPI=%d", __func__,
	    pc, pc->pc_cpuid, ipi);

	atomic_set_32(&pc->pc_ipimask, (1 << ipi));
	powerpc_sync();
	PIC_IPI(root_pic, pc->pc_cpuid);

	CTR1(KTR_SMP, "%s: sent", __func__);
}

/* Send an IPI to a set of cpus. */
void
ipi_selected(cpuset_t cpus, int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (CPU_ISSET(pc->pc_cpuid, &cpus))
			ipi_send(pc, ipi);
	}
}

/* Send an IPI to a specific CPU. */
void
ipi_cpu(int cpu, u_int ipi)
{

	ipi_send(cpuid_to_pcpu[cpu], ipi);
}

/* Send an IPI to all CPUs EXCEPT myself. */
void
ipi_all_but_self(int ipi)
{
	struct pcpu *pc;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup)
			ipi_send(pc, ipi);
	}
}
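
/*
 * Example usage of the IPI interface above (hypothetical calls, for
 * illustration only; in the kernel these are normally issued by the
 * scheduler and the machine-independent SMP code):
 *
 *	cpuset_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(1, &set);
 *	ipi_selected(set, IPI_AST);		(AST to CPU 1 only)
 *	ipi_cpu(2, IPI_PREEMPT);		(preemption request to CPU 2)
 *	ipi_all_but_self(IPI_HARDCLOCK);	(hardclock to every other CPU)
 */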