/* mp_machdep.c revision 71576 */
1/*- 2 * Copyright (c) 2000 Doug Rabson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 * 26 * $FreeBSD: head/sys/powerpc/powerpc/mp_machdep.c 71576 2001-01-24 12:35:55Z jasone $ 27 */ 28 29#include <sys/param.h> 30#include <sys/systm.h> 31#include <sys/ktr.h> 32#include <sys/proc.h> 33#include <sys/lock.h> 34#include <sys/malloc.h> 35#include <sys/mutex.h> 36#include <sys/kernel.h> 37#include <sys/sysctl.h> 38 39#include <vm/vm.h> 40#include <vm/pmap.h> 41#include <vm/vm_map.h> 42#include <sys/user.h> 43#include <sys/dkstat.h> 44 45#include <machine/smp.h> 46#include <machine/lock.h> 47#include <machine/atomic.h> 48#include <machine/globaldata.h> 49#include <machine/pmap.h> 50#include <machine/rpb.h> 51#include <machine/clock.h> 52 53volatile u_int stopped_cpus; 54volatile u_int started_cpus; 55volatile u_int checkstate_probed_cpus; 56volatile u_int checkstate_need_ast; 57volatile u_int checkstate_pending_ast; 58struct proc* checkstate_curproc[MAXCPU]; 59int checkstate_cpustate[MAXCPU]; 60u_long checkstate_pc[MAXCPU]; 61volatile u_int resched_cpus; 62void (*cpustop_restartfunc) __P((void)); 63int mp_ncpus; 64 65int smp_started; 66int boot_cpu_id; 67u_int32_t all_cpus; 68 69static struct globaldata *cpuno_to_globaldata[MAXCPU]; 70 71int smp_active = 0; /* are the APs allowed to run? */ 72SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, ""); 73 74/* Is forwarding of a interrupt to the CPU holding the ISR lock enabled ? 
*/ 75int forward_irq_enabled = 1; 76SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW, 77 &forward_irq_enabled, 0, ""); 78 79/* Enable forwarding of a signal to a process running on a different CPU */ 80static int forward_signal_enabled = 1; 81SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW, 82 &forward_signal_enabled, 0, ""); 83 84/* Enable forwarding of roundrobin to all other cpus */ 85static int forward_roundrobin_enabled = 1; 86SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW, 87 &forward_roundrobin_enabled, 0, ""); 88 89/* 90 * Communicate with a console running on a secondary processor. 91 * Return 1 on failure. 92 */ 93static int 94smp_send_secondary_command(const char *command, int cpuno) 95{ 96 u_int64_t mask; 97 98 mask = 1L << cpuno; 99 struct pcs *cpu = LOCATE_PCS(hwrpb, cpuno); 100 int i, len; 101 102 /* 103 * Sanity check. 104 */ 105 len = strlen(command); 106 if (len > sizeof(cpu->pcs_buffer.rxbuf)) { 107 printf("smp_send_secondary_command: command '%s' too long\n", 108 command); 109 return 0; 110 } 111 112 /* 113 * Wait for the rx bit to clear. 114 */ 115 for (i = 0; i < 100000; i++) { 116 if (!(hwrpb->rpb_rxrdy & mask)) 117 break; 118 DELAY(10); 119 } 120 if (hwrpb->rpb_rxrdy & mask) 121 return 0; 122 123 /* 124 * Write the command into the processor's buffer. 125 */ 126 bcopy(command, cpu->pcs_buffer.rxbuf, len); 127 cpu->pcs_buffer.rxlen = len; 128 129 /* 130 * Set the bit in the rxrdy mask and let the secondary try to 131 * handle the command. 132 */ 133 atomic_set_64(&hwrpb->rpb_rxrdy, mask); 134 135 /* 136 * Wait for the rx bit to clear. 
137 */ 138 for (i = 0; i < 100000; i++) { 139 if (!(hwrpb->rpb_rxrdy & mask)) 140 break; 141 DELAY(10); 142 } 143 if (hwrpb->rpb_rxrdy & mask) 144 return 0; 145 146 return 1; 147} 148 149void 150smp_init_secondary(void) 151{ 152 153 mtx_enter(&Giant, MTX_DEF); 154 155 printf("smp_init_secondary: called\n"); 156 CTR0(KTR_SMP, "smp_init_secondary"); 157 158 /* 159 * Add to mask. 160 */ 161 smp_started = 1; 162 if (PCPU_GET(cpuno) + 1 > mp_ncpus) 163 mp_ncpus = PCPU_GET(cpuno) + 1; 164 spl0(); 165 166 mtx_exit(&Giant, MTX_DEF); 167} 168 169extern void smp_init_secondary_glue(void); 170 171static int 172smp_start_secondary(int cpuno) 173{ 174 175 printf("smp_start_secondary: starting cpu %d\n", cpuno); 176 177 sz = round_page(UPAGES * PAGE_SIZE); 178 globaldata = malloc(sz, M_TEMP, M_NOWAIT); 179 if (!globaldata) { 180 printf("smp_start_secondary: can't allocate memory\n"); 181 return 0; 182 } 183 184 globaldata_init(globaldata, cpuno, sz); 185 186 /* 187 * Fire it up and hope for the best. 188 */ 189 if (!smp_send_secondary_command("START\r\n", cpuno)) { 190 printf("smp_init_secondary: can't send START command\n"); 191 free(globaldata, M_TEMP); 192 return 0; 193 } 194 195 /* 196 * It worked (I think). 197 */ 198 /* if (bootverbose) */ 199 printf("smp_init_secondary: cpu %d started\n", cpuno); 200 201 return 1; 202} 203 204/* 205 * Initialise a struct globaldata. 
206 */ 207void 208globaldata_init(struct globaldata *globaldata, int cpuno, size_t sz) 209{ 210 211 bzero(globaldata, sz); 212 globaldata->gd_idlepcbphys = vtophys((vm_offset_t) &globaldata->gd_idlepcb); 213 globaldata->gd_idlepcb.apcb_ksp = (u_int64_t) 214 ((caddr_t) globaldata + sz - sizeof(struct trapframe)); 215 globaldata->gd_idlepcb.apcb_ptbr = proc0.p_addr->u_pcb.pcb_hw.apcb_ptbr; 216 globaldata->gd_cpuno = cpuno; 217 globaldata->gd_other_cpus = all_cpus & ~(1 << cpuno); 218 globaldata->gd_next_asn = 0; 219 globaldata->gd_current_asngen = 1; 220 globaldata->gd_cpuid = cpuno; 221 cpuno_to_globaldata[cpuno] = globaldata; 222} 223 224struct globaldata * 225globaldata_find(int cpuno) 226{ 227 228 return cpuno_to_globaldata[cpuno]; 229} 230 231/* Other stuff */ 232 233/* lock around the MP rendezvous */ 234static struct mtx smp_rv_mtx; 235 236/* only 1 CPU can panic at a time :) */ 237struct mtx panic_mtx; 238 239static void 240init_locks(void) 241{ 242 243 mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN); 244 mtx_init(&panic_mtx, "panic", MTX_DEF); 245} 246 247void 248mp_start() 249{ 250} 251 252void 253mp_announce() 254{ 255} 256 257void 258smp_invltlb() 259{ 260} 261 262 263/* 264 * When called the executing CPU will send an IPI to all other CPUs 265 * requesting that they halt execution. 266 * 267 * Usually (but not necessarily) called with 'other_cpus' as its arg. 268 * 269 * - Signals all CPUs in map to stop. 270 * - Waits for each to stop. 271 * 272 * Returns: 273 * -1: error 274 * 0: NA 275 * 1: ok 276 * 277 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs 278 * from executing at same time. 
279 */ 280int 281stop_cpus(u_int map) 282{ 283 int i; 284 285 if (!smp_started) 286 return 0; 287 288 CTR1(KTR_SMP, "stop_cpus(%x)", map); 289 290 i = 0; 291 while ((stopped_cpus & map) != map) { 292 /* spin */ 293 i++; 294 if (i == 100000) { 295 printf("timeout stopping cpus\n"); 296 break; 297 } 298 } 299 300 printf("stopped_cpus=%x\n", stopped_cpus); 301 302 return 1; 303} 304 305 306/* 307 * Called by a CPU to restart stopped CPUs. 308 * 309 * Usually (but not necessarily) called with 'stopped_cpus' as its arg. 310 * 311 * - Signals all CPUs in map to restart. 312 * - Waits for each to restart. 313 * 314 * Returns: 315 * -1: error 316 * 0: NA 317 * 1: ok 318 */ 319int 320restart_cpus(u_int map) 321{ 322 323 if (!smp_started) 324 return 0; 325 326 CTR1(KTR_SMP, "restart_cpus(%x)", map); 327 328 started_cpus = map; /* signal other cpus to restart */ 329 330 while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */ 331 ; 332 333 return 1; 334} 335 336/* 337 * All-CPU rendezvous. CPUs are signalled, all execute the setup function 338 * (if specified), rendezvous, execute the action function (if specified), 339 * rendezvous again, execute the teardown function (if specified), and then 340 * resume. 341 * 342 * Note that the supplied external functions _must_ be reentrant and aware 343 * that they are running in parallel and in an unknown lock context. 
344 */ 345static void (*smp_rv_setup_func)(void *arg); 346static void (*smp_rv_action_func)(void *arg); 347static void (*smp_rv_teardown_func)(void *arg); 348static void *smp_rv_func_arg; 349static volatile int smp_rv_waiters[2]; 350 351void 352smp_rendezvous_action(void) 353{ 354 355 /* setup function */ 356 if (smp_rv_setup_func != NULL) 357 smp_rv_setup_func(smp_rv_func_arg); 358 /* spin on entry rendezvous */ 359 atomic_add_int(&smp_rv_waiters[0], 1); 360 while (smp_rv_waiters[0] < mp_ncpus) 361 ; 362 /* action function */ 363 if (smp_rv_action_func != NULL) 364 smp_rv_action_func(smp_rv_func_arg); 365 /* spin on exit rendezvous */ 366 atomic_add_int(&smp_rv_waiters[1], 1); 367 while (smp_rv_waiters[1] < mp_ncpus) 368 ; 369 /* teardown function */ 370 if (smp_rv_teardown_func != NULL) 371 smp_rv_teardown_func(smp_rv_func_arg); 372} 373 374void 375smp_rendezvous(void (* setup_func)(void *), 376 void (* action_func)(void *), 377 void (* teardown_func)(void *), 378 void *arg) 379{ 380 381 /* obtain rendezvous lock */ 382 mtx_enter(&smp_rv_mtx, MTX_SPIN); 383 384 /* set static function pointers */ 385 smp_rv_setup_func = setup_func; 386 smp_rv_action_func = action_func; 387 smp_rv_teardown_func = teardown_func; 388 smp_rv_func_arg = arg; 389 smp_rv_waiters[0] = 0; 390 smp_rv_waiters[1] = 0; 391 392 /* call executor function */ 393 smp_rendezvous_action(); 394 395 /* release lock */ 396 mtx_exit(&smp_rv_mtx, MTX_SPIN); 397} 398 399static u_int64_t 400atomic_readandclear(u_int64_t* p) 401{ 402 u_int64_t v, temp; 403 404 __asm__ __volatile__ ( 405 : "=&r"(v), "=&r"(temp), "=m" (*p) 406 : "m"(*p) 407 : "memory"); 408 return v; 409} 410