/*
 * mp_machdep.c, revision 70583.
 * NOTE(review): the RCS id below names sys/powerpc/powerpc, but the code in
 * this file is Alpha machine-dependent (hwrpb / PCS console interface) --
 * the metadata join is probably wrong.
 */
1261991Sdim/*- 2243791Sdim * Copyright (c) 2000 Doug Rabson 3353358Sdim * All rights reserved. 4353358Sdim * 5353358Sdim * Redistribution and use in source and binary forms, with or without 6243791Sdim * modification, are permitted provided that the following conditions 7243791Sdim * are met: 8243791Sdim * 1. Redistributions of source code must retain the above copyright 9243791Sdim * notice, this list of conditions and the following disclaimer. 10341825Sdim * 2. Redistributions in binary form must reproduce the above copyright 11341825Sdim * notice, this list of conditions and the following disclaimer in the 12243791Sdim * documentation and/or other materials provided with the distribution. 13243791Sdim * 14243791Sdim * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15243791Sdim * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16243791Sdim * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17288943Sdim * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18341825Sdim * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19341825Sdim * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20341825Sdim * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21341825Sdim * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22288943Sdim * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23341825Sdim * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24341825Sdim * SUCH DAMAGE. 
25341825Sdim * 26341825Sdim * $FreeBSD: head/sys/powerpc/powerpc/mp_machdep.c 70583 2001-01-01 23:45:11Z obrien $ 27341825Sdim */ 28341825Sdim 29309124Sdim#include <sys/param.h> 30309124Sdim#include <sys/systm.h> 31309124Sdim#include <sys/ktr.h> 32309124Sdim#include <sys/proc.h> 33314564Sdim#include <sys/lock.h> 34309124Sdim#include <sys/malloc.h> 35309124Sdim#include <sys/mutex.h> 36309124Sdim#include <sys/kernel.h> 37309124Sdim#include <sys/sysctl.h> 38341825Sdim 39309124Sdim#include <vm/vm.h> 40309124Sdim#include <vm/pmap.h> 41353358Sdim#include <vm/vm_map.h> 42353358Sdim#include <sys/user.h> 43353358Sdim#include <sys/dkstat.h> 44309124Sdim 45243791Sdim#include <machine/smp.h> 46341825Sdim#include <machine/lock.h> 47309124Sdim#include <machine/atomic.h> 48309124Sdim#include <machine/globaldata.h> 49309124Sdim#include <machine/pmap.h> 50309124Sdim#include <machine/rpb.h> 51309124Sdim#include <machine/clock.h> 52309124Sdim 53309124Sdimvolatile u_int stopped_cpus; 54309124Sdimvolatile u_int started_cpus; 55314564Sdimvolatile u_int checkstate_probed_cpus; 56309124Sdimvolatile u_int checkstate_need_ast; 57309124Sdimvolatile u_int checkstate_pending_ast; 58309124Sdimstruct proc* checkstate_curproc[MAXCPU]; 59309124Sdimint checkstate_cpustate[MAXCPU]; 60309124Sdimu_long checkstate_pc[MAXCPU]; 61314564Sdimvolatile u_int resched_cpus; 62314564Sdimvoid (*cpustop_restartfunc) __P((void)); 63314564Sdimint mp_ncpus; 64314564Sdim 65314564Sdimint smp_started; 66309124Sdimint boot_cpu_id; 67309124Sdimu_int32_t all_cpus; 68341825Sdim 69321369Sdimstatic struct globaldata *cpuno_to_globaldata[MAXCPU]; 70341825Sdim 71309124Sdimint smp_active = 0; /* are the APs allowed to run? */ 72341825SdimSYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, ""); 73309124Sdim 74309124Sdim/* Is forwarding of a interrupt to the CPU holding the ISR lock enabled ? 
*/ 75309124Sdimint forward_irq_enabled = 1; 76309124SdimSYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW, 77309124Sdim &forward_irq_enabled, 0, ""); 78309124Sdim 79309124Sdim/* Enable forwarding of a signal to a process running on a different CPU */ 80309124Sdimstatic int forward_signal_enabled = 1; 81314564SdimSYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW, 82309124Sdim &forward_signal_enabled, 0, ""); 83309124Sdim 84309124Sdim/* Enable forwarding of roundrobin to all other cpus */ 85309124Sdimstatic int forward_roundrobin_enabled = 1; 86314564SdimSYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW, 87314564Sdim &forward_roundrobin_enabled, 0, ""); 88314564Sdim 89314564Sdim/* 90314564Sdim * Communicate with a console running on a secondary processor. 91309124Sdim * Return 1 on failure. 92309124Sdim */ 93309124Sdimstatic int 94309124Sdimsmp_send_secondary_command(const char *command, int cpuno) 95341825Sdim{ 96341825Sdim u_int64_t mask; 97309124Sdim 98341825Sdim mask = 1L << cpuno; 99309124Sdim struct pcs *cpu = LOCATE_PCS(hwrpb, cpuno); 100309124Sdim int i, len; 101309124Sdim 102309124Sdim /* 103314564Sdim * Sanity check. 104309124Sdim */ 105309124Sdim len = strlen(command); 106309124Sdim if (len > sizeof(cpu->pcs_buffer.rxbuf)) { 107309124Sdim printf("smp_send_secondary_command: command '%s' too long\n", 108309124Sdim command); 109341825Sdim return 0; 110249423Sdim } 111243791Sdim 112249423Sdim /* 113243791Sdim * Wait for the rx bit to clear. 114243791Sdim */ 115341825Sdim for (i = 0; i < 100000; i++) { 116341825Sdim if (!(hwrpb->rpb_rxrdy & mask)) 117341825Sdim break; 118341825Sdim DELAY(10); 119341825Sdim } 120341825Sdim if (hwrpb->rpb_rxrdy & mask) 121341825Sdim return 0; 122341825Sdim 123341825Sdim /* 124341825Sdim * Write the command into the processor's buffer. 
125341825Sdim */ 126341825Sdim bcopy(command, cpu->pcs_buffer.rxbuf, len); 127341825Sdim cpu->pcs_buffer.rxlen = len; 128341825Sdim 129341825Sdim /* 130341825Sdim * Set the bit in the rxrdy mask and let the secondary try to 131341825Sdim * handle the command. 132341825Sdim */ 133341825Sdim atomic_set_64(&hwrpb->rpb_rxrdy, mask); 134341825Sdim 135341825Sdim /* 136341825Sdim * Wait for the rx bit to clear. 137341825Sdim */ 138341825Sdim for (i = 0; i < 100000; i++) { 139341825Sdim if (!(hwrpb->rpb_rxrdy & mask)) 140288943Sdim break; 141341825Sdim DELAY(10); 142341825Sdim } 143341825Sdim if (hwrpb->rpb_rxrdy & mask) 144341825Sdim return 0; 145341825Sdim 146341825Sdim return 1; 147341825Sdim} 148341825Sdim 149341825Sdimvoid 150341825Sdimsmp_init_secondary(void) 151341825Sdim{ 152341825Sdim 153341825Sdim mtx_enter(&Giant, MTX_DEF); 154341825Sdim 155341825Sdim printf("smp_init_secondary: called\n"); 156341825Sdim CTR0(KTR_SMP, "smp_init_secondary"); 157341825Sdim 158341825Sdim /* 159341825Sdim * Add to mask. 160341825Sdim */ 161341825Sdim smp_started = 1; 162243791Sdim if (PCPU_GET(cpuno) + 1 > mp_ncpus) 163 mp_ncpus = PCPU_GET(cpuno) + 1; 164 spl0(); 165 166 mtx_exit(&Giant, MTX_DEF); 167} 168 169extern void smp_init_secondary_glue(void); 170 171static int 172smp_start_secondary(int cpuno) 173{ 174 175 printf("smp_start_secondary: starting cpu %d\n", cpuno); 176 177 sz = round_page(UPAGES * PAGE_SIZE); 178 globaldata = malloc(sz, M_TEMP, M_NOWAIT); 179 if (!globaldata) { 180 printf("smp_start_secondary: can't allocate memory\n"); 181 return 0; 182 } 183 184 globaldata_init(globaldata, cpuno, sz); 185 186 /* 187 * Fire it up and hope for the best. 188 */ 189 if (!smp_send_secondary_command("START\r\n", cpuno)) { 190 printf("smp_init_secondary: can't send START command\n"); 191 free(globaldata, M_TEMP); 192 return 0; 193 } 194 195 /* 196 * It worked (I think). 
197 */ 198 /* if (bootverbose) */ 199 printf("smp_init_secondary: cpu %d started\n", cpuno); 200 201 return 1; 202} 203 204/* 205 * Initialise a struct globaldata. 206 */ 207void 208globaldata_init(struct globaldata *globaldata, int cpuno, size_t sz) 209{ 210 211 bzero(globaldata, sz); 212 globaldata->gd_idlepcbphys = vtophys((vm_offset_t) &globaldata->gd_idlepcb); 213 globaldata->gd_idlepcb.apcb_ksp = (u_int64_t) 214 ((caddr_t) globaldata + sz - sizeof(struct trapframe)); 215 globaldata->gd_idlepcb.apcb_ptbr = proc0.p_addr->u_pcb.pcb_hw.apcb_ptbr; 216 globaldata->gd_cpuno = cpuno; 217 globaldata->gd_other_cpus = all_cpus & ~(1 << cpuno); 218 globaldata->gd_next_asn = 0; 219 globaldata->gd_current_asngen = 1; 220 globaldata->gd_cpuid = cpuno; 221 cpuno_to_globaldata[cpuno] = globaldata; 222} 223 224struct globaldata * 225globaldata_find(int cpuno) 226{ 227 228 return cpuno_to_globaldata[cpuno]; 229} 230 231/* Implementation of simplelocks */ 232 233/* 234 * Atomically swap the value of *p with val. Return the old value of *p. 235 */ 236static __inline int 237atomic_xchg(volatile u_int *p, u_int val) 238{ 239 u_int32_t oldval, temp; 240 241 __asm__ __volatile__ ( 242 : "=&r"(oldval), "=r"(temp), "=m" (*p) 243 : "m"(*p), "r"(val) 244 : "memory"); 245 return oldval; 246} 247 248void 249s_lock_init(struct simplelock *lkp) 250{ 251 252 lkp->lock_data = 0; 253} 254 255void 256s_lock(struct simplelock *lkp) 257{ 258 259 for (;;) { 260 if (s_lock_try(lkp)) 261 return; 262 263 /* 264 * Spin until clear. 265 */ 266 while (lkp->lock_data) 267 ; 268 } 269} 270 271int 272s_lock_try(struct simplelock *lkp) 273{ 274 u_int32_t oldval, temp; 275 276 __asm__ __volatile__ ( 277 : "=&r"(oldval), "=r"(temp), "=m" (lkp->lock_data) 278 : "m"(lkp->lock_data) 279 : "memory"); 280 281 if (!oldval) { 282 /* 283 * It was clear, return success. 
284 */ 285 return 1; 286 } 287 return 0; 288} 289 290/* Other stuff */ 291 292/* lock around the MP rendezvous */ 293static struct simplelock smp_rv_lock; 294 295/* only 1 CPU can panic at a time :) */ 296struct simplelock panic_lock; 297 298static void 299init_locks(void) 300{ 301 302 s_lock_init(&smp_rv_lock); 303 s_lock_init(&panic_lock); 304} 305 306void 307mp_start() 308{ 309} 310 311void 312mp_announce() 313{ 314} 315 316void 317smp_invltlb() 318{ 319} 320 321 322/* 323 * When called the executing CPU will send an IPI to all other CPUs 324 * requesting that they halt execution. 325 * 326 * Usually (but not necessarily) called with 'other_cpus' as its arg. 327 * 328 * - Signals all CPUs in map to stop. 329 * - Waits for each to stop. 330 * 331 * Returns: 332 * -1: error 333 * 0: NA 334 * 1: ok 335 * 336 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs 337 * from executing at same time. 338 */ 339int 340stop_cpus(u_int map) 341{ 342 int i; 343 344 if (!smp_started) 345 return 0; 346 347 CTR1(KTR_SMP, "stop_cpus(%x)", map); 348 349 i = 0; 350 while ((stopped_cpus & map) != map) { 351 /* spin */ 352 i++; 353 if (i == 100000) { 354 printf("timeout stopping cpus\n"); 355 break; 356 } 357 } 358 359 printf("stopped_cpus=%x\n", stopped_cpus); 360 361 return 1; 362} 363 364 365/* 366 * Called by a CPU to restart stopped CPUs. 367 * 368 * Usually (but not necessarily) called with 'stopped_cpus' as its arg. 369 * 370 * - Signals all CPUs in map to restart. 371 * - Waits for each to restart. 372 * 373 * Returns: 374 * -1: error 375 * 0: NA 376 * 1: ok 377 */ 378int 379restart_cpus(u_int map) 380{ 381 382 if (!smp_started) 383 return 0; 384 385 CTR1(KTR_SMP, "restart_cpus(%x)", map); 386 387 started_cpus = map; /* signal other cpus to restart */ 388 389 while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */ 390 ; 391 392 return 1; 393} 394 395/* 396 * All-CPU rendezvous. 
CPUs are signalled, all execute the setup function 397 * (if specified), rendezvous, execute the action function (if specified), 398 * rendezvous again, execute the teardown function (if specified), and then 399 * resume. 400 * 401 * Note that the supplied external functions _must_ be reentrant and aware 402 * that they are running in parallel and in an unknown lock context. 403 */ 404static void (*smp_rv_setup_func)(void *arg); 405static void (*smp_rv_action_func)(void *arg); 406static void (*smp_rv_teardown_func)(void *arg); 407static void *smp_rv_func_arg; 408static volatile int smp_rv_waiters[2]; 409 410void 411smp_rendezvous_action(void) 412{ 413 414 /* setup function */ 415 if (smp_rv_setup_func != NULL) 416 smp_rv_setup_func(smp_rv_func_arg); 417 /* spin on entry rendezvous */ 418 atomic_add_int(&smp_rv_waiters[0], 1); 419 while (smp_rv_waiters[0] < mp_ncpus) 420 ; 421 /* action function */ 422 if (smp_rv_action_func != NULL) 423 smp_rv_action_func(smp_rv_func_arg); 424 /* spin on exit rendezvous */ 425 atomic_add_int(&smp_rv_waiters[1], 1); 426 while (smp_rv_waiters[1] < mp_ncpus) 427 ; 428 /* teardown function */ 429 if (smp_rv_teardown_func != NULL) 430 smp_rv_teardown_func(smp_rv_func_arg); 431} 432 433void 434smp_rendezvous(void (* setup_func)(void *), 435 void (* action_func)(void *), 436 void (* teardown_func)(void *), 437 void *arg) 438{ 439 int s; 440 441 /* disable interrupts on this CPU, save interrupt status */ 442 s = save_intr(); 443 disable_intr(); 444 445 /* obtain rendezvous lock */ 446 s_lock(&smp_rv_lock); /* XXX sleep here? NOWAIT flag? 
*/ 447 448 /* set static function pointers */ 449 smp_rv_setup_func = setup_func; 450 smp_rv_action_func = action_func; 451 smp_rv_teardown_func = teardown_func; 452 smp_rv_func_arg = arg; 453 smp_rv_waiters[0] = 0; 454 smp_rv_waiters[1] = 0; 455 456 /* call executor function */ 457 smp_rendezvous_action(); 458 459 /* release lock */ 460 s_unlock(&smp_rv_lock); 461 462 /* restore interrupt flag */ 463 restore_intr(s); 464} 465 466static u_int64_t 467atomic_readandclear(u_int64_t* p) 468{ 469 u_int64_t v, temp; 470 471 __asm__ __volatile__ ( 472 : "=&r"(v), "=&r"(temp), "=m" (*p) 473 : "m"(*p) 474 : "memory"); 475 return v; 476} 477