1/*- 2 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 3. Berkeley Software Design Inc's name may not be used to endorse or 13 * promote products derived from this software without specific prior 14 * written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * from BSDI: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp 29 */ 30/*- 31 * Copyright (c) 2002 Jake Burkholder. 32 * Copyright (c) 2007 - 2010 Marius Strobl <marius@FreeBSD.org> 33 * All rights reserved. 34 * 35 * Redistribution and use in source and binary forms, with or without 36 * modification, are permitted provided that the following conditions 37 * are met: 38 * 1. 
Redistributions of source code must retain the above copyright 39 * notice, this list of conditions and the following disclaimer. 40 * 2. Redistributions in binary form must reproduce the above copyright 41 * notice, this list of conditions and the following disclaimer in the 42 * documentation and/or other materials provided with the distribution. 43 * 44 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 45 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 46 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 47 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 48 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 49 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 50 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 51 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 53 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 54 * SUCH DAMAGE. 
55 */ 56 57#include <sys/cdefs.h> 58__FBSDID("$FreeBSD$"); 59 60#include <sys/param.h> 61#include <sys/systm.h> 62#include <sys/lock.h> 63#include <sys/kdb.h> 64#include <sys/kernel.h> 65#include <sys/ktr.h> 66#include <sys/mutex.h> 67#include <sys/pcpu.h> 68#include <sys/proc.h> 69#include <sys/sched.h> 70#include <sys/smp.h> 71 72#include <vm/vm.h> 73#include <vm/vm_param.h> 74#include <vm/pmap.h> 75#include <vm/vm_kern.h> 76#include <vm/vm_extern.h> 77#include <vm/vm_map.h> 78 79#include <dev/ofw/openfirm.h> 80 81#include <machine/asi.h> 82#include <machine/atomic.h> 83#include <machine/bus.h> 84#include <machine/cpu.h> 85#include <machine/md_var.h> 86#include <machine/metadata.h> 87#include <machine/ofw_machdep.h> 88#include <machine/pcb.h> 89#include <machine/smp.h> 90#include <machine/tick.h> 91#include <machine/tlb.h> 92#include <machine/tsb.h> 93#include <machine/tte.h> 94#include <machine/ver.h> 95 96#define SUNW_STARTCPU "SUNW,start-cpu" 97#define SUNW_STOPSELF "SUNW,stop-self" 98 99static ih_func_t cpu_ipi_ast; 100static ih_func_t cpu_ipi_hardclock; 101static ih_func_t cpu_ipi_preempt; 102static ih_func_t cpu_ipi_stop; 103 104/* 105 * Argument area used to pass data to non-boot processors as they start up. 106 * This must be statically initialized with a known invalid CPU module ID, 107 * since the other processors will use it before the boot CPU enters the 108 * kernel. 
 */
struct cpu_start_args cpu_start_args = { 0, -1, -1, 0, 0, 0 };
struct ipi_cache_args ipi_cache_args;
struct ipi_rd_args ipi_rd_args;
struct ipi_tlb_args ipi_tlb_args;
/* Saved contexts of stopped CPUs, indexed by CPU ID (see cpu_ipi_stop()). */
struct pcb stoppcbs[MAXCPU];

/* IPI dispatch entry points, selected per CPU implementation in mp_init(). */
cpu_ipi_selected_t *cpu_ipi_selected;
cpu_ipi_single_t *cpu_ipi_single;

/* Physical location of the AP startup trampoline claimed from the firmware. */
static vm_offset_t mp_tramp;
/* Maps kernel CPU IDs to hardware CPU module IDs used for IPI dispatch. */
static u_int cpuid_to_mid[MAXCPU];
/* Non-zero on JBus CPUs (UltraSPARC IIIi/IIIi+), which have a narrower IDR. */
static int isjbus;
/* CPUs which still have to acknowledge cpu_mp_shutdown(). */
static volatile cpuset_t shutdown_cpus;

static void ap_count(phandle_t node, u_int mid, u_int cpu_impl);
static void ap_start(phandle_t node, u_int mid, u_int cpu_impl);
static void cpu_mp_unleash(void *v);
static void foreach_ap(phandle_t node, void (*func)(phandle_t node,
    u_int mid, u_int cpu_impl));
static void sun4u_startcpu(phandle_t cpu, void *func, u_long arg);

static cpu_ipi_selected_t cheetah_ipi_selected;
static cpu_ipi_single_t cheetah_ipi_single;
static cpu_ipi_selected_t jalapeno_ipi_selected;
static cpu_ipi_single_t jalapeno_ipi_single;
static cpu_ipi_selected_t spitfire_ipi_selected;
static cpu_ipi_single_t spitfire_ipi_single;

SYSINIT(cpu_mp_unleash, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);

/*
 * Set up the AP startup trampoline: claim a page from the firmware, copy
 * the trampoline code into it, patch in the kernel entry point and the
 * locked 4M TTEs the APs need in order to map the kernel, and select the
 * IPI dispatch functions matching this CPU implementation.
 */
void
mp_init(u_int cpu_impl)
{
	struct tte *tp;
	int i;

	mp_tramp = (vm_offset_t)OF_claim(NULL, PAGE_SIZE, PAGE_SIZE);
	if (mp_tramp == (vm_offset_t)-1)
		panic("%s", __func__);
	bcopy(mp_tramp_code, (void *)mp_tramp, mp_tramp_code_len);
	/* Patch the slot count and the kernel entry point into the code. */
	*(vm_offset_t *)(mp_tramp + mp_tramp_tlb_slots) = kernel_tlb_slots;
	*(vm_offset_t *)(mp_tramp + mp_tramp_func) = (vm_offset_t)mp_startup;
	/* The TTE array for the kernel mappings follows the trampoline code. */
	tp = (struct tte *)(mp_tramp + mp_tramp_code_len);
	for (i = 0; i < kernel_tlb_slots; i++) {
		tp[i].tte_vpn = TV_VPN(kernel_tlbs[i].te_va, TS_4M);
		tp[i].tte_data = TD_V | TD_4M | TD_PA(kernel_tlbs[i].te_pa) |
		    TD_L | TD_CP | TD_CV | TD_P | TD_W;
	}
	/* Flush the instruction cache over the freshly written trampoline. */
	for (i = 0; i < PAGE_SIZE; i += sizeof(vm_offset_t))
		flush(mp_tramp + i);

	/*
	 * On UP systems cpu_ipi_selected() can be called while
	 * cpu_mp_start() wasn't so initialize these here.
	 */
	if (cpu_impl == CPU_IMPL_ULTRASPARCIIIi ||
	    cpu_impl == CPU_IMPL_ULTRASPARCIIIip) {
		isjbus = 1;
		cpu_ipi_selected = jalapeno_ipi_selected;
		cpu_ipi_single = jalapeno_ipi_single;
	} else if (cpu_impl == CPU_IMPL_SPARC64V ||
	    cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
		cpu_ipi_selected = cheetah_ipi_selected;
		cpu_ipi_single = cheetah_ipi_single;
	} else {
		cpu_ipi_selected = spitfire_ipi_selected;
		cpu_ipi_single = spitfire_ipi_single;
	}
}

/*
 * Recursively walk the OFW device tree rooted at node and invoke func for
 * every "cpu" leaf node except the boot CPU (identified by its module ID).
 * func receives the node, the CPU module ID and the CPU implementation.
 * Panics if a cpu node lacks the properties needed to identify it.
 */
static void
foreach_ap(phandle_t node, void (*func)(phandle_t node, u_int mid,
    u_int cpu_impl))
{
	char type[sizeof("cpu")];
	phandle_t child;
	u_int cpuid;
	uint32_t cpu_impl;

	/* There's no need to traverse the whole OFW tree twice. */
	if (mp_maxid > 0 && mp_ncpus >= mp_maxid + 1)
		return;

	for (; node != 0; node = OF_peer(node)) {
		child = OF_child(node);
		if (child > 0)
			foreach_ap(child, func);
		else {
			if (OF_getprop(node, "device_type", type,
			    sizeof(type)) <= 0)
				continue;
			if (strcmp(type, "cpu") != 0)
				continue;
			if (OF_getprop(node, "implementation#", &cpu_impl,
			    sizeof(cpu_impl)) <= 0)
				panic("%s: couldn't determine CPU "
				    "implementation", __func__);
			if (OF_getprop(node, cpu_cpuid_prop(cpu_impl), &cpuid,
			    sizeof(cpuid)) <= 0)
				panic("%s: couldn't determine CPU module ID",
				    __func__);
			/* Skip the boot CPU; only APs are of interest here. */
			if (cpuid == PCPU_GET(mid))
				continue;
			(*func)(node, cpuid, cpu_impl);
		}
	}
}

/*
 * Probe for other CPUs.
220 */ 221void 222cpu_mp_setmaxid() 223{ 224 225 CPU_SETOF(curcpu, &all_cpus); 226 mp_ncpus = 1; 227 mp_maxid = 0; 228 229 foreach_ap(OF_child(OF_peer(0)), ap_count); 230} 231 232static void 233ap_count(phandle_t node __unused, u_int mid __unused, u_int cpu_impl __unused) 234{ 235 236 mp_maxid++; 237} 238 239int 240cpu_mp_probe(void) 241{ 242 243 return (mp_maxid > 0); 244} 245 246struct cpu_group * 247cpu_topo(void) 248{ 249 250 return (smp_topo_none()); 251} 252 253static void 254sun4u_startcpu(phandle_t cpu, void *func, u_long arg) 255{ 256 static struct { 257 cell_t name; 258 cell_t nargs; 259 cell_t nreturns; 260 cell_t cpu; 261 cell_t func; 262 cell_t arg; 263 } args = { 264 (cell_t)SUNW_STARTCPU, 265 3, 266 }; 267 268 args.cpu = cpu; 269 args.func = (cell_t)func; 270 args.arg = (cell_t)arg; 271 ofw_entry(&args); 272} 273 274/* 275 * Fire up any non-boot processors. 276 */ 277void 278cpu_mp_start(void) 279{ 280 281 intr_setup(PIL_AST, cpu_ipi_ast, -1, NULL, NULL); 282 intr_setup(PIL_RENDEZVOUS, (ih_func_t *)smp_rendezvous_action, 283 -1, NULL, NULL); 284 intr_setup(PIL_STOP, cpu_ipi_stop, -1, NULL, NULL); 285 intr_setup(PIL_PREEMPT, cpu_ipi_preempt, -1, NULL, NULL); 286 intr_setup(PIL_HARDCLOCK, cpu_ipi_hardclock, -1, NULL, NULL); 287 288 cpuid_to_mid[curcpu] = PCPU_GET(mid); 289 290 foreach_ap(OF_child(OF_peer(0)), ap_start); 291 KASSERT(!isjbus || mp_ncpus <= IDR_JALAPENO_MAX_BN_PAIRS, 292 ("%s: can only IPI a maximum of %d JBus-CPUs", 293 __func__, IDR_JALAPENO_MAX_BN_PAIRS)); 294 smp_active = 1; 295} 296 297static void 298ap_start(phandle_t node, u_int mid, u_int cpu_impl) 299{ 300 volatile struct cpu_start_args *csa; 301 struct pcpu *pc; 302 register_t s; 303 vm_offset_t va; 304 u_int cpuid; 305 uint32_t clock; 306 307 if (mp_ncpus > MAXCPU) 308 return; 309 310 if (OF_getprop(node, "clock-frequency", &clock, sizeof(clock)) <= 0) 311 panic("%s: couldn't determine CPU frequency", __func__); 312 if (clock != PCPU_GET(clock)) 313 tick_et_use_stick = 1; 314 
315 csa = &cpu_start_args; 316 csa->csa_state = 0; 317 sun4u_startcpu(node, (void *)mp_tramp, 0); 318 s = intr_disable(); 319 while (csa->csa_state != CPU_TICKSYNC) 320 ; 321 membar(StoreLoad); 322 csa->csa_tick = rd(tick); 323 if (cpu_impl == CPU_IMPL_SPARC64V || 324 cpu_impl >= CPU_IMPL_ULTRASPARCIII) { 325 while (csa->csa_state != CPU_STICKSYNC) 326 ; 327 membar(StoreLoad); 328 csa->csa_stick = rdstick(); 329 } 330 while (csa->csa_state != CPU_INIT) 331 ; 332 csa->csa_tick = csa->csa_stick = 0; 333 intr_restore(s); 334 335 cpuid = mp_ncpus++; 336 cpuid_to_mid[cpuid] = mid; 337 cpu_identify(csa->csa_ver, clock, cpuid); 338 339 va = kmem_alloc(kernel_map, PCPU_PAGES * PAGE_SIZE); 340 pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1; 341 pcpu_init(pc, cpuid, sizeof(*pc)); 342 dpcpu_init((void *)kmem_alloc(kernel_map, DPCPU_SIZE), cpuid); 343 pc->pc_addr = va; 344 pc->pc_clock = clock; 345 pc->pc_impl = cpu_impl; 346 pc->pc_mid = mid; 347 pc->pc_node = node; 348 349 cache_init(pc); 350 351 CPU_SET(cpuid, &all_cpus); 352 intr_add_cpu(cpuid); 353} 354 355void 356cpu_mp_announce(void) 357{ 358 359} 360 361static void 362cpu_mp_unleash(void *v) 363{ 364 volatile struct cpu_start_args *csa; 365 struct pcpu *pc; 366 register_t s; 367 vm_offset_t va; 368 vm_paddr_t pa; 369 u_int ctx_inc; 370 u_int ctx_min; 371 int i; 372 373 ctx_min = TLB_CTX_USER_MIN; 374 ctx_inc = (TLB_CTX_USER_MAX - 1) / mp_ncpus; 375 csa = &cpu_start_args; 376 csa->csa_count = mp_ncpus; 377 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 378 pc->pc_tlb_ctx = ctx_min; 379 pc->pc_tlb_ctx_min = ctx_min; 380 pc->pc_tlb_ctx_max = ctx_min + ctx_inc; 381 ctx_min += ctx_inc; 382 383 if (pc->pc_cpuid == curcpu) 384 continue; 385 KASSERT(pc->pc_idlethread != NULL, 386 ("%s: idlethread", __func__)); 387 pc->pc_curthread = pc->pc_idlethread; 388 pc->pc_curpcb = pc->pc_curthread->td_pcb; 389 for (i = 0; i < PCPU_PAGES; i++) { 390 va = pc->pc_addr + i * PAGE_SIZE; 391 pa = pmap_kextract(va); 392 if (pa == 0) 393 
panic("%s: pmap_kextract", __func__); 394 csa->csa_ttes[i].tte_vpn = TV_VPN(va, TS_8K); 395 csa->csa_ttes[i].tte_data = TD_V | TD_8K | TD_PA(pa) | 396 TD_L | TD_CP | TD_CV | TD_P | TD_W; 397 } 398 csa->csa_state = 0; 399 csa->csa_pcpu = pc->pc_addr; 400 csa->csa_mid = pc->pc_mid; 401 s = intr_disable(); 402 while (csa->csa_state != CPU_BOOTSTRAP) 403 ; 404 intr_restore(s); 405 } 406 407 membar(StoreLoad); 408 csa->csa_count = 0; 409 smp_started = 1; 410} 411 412void 413cpu_mp_bootstrap(struct pcpu *pc) 414{ 415 volatile struct cpu_start_args *csa; 416 417 csa = &cpu_start_args; 418 419 /* Do CPU-specific initialization. */ 420 if (pc->pc_impl >= CPU_IMPL_ULTRASPARCIII) 421 cheetah_init(pc->pc_impl); 422 else if (pc->pc_impl == CPU_IMPL_SPARC64V) 423 zeus_init(pc->pc_impl); 424 425 /* 426 * Enable the caches. Note that his may include applying workarounds. 427 */ 428 cache_enable(pc->pc_impl); 429 430 /* 431 * Clear (S)TICK timer(s) (including NPT) and ensure they are stopped. 432 */ 433 tick_clear(pc->pc_impl); 434 tick_stop(pc->pc_impl); 435 436 /* Set the kernel context. */ 437 pmap_set_kctx(); 438 439 /* Lock the kernel TSB in the TLB if necessary. */ 440 if (tsb_kernel_ldd_phys == 0) 441 pmap_map_tsb(); 442 443 /* 444 * Flush all non-locked TLB entries possibly left over by the 445 * firmware. 446 */ 447 tlb_flush_nonlocked(); 448 449 /* 450 * Enable interrupts. 451 * Note that the PIL we be lowered indirectly via sched_throw(NULL) 452 * when fake spinlock held by the idle thread eventually is released. 453 */ 454 wrpr(pstate, 0, PSTATE_KERNEL); 455 456 smp_cpus++; 457 KASSERT(curthread != NULL, ("%s: curthread", __func__)); 458 printf("SMP: AP CPU #%d Launched!\n", curcpu); 459 460 csa->csa_count--; 461 membar(StoreLoad); 462 csa->csa_state = CPU_BOOTSTRAP; 463 while (csa->csa_count != 0) 464 ; 465 466 /* Start per-CPU event timers. */ 467 cpu_initclocks_ap(); 468 469 /* Ok, now enter the scheduler. 
*/ 470 sched_throw(NULL); 471} 472 473void 474cpu_mp_shutdown(void) 475{ 476 cpuset_t cpus; 477 int i; 478 479 critical_enter(); 480 shutdown_cpus = all_cpus; 481 CPU_CLR(PCPU_GET(cpuid), &shutdown_cpus); 482 cpus = shutdown_cpus; 483 484 /* XXX: Stop all the CPUs which aren't already. */ 485 if (CPU_CMP(&stopped_cpus, &cpus)) { 486 487 /* cpus is just a flat "on" mask without curcpu. */ 488 CPU_NAND(&cpus, &stopped_cpus); 489 stop_cpus(cpus); 490 } 491 i = 0; 492 while (!CPU_EMPTY(&shutdown_cpus)) { 493 if (i++ > 100000) { 494 printf("timeout shutting down CPUs.\n"); 495 break; 496 } 497 } 498 critical_exit(); 499} 500 501static void 502cpu_ipi_ast(struct trapframe *tf __unused) 503{ 504 505} 506 507static void 508cpu_ipi_stop(struct trapframe *tf __unused) 509{ 510 u_int cpuid; 511 512 CTR2(KTR_SMP, "%s: stopped %d", __func__, curcpu); 513 sched_pin(); 514 savectx(&stoppcbs[curcpu]); 515 cpuid = PCPU_GET(cpuid); 516 CPU_SET_ATOMIC(cpuid, &stopped_cpus); 517 while (!CPU_ISSET(cpuid, &started_cpus)) { 518 if (CPU_ISSET(cpuid, &shutdown_cpus)) { 519 CPU_CLR_ATOMIC(cpuid, &shutdown_cpus); 520 (void)intr_disable(); 521 for (;;) 522 ; 523 } 524 } 525 CPU_CLR_ATOMIC(cpuid, &started_cpus); 526 CPU_CLR_ATOMIC(cpuid, &stopped_cpus); 527 sched_unpin(); 528 CTR2(KTR_SMP, "%s: restarted %d", __func__, curcpu); 529} 530 531static void 532cpu_ipi_preempt(struct trapframe *tf) 533{ 534 535 sched_preempt(curthread); 536} 537 538static void 539cpu_ipi_hardclock(struct trapframe *tf) 540{ 541 struct trapframe *oldframe; 542 struct thread *td; 543 544 critical_enter(); 545 td = curthread; 546 td->td_intr_nesting_level++; 547 oldframe = td->td_intr_frame; 548 td->td_intr_frame = tf; 549 hardclockintr(); 550 td->td_intr_frame = oldframe; 551 td->td_intr_nesting_level--; 552 critical_exit(); 553} 554 555static void 556spitfire_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2) 557{ 558 u_int cpu; 559 560 while ((cpu = CPU_FFS(&cpus)) != 0) { 561 cpu--; 562 CPU_CLR(cpu, 
&cpus); 563 spitfire_ipi_single(cpu, d0, d1, d2); 564 } 565} 566 567static void 568spitfire_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2) 569{ 570 register_t s; 571 u_long ids; 572 u_int mid; 573 int i; 574 575 KASSERT(cpu != curcpu, ("%s: CPU can't IPI itself", __func__)); 576 KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) & IDR_BUSY) == 0, 577 ("%s: outstanding dispatch", __func__)); 578 mid = cpuid_to_mid[cpu]; 579 for (i = 0; i < IPI_RETRIES; i++) { 580 s = intr_disable(); 581 stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0); 582 stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1); 583 stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2); 584 membar(Sync); 585 stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT), 586 ASI_SDB_INTR_W, 0); 587 /* 588 * Workaround for SpitFire erratum #54; do a dummy read 589 * from a SDB internal register before the MEMBAR #Sync 590 * for the write to ASI_SDB_INTR_W (requiring another 591 * MEMBAR #Sync in order to make sure the write has 592 * occurred before the load). 593 */ 594 membar(Sync); 595 (void)ldxa(AA_SDB_CNTL_HIGH, ASI_SDB_CONTROL_R); 596 membar(Sync); 597 while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) & 598 IDR_BUSY) != 0) 599 ; 600 intr_restore(s); 601 if ((ids & (IDR_BUSY | IDR_NACK)) == 0) 602 return; 603 /* 604 * Leave interrupts enabled for a bit before retrying 605 * in order to avoid deadlocks if the other CPU is also 606 * trying to send an IPI. 
607 */ 608 DELAY(2); 609 } 610 if (kdb_active != 0 || panicstr != NULL) 611 printf("%s: couldn't send IPI to module 0x%u\n", 612 __func__, mid); 613 else 614 panic("%s: couldn't send IPI to module 0x%u", 615 __func__, mid); 616} 617 618static void 619cheetah_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2) 620{ 621 register_t s; 622 u_long ids; 623 u_int mid; 624 int i; 625 626 KASSERT(cpu != curcpu, ("%s: CPU can't IPI itself", __func__)); 627 KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) & 628 IDR_CHEETAH_ALL_BUSY) == 0, 629 ("%s: outstanding dispatch", __func__)); 630 mid = cpuid_to_mid[cpu]; 631 for (i = 0; i < IPI_RETRIES; i++) { 632 s = intr_disable(); 633 stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0); 634 stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1); 635 stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2); 636 membar(Sync); 637 stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT), 638 ASI_SDB_INTR_W, 0); 639 membar(Sync); 640 while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) & 641 IDR_BUSY) != 0) 642 ; 643 intr_restore(s); 644 if ((ids & (IDR_BUSY | IDR_NACK)) == 0) 645 return; 646 /* 647 * Leave interrupts enabled for a bit before retrying 648 * in order to avoid deadlocks if the other CPU is also 649 * trying to send an IPI. 
650 */ 651 DELAY(2); 652 } 653 if (kdb_active != 0 || panicstr != NULL) 654 printf("%s: couldn't send IPI to module 0x%u\n", 655 __func__, mid); 656 else 657 panic("%s: couldn't send IPI to module 0x%u", 658 __func__, mid); 659} 660 661static void 662cheetah_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2) 663{ 664 char pbuf[CPUSETBUFSIZ]; 665 register_t s; 666 u_long ids; 667 u_int bnp; 668 u_int cpu; 669 int i; 670 671 KASSERT(!CPU_ISSET(curcpu, &cpus), ("%s: CPU can't IPI itself", 672 __func__)); 673 KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) & 674 IDR_CHEETAH_ALL_BUSY) == 0, 675 ("%s: outstanding dispatch", __func__)); 676 if (CPU_EMPTY(&cpus)) 677 return; 678 ids = 0; 679 for (i = 0; i < IPI_RETRIES * mp_ncpus; i++) { 680 s = intr_disable(); 681 stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0); 682 stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1); 683 stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2); 684 membar(Sync); 685 bnp = 0; 686 for (cpu = 0; cpu < mp_ncpus; cpu++) { 687 if (CPU_ISSET(cpu, &cpus)) { 688 stxa(AA_INTR_SEND | (cpuid_to_mid[cpu] << 689 IDC_ITID_SHIFT) | bnp << IDC_BN_SHIFT, 690 ASI_SDB_INTR_W, 0); 691 membar(Sync); 692 bnp++; 693 if (bnp == IDR_CHEETAH_MAX_BN_PAIRS) 694 break; 695 } 696 } 697 while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) & 698 IDR_CHEETAH_ALL_BUSY) != 0) 699 ; 700 intr_restore(s); 701 bnp = 0; 702 for (cpu = 0; cpu < mp_ncpus; cpu++) { 703 if (CPU_ISSET(cpu, &cpus)) { 704 if ((ids & (IDR_NACK << (2 * bnp))) == 0) 705 CPU_CLR(cpu, &cpus); 706 bnp++; 707 } 708 } 709 if (CPU_EMPTY(&cpus)) 710 return; 711 /* 712 * Leave interrupts enabled for a bit before retrying 713 * in order to avoid deadlocks if the other CPUs are 714 * also trying to send IPIs. 
715 */ 716 DELAY(2 * mp_ncpus); 717 } 718 if (kdb_active != 0 || panicstr != NULL) 719 printf("%s: couldn't send IPI (cpus=%s ids=0x%lu)\n", 720 __func__, cpusetobj_strprint(pbuf, &cpus), ids); 721 else 722 panic("%s: couldn't send IPI (cpus=%s ids=0x%lu)", 723 __func__, cpusetobj_strprint(pbuf, &cpus), ids); 724} 725 726static void 727jalapeno_ipi_single(u_int cpu, u_long d0, u_long d1, u_long d2) 728{ 729 register_t s; 730 u_long ids; 731 u_int busy, busynack, mid; 732 int i; 733 734 KASSERT(cpu != curcpu, ("%s: CPU can't IPI itself", __func__)); 735 KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) & 736 IDR_CHEETAH_ALL_BUSY) == 0, 737 ("%s: outstanding dispatch", __func__)); 738 mid = cpuid_to_mid[cpu]; 739 busy = IDR_BUSY << (2 * mid); 740 busynack = (IDR_BUSY | IDR_NACK) << (2 * mid); 741 for (i = 0; i < IPI_RETRIES; i++) { 742 s = intr_disable(); 743 stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0); 744 stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1); 745 stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2); 746 membar(Sync); 747 stxa(AA_INTR_SEND | (mid << IDC_ITID_SHIFT), 748 ASI_SDB_INTR_W, 0); 749 membar(Sync); 750 while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) & 751 busy) != 0) 752 ; 753 intr_restore(s); 754 if ((ids & busynack) == 0) 755 return; 756 /* 757 * Leave interrupts enabled for a bit before retrying 758 * in order to avoid deadlocks if the other CPU is also 759 * trying to send an IPI. 
760 */ 761 DELAY(2); 762 } 763 if (kdb_active != 0 || panicstr != NULL) 764 printf("%s: couldn't send IPI to module 0x%u\n", 765 __func__, mid); 766 else 767 panic("%s: couldn't send IPI to module 0x%u", 768 __func__, mid); 769} 770 771static void 772jalapeno_ipi_selected(cpuset_t cpus, u_long d0, u_long d1, u_long d2) 773{ 774 char pbuf[CPUSETBUFSIZ]; 775 register_t s; 776 u_long ids; 777 u_int cpu; 778 int i; 779 780 KASSERT(!CPU_ISSET(curcpu, &cpus), ("%s: CPU can't IPI itself", 781 __func__)); 782 KASSERT((ldxa(0, ASI_INTR_DISPATCH_STATUS) & 783 IDR_CHEETAH_ALL_BUSY) == 0, 784 ("%s: outstanding dispatch", __func__)); 785 if (CPU_EMPTY(&cpus)) 786 return; 787 ids = 0; 788 for (i = 0; i < IPI_RETRIES * mp_ncpus; i++) { 789 s = intr_disable(); 790 stxa(AA_SDB_INTR_D0, ASI_SDB_INTR_W, d0); 791 stxa(AA_SDB_INTR_D1, ASI_SDB_INTR_W, d1); 792 stxa(AA_SDB_INTR_D2, ASI_SDB_INTR_W, d2); 793 membar(Sync); 794 for (cpu = 0; cpu < mp_ncpus; cpu++) { 795 if (CPU_ISSET(cpu, &cpus)) { 796 stxa(AA_INTR_SEND | (cpuid_to_mid[cpu] << 797 IDC_ITID_SHIFT), ASI_SDB_INTR_W, 0); 798 membar(Sync); 799 } 800 } 801 while (((ids = ldxa(0, ASI_INTR_DISPATCH_STATUS)) & 802 IDR_CHEETAH_ALL_BUSY) != 0) 803 ; 804 intr_restore(s); 805 if ((ids & 806 (IDR_CHEETAH_ALL_BUSY | IDR_CHEETAH_ALL_NACK)) == 0) 807 return; 808 for (cpu = 0; cpu < mp_ncpus; cpu++) 809 if (CPU_ISSET(cpu, &cpus)) 810 if ((ids & (IDR_NACK << 811 (2 * cpuid_to_mid[cpu]))) == 0) 812 CPU_CLR(cpu, &cpus); 813 /* 814 * Leave interrupts enabled for a bit before retrying 815 * in order to avoid deadlocks if the other CPUs are 816 * also trying to send IPIs. 817 */ 818 DELAY(2 * mp_ncpus); 819 } 820 if (kdb_active != 0 || panicstr != NULL) 821 printf("%s: couldn't send IPI (cpus=%s ids=0x%lu)\n", 822 __func__, cpusetobj_strprint(pbuf, &cpus), ids); 823 else 824 panic("%s: couldn't send IPI (cpus=%s ids=0x%lu)", 825 __func__, cpusetobj_strprint(pbuf, &cpus), ids); 826} 827