/*-
 * Copyright (c) 1996, by Steve Passe
 * Copyright (c) 2003, by Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/mp_machdep.c 163219 2006-10-10 23:23:12Z jhb $");

#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/tss.h>

#define WARMBOOT_TARGET		0
#define WARMBOOT_OFF		(KERNBASE + 0x0467)
#define WARMBOOT_SEG		(KERNBASE + 0x0469)

#define CMOS_REG		(0x70)
#define CMOS_DATA		(0x71)
#define BIOS_RESET		(0x0f)
#define BIOS_WARM		(0x0a)

/* lock region used by kernel profiling */
int	mcount_lock;

int	mp_naps;		/* # of application processors */
int	boot_cpu_id = -1;	/* designated BSP */
extern	int nkpt;

/*
 * CPU topology map data structures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Free these after use */
void *bootstacks[MAXCPU];

/* Temporary holder for double fault stack */
char *doublefault_stack;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];
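/*
 * Shootdown handshake: the initiating CPU fills in smp_tlb_addr1/addr2,
 * zeroes smp_tlb_wait, sends the invalidation IPI, and then spins until
 * every target CPU's invalidation handler has bumped smp_tlb_wait.
 */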
/* Variables needed for SMP TLB shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

extern inthand_t IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

#ifdef STOP_NMI
volatile cpumask_t ipi_nmi_pending;

static void	ipi_nmi_selected(u_int32_t cpus);
#endif

/*
 * Local data and functions.
 */

#ifdef STOP_NMI
/*
 * Provide an alternate method of stopping other CPUs.  If another CPU has
 * disabled interrupts the conventional STOP IPI will be blocked.  This
 * NMI-based stop should get through in that case.
 */
static int stop_cpus_with_nmi = 1;
SYSCTL_INT(_debug, OID_AUTO, stop_cpus_with_nmi, CTLTYPE_INT | CTLFLAG_RW,
    &stop_cpus_with_nmi, 0, "");
TUNABLE_INT("debug.stop_cpus_with_nmi", &stop_cpus_with_nmi);
#else
#define	stop_cpus_with_nmi	0
#endif

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually set up
 * the APs.
 */
struct cpu_info {
	int	cpu_present:1;
	int	cpu_bsp:1;
	int	cpu_disabled:1;
} static cpu_info[MAXCPU];
static int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void	set_interrupt_apic_ids(void);
static int	start_all_aps(void);
static int	start_ap(int apic_id);
static void	release_aps(void *dummy);

static int hlt_logical_cpus;
static u_int hyperthreading_cpus;
static cpumask_t hyperthreading_cpus_mask;
static int hyperthreading_allowed = 1;
static struct sysctl_ctx_list logical_cpu_clist;
static u_int bootMP_size;

static void
mem_range_AP_init(void)
{
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
	struct cpu_group *group;
	u_int regs[4];
	int logical_cpus;
	int apic_id;
	int groups;
	int cpu;

	/* Build the smp_topology map. */
	/* Nothing to do if there is no HTT support. */
	if ((cpu_feature & CPUID_HTT) == 0)
		return;
	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (logical_cpus <= 1)
		return;
	/* Nothing to do if reported cores are physical cores. */
	if (strcmp(cpu_vendor, "GenuineIntel") == 0 && cpu_high >= 4) {
		cpuid_count(4, 0, regs);
		if ((regs[0] & 0x1f) != 0 &&
		    logical_cpus <= ((regs[0] >> 26) & 0x3f) + 1)
			return;
	}
	group = &mp_groups[0];
	groups = 1;
	for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		/*
		 * If the current group has members and we're not a logical
		 * cpu, create a new group.
		 */
		if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
			group++;
			groups++;
		}
		group->cg_count++;
		group->cg_mask |= 1 << cpu;
		cpu++;
	}

	mp_top.ct_count = groups;
	mp_top.ct_group = mp_groups;
	smp_topology = &mp_top;
}
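/*
 * Layout of the AP bootstrap area at the top of base memory, as
 * established below and consumed by start_all_aps():
 *
 *	mptramp_pagetables + 0    level 4 (PML4) page
 *	mptramp_pagetables + 4K   level 3 (PDP) page
 *	mptramp_pagetables + 8K   level 2 (PD) page, 512 2MB mappings
 *	boot_address (+ 12K)      trampoline code (mptramp_start)
 */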
/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

	bootMP_size = mptramp_end - mptramp_start;
	boot_address = trunc_page(basemem * 1024); /* round down to 4k boundary */
	if (((basemem * 1024) - boot_address) < bootMP_size)
		boot_address -= PAGE_SIZE;	/* not enough, lower by 4k */
	/* 3 levels of page table pages */
	mptramp_pagetables = boot_address - (PAGE_SIZE * 3);

	return mptramp_pagetables;
}

void
cpu_add(u_int apic_id, char boot_cpu)
{

	if (apic_id >= MAXCPU) {
		printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
		    apic_id, MAXCPU - 1);
		return;
	}
	KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
	    apic_id));
	cpu_info[apic_id].cpu_present = 1;
	if (boot_cpu) {
		KASSERT(boot_cpu_id == -1,
		    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
		    boot_cpu_id));
		boot_cpu_id = apic_id;
		cpu_info[apic_id].cpu_bsp = 1;
	}
	mp_ncpus++;
	if (apic_id > mp_maxid)
		mp_maxid = apic_id;
	if (bootverbose)
		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
		    "AP");
}

void
cpu_mp_setmaxid(void)
{

	/*
	 * mp_maxid should be already set by calls to cpu_add().
	 * Just sanity check its value here.
	 */
	if (mp_ncpus == 0)
		KASSERT(mp_maxid == 0,
		    ("%s: mp_ncpus is zero, but mp_maxid is not", __func__));
	else if (mp_ncpus == 1)
		mp_maxid = 0;
	else
		KASSERT(mp_maxid >= mp_ncpus - 1,
		    ("%s: counters out of sync: max %d, count %d", __func__,
		    mp_maxid, mp_ncpus));
}

int
cpu_mp_probe(void)
{

	/*
	 * Always record BSP in CPU map so that the mbuf init code works
	 * correctly.
	 */
	all_cpus = 1;
	if (mp_ncpus == 0) {
		/*
		 * No CPUs were found, so this must be a UP system.  Set up
		 * the variables to represent a system with a single CPU
		 * with an id of 0.
		 */
		mp_ncpus = 1;
		return (0);
	}

	/* At least one CPU was found. */
	if (mp_ncpus == 1) {
		/*
		 * One CPU was found, so this must be a UP system with
		 * an I/O APIC.
		 */
		mp_maxid = 0;
		return (0);
	}

	/* At least two CPUs were found. */
	return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
	int i;
	u_int threads_per_cache, p[4];

	/* Initialize the logical ID to APIC ID table. */
	for (i = 0; i < MAXCPU; i++) {
		cpu_apic_ids[i] = -1;
		cpu_ipi_pending[i] = 0;
	}

	/* Install an inter-CPU IPI for TLB invalidation */
	setidt(IPI_INVLTLB, IDTVEC(invltlb), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLPG, IDTVEC(invlpg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IPI_INVLRNG, IDTVEC(invlrng), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for cache invalidation. */
	setidt(IPI_INVLCACHE, IDTVEC(invlcache), SDT_SYSIGT, SEL_KPL, 0);

	/* Install an inter-CPU IPI for all-CPU rendezvous */
	setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);

	/* Install generic inter-CPU IPI handler */
	setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
	    SDT_SYSIGT, SEL_KPL, 0);
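	/*
	 * Bitmapped IPIs multiplex several IPI types through the single
	 * vector installed above: ipi_selected() records the type in the
	 * target's cpu_ipi_pending word and the handler drains the whole
	 * word at once.
	 */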
	/* Install an inter-CPU IPI for CPU stop/restart */
	setidt(IPI_STOP, IDTVEC(cpustop), SDT_SYSIGT, SEL_KPL, 0);

	/* Set boot_cpu_id if needed. */
	if (boot_cpu_id == -1) {
		boot_cpu_id = PCPU_GET(apic_id);
		cpu_info[boot_cpu_id].cpu_bsp = 1;
	} else
		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
		    ("BSP's APIC ID doesn't match boot_cpu_id"));
	cpu_apic_ids[0] = boot_cpu_id;

	/* Start each Application Processor */
	start_all_aps();

	/* Set up the initial logical CPUs info. */
	logical_cpus = logical_cpus_mask = 0;
	if (cpu_feature & CPUID_HTT)
		logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

	/*
	 * Work out if hyperthreading is *really* enabled.  This
	 * is made really ugly by the fact that processors lie: Dual
	 * core processors claim to be hyperthreaded even when they're
	 * not, presumably because they want to be treated the same
	 * way as HTT with respect to per-cpu software licensing.
	 * At the time of writing (May 12, 2005) the only hyperthreaded
	 * cpus are from Intel, and Intel's dual-core processors can be
	 * identified via the "deterministic cache parameters" cpuid
	 * calls.
	 */
	/*
	 * First determine if this is an Intel processor which claims
	 * to have hyperthreading support.
	 */
	if ((cpu_feature & CPUID_HTT) &&
	    (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
		/*
		 * If the "deterministic cache parameters" cpuid calls
		 * are available, use them.
		 */
		if (cpu_high >= 4) {
			/* Ask the processor about the L1 cache. */
			for (i = 0; i < 1; i++) {
				cpuid_count(4, i, p);
				threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
				if (hyperthreading_cpus < threads_per_cache)
					hyperthreading_cpus = threads_per_cache;
				if ((p[0] & 0x1f) == 0)
					break;
			}
		}

		/*
		 * If the deterministic cache parameters are not
		 * available, or if no caches were reported to exist,
		 * just accept what the HTT flag indicated.
		 */
		if (hyperthreading_cpus == 0)
			hyperthreading_cpus = logical_cpus;
	}

	set_interrupt_apic_ids();
}

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int i, x;

	/* List CPUs */
	printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
	for (i = 1, x = 0; x < MAXCPU; x++) {
		if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
			continue;
		if (cpu_info[x].cpu_disabled)
			printf(" cpu (AP): APIC ID: %2d (disabled)\n", x);
		else {
			KASSERT(i < mp_ncpus,
			    ("mp_ncpus and actual cpus are out of whack"));
			printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
		}
	}
}
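/*
 * Each AP enters init_secondary() from the trampoline with its logical
 * ID in bootAP and a private boot stack in bootSTK.  It sets up its own
 * TSS, GDT, per-CPU data and syscall MSRs, increments mp_naps to tell
 * the BSP it is alive, and then spins on aps_ready until release_aps()
 * lets it into the scheduler.
 */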
447 */ 448void 449init_secondary(void) 450{ 451 struct pcpu *pc; 452 u_int64_t msr, cr0; 453 int cpu, gsel_tss; 454 455 /* Set by the startup code for us to use */ 456 cpu = bootAP; 457 458 /* Init tss */ 459 common_tss[cpu] = common_tss[0]; 460 common_tss[cpu].tss_rsp0 = 0; /* not used until after switch */ 461 common_tss[cpu].tss_iobase = sizeof(struct amd64tss); 462 common_tss[cpu].tss_ist1 = (long)&doublefault_stack[PAGE_SIZE]; 463 464 gdt_segs[GPROC0_SEL].ssd_base = (long) &common_tss[cpu]; 465 ssdtosyssd(&gdt_segs[GPROC0_SEL], 466 (struct system_segment_descriptor *)&gdt[GPROC0_SEL]); 467 468 lgdt(&r_gdt); /* does magic intra-segment return */ 469 470 /* Get per-cpu data */ 471 pc = &__pcpu[cpu]; 472 473 /* prime data page for it to use */ 474 pcpu_init(pc, cpu, sizeof(struct pcpu)); 475 pc->pc_apic_id = cpu_apic_ids[cpu]; 476 pc->pc_prvspace = pc; 477 pc->pc_curthread = 0; 478 pc->pc_tssp = &common_tss[cpu]; 479 pc->pc_rsp0 = 0; 480 481 wrmsr(MSR_FSBASE, 0); /* User value */ 482 wrmsr(MSR_GSBASE, (u_int64_t)pc); 483 wrmsr(MSR_KGSBASE, (u_int64_t)pc); /* XXX User value while we're in the kernel */ 484 485 lidt(&r_idt); 486 487 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 488 ltr(gsel_tss); 489 490 /* 491 * Set to a known state: 492 * Set by mpboot.s: CR0_PG, CR0_PE 493 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM 494 */ 495 cr0 = rcr0(); 496 cr0 &= ~(CR0_CD | CR0_NW | CR0_EM); 497 load_cr0(cr0); 498 499 /* Set up the fast syscall stuff */ 500 msr = rdmsr(MSR_EFER) | EFER_SCE; 501 wrmsr(MSR_EFER, msr); 502 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall)); 503 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32)); 504 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) | 505 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48); 506 wrmsr(MSR_STAR, msr); 507 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D); 508 509 /* Disable local APIC just to be sure. */ 510 lapic_disable(); 511 512 /* signal our startup to the BSP. */ 513 mp_naps++; 514 515 /* Spin until the BSP releases the AP's. */ 516 while (!aps_ready) 517 ia32_pause(); 518 519 /* Initialize the PAT MSR. */ 520 pmap_init_pat(); 521 522 /* set up CPU registers and state */ 523 cpu_setregs(); 524 525 /* set up SSE/NX registers */ 526 initializecpu(); 527 528 /* set up FPU state on the AP */ 529 fpuinit(); 530 531 /* A quick check from sanity claus */ 532 if (PCPU_GET(apic_id) != lapic_id()) { 533 printf("SMP: cpuid = %d\n", PCPU_GET(cpuid)); 534 printf("SMP: actual apic_id = %d\n", lapic_id()); 535 printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id)); 536 panic("cpuid mismatch! boom!!"); 537 } 538 539 /* Initialize curthread. */ 540 KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); 541 PCPU_SET(curthread, PCPU_GET(idlethread)); 542 543 mtx_lock_spin(&ap_boot_mtx); 544 545 /* Init local apic for irq's */ 546 lapic_setup(1); 547 548 /* Set memory range attributes for this CPU to match the BSP */ 549 mem_range_AP_init(); 550 551 smp_cpus++; 552 553 CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid)); 554 printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid)); 555 556 /* Determine if we are a logical CPU. */ 557 if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0) 558 logical_cpus_mask |= PCPU_GET(cpumask); 559 560 /* Determine if we are a hyperthread. */ 561 if (hyperthreading_cpus > 1 && 562 PCPU_GET(apic_id) % hyperthreading_cpus != 0) 563 hyperthreading_cpus_mask |= PCPU_GET(cpumask); 564 565 /* Build our map of 'other' CPUs. 
	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		lapic_dump("AP");

	if (smp_cpus == mp_ncpus) {
		/* enable IPIs, TLB shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	/* historic */
	}

	/*
	 * Enable global pages TLB extension.
	 * This also implicitly flushes the TLB.
	 */
	load_cr4(rcr4() | CR4_PGE);

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the APs are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	/*
	 * Correct spinlock nesting.  The idle thread context that we are
	 * borrowing was created so that it would start out with a single
	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
	 * explicitly acquired locks in this function, the nesting count
	 * is now 2 rather than 1.  Since we are nested, calling
	 * spinlock_exit() will simply adjust the counts without allowing
	 * spin lock using code to interrupt us.
	 */
	spinlock_exit();
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

	PCPU_SET(switchtime, cpu_ticks());
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
	u_int apic_id;

	for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
		if (!cpu_info[apic_id].cpu_present)
			continue;
		if (cpu_info[apic_id].cpu_bsp)
			continue;

		/* Don't let hyperthreads service interrupts. */
		if (hyperthreading_cpus > 1 &&
		    apic_id % hyperthreading_cpus != 0)
			continue;

		intr_add_cpu(apic_id);
	}
}
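/*
 * start_all_aps() runs on the BSP.  It copies the trampoline code into
 * base memory, builds the temporary 1GB identity-mapped page tables the
 * trampoline uses to reach long mode, points the BIOS warm-boot vector
 * at the trampoline, and then brings up each AP in turn with the
 * INIT/STARTUP IPI sequence in start_ap().
 */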
/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
	vm_offset_t va = boot_address + KERNBASE;
	u_int64_t *pt4, *pt3, *pt2;
	u_int32_t mpbioswarmvec;
	int apic_id, cpu, i;
	u_char mpbiosreason;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* install the AP 1st level boot code */
	pmap_kenter(va, boot_address);
	pmap_invalidate_page(kernel_pmap, va);
	bcopy(mptramp_start, (void *)va, bootMP_size);

	/* Locate the page tables, they'll be below the trampoline */
	pt4 = (u_int64_t *)(uintptr_t)(mptramp_pagetables + KERNBASE);
	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);

	/* Create the initial 1GB replicated page tables */
	for (i = 0; i < 512; i++) {
		/* Each slot of the level 4 pages points to the same level 3 page */
		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + PAGE_SIZE);
		pt4[i] |= PG_V | PG_RW | PG_U;

		/* Each slot of the level 3 pages points to the same level 2 page */
		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables + (2 * PAGE_SIZE));
		pt3[i] |= PG_V | PG_RW | PG_U;

		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
		pt2[i] = i * (2 * 1024 * 1024);
		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
	}

	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);

	/* set up a vector to our boot code */
	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */

	/* start each AP */
	for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {

		/* Ignore non-existent CPUs and the BSP. */
		if (!cpu_info[apic_id].cpu_present ||
		    cpu_info[apic_id].cpu_bsp)
			continue;

		/* Don't use this CPU if it has been disabled by a tunable. */
		if (resource_disabled("lapic", apic_id)) {
			cpu_info[apic_id].cpu_disabled = 1;
			mp_ncpus--;
			continue;
		}

		cpu++;

		/* save APIC ID for this logical ID */
		cpu_apic_ids[cpu] = apic_id;

		/* allocate and set up an idle stack data page */
		bootstacks[cpu] = (void *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
		doublefault_stack = (char *)kmem_alloc(kernel_map, PAGE_SIZE);

		bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 8;
		bootAP = cpu;

		/* attempt to start the Application Processor */
		if (!start_ap(apic_id)) {
			/* restore the warmstart vector */
			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
		}

		all_cpus |= (1 << cpu);		/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	/* restore the warmstart vector */
	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);

	/* number of APs actually started */
	return mp_naps;
}
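/*
 * The 8-bit vector field of a STARTUP IPI holds the real-mode page
 * number (physical address >> 12) at which the woken CPU begins
 * executing, which is why the trampoline must sit below 1MB on a page
 * boundary.
 */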
/*
 * This function starts the AP (application processor) identified by the
 * APIC ID 'physicalCpu'.  It does quite a "song and dance" to accomplish
 * this.  This is necessary because of the nuances of the different
 * hardware we might encounter.  It isn't pretty, but it seems to work.
 */
static int
start_ap(int apic_id)
{
	int vector, ms;
	int cpus;

	/* calculate the vector */
	vector = (boot_address >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_naps;

	/*
	 * First we do an INIT/RESET IPI.  This INIT IPI might be run,
	 * resetting and running the target CPU; OR this INIT IPI might be
	 * latched (P5 bug), the CPU waiting for a STARTUP IPI; OR this
	 * INIT IPI might be ignored.
	 */

	/* do an INIT IPI: assert RESET */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

	/* wait for pending status end */
	lapic_ipi_wait(-1);

	/* do an INIT IPI: deassert RESET */
	lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

	/* wait for pending status end */
	DELAY(10000);		/* wait ~10mS */
	lapic_ipi_wait(-1);

	/*
	 * Next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched (P5 bug), in which case this first STARTUP IPI terminates
	 * immediately and the previously started INIT IPI continues; OR
	 * the previous INIT IPI has already run, and this STARTUP IPI will
	 * run; OR the previous INIT IPI was ignored, and this STARTUP IPI
	 * will run.
	 */

	/* do a STARTUP IPI */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200uS */

	/*
	 * Finally we do a second STARTUP IPI: this second STARTUP IPI
	 * should run IF the previous STARTUP IPI was cancelled by a latched
	 * INIT IPI; OR this STARTUP IPI will be ignored, as only ONE
	 * STARTUP IPI is recognized after hardware RESET or an INIT IPI.
	 */
	lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
	    APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
	    vector, apic_id);
	lapic_ipi_wait(-1);
	DELAY(200);		/* wait ~200uS */
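	/*
	 * Success is detected by watching mp_naps: the AP increments it
	 * from init_secondary() once it is far enough along to count.
	 */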
	/* Wait up to 5 seconds for it to start. */
	for (ms = 0; ms < 5000; ms++) {
		if (mp_naps > cpus)
			return 1;	/* return SUCCESS */
		DELAY(1000);
	}
	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPUs.
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;

	ncpu = mp_ncpus - 1;	/* does not shoot down self */
	if (ncpu < 1)
		return;		/* no other cpus */
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		mask &= ~PCPU_GET(cpumask);
		if (mask == 0)
			return;
		ncpu = bitcount32(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	mtx_assert(&smp_ipi_mtx, MA_OWNED);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	while (smp_tlb_wait < ncpu)
		ia32_pause();
}

void
smp_cache_flush(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
}

void
smp_invltlb(void)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
}

void
smp_invlpg(vm_offset_t addr)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
}

void
smp_masked_invltlb(u_int mask)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

	if (smp_started)
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
}

void
ipi_bitmap_handler(struct trapframe frame)
{
	int cpu = PCPU_GET(cpuid);
	u_int ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

#ifdef IPI_PREEMPTION
	/* Test the bit position, matching the 1 << ipi encoding used by
	 * ipi_selected(). */
	if (ipi_bitmap & (1 << IPI_PREEMPT)) {
		mtx_lock_spin(&sched_lock);
		/* Don't preempt the idle thread */
		if (curthread->td_priority < PRI_MIN_IDLE) {
			struct thread *running_thread = curthread;
			if (running_thread->td_critnest > 1)
				running_thread->td_owepreempt = 1;
			else
				mi_switch(SW_INVOL | SW_PREEMPT, NULL);
		}
		mtx_unlock_spin(&sched_lock);
	}
#endif

	/* Nothing to do for AST */
}
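/*
 * ipi_selected() handles bitmapped IPIs specially: rather than using one
 * vector per IPI type, it records the type in the target's
 * cpu_ipi_pending word and sends IPI_BITMAP_VECTOR, and it only raises
 * the interrupt when the word was previously empty, coalescing
 * back-to-back requests.
 */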
971 */ 972void 973ipi_selected(u_int32_t cpus, u_int ipi) 974{ 975 int cpu; 976 u_int bitmap = 0; 977 u_int old_pending; 978 u_int new_pending; 979 980 if (IPI_IS_BITMAPED(ipi)) { 981 bitmap = 1 << ipi; 982 ipi = IPI_BITMAP_VECTOR; 983 } 984 985#ifdef STOP_NMI 986 if (ipi == IPI_STOP && stop_cpus_with_nmi) { 987 ipi_nmi_selected(cpus); 988 return; 989 } 990#endif 991 CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi); 992 while ((cpu = ffs(cpus)) != 0) { 993 cpu--; 994 cpus &= ~(1 << cpu); 995 996 KASSERT(cpu_apic_ids[cpu] != -1, 997 ("IPI to non-existent CPU %d", cpu)); 998 999 if (bitmap) { 1000 do { 1001 old_pending = cpu_ipi_pending[cpu]; 1002 new_pending = old_pending | bitmap; 1003 } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],old_pending, new_pending)); 1004 1005 if (old_pending) 1006 continue; 1007 } 1008 1009 lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]); 1010 } 1011 1012} 1013 1014/* 1015 * send an IPI INTerrupt containing 'vector' to all CPUs, including myself 1016 */ 1017void 1018ipi_all(u_int ipi) 1019{ 1020 1021 if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) { 1022 ipi_selected(all_cpus, ipi); 1023 return; 1024 } 1025 CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); 1026 lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL); 1027} 1028 1029/* 1030 * send an IPI to all CPUs EXCEPT myself 1031 */ 1032void 1033ipi_all_but_self(u_int ipi) 1034{ 1035 1036 if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) { 1037 ipi_selected(PCPU_GET(other_cpus), ipi); 1038 return; 1039 } 1040 CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); 1041 lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS); 1042} 1043 1044/* 1045 * send an IPI to myself 1046 */ 1047void 1048ipi_self(u_int ipi) 1049{ 1050 1051 if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) { 1052 ipi_selected(PCPU_GET(cpumask), ipi); 1053 return; 1054 } 1055 CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); 1056 lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF); 1057} 1058 1059#ifdef STOP_NMI 1060/* 1061 * send NMI IPI to selected CPUs 1062 */ 1063 1064#define BEFORE_SPIN 1000000 1065 1066void 1067ipi_nmi_selected(u_int32_t cpus) 1068{ 1069 int cpu; 1070 register_t icrlo; 1071 1072 icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT 1073 | APIC_TRIGMOD_EDGE; 1074 1075 CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus); 1076 1077 atomic_set_int(&ipi_nmi_pending, cpus); 1078 1079 while ((cpu = ffs(cpus)) != 0) { 1080 cpu--; 1081 cpus &= ~(1 << cpu); 1082 1083 KASSERT(cpu_apic_ids[cpu] != -1, 1084 ("IPI NMI to non-existent CPU %d", cpu)); 1085 1086 /* Wait for an earlier IPI to finish. */ 1087 if (!lapic_ipi_wait(BEFORE_SPIN)) 1088 panic("ipi_nmi_selected: previous IPI has not cleared"); 1089 1090 lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]); 1091 } 1092} 1093 1094int 1095ipi_nmi_handler(void) 1096{ 1097 int cpumask = PCPU_GET(cpumask); 1098 1099 if (!(ipi_nmi_pending & cpumask)) 1100 return 1; 1101 1102 atomic_clear_int(&ipi_nmi_pending, cpumask); 1103 cpustop_handler(); 1104 return 0; 1105} 1106 1107#endif /* STOP_NMI */ 1108 1109/* 1110 * Handle an IPI_STOP by saving our current context and spinning until we 1111 * are resumed. 
1112 */ 1113void 1114cpustop_handler(void) 1115{ 1116 int cpu = PCPU_GET(cpuid); 1117 int cpumask = PCPU_GET(cpumask); 1118 1119 savectx(&stoppcbs[cpu]); 1120 1121 /* Indicate that we are stopped */ 1122 atomic_set_int(&stopped_cpus, cpumask); 1123 1124 /* Wait for restart */ 1125 while (!(started_cpus & cpumask)) 1126 ia32_pause(); 1127 1128 atomic_clear_int(&started_cpus, cpumask); 1129 atomic_clear_int(&stopped_cpus, cpumask); 1130 1131 if (cpu == 0 && cpustop_restartfunc != NULL) { 1132 cpustop_restartfunc(); 1133 cpustop_restartfunc = NULL; 1134 } 1135} 1136 1137/* 1138 * This is called once the rest of the system is up and running and we're 1139 * ready to let the AP's out of the pen. 1140 */ 1141static void 1142release_aps(void *dummy __unused) 1143{ 1144 1145 if (mp_ncpus == 1) 1146 return; 1147 mtx_lock_spin(&sched_lock); 1148 atomic_store_rel_int(&aps_ready, 1); 1149 while (smp_started == 0) 1150 ia32_pause(); 1151 mtx_unlock_spin(&sched_lock); 1152} 1153SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); 1154 1155static int 1156sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS) 1157{ 1158 u_int mask; 1159 int error; 1160 1161 mask = hlt_cpus_mask; 1162 error = sysctl_handle_int(oidp, &mask, 0, req); 1163 if (error || !req->newptr) 1164 return (error); 1165 1166 if (logical_cpus_mask != 0 && 1167 (mask & logical_cpus_mask) == logical_cpus_mask) 1168 hlt_logical_cpus = 1; 1169 else 1170 hlt_logical_cpus = 0; 1171 1172 if (! hyperthreading_allowed) 1173 mask |= hyperthreading_cpus_mask; 1174 1175 if ((mask & all_cpus) == all_cpus) 1176 mask &= ~(1<<0); 1177 hlt_cpus_mask = mask; 1178 return (error); 1179} 1180SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW, 1181 0, 0, sysctl_hlt_cpus, "IU", 1182 "Bitmap of CPUs to halt. 101 (binary) will halt CPUs 0 and 2."); 1183 1184static int 1185sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS) 1186{ 1187 int disable, error; 1188 1189 disable = hlt_logical_cpus; 1190 error = sysctl_handle_int(oidp, &disable, 0, req); 1191 if (error || !req->newptr) 1192 return (error); 1193 1194 if (disable) 1195 hlt_cpus_mask |= logical_cpus_mask; 1196 else 1197 hlt_cpus_mask &= ~logical_cpus_mask; 1198 1199 if (! 
static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	if (! hyperthreading_allowed)
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
	int allowed, error;

	allowed = hyperthreading_allowed;
	error = sysctl_handle_int(oidp, &allowed, 0, req);
	if (error || !req->newptr)
		return (error);

	if (allowed)
		hlt_cpus_mask &= ~hyperthreading_cpus_mask;
	else
		hlt_cpus_mask |= hyperthreading_cpus_mask;

	if (logical_cpus_mask != 0 &&
	    (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hyperthreading_allowed = allowed;
	return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;

		/*
		 * If necessary for security purposes, force
		 * hyperthreading off, regardless of the value
		 * of hlt_logical_cpus.
		 */
		if (hyperthreading_cpus_mask) {
			TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
			    &hyperthreading_allowed);
			SYSCTL_ADD_PROC(&logical_cpu_clist,
			    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
			    "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
			    0, 0, sysctl_hyperthreading_allowed, "IU", "");
			if (! hyperthreading_allowed)
				hlt_cpus_mask |= hyperthreading_cpus_mask;
		}
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
	u_int cpuid = PCPU_GET(cpuid);
#endif
	int retval;

#ifdef MP_WATCHDOG
	ap_watchdog(cpuid);
#endif

	retval = mask & hlt_cpus_mask;
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}