/*-
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/mp_machdep.c 166569 2007-02-08 16:49:59Z jhb $");

#include "opt_apic.h"
#include "opt_cpu.h"
#include "opt_kstack_pages.h"
#include "opt_mp_watchdog.h"
#include "opt_sched.h"
#include "opt_smp.h"

#if !defined(lint)
#if !defined(SMP)
#error How did you get here?
#endif

#ifndef DEV_APIC
#error The apic device is required for SMP, add "device apic" to your config file.
#endif
#if defined(CPU_DISABLE_CMPXCHG) && !defined(COMPILING_LINT)
#error SMP not supported with CPU_DISABLE_CMPXCHG
#endif
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h>           /* cngetc() */
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/apicreg.h>
#include <machine/md_var.h>
#include <machine/mp_watchdog.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/specialreg.h>
#include <machine/privatespace.h>

#define WARMBOOT_TARGET         0
#define WARMBOOT_OFF            (KERNBASE + 0x0467)
#define WARMBOOT_SEG            (KERNBASE + 0x0469)

#define CMOS_REG                (0x70)
#define CMOS_DATA               (0x71)
#define BIOS_RESET              (0x0f)
#define BIOS_WARM               (0x0a)
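
/*
 * Editorial note: WARMBOOT_OFF/WARMBOOT_SEG are the kernel-virtual
 * addresses of the BIOS warm-boot vector at real-mode 40:67 (offset word
 * at physical 0x467, segment word at 0x469).  Per the Intel MP
 * specification's BIOS-assisted startup path, writing BIOS_WARM into the
 * CMOS shutdown status byte (register BIOS_RESET) tells the BIOS to skip
 * POST after a CPU reset/INIT and jump through that vector instead;
 * start_all_aps() aims the vector at our trampoline before waking each AP.
 */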

/*
 * This code MUST be enabled here and in mpboot.s.
 * It follows the very early stages of AP boot by placing values in CMOS RAM.
 * It is NORMALLY never needed, hence the primitive method for enabling it.
 *
#define CHECK_POINTS
 */

#if defined(CHECK_POINTS) && !defined(PC98)
#define CHECK_READ(A)    (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D)                           \
        CHECK_WRITE(0x34, (D));                 \
        CHECK_WRITE(0x35, (D));                 \
        CHECK_WRITE(0x36, (D));                 \
        CHECK_WRITE(0x37, (D));                 \
        CHECK_WRITE(0x38, (D));                 \
        CHECK_WRITE(0x39, (D))

#define CHECK_PRINT(S)                          \
        printf("%s: %d, %d, %d, %d, %d, %d\n",  \
            (S),                                \
            CHECK_READ(0x34),                   \
            CHECK_READ(0x35),                   \
            CHECK_READ(0x36),                   \
            CHECK_READ(0x37),                   \
            CHECK_READ(0x38),                   \
            CHECK_READ(0x39))

#else                           /* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)
#define CHECK_WRITE(A, D)

#endif                          /* CHECK_POINTS */

/* lock region used by kernel profiling */
int     mcount_lock;

int     mp_naps;                /* # of application processors */
int     boot_cpu_id = -1;       /* designated BSP */
extern  int nkpt;

/*
 * CPU topology map datastructures for HTT.
 */
static struct cpu_group mp_groups[MAXCPU];
static struct cpu_top mp_top;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;

#ifdef STOP_NMI
volatile cpumask_t ipi_nmi_pending;

static void     ipi_nmi_selected(u_int32_t cpus);
#endif

#ifdef COUNT_IPIS
/* Interrupt counts. */
static u_long *ipi_preempt_counts[MAXCPU];
static u_long *ipi_ast_counts[MAXCPU];
u_long *ipi_invltlb_counts[MAXCPU];
u_long *ipi_invlrng_counts[MAXCPU];
u_long *ipi_invlpg_counts[MAXCPU];
u_long *ipi_invlcache_counts[MAXCPU];
u_long *ipi_rendezvous_counts[MAXCPU];
u_long *ipi_lazypmap_counts[MAXCPU];
#endif

/*
 * Local data and functions.
 */

#ifdef STOP_NMI
/*
 * Provide an alternate method of stopping other CPUs.  If another CPU has
 * disabled interrupts the conventional STOP IPI will be blocked.  This
 * NMI-based stop should get through in that case.
 */
static int stop_cpus_with_nmi = 1;
SYSCTL_INT(_debug, OID_AUTO, stop_cpus_with_nmi, CTLTYPE_INT | CTLFLAG_RW,
    &stop_cpus_with_nmi, 0, "");
TUNABLE_INT("debug.stop_cpus_with_nmi", &stop_cpus_with_nmi);
#else
#define stop_cpus_with_nmi      0
#endif

static u_int logical_cpus;

/* used to hold the APs until we are ready to release them */
static struct mtx ap_boot_mtx;

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

/*
 * Store data from cpu_add() until later in the boot when we actually setup
 * the APs.
 */
struct cpu_info {
        int     cpu_present:1;
        int     cpu_bsp:1;
        int     cpu_disabled:1;
} static cpu_info[MAXCPU];
static int cpu_apic_ids[MAXCPU];

/* Holds pending bitmap based IPIs per CPU */
static volatile u_int cpu_ipi_pending[MAXCPU];

static u_int boot_address;

static void     set_interrupt_apic_ids(void);
static int      start_all_aps(void);
static void     install_ap_tramp(void);
static int      start_ap(int apic_id);
static void     release_aps(void *dummy);

static int      hlt_logical_cpus;
static u_int    hyperthreading_cpus;
static cpumask_t        hyperthreading_cpus_mask;
static int      hyperthreading_allowed = 1;
static struct   sysctl_ctx_list logical_cpu_clist;

static void
mem_range_AP_init(void)
{
        if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
                mem_range_softc.mr_op->initAP(&mem_range_softc);
}

void
mp_topology(void)
{
        struct cpu_group *group;
        u_int regs[4];
        int logical_cpus;
        int apic_id;
        int groups;
        int cpu;

        /* Build the smp_topology map. */
        /* Nothing to do if there is no HTT support. */
        if ((cpu_feature & CPUID_HTT) == 0)
                return;
        logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
        if (logical_cpus <= 1)
                return;
        /* Nothing to do if reported cores are physical cores. */
        if (strcmp(cpu_vendor, "GenuineIntel") == 0 && cpu_high >= 4) {
                cpuid_count(4, 0, regs);
                if ((regs[0] & 0x1f) != 0 &&
                    logical_cpus <= ((regs[0] >> 26) & 0x3f) + 1)
                        return;
        }
        group = &mp_groups[0];
        groups = 1;
        for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {
                if (!cpu_info[apic_id].cpu_present)
                        continue;
                /*
                 * If the current group has members and we're not a logical
                 * cpu, create a new group.
                 */
                if (group->cg_count != 0 && (apic_id % logical_cpus) == 0) {
                        group++;
                        groups++;
                }
                group->cg_count++;
                group->cg_mask |= 1 << cpu;
                cpu++;
        }

        mp_top.ct_count = groups;
        mp_top.ct_group = mp_groups;
        smp_topology = &mp_top;
}
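
/*
 * Editorial note on the leaf-4 probe in mp_topology(): with EAX=4/ECX=0,
 * EAX[4:0] is the cache type (0 means no more caches) and EAX[31:26] is
 * the maximum number of cores per package minus one.  If the HTT logical
 * CPU count is fully explained by real cores, the extra CPUs are not
 * hyperthreads and no HTT groups are built.  A minimal userland sketch of
 * the same probe (hypothetical, using GCC/Clang's <cpuid.h>, not part of
 * this file):
 *
 *      unsigned a, b, c, d;
 *      __cpuid_count(4, 0, a, b, c, d);
 *      if ((a & 0x1f) != 0)
 *              printf("cores/pkg <= %u\n", ((a >> 26) & 0x3f) + 1);
 */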

/*
 * Calculate usable address in base memory for AP trampoline code.
 */
u_int
mp_bootaddress(u_int basemem)
{

        boot_address = trunc_page(basemem);     /* round down to 4k boundary */
        if ((basemem - boot_address) < bootMP_size)
                boot_address -= PAGE_SIZE;      /* not enough, lower by 4k */

        return boot_address;
}
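
/*
 * Editorial note: the trampoline must sit on a page boundary in base
 * (real-mode addressable) memory: an AP starts in 16-bit real mode, and
 * start_ap() encodes the trampoline's physical page number into the
 * 8-bit STARTUP IPI vector, which can only name page-aligned addresses
 * below 1MB.
 */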

void
cpu_add(u_int apic_id, char boot_cpu)
{

        if (apic_id >= MAXCPU) {
                printf("SMP: CPU %d exceeds maximum CPU %d, ignoring\n",
                    apic_id, MAXCPU - 1);
                return;
        }
        KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice",
            apic_id));
        cpu_info[apic_id].cpu_present = 1;
        if (boot_cpu) {
                KASSERT(boot_cpu_id == -1,
                    ("CPU %d claims to be BSP, but CPU %d already is", apic_id,
                    boot_cpu_id));
                boot_cpu_id = apic_id;
                cpu_info[apic_id].cpu_bsp = 1;
        }
        mp_ncpus++;
        if (bootverbose)
                printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
                    "AP");
}

void
cpu_mp_setmaxid(void)
{

        mp_maxid = MAXCPU - 1;
}

int
cpu_mp_probe(void)
{

        /*
         * Always record BSP in CPU map so that the mbuf init code works
         * correctly.
         */
        all_cpus = 1;
        if (mp_ncpus == 0) {
                /*
                 * No CPUs were found, so this must be a UP system.  Set up
                 * the variables to represent a system with a single CPU
                 * with an id of 0.
                 */
                mp_ncpus = 1;
                return (0);
        }

        /* At least one CPU was found. */
        if (mp_ncpus == 1) {
                /*
                 * One CPU was found, so this must be a UP system with
                 * an I/O APIC.
                 */
                return (0);
        }

        /* At least two CPUs were found. */
        return (1);
}

/*
 * Initialize the IPI handlers and start up the APs.
 */
void
cpu_mp_start(void)
{
        int i;
        u_int threads_per_cache, p[4];

        /* Initialize the logical ID to APIC ID table. */
        for (i = 0; i < MAXCPU; i++) {
                cpu_apic_ids[i] = -1;
                cpu_ipi_pending[i] = 0;
        }

        /* Install an inter-CPU IPI for TLB invalidation */
        setidt(IPI_INVLTLB, IDTVEC(invltlb),
            SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
        setidt(IPI_INVLPG, IDTVEC(invlpg),
            SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
        setidt(IPI_INVLRNG, IDTVEC(invlrng),
            SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

        /* Install an inter-CPU IPI for cache invalidation. */
        setidt(IPI_INVLCACHE, IDTVEC(invlcache),
            SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

        /* Install an inter-CPU IPI for lazy pmap release */
        setidt(IPI_LAZYPMAP, IDTVEC(lazypmap),
            SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

        /* Install an inter-CPU IPI for all-CPU rendezvous */
        setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous),
            SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

        /* Install generic inter-CPU IPI handler */
        setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler),
            SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

        /* Install an inter-CPU IPI for CPU stop/restart */
        setidt(IPI_STOP, IDTVEC(cpustop),
            SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

        /* Set boot_cpu_id if needed. */
        if (boot_cpu_id == -1) {
                boot_cpu_id = PCPU_GET(apic_id);
                cpu_info[boot_cpu_id].cpu_bsp = 1;
        } else
                KASSERT(boot_cpu_id == PCPU_GET(apic_id),
                    ("BSP's APIC ID doesn't match boot_cpu_id"));
        cpu_apic_ids[0] = boot_cpu_id;

        /* Start each Application Processor */
        start_all_aps();

        /* Setup the initial logical CPUs info. */
        logical_cpus = logical_cpus_mask = 0;
        if (cpu_feature & CPUID_HTT)
                logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;

        /*
         * Work out if hyperthreading is *really* enabled.  This
         * is made really ugly by the fact that processors lie: Dual
         * core processors claim to be hyperthreaded even when they're
         * not, presumably because they want to be treated the same
         * way as HTT with respect to per-cpu software licensing.
         * At the time of writing (May 12, 2005) the only hyperthreaded
         * cpus are from Intel, and Intel's dual-core processors can be
         * identified via the "deterministic cache parameters" cpuid
         * calls.
         */
        /*
         * First determine if this is an Intel processor which claims
         * to have hyperthreading support.
         */
        if ((cpu_feature & CPUID_HTT) &&
            (strcmp(cpu_vendor, "GenuineIntel") == 0)) {
                /*
                 * If the "deterministic cache parameters" cpuid calls
                 * are available, use them.
                 */
                if (cpu_high >= 4) {
                        /* Ask the processor about the L1 cache. */
                        for (i = 0; i < 1; i++) {
                                cpuid_count(4, i, p);
                                threads_per_cache = ((p[0] & 0x3ffc000) >> 14) + 1;
                                if (hyperthreading_cpus < threads_per_cache)
                                        hyperthreading_cpus = threads_per_cache;
                                if ((p[0] & 0x1f) == 0)
                                        break;
                        }
                }

                /*
                 * If the deterministic cache parameters are not
                 * available, or if no caches were reported to exist,
                 * just accept what the HTT flag indicated.
                 */
                if (hyperthreading_cpus == 0)
                        hyperthreading_cpus = logical_cpus;
        }

        set_interrupt_apic_ids();
}
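
/*
 * Editorial note on the loop above: for leaf 4, EAX[25:14] is the maximum
 * number of logical processors sharing the queried cache, minus one.  For
 * the L1 cache this equals the number of hardware threads per core, so a
 * result of 1 exposes a multi-core package that merely advertises HTT,
 * while a result > 1 indicates real hyperthreads.
 */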

/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
        int i, x;

        /* List CPUs */
        printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id);
        for (i = 1, x = 0; x < MAXCPU; x++) {
                if (!cpu_info[x].cpu_present || cpu_info[x].cpu_bsp)
                        continue;
                if (cpu_info[x].cpu_disabled)
                        printf(" cpu (AP): APIC ID: %2d (disabled)\n", x);
                else {
                        KASSERT(i < mp_ncpus,
                            ("mp_ncpus and actual cpus are out of whack"));
                        printf(" cpu%d (AP): APIC ID: %2d\n", i++, x);
                }
        }
}

/*
 * AP CPUs call this to initialize themselves.
 */
void
init_secondary(void)
{
        vm_offset_t addr;
        int     gsel_tss;
        int     x, myid;
        u_int   cr0;

        /* bootAP is set in start_ap() to our ID. */
        myid = bootAP;
        gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
        gdt_segs[GPROC0_SEL].ssd_base =
                (int) &SMP_prvspace[myid].pcpu.pc_common_tss;
        SMP_prvspace[myid].pcpu.pc_prvspace =
                &SMP_prvspace[myid].pcpu;

        for (x = 0; x < NGDT; x++) {
                ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
        }

        r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
        r_gdt.rd_base = (int) &gdt[myid * NGDT];
        lgdt(&r_gdt);                   /* does magic intra-segment return */

        lidt(&r_idt);

        lldt(_default_ldt);
        PCPU_SET(currentldt, _default_ldt);

        gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
        gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
        PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */
        PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
        PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
        PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
        PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
        ltr(gsel_tss);

        PCPU_SET(fsgs_gdt, &gdt[myid * NGDT + GUFS_SEL].sd);

        /*
         * Set to a known state:
         *   Set by mpboot.s:   CR0_PG, CR0_PE
         *   Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
         */
        cr0 = rcr0();
        cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
        load_cr0(cr0);
        CHECK_WRITE(0x38, 5);

        /* Disable local APIC just to be sure. */
        lapic_disable();

        /* signal our startup to the BSP. */
        mp_naps++;
        CHECK_WRITE(0x39, 6);

        /* Spin until the BSP releases the APs. */
        while (!aps_ready)
                ia32_pause();

        /* BSP may have changed PTD while we were waiting */
        invltlb();
        for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
                invlpg(addr);

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
        lidt(&r_idt);
#endif

        /* Initialize the PAT MSR if present. */
        pmap_init_pat();

        /* set up CPU registers and state */
        cpu_setregs();

        /* set up FPU state on the AP */
        npxinit(__INITIAL_NPXCW__);

        /* set up SSE registers */
        enable_sse();

        /* A quick check from sanity claus */
        if (PCPU_GET(apic_id) != lapic_id()) {
                printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
                printf("SMP: actual apic_id = %d\n", lapic_id());
                printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id));
                printf("PTD[MPPTDI] = %#jx\n", (uintmax_t)PTD[MPPTDI]);
                panic("cpuid mismatch! boom!!");
        }

        /* Initialize curthread. */
        KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
        PCPU_SET(curthread, PCPU_GET(idlethread));

        mtx_lock_spin(&ap_boot_mtx);

        /* Init local apic for irq's */
        lapic_setup(1);

        /* Set memory range attributes for this CPU to match the BSP */
        mem_range_AP_init();

        smp_cpus++;

        CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
        printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

        /* Determine if we are a logical CPU. */
        if (logical_cpus > 1 && PCPU_GET(apic_id) % logical_cpus != 0)
                logical_cpus_mask |= PCPU_GET(cpumask);

        /* Determine if we are a hyperthread. */
        if (hyperthreading_cpus > 1 &&
            PCPU_GET(apic_id) % hyperthreading_cpus != 0)
                hyperthreading_cpus_mask |= PCPU_GET(cpumask);

        /* Build our map of 'other' CPUs. */
        PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

        if (bootverbose)
                lapic_dump("AP");

        if (smp_cpus == mp_ncpus) {
                /* enable IPI's, tlb shootdown, freezes etc */
                atomic_store_rel_int(&smp_started, 1);
                smp_active = 1;         /* historic */
        }

        mtx_unlock_spin(&ap_boot_mtx);

        /* wait until all the APs are up */
        while (smp_started == 0)
                ia32_pause();

        /* ok, now grab sched_lock and enter the scheduler */
        mtx_lock_spin(&sched_lock);

        /*
         * Correct spinlock nesting.  The idle thread context that we are
         * borrowing was created so that it would start out with a single
         * spin lock (sched_lock) held in fork_trampoline().  Since we've
         * explicitly acquired locks in this function, the nesting count
         * is now 2 rather than 1.  Since we are nested, calling
         * spinlock_exit() will simply adjust the counts without allowing
         * spin lock using code to interrupt us.
         */
        spinlock_exit();
        KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));

        PCPU_SET(switchtime, cpu_ticks());
        PCPU_SET(switchticks, ticks);

        cpu_throw(NULL, choosethread());        /* doesn't return */

        panic("scheduler returned us to %s", __func__);
        /* NOTREACHED */
}

/*******************************************************************
 * local functions and data
 */

/*
 * We tell the I/O APIC code about all the CPUs that we want to receive
 * interrupts.  If we don't want certain CPUs to receive IRQs we
 * can simply not tell the I/O APIC code about them in this function.
 * We also do not tell it about the BSP since it tells itself about
 * the BSP internally to work with UP kernels and on UP machines.
 */
static void
set_interrupt_apic_ids(void)
{
        u_int apic_id;

        for (apic_id = 0; apic_id < MAXCPU; apic_id++) {
                if (!cpu_info[apic_id].cpu_present)
                        continue;
                if (cpu_info[apic_id].cpu_bsp)
                        continue;
                if (cpu_info[apic_id].cpu_disabled)
                        continue;

                /* Don't let hyperthreads service interrupts. */
                if (hyperthreading_cpus > 1 &&
                    apic_id % hyperthreading_cpus != 0)
                        continue;

                intr_add_cpu(apic_id);
        }
}

/*
 * start each AP in our list
 */
static int
start_all_aps(void)
{
#ifndef PC98
        u_char mpbiosreason;
#endif
        struct pcpu *pc;
        char *stack;
        uintptr_t kptbase;
        u_int32_t mpbioswarmvec;
        int apic_id, cpu, i, pg;

        mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

        /* install the AP 1st level boot code */
        install_ap_tramp();

        /* save the current value of the warm-start vector */
        mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
#ifndef PC98
        outb(CMOS_REG, BIOS_RESET);
        mpbiosreason = inb(CMOS_DATA);
#endif

        /* set up temporary P==V mapping for AP boot */
        /* XXX this is a hack, we should boot the AP on its own stack/PTD */
        kptbase = (uintptr_t)(void *)KPTphys;
        for (i = 0; i < NKPT; i++)
                PTD[i] = (pd_entry_t)(PG_V | PG_RW |
                    ((kptbase + i * PAGE_SIZE) & PG_FRAME));
        invltlb();

        /* start each AP */
        for (cpu = 0, apic_id = 0; apic_id < MAXCPU; apic_id++) {

                /* Ignore non-existent CPUs and the BSP. */
                if (!cpu_info[apic_id].cpu_present ||
                    cpu_info[apic_id].cpu_bsp)
                        continue;

                /* Don't use this CPU if it has been disabled by a tunable. */
                if (resource_disabled("lapic", apic_id)) {
                        cpu_info[apic_id].cpu_disabled = 1;
                        mp_ncpus--;
                        continue;
                }

                cpu++;

                /* save APIC ID for this logical ID */
                cpu_apic_ids[cpu] = apic_id;

                /* first page of AP's private space */
                pg = cpu * i386_btop(sizeof(struct privatespace));

                /* allocate a new private data page */
                pc = (struct pcpu *)kmem_alloc(kernel_map, PAGE_SIZE);

                /* wire it into the private page table page */
                SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(pc));

                /* allocate and set up an idle stack data page */
                stack = (char *)kmem_alloc(kernel_map,
                    KSTACK_PAGES * PAGE_SIZE);          /* XXXKSE */
                for (i = 0; i < KSTACK_PAGES; i++)
                        SMPpt[pg + 1 + i] = (pt_entry_t)
                            (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));

                /* prime data page for it to use */
                pcpu_init(pc, cpu, sizeof(struct pcpu));
                pc->pc_apic_id = apic_id;

                /* setup a vector to our boot code */
                *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
                *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
#ifndef PC98
                outb(CMOS_REG, BIOS_RESET);
                outb(CMOS_DATA, BIOS_WARM);     /* 'warm-start' */
#endif

                bootSTK = &SMP_prvspace[cpu].idlekstack[KSTACK_PAGES *
                    PAGE_SIZE];
                bootAP = cpu;

                /* attempt to start the Application Processor */
                CHECK_INIT(99); /* setup checkpoints */
                if (!start_ap(apic_id)) {
                        printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
                        CHECK_PRINT("trace");   /* show checkpoints */
                        /* better panic as the AP may be running loose */
                        printf("panic y/n? [y] ");
                        if (cngetc() != 'n')
                                panic("bye-bye");
                }
                CHECK_PRINT("trace");           /* show checkpoints */

                all_cpus |= (1 << cpu);         /* record AP in CPU map */
        }

        /* build our map of 'other' CPUs */
        PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

        /* restore the warmstart vector */
        *(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;

#ifndef PC98
        outb(CMOS_REG, BIOS_RESET);
        outb(CMOS_DATA, mpbiosreason);
#endif

        /*
         * Set up the idle context for the BSP.  Similar to above except
         * that some was done by locore, some by pmap.c and some is implicit
         * because the BSP is cpu#0 and the page is initially zero and also
         * because we can refer to variables by name on the BSP..
         */

        /* Allocate and setup BSP idle stack */
        stack = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE);
        for (i = 0; i < KSTACK_PAGES; i++)
                SMPpt[1 + i] = (pt_entry_t)
                    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));

        for (i = 0; i < NKPT; i++)
                PTD[i] = 0;
        pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);

        /* number of APs actually started */
        return mp_naps;
}
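
/*
 * Editorial note: the temporary P==V mapping built above is needed
 * because an AP enables paging in mpboot.s while still executing at the
 * trampoline's physical address; until it jumps to its KERNBASE-relative
 * address, the low virtual addresses must alias the same physical pages.
 * The mapping is torn down at the end of start_all_aps() (the PTD[i] = 0
 * loop) once every AP runs from high memory.
 */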

/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

static void
install_ap_tramp(void)
{
        int     x;
        int     size = *(int *) ((u_long) & bootMP_size);
        vm_offset_t va = boot_address + KERNBASE;
        u_char *src = (u_char *) ((u_long) bootMP);
        u_char *dst = (u_char *) va;
        u_int   boot_base = (u_int) bootMP;
        u_int8_t *dst8;
        u_int16_t *dst16;
        u_int32_t *dst32;

        KASSERT (size <= PAGE_SIZE,
            ("'size' does not fit in PAGE_SIZE, as expected."));
        pmap_kenter(va, boot_address);
        pmap_invalidate_page (kernel_pmap, va);
        for (x = 0; x < size; ++x)
                *dst++ = *src++;

        /*
         * modify addresses in code we just moved to basemem.  unfortunately we
         * need fairly detailed info about mpboot.s for this to work.  changes
         * to mpboot.s might require changes here.
         */

        /* boot code is located in KERNEL space */
        dst = (u_char *) va;

        /* modify the lgdt arg */
        dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
        *dst32 = boot_address + ((u_int) & MP_GDT - boot_base);

        /* modify the ljmp target for MPentry() */
        dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
        *dst32 = ((u_int) MPentry - KERNBASE);

        /* modify the target for boot code segment */
        dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
        dst8 = (u_int8_t *) (dst16 + 1);
        *dst16 = (u_int) boot_address & 0xffff;
        *dst8 = ((u_int) boot_address >> 16) & 0xff;

        /* modify the target for boot data segment */
        dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
        dst8 = (u_int8_t *) (dst16 + 1);
        *dst16 = (u_int) boot_address & 0xffff;
        *dst8 = ((u_int) boot_address >> 16) & 0xff;
}
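
/*
 * Editorial note: install_ap_tramp() performs manual relocation.  The
 * trampoline is assembled at bootMP's link address, so each absolute
 * reference (the lgdt operand, the far-jump offset into MPentry(), and
 * the 24-bit base fields of the boot code/data segment descriptors) is
 * rewritten to point at boot_address, the copy in base memory.  The
 * "+ 1" applied to bigJump skips the one-byte ljmp opcode so the store
 * lands on the instruction's 32-bit offset operand (as the layout of
 * mpboot.s dictates).
 */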

/*
 * This function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.  It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It isn't pretty,
 * but it seems to work.
 */
static int
start_ap(int apic_id)
{
        int vector, ms;
        int cpus;

        /* calculate the vector */
        vector = (boot_address >> 12) & 0xff;

        /* used as a watchpoint to signal AP startup */
        cpus = mp_naps;

        /*
         * first we do an INIT/RESET IPI.  this INIT IPI might be run,
         * resetting and running the target CPU.  OR this INIT IPI might be
         * latched (P5 bug), with the CPU waiting for a STARTUP IPI.  OR
         * this INIT IPI might be ignored.
         */

        /* do an INIT IPI: assert RESET */
        lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
            APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id);

        /* wait for pending status end */
        lapic_ipi_wait(-1);

        /* do an INIT IPI: deassert RESET */
        lapic_ipi_raw(APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL |
            APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, 0);

        /* wait for pending status end */
        DELAY(10000);           /* wait ~10 ms */
        lapic_ipi_wait(-1);

        /*
         * next we do a STARTUP IPI: the previous INIT IPI might still be
         * latched (P5 bug), in which case this first STARTUP IPI terminates
         * immediately and the previously started INIT IPI continues.  OR
         * the previous INIT IPI has already run, and this STARTUP IPI will
         * run.  OR the previous INIT IPI was ignored, and this STARTUP IPI
         * will run.
         */

        /* do a STARTUP IPI */
        lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
            APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
            vector, apic_id);
        lapic_ipi_wait(-1);
        DELAY(200);             /* wait ~200 us */

        /*
         * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run
         * IF the previous STARTUP IPI was cancelled by a latched INIT IPI.
         * OR this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
         * recognized after hardware RESET or INIT IPI.
         */

        lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE |
            APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP |
            vector, apic_id);
        lapic_ipi_wait(-1);
        DELAY(200);             /* wait ~200 us */

        /* Wait up to 5 seconds for it to start. */
        for (ms = 0; ms < 5000; ms++) {
                if (mp_naps > cpus)
                        return 1;       /* return SUCCESS */
                DELAY(1000);
        }
        return 0;               /* return FAILURE */
}
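
/*
 * Editorial note: the INIT/STARTUP/STARTUP sequence above is the
 * "universal startup algorithm" from the Intel MP specification.  The
 * STARTUP vector is an 8-bit physical page number: vector VV starts the
 * AP in real mode at physical 0x000VV000.  For example, a trampoline at
 * boot_address 0x9f000 yields vector (0x9f000 >> 12) & 0xff == 0x9f, and
 * the AP begins execution at 0x9f000.
 */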

#ifdef COUNT_XINVLTLB_HITS
u_int xhits_gbl[MAXCPU];
u_int xhits_pg[MAXCPU];
u_int xhits_rng[MAXCPU];
SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl,
    sizeof(xhits_gbl), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg,
    sizeof(xhits_pg), "IU", "");
SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng,
    sizeof(xhits_rng), "IU", "");

u_int ipi_global;
u_int ipi_page;
u_int ipi_range;
u_int ipi_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
    0, "");

u_int ipi_masked_global;
u_int ipi_masked_page;
u_int ipi_masked_range;
u_int ipi_masked_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
    &ipi_masked_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
    &ipi_masked_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
    &ipi_masked_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
    &ipi_masked_range_size, 0, "");
#endif /* COUNT_XINVLTLB_HITS */

/*
 * Flush the TLB on all other CPUs
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
        u_int ncpu;

        ncpu = mp_ncpus - 1;    /* does not shootdown self */
        if (ncpu < 1)
                return;         /* no other cpus */
        mtx_assert(&smp_ipi_mtx, MA_OWNED);
        smp_tlb_addr1 = addr1;
        smp_tlb_addr2 = addr2;
        atomic_store_rel_int(&smp_tlb_wait, 0);
        ipi_all_but_self(vector);
        while (smp_tlb_wait < ncpu)
                ia32_pause();
}

static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1,
    vm_offset_t addr2)
{
        int ncpu, othercpus;

        othercpus = mp_ncpus - 1;
        if (mask == (u_int)-1) {
                ncpu = othercpus;
                if (ncpu < 1)
                        return;
        } else {
                mask &= ~PCPU_GET(cpumask);
                if (mask == 0)
                        return;
                ncpu = bitcount32(mask);
                if (ncpu > othercpus) {
                        /* XXX this should be a panic offence */
                        printf("SMP: tlb shootdown to %d other cpus"
                            " (only have %d)\n", ncpu, othercpus);
                        ncpu = othercpus;
                }
                /* XXX should be a panic, implied by mask == 0 above */
                if (ncpu < 1)
                        return;
        }
        mtx_assert(&smp_ipi_mtx, MA_OWNED);
        smp_tlb_addr1 = addr1;
        smp_tlb_addr2 = addr2;
        atomic_store_rel_int(&smp_tlb_wait, 0);
        if (mask == (u_int)-1)
                ipi_all_but_self(vector);
        else
                ipi_selected(mask, vector);
        while (smp_tlb_wait < ncpu)
                ia32_pause();
}
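
/*
 * Editorial note: the shootdown rendezvous publishes the target addresses
 * in smp_tlb_addr1/smp_tlb_addr2, zeroes smp_tlb_wait with a releasing
 * store, and sends the IPI; each target performs its invalidation and
 * atomically increments smp_tlb_wait (in the low-level IPI stubs), so the
 * sender spins until the count reaches the number of targeted CPUs.
 * smp_ipi_mtx serializes concurrent shootdowns over this single set of
 * shared globals.
 */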

void
smp_cache_flush(void)
{

        if (smp_started)
                smp_tlb_shootdown(IPI_INVLCACHE, 0, 0);
}

void
smp_invltlb(void)
{

        if (smp_started) {
                smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
                ipi_global++;
#endif
        }
}

void
smp_invlpg(vm_offset_t addr)
{

        if (smp_started) {
                smp_tlb_shootdown(IPI_INVLPG, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
                ipi_page++;
#endif
        }
}

void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{

        if (smp_started) {
                smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
                ipi_range++;
                ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
        }
}

void
smp_masked_invltlb(u_int mask)
{

        if (smp_started) {
                smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
                ipi_masked_global++;
#endif
        }
}

void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{

        if (smp_started) {
                smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
                ipi_masked_page++;
#endif
        }
}

void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{

        if (smp_started) {
                smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
                ipi_masked_range++;
                ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
        }
}

void
ipi_bitmap_handler(struct trapframe frame)
{
        int cpu = PCPU_GET(cpuid);
        u_int ipi_bitmap;

        ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]);

        if (ipi_bitmap & (1 << IPI_PREEMPT)) {
                struct thread *running_thread = curthread;
#ifdef COUNT_IPIS
                (*ipi_preempt_counts[cpu])++;
#endif
                mtx_lock_spin(&sched_lock);
                if (running_thread->td_critnest > 1)
                        running_thread->td_owepreempt = 1;
                else
                        mi_switch(SW_INVOL | SW_PREEMPT, NULL);
                mtx_unlock_spin(&sched_lock);
        }

        if (ipi_bitmap & (1 << IPI_AST)) {
#ifdef COUNT_IPIS
                (*ipi_ast_counts[cpu])++;
#endif
                /* Nothing to do for AST */
        }
}

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{
        int cpu;
        u_int bitmap = 0;
        u_int old_pending;
        u_int new_pending;

        if (IPI_IS_BITMAPED(ipi)) {
                bitmap = 1 << ipi;
                ipi = IPI_BITMAP_VECTOR;
        }

#ifdef STOP_NMI
        if (ipi == IPI_STOP && stop_cpus_with_nmi) {
                ipi_nmi_selected(cpus);
                return;
        }
#endif
        CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
        while ((cpu = ffs(cpus)) != 0) {
                cpu--;
                cpus &= ~(1 << cpu);

                KASSERT(cpu_apic_ids[cpu] != -1,
                    ("IPI to non-existent CPU %d", cpu));

                if (bitmap) {
                        do {
                                old_pending = cpu_ipi_pending[cpu];
                                new_pending = old_pending | bitmap;
                        } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu],
                            old_pending, new_pending));

                        if (old_pending)
                                continue;
                }

                lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]);
        }
}
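
/*
 * Editorial note: "bitmapped" IPIs (IPI_PREEMPT, IPI_AST) multiplex
 * several logical IPIs onto the single IPI_BITMAP_VECTOR to conserve
 * APIC vectors.  The sender ORs the logical IPI's bit into the target's
 * cpu_ipi_pending word with a compare-and-swap loop; if bits were already
 * pending, a hardware IPI is known to be in flight and a second one is
 * skipped.  ipi_bitmap_handler() then atomically reads and clears the
 * word and services every pending bit at once.
 */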

/*
 * send an IPI interrupt containing 'vector' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

        if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
                ipi_selected(all_cpus, ipi);
                return;
        }
        CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
        lapic_ipi_vectored(ipi, APIC_IPI_DEST_ALL);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

        if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
                ipi_selected(PCPU_GET(other_cpus), ipi);
                return;
        }
        CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
        lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

        if (IPI_IS_BITMAPED(ipi) || (ipi == IPI_STOP && stop_cpus_with_nmi)) {
                ipi_selected(PCPU_GET(cpumask), ipi);
                return;
        }
        CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
        lapic_ipi_vectored(ipi, APIC_IPI_DEST_SELF);
}

#ifdef STOP_NMI
/*
 * send NMI IPI to selected CPUs
 */

#define BEFORE_SPIN     1000000

void
ipi_nmi_selected(u_int32_t cpus)
{
        int cpu;
        register_t icrlo;

        icrlo = APIC_DELMODE_NMI | APIC_DESTMODE_PHY | APIC_LEVEL_ASSERT
                | APIC_TRIGMOD_EDGE;

        CTR2(KTR_SMP, "%s: cpus: %x nmi", __func__, cpus);

        atomic_set_int(&ipi_nmi_pending, cpus);

        while ((cpu = ffs(cpus)) != 0) {
                cpu--;
                cpus &= ~(1 << cpu);

                KASSERT(cpu_apic_ids[cpu] != -1,
                    ("IPI NMI to non-existent CPU %d", cpu));

                /* Wait for an earlier IPI to finish. */
                if (!lapic_ipi_wait(BEFORE_SPIN))
                        panic("ipi_nmi_selected: previous IPI has not cleared");

                lapic_ipi_raw(icrlo, cpu_apic_ids[cpu]);
        }
}

int
ipi_nmi_handler(void)
{
        int cpumask = PCPU_GET(cpumask);

        if (!(ipi_nmi_pending & cpumask))
                return 1;

        atomic_clear_int(&ipi_nmi_pending, cpumask);
        cpustop_handler();
        return 0;
}

#endif /* STOP_NMI */

/*
 * Handle an IPI_STOP by saving our current context and spinning until we
 * are resumed.
 */
void
cpustop_handler(void)
{
        int cpu = PCPU_GET(cpuid);
        int cpumask = PCPU_GET(cpumask);

        savectx(&stoppcbs[cpu]);

        /* Indicate that we are stopped */
        atomic_set_int(&stopped_cpus, cpumask);

        /* Wait for restart */
        while (!(started_cpus & cpumask))
                ia32_pause();

        atomic_clear_int(&started_cpus, cpumask);
        atomic_clear_int(&stopped_cpus, cpumask);

        if (cpu == 0 && cpustop_restartfunc != NULL) {
                cpustop_restartfunc();
                cpustop_restartfunc = NULL;
        }
}

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the APs out of the pen.
 */
static void
release_aps(void *dummy __unused)
{

        if (mp_ncpus == 1)
                return;
        mtx_lock_spin(&sched_lock);
        atomic_store_rel_int(&aps_ready, 1);
        while (smp_started == 0)
                ia32_pause();
        mtx_unlock_spin(&sched_lock);
}
SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
        u_int mask;
        int error;

        mask = hlt_cpus_mask;
        error = sysctl_handle_int(oidp, &mask, 0, req);
        if (error || !req->newptr)
                return (error);

        if (logical_cpus_mask != 0 &&
            (mask & logical_cpus_mask) == logical_cpus_mask)
                hlt_logical_cpus = 1;
        else
                hlt_logical_cpus = 0;

        if (! hyperthreading_allowed)
                mask |= hyperthreading_cpus_mask;

        if ((mask & all_cpus) == all_cpus)
                mask &= ~(1<<0);
        hlt_cpus_mask = mask;
        return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU",
    "Bitmap of CPUs to halt.  101 (binary) will halt CPUs 0 and 2.");

static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
        int disable, error;

        disable = hlt_logical_cpus;
        error = sysctl_handle_int(oidp, &disable, 0, req);
        if (error || !req->newptr)
                return (error);

        if (disable)
                hlt_cpus_mask |= logical_cpus_mask;
        else
                hlt_cpus_mask &= ~logical_cpus_mask;

        if (! hyperthreading_allowed)
                hlt_cpus_mask |= hyperthreading_cpus_mask;

        if ((hlt_cpus_mask & all_cpus) == all_cpus)
                hlt_cpus_mask &= ~(1<<0);

        hlt_logical_cpus = disable;
        return (error);
}

static int
sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
{
        int allowed, error;

        allowed = hyperthreading_allowed;
        error = sysctl_handle_int(oidp, &allowed, 0, req);
        if (error || !req->newptr)
                return (error);

        if (allowed)
                hlt_cpus_mask &= ~hyperthreading_cpus_mask;
        else
                hlt_cpus_mask |= hyperthreading_cpus_mask;

        if (logical_cpus_mask != 0 &&
            (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
                hlt_logical_cpus = 1;
        else
                hlt_logical_cpus = 0;

        if ((hlt_cpus_mask & all_cpus) == all_cpus)
                hlt_cpus_mask &= ~(1<<0);

        hyperthreading_allowed = allowed;
        return (error);
}

static void
cpu_hlt_setup(void *dummy __unused)
{

        if (logical_cpus_mask != 0) {
                TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
                    &hlt_logical_cpus);
                sysctl_ctx_init(&logical_cpu_clist);
                SYSCTL_ADD_PROC(&logical_cpu_clist,
                    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
                    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
                    sysctl_hlt_logical_cpus, "IU", "");
                SYSCTL_ADD_UINT(&logical_cpu_clist,
                    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
                    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
                    &logical_cpus_mask, 0, "");

                if (hlt_logical_cpus)
                        hlt_cpus_mask |= logical_cpus_mask;

                /*
                 * If necessary for security purposes, force
                 * hyperthreading off, regardless of the value
                 * of hlt_logical_cpus.
                 */
                if (hyperthreading_cpus_mask) {
                        TUNABLE_INT_FETCH("machdep.hyperthreading_allowed",
                            &hyperthreading_allowed);
                        SYSCTL_ADD_PROC(&logical_cpu_clist,
                            SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
                            "hyperthreading_allowed", CTLTYPE_INT|CTLFLAG_RW,
                            0, 0, sysctl_hyperthreading_allowed, "IU", "");
                        if (! hyperthreading_allowed)
                                hlt_cpus_mask |= hyperthreading_cpus_mask;
                }
        }
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

int
mp_grab_cpu_hlt(void)
{
        u_int mask = PCPU_GET(cpumask);
#ifdef MP_WATCHDOG
        u_int cpuid = PCPU_GET(cpuid);
#endif
        int retval;

#ifdef MP_WATCHDOG
        ap_watchdog(cpuid);
#endif

        retval = mask & hlt_cpus_mask;
        while (mask & hlt_cpus_mask)
                __asm __volatile("sti; hlt" : : : "memory");
        return (retval);
}
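
/*
 * Editorial note: "sti; hlt" above is the idiomatic safe-halt pair.  STI
 * takes effect only after the following instruction, so no interrupt can
 * be delivered between STI and HLT; the CPU therefore halts with
 * interrupts enabled, and any interrupt (e.g. the timer) wakes it to
 * re-test hlt_cpus_mask.
 */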

#ifdef COUNT_IPIS
/*
 * Setup interrupt counters for IPI handlers.
 */
static void
mp_ipi_intrcnt(void *dummy)
{
        char buf[64];
        int i;

        for (i = 0; i <= mp_maxid; i++) {
                if (CPU_ABSENT(i))
                        continue;
                snprintf(buf, sizeof(buf), "cpu%d: invltlb", i);
                intrcnt_add(buf, &ipi_invltlb_counts[i]);
                snprintf(buf, sizeof(buf), "cpu%d: invlrng", i);
                intrcnt_add(buf, &ipi_invlrng_counts[i]);
                snprintf(buf, sizeof(buf), "cpu%d: invlpg", i);
                intrcnt_add(buf, &ipi_invlpg_counts[i]);
                snprintf(buf, sizeof(buf), "cpu%d: preempt", i);
                intrcnt_add(buf, &ipi_preempt_counts[i]);
                snprintf(buf, sizeof(buf), "cpu%d: ast", i);
                intrcnt_add(buf, &ipi_ast_counts[i]);
                snprintf(buf, sizeof(buf), "cpu%d: rendezvous", i);
                intrcnt_add(buf, &ipi_rendezvous_counts[i]);
                snprintf(buf, sizeof(buf), "cpu%d: lazypmap", i);
                intrcnt_add(buf, &ipi_lazypmap_counts[i]);
        }
}
SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL)
#endif