mp_x86.c revision 267526
1/*- 2 * Copyright (c) 1996, by Steve Passe 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. The name of the developer may NOT be used to endorse or promote products 11 * derived from this software without specific prior written permission. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 */ 25 26#include <sys/cdefs.h> 27__FBSDID("$FreeBSD: head/sys/i386/i386/mp_machdep.c 267526 2014-06-16 08:43:03Z royger $"); 28 29#include "opt_apic.h" 30#include "opt_cpu.h" 31#include "opt_kstack_pages.h" 32#include "opt_pmap.h" 33#include "opt_sched.h" 34#include "opt_smp.h" 35 36#if !defined(lint) 37#if !defined(SMP) 38#error How did you get here? 39#endif 40 41#ifndef DEV_APIC 42#error The apic device is required for SMP, add "device apic" to your config file. 43#endif 44#if defined(CPU_DISABLE_CMPXCHG) && !defined(COMPILING_LINT) 45#error SMP not supported with CPU_DISABLE_CMPXCHG 46#endif 47#endif /* not lint */ 48 49#include <sys/param.h> 50#include <sys/systm.h> 51#include <sys/bus.h> 52#include <sys/cons.h> /* cngetc() */ 53#include <sys/cpuset.h> 54#ifdef GPROF 55#include <sys/gmon.h> 56#endif 57#include <sys/kernel.h> 58#include <sys/ktr.h> 59#include <sys/lock.h> 60#include <sys/malloc.h> 61#include <sys/memrange.h> 62#include <sys/mutex.h> 63#include <sys/pcpu.h> 64#include <sys/proc.h> 65#include <sys/sched.h> 66#include <sys/smp.h> 67#include <sys/sysctl.h> 68 69#include <vm/vm.h> 70#include <vm/vm_param.h> 71#include <vm/pmap.h> 72#include <vm/vm_kern.h> 73#include <vm/vm_extern.h> 74 75#include <x86/apicreg.h> 76#include <machine/clock.h> 77#include <machine/cputypes.h> 78#include <x86/mca.h> 79#include <machine/md_var.h> 80#include <machine/pcb.h> 81#include <machine/psl.h> 82#include <machine/smp.h> 83#include <machine/specialreg.h> 84#include <machine/cpu.h> 85 86#define WARMBOOT_TARGET 0 87#define WARMBOOT_OFF (KERNBASE + 0x0467) 88#define WARMBOOT_SEG (KERNBASE + 0x0469) 89 90#define CMOS_REG (0x70) 91#define CMOS_DATA (0x71) 92#define BIOS_RESET (0x0f) 93#define BIOS_WARM (0x0a) 94 95/* 96 * this code MUST be enabled here and in mpboot.s. 97 * it follows the very early stages of AP boot by placing values in CMOS ram. 98 * it NORMALLY will never be needed and thus the primitive method for enabling. 99 * 100#define CHECK_POINTS 101 */ 102 103#if defined(CHECK_POINTS) && !defined(PC98) 104#define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA)) 105#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D))) 106 107#define CHECK_INIT(D); \ 108 CHECK_WRITE(0x34, (D)); \ 109 CHECK_WRITE(0x35, (D)); \ 110 CHECK_WRITE(0x36, (D)); \ 111 CHECK_WRITE(0x37, (D)); \ 112 CHECK_WRITE(0x38, (D)); \ 113 CHECK_WRITE(0x39, (D)); 114 115#define CHECK_PRINT(S); \ 116 printf("%s: %d, %d, %d, %d, %d, %d\n", \ 117 (S), \ 118 CHECK_READ(0x34), \ 119 CHECK_READ(0x35), \ 120 CHECK_READ(0x36), \ 121 CHECK_READ(0x37), \ 122 CHECK_READ(0x38), \ 123 CHECK_READ(0x39)); 124 125#else /* CHECK_POINTS */ 126 127#define CHECK_INIT(D) 128#define CHECK_PRINT(S) 129#define CHECK_WRITE(A, D) 130 131#endif /* CHECK_POINTS */ 132 133/* lock region used by kernel profiling */ 134int mcount_lock; 135 136int mp_naps; /* # of Applications processors */ 137int boot_cpu_id = -1; /* designated BSP */ 138 139extern struct pcpu __pcpu[]; 140 141/* AP uses this during bootstrap. Do not staticize. */ 142char *bootSTK; 143static int bootAP; 144 145/* Free these after use */ 146void *bootstacks[MAXCPU]; 147static void *dpcpu; 148 149struct pcb stoppcbs[MAXCPU]; 150struct pcb **susppcbs = NULL; 151 152/* Variables needed for SMP tlb shootdown. */ 153vm_offset_t smp_tlb_addr1; 154vm_offset_t smp_tlb_addr2; 155volatile int smp_tlb_wait; 156 157#ifdef COUNT_IPIS 158/* Interrupt counts. */ 159static u_long *ipi_preempt_counts[MAXCPU]; 160static u_long *ipi_ast_counts[MAXCPU]; 161u_long *ipi_invltlb_counts[MAXCPU]; 162u_long *ipi_invlrng_counts[MAXCPU]; 163u_long *ipi_invlpg_counts[MAXCPU]; 164u_long *ipi_invlcache_counts[MAXCPU]; 165u_long *ipi_rendezvous_counts[MAXCPU]; 166u_long *ipi_lazypmap_counts[MAXCPU]; 167static u_long *ipi_hardclock_counts[MAXCPU]; 168#endif 169 170/* Default cpu_ops implementation. */ 171struct cpu_ops cpu_ops; 172 173/* 174 * Local data and functions. 175 */ 176 177static volatile cpuset_t ipi_nmi_pending; 178 179/* used to hold the AP's until we are ready to release them */ 180static struct mtx ap_boot_mtx; 181 182/* Set to 1 once we're ready to let the APs out of the pen. */ 183static volatile int aps_ready = 0; 184 185/* 186 * Store data from cpu_add() until later in the boot when we actually setup 187 * the APs. 188 */ 189struct cpu_info { 190 int cpu_present:1; 191 int cpu_bsp:1; 192 int cpu_disabled:1; 193 int cpu_hyperthread:1; 194} static cpu_info[MAX_APIC_ID + 1]; 195int cpu_apic_ids[MAXCPU]; 196int apic_cpuids[MAX_APIC_ID + 1]; 197 198/* Holds pending bitmap based IPIs per CPU */ 199volatile u_int cpu_ipi_pending[MAXCPU]; 200 201static u_int boot_address; 202static int cpu_logical; /* logical cpus per core */ 203static int cpu_cores; /* cores per package */ 204 205static void assign_cpu_ids(void); 206static void install_ap_tramp(void); 207static void set_interrupt_apic_ids(void); 208static int start_all_aps(void); 209static int start_ap(int apic_id); 210static void release_aps(void *dummy); 211 212static u_int hyperthreading_cpus; /* logical cpus sharing L1 cache */ 213static int hyperthreading_allowed = 1; 214 215static void 216mem_range_AP_init(void) 217{ 218 if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP) 219 mem_range_softc.mr_op->initAP(&mem_range_softc); 220} 221 222static void 223topo_probe_amd(void) 224{ 225 int core_id_bits; 226 int id; 227 228 /* AMD processors do not support HTT. */ 229 cpu_logical = 1; 230 231 if ((amd_feature2 & AMDID2_CMP) == 0) { 232 cpu_cores = 1; 233 return; 234 } 235 236 core_id_bits = (cpu_procinfo2 & AMDID_COREID_SIZE) >> 237 AMDID_COREID_SIZE_SHIFT; 238 if (core_id_bits == 0) { 239 cpu_cores = (cpu_procinfo2 & AMDID_CMP_CORES) + 1; 240 return; 241 } 242 243 /* Fam 10h and newer should get here. */ 244 for (id = 0; id <= MAX_APIC_ID; id++) { 245 /* Check logical CPU availability. */ 246 if (!cpu_info[id].cpu_present || cpu_info[id].cpu_disabled) 247 continue; 248 /* Check if logical CPU has the same package ID. */ 249 if ((id >> core_id_bits) != (boot_cpu_id >> core_id_bits)) 250 continue; 251 cpu_cores++; 252 } 253} 254 255/* 256 * Round up to the next power of two, if necessary, and then 257 * take log2. 258 * Returns -1 if argument is zero. 259 */ 260static __inline int 261mask_width(u_int x) 262{ 263 264 return (fls(x << (1 - powerof2(x))) - 1); 265} 266 267static void 268topo_probe_0x4(void) 269{ 270 u_int p[4]; 271 int pkg_id_bits; 272 int core_id_bits; 273 int max_cores; 274 int max_logical; 275 int id; 276 277 /* Both zero and one here mean one logical processor per package. */ 278 max_logical = (cpu_feature & CPUID_HTT) != 0 ? 279 (cpu_procinfo & CPUID_HTT_CORES) >> 16 : 1; 280 if (max_logical <= 1) 281 return; 282 283 /* 284 * Because of uniformity assumption we examine only 285 * those logical processors that belong to the same 286 * package as BSP. Further, we count number of 287 * logical processors that belong to the same core 288 * as BSP thus deducing number of threads per core. 289 */ 290 if (cpu_high >= 0x4) { 291 cpuid_count(0x04, 0, p); 292 max_cores = ((p[0] >> 26) & 0x3f) + 1; 293 } else 294 max_cores = 1; 295 core_id_bits = mask_width(max_logical/max_cores); 296 if (core_id_bits < 0) 297 return; 298 pkg_id_bits = core_id_bits + mask_width(max_cores); 299 300 for (id = 0; id <= MAX_APIC_ID; id++) { 301 /* Check logical CPU availability. */ 302 if (!cpu_info[id].cpu_present || cpu_info[id].cpu_disabled) 303 continue; 304 /* Check if logical CPU has the same package ID. */ 305 if ((id >> pkg_id_bits) != (boot_cpu_id >> pkg_id_bits)) 306 continue; 307 cpu_cores++; 308 /* Check if logical CPU has the same package and core IDs. */ 309 if ((id >> core_id_bits) == (boot_cpu_id >> core_id_bits)) 310 cpu_logical++; 311 } 312 313 KASSERT(cpu_cores >= 1 && cpu_logical >= 1, 314 ("topo_probe_0x4 couldn't find BSP")); 315 316 cpu_cores /= cpu_logical; 317 hyperthreading_cpus = cpu_logical; 318} 319 320static void 321topo_probe_0xb(void) 322{ 323 u_int p[4]; 324 int bits; 325 int cnt; 326 int i; 327 int logical; 328 int type; 329 int x; 330 331 /* We only support three levels for now. */ 332 for (i = 0; i < 3; i++) { 333 cpuid_count(0x0b, i, p); 334 335 /* Fall back if CPU leaf 11 doesn't really exist. */ 336 if (i == 0 && p[1] == 0) { 337 topo_probe_0x4(); 338 return; 339 } 340 341 bits = p[0] & 0x1f; 342 logical = p[1] &= 0xffff; 343 type = (p[2] >> 8) & 0xff; 344 if (type == 0 || logical == 0) 345 break; 346 /* 347 * Because of uniformity assumption we examine only 348 * those logical processors that belong to the same 349 * package as BSP. 350 */ 351 for (cnt = 0, x = 0; x <= MAX_APIC_ID; x++) { 352 if (!cpu_info[x].cpu_present || 353 cpu_info[x].cpu_disabled) 354 continue; 355 if (x >> bits == boot_cpu_id >> bits) 356 cnt++; 357 } 358 if (type == CPUID_TYPE_SMT) 359 cpu_logical = cnt; 360 else if (type == CPUID_TYPE_CORE) 361 cpu_cores = cnt; 362 } 363 if (cpu_logical == 0) 364 cpu_logical = 1; 365 cpu_cores /= cpu_logical; 366} 367 368/* 369 * Both topology discovery code and code that consumes topology 370 * information assume top-down uniformity of the topology. 371 * That is, all physical packages must be identical and each 372 * core in a package must have the same number of threads. 373 * Topology information is queried only on BSP, on which this 374 * code runs and for which it can query CPUID information. 375 * Then topology is extrapolated on all packages using the 376 * uniformity assumption. 377 */ 378static void 379topo_probe(void) 380{ 381 static int cpu_topo_probed = 0; 382 383 if (cpu_topo_probed) 384 return; 385 386 CPU_ZERO(&logical_cpus_mask); 387 if (mp_ncpus <= 1) 388 cpu_cores = cpu_logical = 1; 389 else if (cpu_vendor_id == CPU_VENDOR_AMD) 390 topo_probe_amd(); 391 else if (cpu_vendor_id == CPU_VENDOR_INTEL) { 392 /* 393 * See Intel(R) 64 Architecture Processor 394 * Topology Enumeration article for details. 395 * 396 * Note that 0x1 <= cpu_high < 4 case should be 397 * compatible with topo_probe_0x4() logic when 398 * CPUID.1:EBX[23:16] > 0 (cpu_cores will be 1) 399 * or it should trigger the fallback otherwise. 400 */ 401 if (cpu_high >= 0xb) 402 topo_probe_0xb(); 403 else if (cpu_high >= 0x1) 404 topo_probe_0x4(); 405 } 406 407 /* 408 * Fallback: assume each logical CPU is in separate 409 * physical package. That is, no multi-core, no SMT. 410 */ 411 if (cpu_cores == 0 || cpu_logical == 0) 412 cpu_cores = cpu_logical = 1; 413 cpu_topo_probed = 1; 414} 415 416struct cpu_group * 417cpu_topo(void) 418{ 419 int cg_flags; 420 421 /* 422 * Determine whether any threading flags are 423 * necessry. 424 */ 425 topo_probe(); 426 if (cpu_logical > 1 && hyperthreading_cpus) 427 cg_flags = CG_FLAG_HTT; 428 else if (cpu_logical > 1) 429 cg_flags = CG_FLAG_SMT; 430 else 431 cg_flags = 0; 432 if (mp_ncpus % (cpu_cores * cpu_logical) != 0) { 433 printf("WARNING: Non-uniform processors.\n"); 434 printf("WARNING: Using suboptimal topology.\n"); 435 return (smp_topo_none()); 436 } 437 /* 438 * No multi-core or hyper-threaded. 439 */ 440 if (cpu_logical * cpu_cores == 1) 441 return (smp_topo_none()); 442 /* 443 * Only HTT no multi-core. 444 */ 445 if (cpu_logical > 1 && cpu_cores == 1) 446 return (smp_topo_1level(CG_SHARE_L1, cpu_logical, cg_flags)); 447 /* 448 * Only multi-core no HTT. 449 */ 450 if (cpu_cores > 1 && cpu_logical == 1) 451 return (smp_topo_1level(CG_SHARE_L2, cpu_cores, cg_flags)); 452 /* 453 * Both HTT and multi-core. 454 */ 455 return (smp_topo_2level(CG_SHARE_L2, cpu_cores, 456 CG_SHARE_L1, cpu_logical, cg_flags)); 457} 458 459 460/* 461 * Calculate usable address in base memory for AP trampoline code. 462 */ 463u_int 464mp_bootaddress(u_int basemem) 465{ 466 467 boot_address = trunc_page(basemem); /* round down to 4k boundary */ 468 if ((basemem - boot_address) < bootMP_size) 469 boot_address -= PAGE_SIZE; /* not enough, lower by 4k */ 470 471 return boot_address; 472} 473 474void 475cpu_add(u_int apic_id, char boot_cpu) 476{ 477 478 if (apic_id > MAX_APIC_ID) { 479 panic("SMP: APIC ID %d too high", apic_id); 480 return; 481 } 482 KASSERT(cpu_info[apic_id].cpu_present == 0, ("CPU %d added twice", 483 apic_id)); 484 cpu_info[apic_id].cpu_present = 1; 485 if (boot_cpu) { 486 KASSERT(boot_cpu_id == -1, 487 ("CPU %d claims to be BSP, but CPU %d already is", apic_id, 488 boot_cpu_id)); 489 boot_cpu_id = apic_id; 490 cpu_info[apic_id].cpu_bsp = 1; 491 } 492 if (mp_ncpus < MAXCPU) { 493 mp_ncpus++; 494 mp_maxid = mp_ncpus - 1; 495 } 496 if (bootverbose) 497 printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" : 498 "AP"); 499} 500 501void 502cpu_mp_setmaxid(void) 503{ 504 505 /* 506 * mp_maxid should be already set by calls to cpu_add(). 507 * Just sanity check its value here. 508 */ 509 if (mp_ncpus == 0) 510 KASSERT(mp_maxid == 0, 511 ("%s: mp_ncpus is zero, but mp_maxid is not", __func__)); 512 else if (mp_ncpus == 1) 513 mp_maxid = 0; 514 else 515 KASSERT(mp_maxid >= mp_ncpus - 1, 516 ("%s: counters out of sync: max %d, count %d", __func__, 517 mp_maxid, mp_ncpus)); 518} 519 520int 521cpu_mp_probe(void) 522{ 523 524 /* 525 * Always record BSP in CPU map so that the mbuf init code works 526 * correctly. 527 */ 528 CPU_SETOF(0, &all_cpus); 529 if (mp_ncpus == 0) { 530 /* 531 * No CPUs were found, so this must be a UP system. Setup 532 * the variables to represent a system with a single CPU 533 * with an id of 0. 534 */ 535 mp_ncpus = 1; 536 return (0); 537 } 538 539 /* At least one CPU was found. */ 540 if (mp_ncpus == 1) { 541 /* 542 * One CPU was found, so this must be a UP system with 543 * an I/O APIC. 544 */ 545 mp_maxid = 0; 546 return (0); 547 } 548 549 /* At least two CPUs were found. */ 550 return (1); 551} 552 553/* 554 * Initialize the IPI handlers and start up the AP's. 555 */ 556void 557cpu_mp_start(void) 558{ 559 int i; 560 561 /* Initialize the logical ID to APIC ID table. */ 562 for (i = 0; i < MAXCPU; i++) { 563 cpu_apic_ids[i] = -1; 564 cpu_ipi_pending[i] = 0; 565 } 566 567 /* Install an inter-CPU IPI for TLB invalidation */ 568 setidt(IPI_INVLTLB, IDTVEC(invltlb), 569 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 570 setidt(IPI_INVLPG, IDTVEC(invlpg), 571 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 572 setidt(IPI_INVLRNG, IDTVEC(invlrng), 573 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 574 575 /* Install an inter-CPU IPI for cache invalidation. */ 576 setidt(IPI_INVLCACHE, IDTVEC(invlcache), 577 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 578 579 /* Install an inter-CPU IPI for lazy pmap release */ 580 setidt(IPI_LAZYPMAP, IDTVEC(lazypmap), 581 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 582 583 /* Install an inter-CPU IPI for all-CPU rendezvous */ 584 setidt(IPI_RENDEZVOUS, IDTVEC(rendezvous), 585 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 586 587 /* Install generic inter-CPU IPI handler */ 588 setidt(IPI_BITMAP_VECTOR, IDTVEC(ipi_intr_bitmap_handler), 589 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 590 591 /* Install an inter-CPU IPI for CPU stop/restart */ 592 setidt(IPI_STOP, IDTVEC(cpustop), 593 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 594 595 /* Install an inter-CPU IPI for CPU suspend/resume */ 596 setidt(IPI_SUSPEND, IDTVEC(cpususpend), 597 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 598 599 /* Set boot_cpu_id if needed. */ 600 if (boot_cpu_id == -1) { 601 boot_cpu_id = PCPU_GET(apic_id); 602 cpu_info[boot_cpu_id].cpu_bsp = 1; 603 } else 604 KASSERT(boot_cpu_id == PCPU_GET(apic_id), 605 ("BSP's APIC ID doesn't match boot_cpu_id")); 606 607 /* Probe logical/physical core configuration. */ 608 topo_probe(); 609 610 assign_cpu_ids(); 611 612 /* Start each Application Processor */ 613 start_all_aps(); 614 615 set_interrupt_apic_ids(); 616} 617 618 619/* 620 * Print various information about the SMP system hardware and setup. 621 */ 622void 623cpu_mp_announce(void) 624{ 625 const char *hyperthread; 626 int i; 627 628 printf("FreeBSD/SMP: %d package(s) x %d core(s)", 629 mp_ncpus / (cpu_cores * cpu_logical), cpu_cores); 630 if (hyperthreading_cpus > 1) 631 printf(" x %d HTT threads", cpu_logical); 632 else if (cpu_logical > 1) 633 printf(" x %d SMT threads", cpu_logical); 634 printf("\n"); 635 636 /* List active CPUs first. */ 637 printf(" cpu0 (BSP): APIC ID: %2d\n", boot_cpu_id); 638 for (i = 1; i < mp_ncpus; i++) { 639 if (cpu_info[cpu_apic_ids[i]].cpu_hyperthread) 640 hyperthread = "/HT"; 641 else 642 hyperthread = ""; 643 printf(" cpu%d (AP%s): APIC ID: %2d\n", i, hyperthread, 644 cpu_apic_ids[i]); 645 } 646 647 /* List disabled CPUs last. */ 648 for (i = 0; i <= MAX_APIC_ID; i++) { 649 if (!cpu_info[i].cpu_present || !cpu_info[i].cpu_disabled) 650 continue; 651 if (cpu_info[i].cpu_hyperthread) 652 hyperthread = "/HT"; 653 else 654 hyperthread = ""; 655 printf(" cpu (AP%s): APIC ID: %2d (disabled)\n", hyperthread, 656 i); 657 } 658} 659 660/* 661 * AP CPU's call this to initialize themselves. 662 */ 663void 664init_secondary(void) 665{ 666 struct pcpu *pc; 667 vm_offset_t addr; 668 int gsel_tss; 669 int x, myid; 670 u_int cpuid, cr0; 671 672 /* bootAP is set in start_ap() to our ID. */ 673 myid = bootAP; 674 675 /* Get per-cpu data */ 676 pc = &__pcpu[myid]; 677 678 /* prime data page for it to use */ 679 pcpu_init(pc, myid, sizeof(struct pcpu)); 680 dpcpu_init(dpcpu, myid); 681 pc->pc_apic_id = cpu_apic_ids[myid]; 682 pc->pc_prvspace = pc; 683 pc->pc_curthread = 0; 684 685 gdt_segs[GPRIV_SEL].ssd_base = (int) pc; 686 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss; 687 688 for (x = 0; x < NGDT; x++) { 689 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd); 690 } 691 692 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 693 r_gdt.rd_base = (int) &gdt[myid * NGDT]; 694 lgdt(&r_gdt); /* does magic intra-segment return */ 695 696 lidt(&r_idt); 697 698 lldt(_default_ldt); 699 PCPU_SET(currentldt, _default_ldt); 700 701 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 702 gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS; 703 PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */ 704 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL)); 705 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16); 706 PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd); 707 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt)); 708 ltr(gsel_tss); 709 710 PCPU_SET(fsgs_gdt, &gdt[myid * NGDT + GUFS_SEL].sd); 711 712 /* 713 * Set to a known state: 714 * Set by mpboot.s: CR0_PG, CR0_PE 715 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM 716 */ 717 cr0 = rcr0(); 718 cr0 &= ~(CR0_CD | CR0_NW | CR0_EM); 719 load_cr0(cr0); 720 CHECK_WRITE(0x38, 5); 721 722 /* Disable local APIC just to be sure. */ 723 lapic_disable(); 724 725 /* signal our startup to the BSP. */ 726 mp_naps++; 727 CHECK_WRITE(0x39, 6); 728 729 /* Spin until the BSP releases the AP's. */ 730 while (!aps_ready) 731 ia32_pause(); 732 733 /* BSP may have changed PTD while we were waiting */ 734 invltlb(); 735 for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE) 736 invlpg(addr); 737 738#if defined(I586_CPU) && !defined(NO_F00F_HACK) 739 lidt(&r_idt); 740#endif 741 742 /* Initialize the PAT MSR if present. */ 743 pmap_init_pat(); 744 745 /* set up CPU registers and state */ 746 cpu_setregs(); 747 748 /* set up FPU state on the AP */ 749 npxinit(); 750 751 /* set up SSE registers */ 752 enable_sse(); 753 754 if (cpu_ops.cpu_init) 755 cpu_ops.cpu_init(); 756 757#ifdef PAE 758 /* Enable the PTE no-execute bit. */ 759 if ((amd_feature & AMDID_NX) != 0) { 760 uint64_t msr; 761 762 msr = rdmsr(MSR_EFER) | EFER_NXE; 763 wrmsr(MSR_EFER, msr); 764 } 765#endif 766 767 /* A quick check from sanity claus */ 768 cpuid = PCPU_GET(cpuid); 769 if (PCPU_GET(apic_id) != lapic_id()) { 770 printf("SMP: cpuid = %d\n", cpuid); 771 printf("SMP: actual apic_id = %d\n", lapic_id()); 772 printf("SMP: correct apic_id = %d\n", PCPU_GET(apic_id)); 773 panic("cpuid mismatch! boom!!"); 774 } 775 776 /* Initialize curthread. */ 777 KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); 778 PCPU_SET(curthread, PCPU_GET(idlethread)); 779 780 mca_init(); 781 782 mtx_lock_spin(&ap_boot_mtx); 783 784 /* Init local apic for irq's */ 785 lapic_setup(1); 786 787 /* Set memory range attributes for this CPU to match the BSP */ 788 mem_range_AP_init(); 789 790 smp_cpus++; 791 792 CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", cpuid); 793 printf("SMP: AP CPU #%d Launched!\n", cpuid); 794 795 /* Determine if we are a logical CPU. */ 796 /* XXX Calculation depends on cpu_logical being a power of 2, e.g. 2 */ 797 if (cpu_logical > 1 && PCPU_GET(apic_id) % cpu_logical != 0) 798 CPU_SET(cpuid, &logical_cpus_mask); 799 800 if (bootverbose) 801 lapic_dump("AP"); 802 803 if (smp_cpus == mp_ncpus) { 804 /* enable IPI's, tlb shootdown, freezes etc */ 805 atomic_store_rel_int(&smp_started, 1); 806 } 807 808 mtx_unlock_spin(&ap_boot_mtx); 809 810 /* Wait until all the AP's are up. */ 811 while (smp_started == 0) 812 ia32_pause(); 813 814 /* Start per-CPU event timers. */ 815 cpu_initclocks_ap(); 816 817 /* Enter the scheduler. */ 818 sched_throw(NULL); 819 820 panic("scheduler returned us to %s", __func__); 821 /* NOTREACHED */ 822} 823 824/******************************************************************* 825 * local functions and data 826 */ 827 828/* 829 * We tell the I/O APIC code about all the CPUs we want to receive 830 * interrupts. If we don't want certain CPUs to receive IRQs we 831 * can simply not tell the I/O APIC code about them in this function. 832 * We also do not tell it about the BSP since it tells itself about 833 * the BSP internally to work with UP kernels and on UP machines. 834 */ 835static void 836set_interrupt_apic_ids(void) 837{ 838 u_int i, apic_id; 839 840 for (i = 0; i < MAXCPU; i++) { 841 apic_id = cpu_apic_ids[i]; 842 if (apic_id == -1) 843 continue; 844 if (cpu_info[apic_id].cpu_bsp) 845 continue; 846 if (cpu_info[apic_id].cpu_disabled) 847 continue; 848 849 /* Don't let hyperthreads service interrupts. */ 850 if (hyperthreading_cpus > 1 && 851 apic_id % hyperthreading_cpus != 0) 852 continue; 853 854 intr_add_cpu(i); 855 } 856} 857 858/* 859 * Assign logical CPU IDs to local APICs. 860 */ 861static void 862assign_cpu_ids(void) 863{ 864 u_int i; 865 866 TUNABLE_INT_FETCH("machdep.hyperthreading_allowed", 867 &hyperthreading_allowed); 868 869 /* Check for explicitly disabled CPUs. */ 870 for (i = 0; i <= MAX_APIC_ID; i++) { 871 if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp) 872 continue; 873 874 if (hyperthreading_cpus > 1 && i % hyperthreading_cpus != 0) { 875 cpu_info[i].cpu_hyperthread = 1; 876 877 /* 878 * Don't use HT CPU if it has been disabled by a 879 * tunable. 880 */ 881 if (hyperthreading_allowed == 0) { 882 cpu_info[i].cpu_disabled = 1; 883 continue; 884 } 885 } 886 887 /* Don't use this CPU if it has been disabled by a tunable. */ 888 if (resource_disabled("lapic", i)) { 889 cpu_info[i].cpu_disabled = 1; 890 continue; 891 } 892 } 893 894 if (hyperthreading_allowed == 0 && hyperthreading_cpus > 1) { 895 hyperthreading_cpus = 0; 896 cpu_logical = 1; 897 } 898 899 /* 900 * Assign CPU IDs to local APIC IDs and disable any CPUs 901 * beyond MAXCPU. CPU 0 is always assigned to the BSP. 902 * 903 * To minimize confusion for userland, we attempt to number 904 * CPUs such that all threads and cores in a package are 905 * grouped together. For now we assume that the BSP is always 906 * the first thread in a package and just start adding APs 907 * starting with the BSP's APIC ID. 908 */ 909 mp_ncpus = 1; 910 cpu_apic_ids[0] = boot_cpu_id; 911 apic_cpuids[boot_cpu_id] = 0; 912 for (i = boot_cpu_id + 1; i != boot_cpu_id; 913 i == MAX_APIC_ID ? i = 0 : i++) { 914 if (!cpu_info[i].cpu_present || cpu_info[i].cpu_bsp || 915 cpu_info[i].cpu_disabled) 916 continue; 917 918 if (mp_ncpus < MAXCPU) { 919 cpu_apic_ids[mp_ncpus] = i; 920 apic_cpuids[i] = mp_ncpus; 921 mp_ncpus++; 922 } else 923 cpu_info[i].cpu_disabled = 1; 924 } 925 KASSERT(mp_maxid >= mp_ncpus - 1, 926 ("%s: counters out of sync: max %d, count %d", __func__, mp_maxid, 927 mp_ncpus)); 928} 929 930/* 931 * start each AP in our list 932 */ 933/* Lowest 1MB is already mapped: don't touch*/ 934#define TMPMAP_START 1 935static int 936start_all_aps(void) 937{ 938#ifndef PC98 939 u_char mpbiosreason; 940#endif 941 u_int32_t mpbioswarmvec; 942 int apic_id, cpu, i; 943 944 mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); 945 946 /* install the AP 1st level boot code */ 947 install_ap_tramp(); 948 949 /* save the current value of the warm-start vector */ 950 mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF); 951#ifndef PC98 952 outb(CMOS_REG, BIOS_RESET); 953 mpbiosreason = inb(CMOS_DATA); 954#endif 955 956 /* set up temporary P==V mapping for AP boot */ 957 /* XXX this is a hack, we should boot the AP on its own stack/PTD */ 958 for (i = TMPMAP_START; i < NKPT; i++) 959 PTD[i] = PTD[KPTDI + i]; 960 invltlb(); 961 962 /* start each AP */ 963 for (cpu = 1; cpu < mp_ncpus; cpu++) { 964 apic_id = cpu_apic_ids[cpu]; 965 966 /* allocate and set up a boot stack data page */ 967 bootstacks[cpu] = 968 (char *)kmem_malloc(kernel_arena, KSTACK_PAGES * PAGE_SIZE, 969 M_WAITOK | M_ZERO); 970 dpcpu = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, 971 M_WAITOK | M_ZERO); 972 /* setup a vector to our boot code */ 973 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET; 974 *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4); 975#ifndef PC98 976 outb(CMOS_REG, BIOS_RESET); 977 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */ 978#endif 979 980 bootSTK = (char *)bootstacks[cpu] + KSTACK_PAGES * PAGE_SIZE - 4; 981 bootAP = cpu; 982 983 /* attempt to start the Application Processor */ 984 CHECK_INIT(99); /* setup checkpoints */ 985 if (!start_ap(apic_id)) { 986 printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id); 987 CHECK_PRINT("trace"); /* show checkpoints */ 988 /* better panic as the AP may be running loose */ 989 printf("panic y/n? [y] "); 990 if (cngetc() != 'n') 991 panic("bye-bye"); 992 } 993 CHECK_PRINT("trace"); /* show checkpoints */ 994 995 CPU_SET(cpu, &all_cpus); /* record AP in CPU map */ 996 } 997 998 /* restore the warmstart vector */ 999 *(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec; 1000 1001#ifndef PC98 1002 outb(CMOS_REG, BIOS_RESET); 1003 outb(CMOS_DATA, mpbiosreason); 1004#endif 1005 1006 /* Undo V==P hack from above */ 1007 for (i = TMPMAP_START; i < NKPT; i++) 1008 PTD[i] = 0; 1009 pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1); 1010 1011 /* number of APs actually started */ 1012 return mp_naps; 1013} 1014 1015/* 1016 * load the 1st level AP boot code into base memory. 1017 */ 1018 1019/* targets for relocation */ 1020extern void bigJump(void); 1021extern void bootCodeSeg(void); 1022extern void bootDataSeg(void); 1023extern void MPentry(void); 1024extern u_int MP_GDT; 1025extern u_int mp_gdtbase; 1026 1027static void 1028install_ap_tramp(void) 1029{ 1030 int x; 1031 int size = *(int *) ((u_long) & bootMP_size); 1032 vm_offset_t va = boot_address + KERNBASE; 1033 u_char *src = (u_char *) ((u_long) bootMP); 1034 u_char *dst = (u_char *) va; 1035 u_int boot_base = (u_int) bootMP; 1036 u_int8_t *dst8; 1037 u_int16_t *dst16; 1038 u_int32_t *dst32; 1039 1040 KASSERT (size <= PAGE_SIZE, 1041 ("'size' do not fit into PAGE_SIZE, as expected.")); 1042 pmap_kenter(va, boot_address); 1043 pmap_invalidate_page (kernel_pmap, va); 1044 for (x = 0; x < size; ++x) 1045 *dst++ = *src++; 1046 1047 /* 1048 * modify addresses in code we just moved to basemem. unfortunately we 1049 * need fairly detailed info about mpboot.s for this to work. changes 1050 * to mpboot.s might require changes here. 1051 */ 1052 1053 /* boot code is located in KERNEL space */ 1054 dst = (u_char *) va; 1055 1056 /* modify the lgdt arg */ 1057 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base)); 1058 *dst32 = boot_address + ((u_int) & MP_GDT - boot_base); 1059 1060 /* modify the ljmp target for MPentry() */ 1061 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1); 1062 *dst32 = ((u_int) MPentry - KERNBASE); 1063 1064 /* modify the target for boot code segment */ 1065 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base)); 1066 dst8 = (u_int8_t *) (dst16 + 1); 1067 *dst16 = (u_int) boot_address & 0xffff; 1068 *dst8 = ((u_int) boot_address >> 16) & 0xff; 1069 1070 /* modify the target for boot data segment */ 1071 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base)); 1072 dst8 = (u_int8_t *) (dst16 + 1); 1073 *dst16 = (u_int) boot_address & 0xffff; 1074 *dst8 = ((u_int) boot_address >> 16) & 0xff; 1075} 1076 1077/* 1078 * This function starts the AP (application processor) identified 1079 * by the APIC ID 'physicalCpu'. It does quite a "song and dance" 1080 * to accomplish this. This is necessary because of the nuances 1081 * of the different hardware we might encounter. It isn't pretty, 1082 * but it seems to work. 1083 */ 1084static int 1085start_ap(int apic_id) 1086{ 1087 int vector, ms; 1088 int cpus; 1089 1090 /* calculate the vector */ 1091 vector = (boot_address >> 12) & 0xff; 1092 1093 /* used as a watchpoint to signal AP startup */ 1094 cpus = mp_naps; 1095 1096 ipi_startup(apic_id, vector); 1097 1098 /* Wait up to 5 seconds for it to start. */ 1099 for (ms = 0; ms < 5000; ms++) { 1100 if (mp_naps > cpus) 1101 return 1; /* return SUCCESS */ 1102 DELAY(1000); 1103 } 1104 return 0; /* return FAILURE */ 1105} 1106 1107#ifdef COUNT_XINVLTLB_HITS 1108u_int xhits_gbl[MAXCPU]; 1109u_int xhits_pg[MAXCPU]; 1110u_int xhits_rng[MAXCPU]; 1111static SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, ""); 1112SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl, 1113 sizeof(xhits_gbl), "IU", ""); 1114SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg, 1115 sizeof(xhits_pg), "IU", ""); 1116SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng, 1117 sizeof(xhits_rng), "IU", ""); 1118 1119u_int ipi_global; 1120u_int ipi_page; 1121u_int ipi_range; 1122u_int ipi_range_size; 1123SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, ""); 1124SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, ""); 1125SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, ""); 1126SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size, 1127 0, ""); 1128 1129u_int ipi_masked_global; 1130u_int ipi_masked_page; 1131u_int ipi_masked_range; 1132u_int ipi_masked_range_size; 1133SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW, 1134 &ipi_masked_global, 0, ""); 1135SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW, 1136 &ipi_masked_page, 0, ""); 1137SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW, 1138 &ipi_masked_range, 0, ""); 1139SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW, 1140 &ipi_masked_range_size, 0, ""); 1141#endif /* COUNT_XINVLTLB_HITS */ 1142 1143/* 1144 * Init and startup IPI. 1145 */ 1146void 1147ipi_startup(int apic_id, int vector) 1148{ 1149 1150 /* 1151 * first we do an INIT IPI: this INIT IPI might be run, resetting 1152 * and running the target CPU. OR this INIT IPI might be latched (P5 1153 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be 1154 * ignored. 1155 */ 1156 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE | 1157 APIC_LEVEL_ASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_INIT, apic_id); 1158 lapic_ipi_wait(-1); 1159 DELAY(10000); /* wait ~10mS */ 1160 1161 /* 1162 * next we do a STARTUP IPI: the previous INIT IPI might still be 1163 * latched, (P5 bug) this 1st STARTUP would then terminate 1164 * immediately, and the previously started INIT IPI would continue. OR 1165 * the previous INIT IPI has already run. and this STARTUP IPI will 1166 * run. OR the previous INIT IPI was ignored. and this STARTUP IPI 1167 * will run. 1168 */ 1169 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE | 1170 APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP | 1171 vector, apic_id); 1172 lapic_ipi_wait(-1); 1173 DELAY(200); /* wait ~200uS */ 1174 1175 /* 1176 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF 1177 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR 1178 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is 1179 * recognized after hardware RESET or INIT IPI. 1180 */ 1181 lapic_ipi_raw(APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE | 1182 APIC_LEVEL_DEASSERT | APIC_DESTMODE_PHY | APIC_DELMODE_STARTUP | 1183 vector, apic_id); 1184 lapic_ipi_wait(-1); 1185 DELAY(200); /* wait ~200uS */ 1186} 1187 1188/* 1189 * Send an IPI to specified CPU handling the bitmap logic. 1190 */ 1191static void 1192ipi_send_cpu(int cpu, u_int ipi) 1193{ 1194 u_int bitmap, old_pending, new_pending; 1195 1196 KASSERT(cpu_apic_ids[cpu] != -1, ("IPI to non-existent CPU %d", cpu)); 1197 1198 if (IPI_IS_BITMAPED(ipi)) { 1199 bitmap = 1 << ipi; 1200 ipi = IPI_BITMAP_VECTOR; 1201 do { 1202 old_pending = cpu_ipi_pending[cpu]; 1203 new_pending = old_pending | bitmap; 1204 } while (!atomic_cmpset_int(&cpu_ipi_pending[cpu], 1205 old_pending, new_pending)); 1206 if (old_pending) 1207 return; 1208 } 1209 lapic_ipi_vectored(ipi, cpu_apic_ids[cpu]); 1210} 1211 1212/* 1213 * Flush the TLB on all other CPU's 1214 */ 1215static void 1216smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2) 1217{ 1218 u_int ncpu; 1219 1220 ncpu = mp_ncpus - 1; /* does not shootdown self */ 1221 if (ncpu < 1) 1222 return; /* no other cpus */ 1223 if (!(read_eflags() & PSL_I)) 1224 panic("%s: interrupts disabled", __func__); 1225 mtx_lock_spin(&smp_ipi_mtx); 1226 smp_tlb_addr1 = addr1; 1227 smp_tlb_addr2 = addr2; 1228 atomic_store_rel_int(&smp_tlb_wait, 0); 1229 ipi_all_but_self(vector); 1230 while (smp_tlb_wait < ncpu) 1231 ia32_pause(); 1232 mtx_unlock_spin(&smp_ipi_mtx); 1233} 1234 1235static void 1236smp_targeted_tlb_shootdown(cpuset_t mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2) 1237{ 1238 int cpu, ncpu, othercpus; 1239 1240 othercpus = mp_ncpus - 1; 1241 if (CPU_ISFULLSET(&mask)) { 1242 if (othercpus < 1) 1243 return; 1244 } else { 1245 CPU_CLR(PCPU_GET(cpuid), &mask); 1246 if (CPU_EMPTY(&mask)) 1247 return; 1248 } 1249 if (!(read_eflags() & PSL_I)) 1250 panic("%s: interrupts disabled", __func__); 1251 mtx_lock_spin(&smp_ipi_mtx); 1252 smp_tlb_addr1 = addr1; 1253 smp_tlb_addr2 = addr2; 1254 atomic_store_rel_int(&smp_tlb_wait, 0); 1255 if (CPU_ISFULLSET(&mask)) { 1256 ncpu = othercpus; 1257 ipi_all_but_self(vector); 1258 } else { 1259 ncpu = 0; 1260 while ((cpu = CPU_FFS(&mask)) != 0) { 1261 cpu--; 1262 CPU_CLR(cpu, &mask); 1263 CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, 1264 vector); 1265 ipi_send_cpu(cpu, vector); 1266 ncpu++; 1267 } 1268 } 1269 while (smp_tlb_wait < ncpu) 1270 ia32_pause(); 1271 mtx_unlock_spin(&smp_ipi_mtx); 1272} 1273 1274void 1275smp_cache_flush(void) 1276{ 1277 1278 if (smp_started) 1279 smp_tlb_shootdown(IPI_INVLCACHE, 0, 0); 1280} 1281 1282void 1283smp_invltlb(void) 1284{ 1285 1286 if (smp_started) { 1287 smp_tlb_shootdown(IPI_INVLTLB, 0, 0); 1288#ifdef COUNT_XINVLTLB_HITS 1289 ipi_global++; 1290#endif 1291 } 1292} 1293 1294void 1295smp_invlpg(vm_offset_t addr) 1296{ 1297 1298 if (smp_started) { 1299 smp_tlb_shootdown(IPI_INVLPG, addr, 0); 1300#ifdef COUNT_XINVLTLB_HITS 1301 ipi_page++; 1302#endif 1303 } 1304} 1305 1306void 1307smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2) 1308{ 1309 1310 if (smp_started) { 1311 smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2); 1312#ifdef COUNT_XINVLTLB_HITS 1313 ipi_range++; 1314 ipi_range_size += (addr2 - addr1) / PAGE_SIZE; 1315#endif 1316 } 1317} 1318 1319void 1320smp_masked_invltlb(cpuset_t mask) 1321{ 1322 1323 if (smp_started) { 1324 smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0); 1325#ifdef COUNT_XINVLTLB_HITS 1326 ipi_masked_global++; 1327#endif 1328 } 1329} 1330 1331void 1332smp_masked_invlpg(cpuset_t mask, vm_offset_t addr) 1333{ 1334 1335 if (smp_started) { 1336 smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0); 1337#ifdef COUNT_XINVLTLB_HITS 1338 ipi_masked_page++; 1339#endif 1340 } 1341} 1342 1343void 1344smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2) 1345{ 1346 1347 if (smp_started) { 1348 smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2); 1349#ifdef COUNT_XINVLTLB_HITS 1350 ipi_masked_range++; 1351 ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE; 1352#endif 1353 } 1354} 1355 1356void 1357ipi_bitmap_handler(struct trapframe frame) 1358{ 1359 struct trapframe *oldframe; 1360 struct thread *td; 1361 int cpu = PCPU_GET(cpuid); 1362 u_int ipi_bitmap; 1363 1364 critical_enter(); 1365 td = curthread; 1366 td->td_intr_nesting_level++; 1367 oldframe = td->td_intr_frame; 1368 td->td_intr_frame = &frame; 1369 ipi_bitmap = atomic_readandclear_int(&cpu_ipi_pending[cpu]); 1370 if (ipi_bitmap & (1 << IPI_PREEMPT)) { 1371#ifdef COUNT_IPIS 1372 (*ipi_preempt_counts[cpu])++; 1373#endif 1374 sched_preempt(td); 1375 } 1376 if (ipi_bitmap & (1 << IPI_AST)) { 1377#ifdef COUNT_IPIS 1378 (*ipi_ast_counts[cpu])++; 1379#endif 1380 /* Nothing to do for AST */ 1381 } 1382 if (ipi_bitmap & (1 << IPI_HARDCLOCK)) { 1383#ifdef COUNT_IPIS 1384 (*ipi_hardclock_counts[cpu])++; 1385#endif 1386 hardclockintr(); 1387 } 1388 td->td_intr_frame = oldframe; 1389 td->td_intr_nesting_level--; 1390 critical_exit(); 1391} 1392 1393/* 1394 * send an IPI to a set of cpus. 1395 */ 1396void 1397ipi_selected(cpuset_t cpus, u_int ipi) 1398{ 1399 int cpu; 1400 1401 /* 1402 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit 1403 * of help in order to understand what is the source. 1404 * Set the mask of receiving CPUs for this purpose. 1405 */ 1406 if (ipi == IPI_STOP_HARD) 1407 CPU_OR_ATOMIC(&ipi_nmi_pending, &cpus); 1408 1409 while ((cpu = CPU_FFS(&cpus)) != 0) { 1410 cpu--; 1411 CPU_CLR(cpu, &cpus); 1412 CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi); 1413 ipi_send_cpu(cpu, ipi); 1414 } 1415} 1416 1417/* 1418 * send an IPI to a specific CPU. 1419 */ 1420void 1421ipi_cpu(int cpu, u_int ipi) 1422{ 1423 1424 /* 1425 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit 1426 * of help in order to understand what is the source. 1427 * Set the mask of receiving CPUs for this purpose. 1428 */ 1429 if (ipi == IPI_STOP_HARD) 1430 CPU_SET_ATOMIC(cpu, &ipi_nmi_pending); 1431 1432 CTR3(KTR_SMP, "%s: cpu: %d ipi: %x", __func__, cpu, ipi); 1433 ipi_send_cpu(cpu, ipi); 1434} 1435 1436/* 1437 * send an IPI to all CPUs EXCEPT myself 1438 */ 1439void 1440ipi_all_but_self(u_int ipi) 1441{ 1442 cpuset_t other_cpus; 1443 1444 other_cpus = all_cpus; 1445 CPU_CLR(PCPU_GET(cpuid), &other_cpus); 1446 if (IPI_IS_BITMAPED(ipi)) { 1447 ipi_selected(other_cpus, ipi); 1448 return; 1449 } 1450 1451 /* 1452 * IPI_STOP_HARD maps to a NMI and the trap handler needs a bit 1453 * of help in order to understand what is the source. 1454 * Set the mask of receiving CPUs for this purpose. 1455 */ 1456 if (ipi == IPI_STOP_HARD) 1457 CPU_OR_ATOMIC(&ipi_nmi_pending, &other_cpus); 1458 1459 CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); 1460 lapic_ipi_vectored(ipi, APIC_IPI_DEST_OTHERS); 1461} 1462 1463int 1464ipi_nmi_handler() 1465{ 1466 u_int cpuid; 1467 1468 /* 1469 * As long as there is not a simple way to know about a NMI's 1470 * source, if the bitmask for the current CPU is present in 1471 * the global pending bitword an IPI_STOP_HARD has been issued 1472 * and should be handled. 1473 */ 1474 cpuid = PCPU_GET(cpuid); 1475 if (!CPU_ISSET(cpuid, &ipi_nmi_pending)) 1476 return (1); 1477 1478 CPU_CLR_ATOMIC(cpuid, &ipi_nmi_pending); 1479 cpustop_handler(); 1480 return (0); 1481} 1482 1483/* 1484 * Handle an IPI_STOP by saving our current context and spinning until we 1485 * are resumed. 1486 */ 1487void 1488cpustop_handler(void) 1489{ 1490 u_int cpu; 1491 1492 cpu = PCPU_GET(cpuid); 1493 1494 savectx(&stoppcbs[cpu]); 1495 1496 /* Indicate that we are stopped */ 1497 CPU_SET_ATOMIC(cpu, &stopped_cpus); 1498 1499 /* Wait for restart */ 1500 while (!CPU_ISSET(cpu, &started_cpus)) 1501 ia32_pause(); 1502 1503 CPU_CLR_ATOMIC(cpu, &started_cpus); 1504 CPU_CLR_ATOMIC(cpu, &stopped_cpus); 1505 1506 if (cpu == 0 && cpustop_restartfunc != NULL) { 1507 cpustop_restartfunc(); 1508 cpustop_restartfunc = NULL; 1509 } 1510} 1511 1512/* 1513 * Handle an IPI_SUSPEND by saving our current context and spinning until we 1514 * are resumed. 1515 */ 1516void 1517cpususpend_handler(void) 1518{ 1519 u_int cpu; 1520 1521 mtx_assert(&smp_ipi_mtx, MA_NOTOWNED); 1522 1523 cpu = PCPU_GET(cpuid); 1524 if (savectx(susppcbs[cpu])) { 1525 wbinvd(); 1526 CPU_SET_ATOMIC(cpu, &suspended_cpus); 1527 } else { 1528 pmap_init_pat(); 1529 PCPU_SET(switchtime, 0); 1530 PCPU_SET(switchticks, ticks); 1531 1532 /* Indicate that we are resumed */ 1533 CPU_CLR_ATOMIC(cpu, &suspended_cpus); 1534 } 1535 1536 /* Wait for resume */ 1537 while (!CPU_ISSET(cpu, &started_cpus)) 1538 ia32_pause(); 1539 1540 if (cpu_ops.cpu_resume) 1541 cpu_ops.cpu_resume(); 1542 1543 /* Resume MCA and local APIC */ 1544 mca_resume(); 1545 lapic_setup(0); 1546 1547 /* Indicate that we are resumed */ 1548 CPU_CLR_ATOMIC(cpu, &suspended_cpus); 1549 CPU_CLR_ATOMIC(cpu, &started_cpus); 1550} 1551 1552/* 1553 * Handlers for TLB related IPIs 1554 */ 1555void 1556invltlb_handler(void) 1557{ 1558 uint64_t cr3; 1559#ifdef COUNT_XINVLTLB_HITS 1560 xhits_gbl[PCPU_GET(cpuid)]++; 1561#endif /* COUNT_XINVLTLB_HITS */ 1562#ifdef COUNT_IPIS 1563 (*ipi_invltlb_counts[PCPU_GET(cpuid)])++; 1564#endif /* COUNT_IPIS */ 1565 1566 cr3 = rcr3(); 1567 load_cr3(cr3); 1568 atomic_add_int(&smp_tlb_wait, 1); 1569} 1570 1571void 1572invlpg_handler(void) 1573{ 1574#ifdef COUNT_XINVLTLB_HITS 1575 xhits_pg[PCPU_GET(cpuid)]++; 1576#endif /* COUNT_XINVLTLB_HITS */ 1577#ifdef COUNT_IPIS 1578 (*ipi_invlpg_counts[PCPU_GET(cpuid)])++; 1579#endif /* COUNT_IPIS */ 1580 1581 invlpg(smp_tlb_addr1); 1582 1583 atomic_add_int(&smp_tlb_wait, 1); 1584} 1585 1586void 1587invlrng_handler(void) 1588{ 1589 vm_offset_t addr; 1590#ifdef COUNT_XINVLTLB_HITS 1591 xhits_rng[PCPU_GET(cpuid)]++; 1592#endif /* COUNT_XINVLTLB_HITS */ 1593#ifdef COUNT_IPIS 1594 (*ipi_invlrng_counts[PCPU_GET(cpuid)])++; 1595#endif /* COUNT_IPIS */ 1596 1597 addr = smp_tlb_addr1; 1598 do { 1599 invlpg(addr); 1600 addr += PAGE_SIZE; 1601 } while (addr < smp_tlb_addr2); 1602 1603 atomic_add_int(&smp_tlb_wait, 1); 1604} 1605 1606void 1607invlcache_handler(void) 1608{ 1609#ifdef COUNT_IPIS 1610 (*ipi_invlcache_counts[PCPU_GET(cpuid)])++; 1611#endif /* COUNT_IPIS */ 1612 1613 wbinvd(); 1614 atomic_add_int(&smp_tlb_wait, 1); 1615} 1616 1617/* 1618 * This is called once the rest of the system is up and running and we're 1619 * ready to let the AP's out of the pen. 1620 */ 1621static void 1622release_aps(void *dummy __unused) 1623{ 1624 1625 if (mp_ncpus == 1) 1626 return; 1627 atomic_store_rel_int(&aps_ready, 1); 1628 while (smp_started == 0) 1629 ia32_pause(); 1630} 1631SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); 1632 1633#ifdef COUNT_IPIS 1634/* 1635 * Setup interrupt counters for IPI handlers. 1636 */ 1637static void 1638mp_ipi_intrcnt(void *dummy) 1639{ 1640 char buf[64]; 1641 int i; 1642 1643 CPU_FOREACH(i) { 1644 snprintf(buf, sizeof(buf), "cpu%d:invltlb", i); 1645 intrcnt_add(buf, &ipi_invltlb_counts[i]); 1646 snprintf(buf, sizeof(buf), "cpu%d:invlrng", i); 1647 intrcnt_add(buf, &ipi_invlrng_counts[i]); 1648 snprintf(buf, sizeof(buf), "cpu%d:invlpg", i); 1649 intrcnt_add(buf, &ipi_invlpg_counts[i]); 1650 snprintf(buf, sizeof(buf), "cpu%d:invlcache", i); 1651 intrcnt_add(buf, &ipi_invlcache_counts[i]); 1652 snprintf(buf, sizeof(buf), "cpu%d:preempt", i); 1653 intrcnt_add(buf, &ipi_preempt_counts[i]); 1654 snprintf(buf, sizeof(buf), "cpu%d:ast", i); 1655 intrcnt_add(buf, &ipi_ast_counts[i]); 1656 snprintf(buf, sizeof(buf), "cpu%d:rendezvous", i); 1657 intrcnt_add(buf, &ipi_rendezvous_counts[i]); 1658 snprintf(buf, sizeof(buf), "cpu%d:lazypmap", i); 1659 intrcnt_add(buf, &ipi_lazypmap_counts[i]); 1660 snprintf(buf, sizeof(buf), "cpu%d:hardclock", i); 1661 intrcnt_add(buf, &ipi_hardclock_counts[i]); 1662 } 1663} 1664SYSINIT(mp_ipi_intrcnt, SI_SUB_INTR, SI_ORDER_MIDDLE, mp_ipi_intrcnt, NULL); 1665#endif 1666