1/* $NetBSD: cpu.c,v 1.145 2023/02/25 00:35:01 riastradh Exp $ */ 2 3/*- 4 * Copyright (c) 2000 The NetBSD Foundation, Inc. 5 * Copyright (c) 2002, 2006, 2007 YAMAMOTO Takashi, 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by RedBack Networks Inc. 10 * 11 * Author: Bill Sommerfeld 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35/* 36 * Copyright (c) 1999 Stefan Grefen 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. All advertising materials mentioning features or use of this software 47 * must display the following acknowledgement: 48 * This product includes software developed by the NetBSD 49 * Foundation, Inc. and its contributors. 50 * 4. Neither the name of The NetBSD Foundation nor the names of its 51 * contributors may be used to endorse or promote products derived 52 * from this software without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY 55 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 57 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE 58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 65 */ 66 67#include <sys/cdefs.h> 68__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.145 2023/02/25 00:35:01 riastradh Exp $"); 69 70#include "opt_ddb.h" 71#include "opt_multiprocessor.h" 72#include "opt_mpbios.h" /* for MPDEBUG */ 73#include "opt_mtrr.h" 74#include "opt_xen.h" 75 76#include "lapic.h" 77#include "ioapic.h" 78 79#include <sys/param.h> 80#include <sys/proc.h> 81#include <sys/systm.h> 82#include <sys/device.h> 83#include <sys/kmem.h> 84#include <sys/cpu.h> 85#include <sys/cpufreq.h> 86#include <sys/atomic.h> 87#include <sys/reboot.h> 88#include <sys/idle.h> 89 90#include <uvm/uvm.h> 91 92#include <machine/cpu.h> 93#include <machine/cpufunc.h> 94#include <machine/cpuvar.h> 95#include <machine/pmap.h> 96#include <machine/pmap_private.h> 97#include <machine/vmparam.h> 98#include <machine/mpbiosvar.h> 99#include <machine/pcb.h> 100#include <machine/specialreg.h> 101#include <machine/segments.h> 102#include <machine/gdt.h> 103#include <machine/mtrr.h> 104#include <machine/pio.h> 105 106#include <x86/fpu.h> 107 108#include <xen/xen.h> 109#include <xen/include/public/vcpu.h> 110#include <xen/vcpuvar.h> 111 112#if NLAPIC > 0 113#include <machine/apicvar.h> 114#include <machine/i82489reg.h> 115#include <machine/i82489var.h> 116#endif 117 118#include <dev/ic/mc146818reg.h> 119#include <dev/isa/isareg.h> 120 121static int cpu_match(device_t, cfdata_t, void *); 122static void cpu_attach(device_t, device_t, void *); 123static void cpu_defer(device_t); 124static int cpu_rescan(device_t, const char *, const int *); 125static void cpu_childdetached(device_t, device_t); 126static int vcpu_match(device_t, cfdata_t, void *); 127static void vcpu_attach(device_t, device_t, void *); 128static void cpu_attach_common(device_t, device_t, void *); 129void cpu_offline_md(void); 130 131struct cpu_softc { 132 device_t sc_dev; /* device tree glue */ 133 struct cpu_info *sc_info; /* pointer to CPU info */ 134 bool sc_wasonline; 135}; 136 137int mp_cpu_start(struct cpu_info *, vaddr_t); 138void mp_cpu_start_cleanup(struct cpu_info *); 139const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL, 140 mp_cpu_start_cleanup }; 141 142CFATTACH_DECL2_NEW(cpu, sizeof(struct cpu_softc), 143 cpu_match, cpu_attach, NULL, NULL, cpu_rescan, cpu_childdetached); 144 145CFATTACH_DECL_NEW(vcpu, sizeof(struct cpu_softc), 146 vcpu_match, vcpu_attach, NULL, NULL); 147 148/* 149 * Statically-allocated CPU info for the primary CPU (or the only 150 * CPU, on uniprocessors). The CPU info list is initialized to 151 * point at it. 152 */ 153struct cpu_info cpu_info_primary __aligned(CACHE_LINE_SIZE) = { 154 .ci_dev = 0, 155 .ci_self = &cpu_info_primary, 156 .ci_idepth = -1, 157 .ci_curlwp = &lwp0, 158 .ci_curldt = -1, 159}; 160struct cpu_info phycpu_info_primary __aligned(CACHE_LINE_SIZE) = { 161 .ci_dev = 0, 162 .ci_self = &phycpu_info_primary, 163}; 164 165struct cpu_info *cpu_info_list = &cpu_info_primary; 166struct cpu_info *phycpu_info_list = &phycpu_info_primary; 167 168uint32_t cpu_feature[7] __read_mostly; /* X86 CPUID feature bits 169 * [0] basic features %edx 170 * [1] basic features %ecx 171 * [2] extended features %edx 172 * [3] extended features %ecx 173 * [4] VIA padlock features 174 * [5] structured extended features cpuid.7:%ebx 175 * [6] structured extended features cpuid.7:%ecx 176 */ 177 178bool x86_mp_online; 179paddr_t mp_trampoline_paddr = MP_TRAMPOLINE; 180 181#if defined(MULTIPROCESSOR) 182void cpu_hatch(void *); 183static void cpu_boot_secondary(struct cpu_info *ci); 184static void cpu_start_secondary(struct cpu_info *ci); 185#endif /* MULTIPROCESSOR */ 186 187static int 188cpu_match(device_t parent, cfdata_t match, void *aux) 189{ 190 191 return 1; 192} 193 194static void 195cpu_attach(device_t parent, device_t self, void *aux) 196{ 197 struct cpu_softc *sc = device_private(self); 198 struct cpu_attach_args *caa = aux; 199 struct cpu_info *ci; 200 uintptr_t ptr; 201 static int nphycpu = 0; 202 203 sc->sc_dev = self; 204 205 /* 206 * If we're an Application Processor, allocate a cpu_info 207 * If we're the first attached CPU use the primary cpu_info, 208 * otherwise allocate a new one 209 */ 210 aprint_naive("\n"); 211 aprint_normal("\n"); 212 if (nphycpu > 0) { 213 struct cpu_info *tmp; 214 ptr = (uintptr_t)kmem_zalloc(sizeof(*ci) + CACHE_LINE_SIZE - 1, 215 KM_SLEEP); 216 ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE); 217 ci->ci_curldt = -1; 218 219 tmp = phycpu_info_list; 220 while (tmp->ci_next) 221 tmp = tmp->ci_next; 222 223 tmp->ci_next = ci; 224 } else { 225 ci = &phycpu_info_primary; 226 } 227 228 ci->ci_self = ci; 229 sc->sc_info = ci; 230 231 ci->ci_dev = self; 232 ci->ci_acpiid = caa->cpu_id; 233 ci->ci_cpuid = caa->cpu_number; 234 ci->ci_vcpu = NULL; 235 ci->ci_index = nphycpu++; 236 ci->ci_kfpu_spl = -1; 237 238 if (!pmf_device_register(self, NULL, NULL)) 239 aprint_error_dev(self, "couldn't establish power handler\n"); 240 241 (void)config_defer(self, cpu_defer); 242} 243 244static void 245cpu_defer(device_t self) 246{ 247 cpu_rescan(self, NULL, NULL); 248} 249 250static int 251cpu_rescan(device_t self, const char *ifattr, const int *locators) 252{ 253 struct cpu_softc *sc = device_private(self); 254 struct cpufeature_attach_args cfaa; 255 struct cpu_info *ci = sc->sc_info; 256 257 memset(&cfaa, 0, sizeof(cfaa)); 258 cfaa.ci = ci; 259 260 if (ifattr_match(ifattr, "cpufeaturebus")) { 261 262 if (ci->ci_frequency == NULL) { 263 cfaa.name = "frequency"; 264 ci->ci_frequency = 265 config_found(self, &cfaa, NULL, 266 CFARGS(.iattr = "cpufeaturebus")); 267 } 268 } 269 270 return 0; 271} 272 273static void 274cpu_childdetached(device_t self, device_t child) 275{ 276 struct cpu_softc *sc = device_private(self); 277 struct cpu_info *ci = sc->sc_info; 278 279 if (ci->ci_frequency == child) 280 ci->ci_frequency = NULL; 281} 282 283static int 284vcpu_match(device_t parent, cfdata_t match, void *aux) 285{ 286 struct vcpu_attach_args *vcaa = aux; 287 struct vcpu_runstate_info vcr; 288 int error; 289 290 if (strcmp(vcaa->vcaa_name, match->cf_name) == 0) { 291 error = HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info, 292 vcaa->vcaa_caa.cpu_number, &vcr); 293 switch (error) { 294 case 0: 295 return 1; 296 case -ENOENT: 297 return 0; 298 default: 299 panic("Unknown hypervisor error %d returned on vcpu runstate probe\n", error); 300 } 301 } 302 303 return 0; 304} 305 306static void 307vcpu_attach(device_t parent, device_t self, void *aux) 308{ 309 struct vcpu_attach_args *vcaa = aux; 310 311 KASSERT(vcaa->vcaa_caa.cpu_func == NULL); 312 vcaa->vcaa_caa.cpu_func = &mp_cpu_funcs; 313 cpu_attach_common(parent, self, &vcaa->vcaa_caa); 314 315 if (!pmf_device_register(self, NULL, NULL)) 316 aprint_error_dev(self, "couldn't establish power handler\n"); 317} 318 319static int 320vcpu_is_up(struct cpu_info *ci) 321{ 322 KASSERT(ci != NULL); 323 return HYPERVISOR_vcpu_op(VCPUOP_is_up, ci->ci_vcpuid, NULL); 324} 325 326static void 327cpu_vm_init(struct cpu_info *ci) 328{ 329 int ncolors = 2, i; 330 331 for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) { 332 struct x86_cache_info *cai; 333 int tcolors; 334 335 cai = &ci->ci_cinfo[i]; 336 337 tcolors = atop(cai->cai_totalsize); 338 switch (cai->cai_associativity) { 339 case 0xff: 340 tcolors = 1; /* fully associative */ 341 break; 342 case 0: 343 case 1: 344 break; 345 default: 346 tcolors /= cai->cai_associativity; 347 } 348 ncolors = uimax(ncolors, tcolors); 349 } 350 351 /* 352 * Knowing the size of the largest cache on this CPU, potentially 353 * re-color our pages. 354 */ 355 aprint_debug_dev(ci->ci_dev, "%d page colors\n", ncolors); 356 uvm_page_recolor(ncolors); 357 pmap_tlb_cpu_init(ci); 358#ifndef __HAVE_DIRECT_MAP 359 pmap_vpage_cpu_init(ci); 360#endif 361} 362 363static void 364cpu_attach_common(device_t parent, device_t self, void *aux) 365{ 366 struct cpu_softc *sc = device_private(self); 367 struct cpu_attach_args *caa = aux; 368 struct cpu_info *ci; 369 uintptr_t ptr; 370 int cpunum = caa->cpu_number; 371 static bool again = false; 372 373 sc->sc_dev = self; 374 375 /* 376 * If we're an Application Processor, allocate a cpu_info 377 * structure, otherwise use the primary's. 378 */ 379 if (caa->cpu_role == CPU_ROLE_AP) { 380 aprint_naive(": Application Processor\n"); 381 ptr = (uintptr_t)kmem_alloc(sizeof(*ci) + CACHE_LINE_SIZE - 1, 382 KM_SLEEP); 383 ci = (struct cpu_info *)roundup2(ptr, CACHE_LINE_SIZE); 384 memset(ci, 0, sizeof(*ci)); 385 cpu_init_tss(ci); 386 } else { 387 aprint_naive(": %s Processor\n", 388 caa->cpu_role == CPU_ROLE_SP ? "Single" : "Boot"); 389 ci = &cpu_info_primary; 390 } 391 392 ci->ci_self = ci; 393 sc->sc_info = ci; 394 ci->ci_dev = self; 395 ci->ci_cpuid = cpunum; 396 ci->ci_vcpuid = cpunum; 397 ci->ci_kfpu_spl = -1; 398 399 KASSERT(HYPERVISOR_shared_info != NULL); 400 KASSERT(cpunum < XEN_LEGACY_MAX_VCPUS); 401 ci->ci_vcpu = &HYPERVISOR_shared_info->vcpu_info[cpunum]; 402 403 KASSERT(ci->ci_func == 0); 404 ci->ci_func = caa->cpu_func; 405 aprint_normal("\n"); 406 407 /* Must be called before mi_cpu_attach(). */ 408 cpu_vm_init(ci); 409 410 if (caa->cpu_role == CPU_ROLE_AP) { 411 int error; 412 413 error = mi_cpu_attach(ci); 414 415 KASSERT(ci->ci_data.cpu_idlelwp != NULL); 416 if (error != 0) { 417 aprint_error_dev(self, 418 "mi_cpu_attach failed with %d\n", error); 419 return; 420 } 421 422 } else { 423 KASSERT(ci->ci_data.cpu_idlelwp != NULL); 424 } 425 426 KASSERT(ci->ci_cpuid == ci->ci_index); 427#ifdef __x86_64__ 428 /* No user PGD mapped for this CPU yet */ 429 ci->ci_xen_current_user_pgd = 0; 430#endif 431 mutex_init(&ci->ci_kpm_mtx, MUTEX_DEFAULT, IPL_VM); 432 pmap_reference(pmap_kernel()); 433 ci->ci_pmap = pmap_kernel(); 434 ci->ci_tlbstate = TLBSTATE_STALE; 435 436 /* 437 * Boot processor may not be attached first, but the below 438 * must be done to allow booting other processors. 439 */ 440 if (!again) { 441 atomic_or_32(&ci->ci_flags, CPUF_PRESENT | CPUF_PRIMARY); 442 /* Basic init. */ 443 cpu_intr_init(ci); 444 cpu_get_tsc_freq(ci); 445 cpu_init(ci); 446 pmap_cpu_init_late(ci); 447 448 /* Every processor needs to init its own ipi h/w (similar to lapic) */ 449 xen_ipi_init(); 450 451 /* Make sure DELAY() is initialized. */ 452 DELAY(1); 453 again = true; 454 } 455 456 /* further PCB init done later. */ 457 458 switch (caa->cpu_role) { 459 case CPU_ROLE_SP: 460 atomic_or_32(&ci->ci_flags, CPUF_SP); 461 cpu_identify(ci); 462 x86_cpu_idle_init(); 463 break; 464 465 case CPU_ROLE_BP: 466 atomic_or_32(&ci->ci_flags, CPUF_BSP); 467 cpu_identify(ci); 468 x86_cpu_idle_init(); 469 break; 470 471 case CPU_ROLE_AP: 472 atomic_or_32(&ci->ci_flags, CPUF_AP); 473 474 /* 475 * report on an AP 476 */ 477 478#if defined(MULTIPROCESSOR) 479 /* interrupt handler stack */ 480 cpu_intr_init(ci); 481 482 /* Setup per-cpu memory for idt */ 483 idt_vec_init_cpu_md(&ci->ci_idtvec, cpu_index(ci)); 484 485 /* Setup per-cpu memory for gdt */ 486 gdt_alloc_cpu(ci); 487 488 pmap_cpu_init_late(ci); 489 cpu_start_secondary(ci); 490 491 if (ci->ci_flags & CPUF_PRESENT) { 492 struct cpu_info *tmp; 493 494 cpu_identify(ci); 495 tmp = cpu_info_list; 496 while (tmp->ci_next) 497 tmp = tmp->ci_next; 498 499 tmp->ci_next = ci; 500 } 501#else 502 aprint_error_dev(ci->ci_dev, "not started\n"); 503#endif 504 break; 505 506 default: 507 panic("unknown processor type??\n"); 508 } 509 510#ifdef MPVERBOSE 511 if (mp_verbose) { 512 struct lwp *l = ci->ci_data.cpu_idlelwp; 513 struct pcb *pcb = lwp_getpcb(l); 514 515 aprint_verbose_dev(self, 516 "idle lwp at %p, idle sp at %p\n", 517 l, 518#ifdef i386 519 (void *)pcb->pcb_esp 520#else 521 (void *)pcb->pcb_rsp 522#endif 523 ); 524 525 } 526#endif /* MPVERBOSE */ 527} 528 529/* 530 * Initialize the processor appropriately. 531 */ 532 533void 534cpu_init(struct cpu_info *ci) 535{ 536 uint32_t cr4 = 0; 537 538 /* 539 * If we have FXSAVE/FXRESTOR, use them. 540 */ 541 if (cpu_feature[0] & CPUID_FXSR) { 542 cr4 |= CR4_OSFXSR; 543 544 /* 545 * If we have SSE/SSE2, enable XMM exceptions. 546 */ 547 if (cpu_feature[0] & (CPUID_SSE|CPUID_SSE2)) 548 cr4 |= CR4_OSXMMEXCPT; 549 } 550 551 /* If xsave is supported, enable it */ 552 if (cpu_feature[1] & CPUID2_XSAVE && x86_fpu_save >= FPU_SAVE_XSAVE) 553 cr4 |= CR4_OSXSAVE; 554 555 if (cr4) { 556 cr4 |= rcr4(); 557 lcr4(cr4); 558 } 559 560 if (x86_fpu_save >= FPU_SAVE_FXSAVE) { 561 fpuinit_mxcsr_mask(); 562 } 563 564 /* 565 * Changing CR4 register may change cpuid values. For example, setting 566 * CR4_OSXSAVE sets CPUID2_OSXSAVE. The CPUID2_OSXSAVE is in 567 * ci_feat_val[1], so update it. 568 * XXX Other than ci_feat_val[1] might be changed. 569 */ 570 if (cpuid_level >= 1) { 571 u_int descs[4]; 572 573 x86_cpuid(1, descs); 574 ci->ci_feat_val[1] = descs[2]; 575 } 576 577 /* If xsave is enabled, enable all fpu features */ 578 if (cr4 & CR4_OSXSAVE) { 579 wrxcr(0, x86_xsave_features & XCR0_FPU); 580 } 581 582 atomic_or_32(&ci->ci_flags, CPUF_RUNNING); 583} 584 585 586#ifdef MULTIPROCESSOR 587 588void 589cpu_boot_secondary_processors(void) 590{ 591 struct cpu_info *ci; 592 kcpuset_t *cpus; 593 u_long i; 594 595 kcpuset_create(&cpus, true); 596 kcpuset_set(cpus, cpu_index(curcpu())); 597 for (i = 0; i < maxcpus; i++) { 598 ci = cpu_lookup(i); 599 if (ci == NULL) 600 continue; 601 if (ci->ci_data.cpu_idlelwp == NULL) 602 continue; 603 if ((ci->ci_flags & CPUF_PRESENT) == 0) 604 continue; 605 if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY)) 606 continue; 607 cpu_boot_secondary(ci); 608 kcpuset_set(cpus, cpu_index(ci)); 609 } 610 while (!kcpuset_match(cpus, kcpuset_running)) 611 ; 612 kcpuset_destroy(cpus); 613 614 x86_mp_online = true; 615} 616 617static void 618cpu_init_idle_lwp(struct cpu_info *ci) 619{ 620 struct lwp *l = ci->ci_data.cpu_idlelwp; 621 struct pcb *pcb = lwp_getpcb(l); 622 623 pcb->pcb_cr0 = rcr0(); 624} 625 626void 627cpu_init_idle_lwps(void) 628{ 629 struct cpu_info *ci; 630 u_long i; 631 632 for (i = 0; i < maxcpus; i++) { 633 ci = cpu_lookup(i); 634 if (ci == NULL) 635 continue; 636 if (ci->ci_data.cpu_idlelwp == NULL) 637 continue; 638 if ((ci->ci_flags & CPUF_PRESENT) == 0) 639 continue; 640 cpu_init_idle_lwp(ci); 641 } 642} 643 644static void 645cpu_start_secondary(struct cpu_info *ci) 646{ 647 int i; 648 649 aprint_debug_dev(ci->ci_dev, "starting\n"); 650 651 ci->ci_curlwp = ci->ci_data.cpu_idlelwp; 652 653 if (CPU_STARTUP(ci, (vaddr_t) cpu_hatch) != 0) { 654 return; 655 } 656 657 /* 658 * wait for it to become ready 659 */ 660 for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i > 0; i--) { 661 delay(10); 662 } 663 if ((ci->ci_flags & CPUF_PRESENT) == 0) { 664 aprint_error_dev(ci->ci_dev, "failed to become ready\n"); 665#if defined(MPDEBUG) && defined(DDB) 666 printf("dropping into debugger; continue from here to resume boot\n"); 667 Debugger(); 668#endif 669 } 670 671 CPU_START_CLEANUP(ci); 672} 673 674void 675cpu_boot_secondary(struct cpu_info *ci) 676{ 677 int i; 678 atomic_or_32(&ci->ci_flags, CPUF_GO); 679 for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i > 0; i--) { 680 delay(10); 681 } 682 if ((ci->ci_flags & CPUF_RUNNING) == 0) { 683 aprint_error_dev(ci->ci_dev, "CPU failed to start\n"); 684#if defined(MPDEBUG) && defined(DDB) 685 printf("dropping into debugger; continue from here to resume boot\n"); 686 Debugger(); 687#endif 688 } 689} 690 691/* 692 * APs end up here immediately after initialisation and VCPUOP_up in 693 * mp_cpu_start(). 694 * At this point, we are running in the idle pcb/idle stack of the new 695 * CPU. This function jumps to the idle loop and starts looking for 696 * work. 697 */ 698extern void x86_64_tls_switch(struct lwp *); 699void 700cpu_hatch(void *v) 701{ 702 struct cpu_info *ci = (struct cpu_info *)v; 703 struct pcb *pcb; 704 int s, i; 705 706 /* Setup TLS and kernel GS/FS */ 707 cpu_init_msrs(ci, true); 708 cpu_init_idt(ci); 709 gdt_init_cpu(ci); 710 711 cpu_probe(ci); 712 713 atomic_or_32(&ci->ci_flags, CPUF_PRESENT); 714 715 while ((ci->ci_flags & CPUF_GO) == 0) { 716 /* Don't use delay, boot CPU may be patching the text. */ 717 for (i = 10000; i != 0; i--) 718 x86_pause(); 719 } 720 721 /* Because the text may have been patched in x86_patch(). */ 722 x86_flush(); 723 tlbflushg(); 724 725 KASSERT((ci->ci_flags & CPUF_RUNNING) == 0); 726 727 KASSERT(ci->ci_curlwp == ci->ci_data.cpu_idlelwp); 728 KASSERT(curlwp == ci->ci_data.cpu_idlelwp); 729 pcb = lwp_getpcb(curlwp); 730 pcb->pcb_cr3 = pmap_pdirpa(pmap_kernel(), 0); 731 732 xen_ipi_init(); 733 734 xen_initclocks(); 735 736#ifdef __x86_64__ 737 fpuinit(ci); 738#endif 739 740 lldt(GSEL(GLDT_SEL, SEL_KPL)); 741 742 cpu_init(ci); 743 cpu_get_tsc_freq(ci); 744 745 s = splhigh(); 746 x86_enable_intr(); 747 splx(s); 748 749 aprint_debug_dev(ci->ci_dev, "running\n"); 750 751 KASSERT(ci->ci_curlwp == ci->ci_data.cpu_idlelwp); 752 idle_loop(NULL); 753 KASSERT(false); 754} 755 756#if defined(DDB) 757 758#include <ddb/db_output.h> 759#include <machine/db_machdep.h> 760 761/* 762 * Dump CPU information from ddb. 763 */ 764void 765cpu_debug_dump(void) 766{ 767 struct cpu_info *ci; 768 CPU_INFO_ITERATOR cii; 769 770 db_printf("addr dev id flags ipis curlwp\n"); 771 for (CPU_INFO_FOREACH(cii, ci)) { 772 db_printf("%p %s %ld %x %x %10p\n", 773 ci, 774 ci->ci_dev == NULL ? "BOOT" : device_xname(ci->ci_dev), 775 (long)ci->ci_vcpuid, 776 ci->ci_flags, ci->ci_ipis, 777 ci->ci_curlwp); 778 } 779} 780#endif /* DDB */ 781 782#endif /* MULTIPROCESSOR */ 783 784extern void hypervisor_callback(void); 785extern void failsafe_callback(void); 786#ifdef __x86_64__ 787typedef void (vector)(void); 788extern vector Xsyscall, Xsyscall32; 789#endif 790 791/* 792 * Setup the "trampoline". On Xen, we setup nearly all cpu context 793 * outside a trampoline, so we prototype and call targetip like so: 794 * void targetip(struct cpu_info *); 795 */ 796 797static void 798gdt_prepframes(paddr_t *frames, vaddr_t base, uint32_t entries) 799{ 800 int i; 801 for (i = 0; i < entries; i++) { 802 frames[i] = ((paddr_t)xpmap_ptetomach( 803 (pt_entry_t *)(base + (i << PAGE_SHIFT)))) >> PAGE_SHIFT; 804 805 /* Mark Read-only */ 806 pmap_pte_clearbits(kvtopte(base + (i << PAGE_SHIFT)), 807 PTE_W); 808 } 809} 810 811#ifdef __x86_64__ 812extern char *ldtstore; 813 814static void 815xen_init_amd64_vcpuctxt(struct cpu_info *ci, struct vcpu_guest_context *initctx, 816 void targetrip(struct cpu_info *)) 817{ 818 /* page frames to point at GDT */ 819 extern int gdt_size; 820 paddr_t frames[16]; 821 psize_t gdt_ents; 822 823 struct lwp *l; 824 struct pcb *pcb; 825 826 volatile struct vcpu_info *vci; 827 828 KASSERT(ci != NULL); 829 KASSERT(ci != &cpu_info_primary); 830 KASSERT(initctx != NULL); 831 KASSERT(targetrip != NULL); 832 833 memset(initctx, 0, sizeof(*initctx)); 834 835 gdt_ents = roundup(gdt_size, PAGE_SIZE) >> PAGE_SHIFT; 836 KASSERT(gdt_ents <= 16); 837 838 gdt_prepframes(frames, (vaddr_t)ci->ci_gdt, gdt_ents); 839 840 /* Initialise the vcpu context: We use idle_loop()'s pcb context. */ 841 842 l = ci->ci_data.cpu_idlelwp; 843 844 KASSERT(l != NULL); 845 pcb = lwp_getpcb(l); 846 KASSERT(pcb != NULL); 847 848 /* resume with interrupts off */ 849 vci = ci->ci_vcpu; 850 vci->evtchn_upcall_mask = 1; 851 __insn_barrier(); 852 853 /* resume in kernel-mode */ 854 initctx->flags = VGCF_in_kernel | VGCF_online; 855 856 /* Stack and entry points: 857 * We arrange for the stack frame for cpu_hatch() to 858 * appear as a callee frame of lwp_trampoline(). Being a 859 * leaf frame prevents trampling on any of the MD stack setup 860 * that x86/vm_machdep.c:cpu_lwp_fork() does for idle_loop() 861 */ 862 863 initctx->user_regs.rdi = (uint64_t) ci; /* targetrip(ci); */ 864 initctx->user_regs.rip = (vaddr_t) targetrip; 865 866 initctx->user_regs.cs = GSEL(GCODE_SEL, SEL_KPL); 867 868 initctx->user_regs.rflags = pcb->pcb_flags; 869 initctx->user_regs.rsp = pcb->pcb_rsp; 870 871 /* Data segments */ 872 initctx->user_regs.ss = GSEL(GDATA_SEL, SEL_KPL); 873 initctx->user_regs.es = GSEL(GDATA_SEL, SEL_KPL); 874 initctx->user_regs.ds = GSEL(GDATA_SEL, SEL_KPL); 875 876 /* GDT */ 877 memcpy(initctx->gdt_frames, frames, sizeof(frames)); 878 initctx->gdt_ents = gdt_ents; 879 880 /* LDT */ 881 initctx->ldt_base = (unsigned long)ldtstore; 882 initctx->ldt_ents = LDT_SIZE >> 3; 883 884 /* Kernel context state */ 885 initctx->kernel_ss = GSEL(GDATA_SEL, SEL_KPL); 886 initctx->kernel_sp = pcb->pcb_rsp0; 887 initctx->ctrlreg[0] = pcb->pcb_cr0; 888 initctx->ctrlreg[1] = 0; /* "resuming" from kernel - no User cr3. */ 889 initctx->ctrlreg[2] = (vaddr_t)targetrip; 890 /* 891 * Use pmap_kernel() L4 PD directly, until we setup the 892 * per-cpu L4 PD in pmap_cpu_init_late() 893 */ 894 initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(ci->ci_kpm_pdirpa))); 895 initctx->ctrlreg[4] = CR4_PAE | CR4_OSFXSR | CR4_OSXMMEXCPT; 896 897 /* Xen callbacks */ 898 initctx->event_callback_eip = (unsigned long)hypervisor_callback; 899 initctx->failsafe_callback_eip = (unsigned long)failsafe_callback; 900 initctx->syscall_callback_eip = (unsigned long)Xsyscall; 901 902 return; 903} 904#else /* i386 */ 905extern union descriptor *ldtstore; 906extern void Xsyscall(void); 907 908static void 909xen_init_i386_vcpuctxt(struct cpu_info *ci, struct vcpu_guest_context *initctx, 910 void targeteip(struct cpu_info *)) 911{ 912 /* page frames to point at GDT */ 913 extern int gdt_size; 914 paddr_t frames[16]; 915 psize_t gdt_ents; 916 917 struct lwp *l; 918 struct pcb *pcb; 919 920 volatile struct vcpu_info *vci; 921 922 KASSERT(ci != NULL); 923 KASSERT(ci != &cpu_info_primary); 924 KASSERT(initctx != NULL); 925 KASSERT(targeteip != NULL); 926 927 memset(initctx, 0, sizeof(*initctx)); 928 929 gdt_ents = roundup(gdt_size, PAGE_SIZE) >> PAGE_SHIFT; 930 KASSERT(gdt_ents <= 16); 931 932 gdt_prepframes(frames, (vaddr_t)ci->ci_gdt, gdt_ents); 933 934 /* 935 * Initialise the vcpu context: 936 * We use this cpu's idle_loop() pcb context. 937 */ 938 939 l = ci->ci_data.cpu_idlelwp; 940 941 KASSERT(l != NULL); 942 pcb = lwp_getpcb(l); 943 KASSERT(pcb != NULL); 944 945 /* resume with interrupts off */ 946 vci = ci->ci_vcpu; 947 vci->evtchn_upcall_mask = 1; 948 __insn_barrier(); 949 950 /* resume in kernel-mode */ 951 initctx->flags = VGCF_in_kernel | VGCF_online; 952 953 /* Stack frame setup for cpu_hatch(): 954 * We arrange for the stack frame for cpu_hatch() to 955 * appear as a callee frame of lwp_trampoline(). Being a 956 * leaf frame prevents trampling on any of the MD stack setup 957 * that x86/vm_machdep.c:cpu_lwp_fork() does for idle_loop() 958 */ 959 960 initctx->user_regs.esp = pcb->pcb_esp - 4; /* Leave word for 961 arg1 */ 962 { 963 /* targeteip(ci); */ 964 uint32_t *arg = (uint32_t *)initctx->user_regs.esp; 965 arg[1] = (uint32_t)ci; /* arg1 */ 966 } 967 968 initctx->user_regs.eip = (vaddr_t)targeteip; 969 initctx->user_regs.cs = GSEL(GCODE_SEL, SEL_KPL); 970 initctx->user_regs.eflags |= pcb->pcb_iopl; 971 972 /* Data segments */ 973 initctx->user_regs.ss = GSEL(GDATA_SEL, SEL_KPL); 974 initctx->user_regs.es = GSEL(GDATA_SEL, SEL_KPL); 975 initctx->user_regs.ds = GSEL(GDATA_SEL, SEL_KPL); 976 initctx->user_regs.fs = GSEL(GDATA_SEL, SEL_KPL); 977 978 /* GDT */ 979 memcpy(initctx->gdt_frames, frames, sizeof(frames)); 980 initctx->gdt_ents = gdt_ents; 981 982 /* LDT */ 983 initctx->ldt_base = (unsigned long)ldtstore; 984 initctx->ldt_ents = NLDT; 985 986 /* Kernel context state */ 987 initctx->kernel_ss = GSEL(GDATA_SEL, SEL_KPL); 988 initctx->kernel_sp = pcb->pcb_esp0; 989 initctx->ctrlreg[0] = pcb->pcb_cr0; 990 initctx->ctrlreg[1] = 0; /* "resuming" from kernel - no User cr3. */ 991 initctx->ctrlreg[2] = (vaddr_t)targeteip; 992 initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(ci->ci_pae_l3_pdirpa))); 993 initctx->ctrlreg[4] = /* CR4_PAE | */CR4_OSFXSR | CR4_OSXMMEXCPT; 994 995 /* Xen callbacks */ 996 initctx->event_callback_eip = (unsigned long)hypervisor_callback; 997 initctx->event_callback_cs = GSEL(GCODE_SEL, SEL_KPL); 998 initctx->failsafe_callback_eip = (unsigned long)failsafe_callback; 999 initctx->failsafe_callback_cs = GSEL(GCODE_SEL, SEL_KPL); 1000 1001 return; 1002} 1003#endif /* __x86_64__ */ 1004 1005int 1006mp_cpu_start(struct cpu_info *ci, vaddr_t target) 1007{ 1008 int hyperror; 1009 struct vcpu_guest_context *vcpuctx; 1010 1011 KASSERT(ci != NULL); 1012 KASSERT(ci != &cpu_info_primary); 1013 KASSERT(ci->ci_flags & CPUF_AP); 1014 1015 vcpuctx = kmem_alloc(sizeof(*vcpuctx), KM_SLEEP); 1016 1017#ifdef __x86_64__ 1018 xen_init_amd64_vcpuctxt(ci, vcpuctx, (void (*)(struct cpu_info *))target); 1019#else 1020 xen_init_i386_vcpuctxt(ci, vcpuctx, (void (*)(struct cpu_info *))target); 1021#endif 1022 1023 /* Initialise the given vcpu to execute cpu_hatch(ci); */ 1024 if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_initialise, ci->ci_vcpuid, vcpuctx))) { 1025 aprint_error(": context initialisation failed. errno = %d\n", hyperror); 1026 goto out; 1027 } 1028 1029 /* Start it up */ 1030 1031 /* First bring it down */ 1032 if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_down, ci->ci_vcpuid, NULL))) { 1033 aprint_error(": VCPUOP_down hypervisor command failed. errno = %d\n", hyperror); 1034 goto out; 1035 } 1036 1037 if ((hyperror = HYPERVISOR_vcpu_op(VCPUOP_up, ci->ci_vcpuid, NULL))) { 1038 aprint_error(": VCPUOP_up hypervisor command failed. errno = %d\n", hyperror); 1039 goto out; 1040 } 1041 1042 if (!vcpu_is_up(ci)) { 1043 aprint_error(": did not come up\n"); 1044 hyperror = -1; 1045 goto out; 1046 } 1047 1048out: 1049 kmem_free(vcpuctx, sizeof(*vcpuctx)); 1050 return hyperror; 1051} 1052 1053void 1054mp_cpu_start_cleanup(struct cpu_info *ci) 1055{ 1056 if (vcpu_is_up(ci)) { 1057 aprint_debug_dev(ci->ci_dev, "is started.\n"); 1058 } else { 1059 aprint_error_dev(ci->ci_dev, "did not start up.\n"); 1060 } 1061} 1062 1063void 1064cpu_init_msrs(struct cpu_info *ci, bool full) 1065{ 1066#ifdef __x86_64__ 1067 if (full) { 1068 HYPERVISOR_set_segment_base(SEGBASE_FS, 0); 1069 HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL, (uint64_t)ci); 1070 HYPERVISOR_set_segment_base(SEGBASE_GS_USER, 0); 1071 } 1072#endif 1073 1074 if (cpu_feature[2] & CPUID_NOX) 1075 wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE); 1076} 1077 1078void 1079cpu_offline_md(void) 1080{ 1081 return; 1082} 1083 1084void 1085cpu_get_tsc_freq(struct cpu_info *ci) 1086{ 1087 uint32_t vcpu_tversion; 1088 const volatile vcpu_time_info_t *tinfo = &ci->ci_vcpu->time; 1089 1090 vcpu_tversion = tinfo->version; 1091 while (tinfo->version == vcpu_tversion); /* Wait for a time update. XXX: timeout ? */ 1092 1093 uint64_t freq = 1000000000ULL << 32; 1094 freq = freq / (uint64_t)tinfo->tsc_to_system_mul; 1095 if (tinfo->tsc_shift < 0) 1096 freq = freq << -tinfo->tsc_shift; 1097 else 1098 freq = freq >> tinfo->tsc_shift; 1099 ci->ci_data.cpu_cc_freq = freq; 1100} 1101 1102/* 1103 * Loads pmap for the current CPU. 1104 */ 1105void 1106cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap) 1107{ 1108 struct cpu_info *ci = curcpu(); 1109 cpuid_t cid = cpu_index(ci); 1110 int i; 1111 1112 KASSERT(kpreempt_disabled()); 1113 KASSERT(pmap != pmap_kernel()); 1114 1115 mutex_enter(&ci->ci_kpm_mtx); 1116 /* make new pmap visible to xen_kpm_sync() */ 1117 kcpuset_atomic_set(pmap->pm_xen_ptp_cpus, cid); 1118 1119#ifdef __x86_64__ 1120 pd_entry_t *new_pgd; 1121 paddr_t l4_pd_ma; 1122 1123 l4_pd_ma = xpmap_ptom_masked(ci->ci_kpm_pdirpa); 1124 1125 /* 1126 * Map user space address in kernel space and load 1127 * user cr3 1128 */ 1129 new_pgd = pmap->pm_pdir; 1130 KASSERT(pmap == ci->ci_pmap); 1131 1132 /* Copy user pmap L4 PDEs (in user addr. range) to per-cpu L4 */ 1133 for (i = 0; i < PDIR_SLOT_USERLIM; i++) { 1134 KASSERT(pmap != pmap_kernel() || new_pgd[i] == 0); 1135 if (ci->ci_kpm_pdir[i] != new_pgd[i]) { 1136 xpq_queue_pte_update(l4_pd_ma + i * sizeof(pd_entry_t), 1137 new_pgd[i]); 1138 } 1139 } 1140 1141 xen_set_user_pgd(pmap_pdirpa(pmap, 0)); 1142 ci->ci_xen_current_user_pgd = pmap_pdirpa(pmap, 0); 1143#else 1144 paddr_t l3_pd = xpmap_ptom_masked(ci->ci_pae_l3_pdirpa); 1145 /* don't update the kernel L3 slot */ 1146 for (i = 0; i < PDP_SIZE - 1; i++) { 1147 xpq_queue_pte_update(l3_pd + i * sizeof(pd_entry_t), 1148 xpmap_ptom(pmap->pm_pdirpa[i]) | PTE_P); 1149 } 1150#endif 1151 1152 tlbflush(); 1153 1154 /* old pmap no longer visible to xen_kpm_sync() */ 1155 if (oldpmap != pmap_kernel()) { 1156 kcpuset_atomic_clear(oldpmap->pm_xen_ptp_cpus, cid); 1157 } 1158 mutex_exit(&ci->ci_kpm_mtx); 1159} 1160 1161/* 1162 * pmap_cpu_init_late: perform late per-CPU initialization. 1163 * 1164 * Short note about percpu PDIR pages. Both the PAE and __x86_64__ architectures 1165 * have per-cpu PDIR tables, for two different reasons: 1166 * - on PAE, this is to get around Xen's pagetable setup constraints (multiple 1167 * L3[3]s cannot point to the same L2 - Xen will refuse to pin a table set up 1168 * this way). 1169 * - on __x86_64__, this is for multiple CPUs to map in different user pmaps 1170 * (see cpu_load_pmap()). 1171 * 1172 * What this means for us is that the PDIR of the pmap_kernel() is considered 1173 * to be a canonical "SHADOW" PDIR with the following properties: 1174 * - its recursive mapping points to itself 1175 * - per-cpu recursive mappings point to themselves on __x86_64__ 1176 * - per-cpu L4 pages' kernel entries are expected to be in sync with 1177 * the shadow 1178 */ 1179 1180void 1181pmap_cpu_init_late(struct cpu_info *ci) 1182{ 1183 int i; 1184 1185 /* 1186 * The BP has already its own PD page allocated during early 1187 * MD startup. 1188 */ 1189 1190#ifdef __x86_64__ 1191 /* Setup per-cpu normal_pdes */ 1192 extern pd_entry_t * const normal_pdes[]; 1193 for (i = 0;i < PTP_LEVELS - 1;i++) { 1194 ci->ci_normal_pdes[i] = normal_pdes[i]; 1195 } 1196#endif 1197 1198 if (ci == &cpu_info_primary) 1199 return; 1200 1201 KASSERT(ci != NULL); 1202 1203#if defined(i386) 1204 cpu_alloc_l3_page(ci); 1205 KASSERT(ci->ci_pae_l3_pdirpa != 0); 1206 1207 /* Initialise L2 entries 0 - 2: Point them to pmap_kernel() */ 1208 for (i = 0; i < PDP_SIZE - 1; i++) { 1209 ci->ci_pae_l3_pdir[i] = 1210 xpmap_ptom_masked(pmap_kernel()->pm_pdirpa[i]) | PTE_P; 1211 } 1212#endif 1213 1214 ci->ci_kpm_pdir = (pd_entry_t *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0, 1215 UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_NOWAIT); 1216 1217 if (ci->ci_kpm_pdir == NULL) { 1218 panic("%s: failed to allocate L4 per-cpu PD for CPU %d\n", 1219 __func__, cpu_index(ci)); 1220 } 1221 ci->ci_kpm_pdirpa = vtophys((vaddr_t)ci->ci_kpm_pdir); 1222 KASSERT(ci->ci_kpm_pdirpa != 0); 1223 1224#ifdef __x86_64__ 1225 extern pt_entry_t xpmap_pg_nx; 1226 1227 /* Copy over the pmap_kernel() shadow L4 entries */ 1228 memcpy(ci->ci_kpm_pdir, pmap_kernel()->pm_pdir, PAGE_SIZE); 1229 1230 /* Recursive kernel mapping */ 1231 ci->ci_kpm_pdir[PDIR_SLOT_PTE] = xpmap_ptom_masked(ci->ci_kpm_pdirpa) 1232 | PTE_P | xpmap_pg_nx; 1233#else 1234 /* Copy over the pmap_kernel() shadow L2 entries */ 1235 memcpy(ci->ci_kpm_pdir, pmap_kernel()->pm_pdir + PDIR_SLOT_KERN, 1236 nkptp[PTP_LEVELS - 1] * sizeof(pd_entry_t)); 1237#endif 1238 1239 /* Xen wants a RO pdir. */ 1240 pmap_protect(pmap_kernel(), (vaddr_t)ci->ci_kpm_pdir, 1241 (vaddr_t)ci->ci_kpm_pdir + PAGE_SIZE, VM_PROT_READ); 1242 pmap_update(pmap_kernel()); 1243 1244#ifdef __x86_64__ 1245 xpq_queue_pin_l4_table(xpmap_ptom_masked(ci->ci_kpm_pdirpa)); 1246#else 1247 /* 1248 * Initialize L3 entry 3. This mapping is shared across all pmaps and is 1249 * static, ie: loading a new pmap will not update this entry. 1250 */ 1251 ci->ci_pae_l3_pdir[3] = xpmap_ptom_masked(ci->ci_kpm_pdirpa) | PTE_P; 1252 1253 /* Xen wants a RO L3. */ 1254 pmap_protect(pmap_kernel(), (vaddr_t)ci->ci_pae_l3_pdir, 1255 (vaddr_t)ci->ci_pae_l3_pdir + PAGE_SIZE, VM_PROT_READ); 1256 pmap_update(pmap_kernel()); 1257 1258 xpq_queue_pin_l3_table(xpmap_ptom_masked(ci->ci_pae_l3_pdirpa)); 1259#endif 1260} 1261 1262/* 1263 * Notify all other cpus to halt. 1264 */ 1265 1266void 1267cpu_broadcast_halt(void) 1268{ 1269 xen_broadcast_ipi(XEN_IPI_HALT); 1270} 1271 1272/* 1273 * Send a dummy ipi to a cpu, and raise an AST on the running LWP. 1274 */ 1275 1276void 1277cpu_kick(struct cpu_info *ci) 1278{ 1279 (void)xen_send_ipi(ci, XEN_IPI_AST); 1280} 1281