1/* 2 * Copyright (c) 2003-2009 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28/* 29 * @OSF_COPYRIGHT@ 30 */ 31/* 32 * Mach Operating System 33 * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University 34 * All Rights Reserved. 35 * 36 * Permission to use, copy, modify and distribute this software and its 37 * documentation is hereby granted, provided that both the copyright 38 * notice and this permission notice appear in all copies of the 39 * software, derivative works or modified versions, and any portions 40 * thereof, and that both notices appear in supporting documentation. 41 * 42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 43 * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR 44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 45 * 46 * Carnegie Mellon requests users of this software to return to 47 * 48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 49 * School of Computer Science 50 * Carnegie Mellon University 51 * Pittsburgh PA 15213-3890 52 * 53 * any improvements or extensions that they make and grant Carnegie Mellon 54 * the rights to redistribute these changes. 55 */ 56 57#include <platforms.h> 58 59#include <mach/i386/vm_param.h> 60 61#include <string.h> 62#include <mach/vm_param.h> 63#include <mach/vm_prot.h> 64#include <mach/machine.h> 65#include <mach/time_value.h> 66#include <kern/spl.h> 67#include <kern/assert.h> 68#include <kern/debug.h> 69#include <kern/misc_protos.h> 70#include <kern/startup.h> 71#include <kern/clock.h> 72#include <kern/pms.h> 73#include <kern/xpr.h> 74#include <kern/cpu_data.h> 75#include <kern/processor.h> 76#include <console/serial_protos.h> 77#include <vm/vm_page.h> 78#include <vm/pmap.h> 79#include <vm/vm_kern.h> 80#include <machine/pal_routines.h> 81#include <i386/fpu.h> 82#include <i386/pmap.h> 83#include <i386/misc_protos.h> 84#include <i386/cpu_threads.h> 85#include <i386/cpuid.h> 86#include <i386/lapic.h> 87#include <i386/mp.h> 88#include <i386/mp_desc.h> 89#if CONFIG_MTRR 90#include <i386/mtrr.h> 91#endif 92#include <i386/machine_routines.h> 93#if CONFIG_MCA 94#include <i386/machine_check.h> 95#endif 96#include <i386/ucode.h> 97#include <i386/postcode.h> 98#include <i386/Diagnostics.h> 99#include <i386/pmCPU.h> 100#include <i386/tsc.h> 101#include <i386/locks.h> /* LcksOpts */ 102#ifdef __i386__ 103#include <i386/cpu_capabilities.h> 104#endif 105#if DEBUG 106#include <machine/pal_routines.h> 107#endif 108 109#if DEBUG 110#define DBG(x...) kprintf(x) 111#else 112#define DBG(x...) 
#endif

int debug_task;

static boot_args *kernelBootArgs;

extern int disableConsoleOutput;
extern const char version[];
extern const char version_variant[];
extern int nx_enabled;

/* Bounds of the kernel physical-memory map window; set by physmap_init(). */
uint64_t physmap_base, physmap_max;

pd_entry_t *KPTphys;
pd_entry_t *IdlePTD;
#ifdef __i386__
pd_entry_t *IdlePDPT64;
#else
pdpt_entry_t *IdlePDPT;
pml4_entry_t *IdlePML4;
#endif

/* First physical address not yet handed out by ALLOCPAGES(). */
char *physfree;

/*
 * Boot-time bump allocator: returns npages of zeroed, page-aligned
 * memory carved from the region starting at physfree, advancing
 * physfree past the allocation.
 *
 * Note: ALLOCPAGES() can only be used safely within Idle_PTs_init()
 * due to the mutation of physfree.
 *
 * On x86_64 the returned address is rebased into the kernel's high
 * virtual mapping of low memory.
 */
static void *
ALLOCPAGES(int npages)
{
	uintptr_t tmp = (uintptr_t)physfree;
	bzero(physfree, npages * PAGE_SIZE);
	physfree += npages * PAGE_SIZE;
#ifdef __x86_64__
	/* Rebase the low physical address into the kernel's high alias. */
	tmp += VM_MIN_KERNEL_ADDRESS & ~LOW_4GB_MASK;
#endif
	return (void *)tmp;
}

/*
 * Fill 'count' consecutive entries of page table 'base', starting at
 * 'index', mapping successive pages beginning at physical address
 * 'src' with the given protection bits (INTEL_PTE_VALID is always added).
 */
static void
fillkpt(pt_entry_t *base, int prot, uintptr_t src, int index, int count)
{
	int i;
	for (i=0; i<count; i++) {
		base[index] = src | prot | INTEL_PTE_VALID;
		src += PAGE_SIZE;
		index++;
	}
}

extern pmap_paddr_t first_avail;

#ifdef __x86_64__
int break_kprintf = 0;

/*
 * Prepare for sleep: alias the kernel's PML4 slot into entry 0 and
 * point CR3 at the physical address of IdlePML4.
 * Returns the previous CR3 value so x86_64_post_sleep() can restore it.
 */
uint64_t
x86_64_pre_sleep(void)
{
	IdlePML4[0] = IdlePML4[KERNEL_PML4_INDEX];
	uint64_t oldcr3 = get_cr3_raw();
	set_cr3_raw((uint32_t) (uintptr_t)ID_MAP_VTOP(IdlePML4));
	return oldcr3;
}

/*
 * Undo x86_64_pre_sleep(): clear the temporary PML4[0] alias and
 * restore the saved CR3.
 */
void
x86_64_post_sleep(uint64_t new_cr3)
{
	IdlePML4[0] = 0;
	set_cr3_raw((uint32_t) new_cr3);
}

#endif

#ifdef __i386__
/* On i386, ID_MAP_VTOP is the identity. */
#define ID_MAP_VTOP(x) x
#endif


#ifdef __x86_64__
// Set up the physical mapping - NPHYSMAP GB of memory mapped at a high address
// NPHYSMAP is determined by the maximum supported RAM size plus 4GB to account
// the PCI hole (which is less 4GB but not more).

/* Compile-time guard: NPHYSMAP is capped to 256GiB, accounting for
 * randomisation
 */
extern int maxphymapsupported[NPHYSMAP <= (PTE_PER_PAGE/2) ? 1 : -1];

/*
 * Build the physmap: NPHYSMAP L2 pages of 2MB "large" page entries
 * (1GB per L2 page) mapping all of physical memory at a high kernel
 * address.  The starting L3 slot is offset by a random byte
 * (phys_random_L3), randomizing physmap_base.
 */
static void
physmap_init(void)
{
	pt_entry_t *physmapL3 = ALLOCPAGES(1);
	struct {
		pt_entry_t entries[PTE_PER_PAGE];
	} * physmapL2 = ALLOCPAGES(NPHYSMAP);

	uint64_t i;
	uint8_t phys_random_L3 = ml_early_random() & 0xFF;

	/* We assume NX support. Mark all levels of the PHYSMAP NX
	 * to avoid granting executability via a single bit flip.
	 */
	assert(cpuid_extfeatures() & CPUID_EXTFEATURE_XD);

	for(i = 0; i < NPHYSMAP; i++) {
		/* L3 entry: writable, NX, offset by the random slide. */
		physmapL3[i + phys_random_L3] =
		    ((uintptr_t)ID_MAP_VTOP(&physmapL2[i]))
		    | INTEL_PTE_VALID
		    | INTEL_PTE_NX
		    | INTEL_PTE_WRITE;

		uint64_t j;
		for(j = 0; j < PTE_PER_PAGE; j++) {
			/* L2 entry: 2MB large page (PS) mapping physical
			 * address (i*512 + j) << PDSHIFT, writable, NX. */
			physmapL2[i].entries[j] =
			    ((i * PTE_PER_PAGE + j) << PDSHIFT)
			    | INTEL_PTE_PS
			    | INTEL_PTE_VALID
			    | INTEL_PTE_NX
			    | INTEL_PTE_WRITE;
		}
	}

	/* Hook the physmap L3 table into the idle PML4. */
	IdlePML4[KERNEL_PHYSMAP_PML4_INDEX] =
	    ((uintptr_t)ID_MAP_VTOP(physmapL3))
	    | INTEL_PTE_VALID
	    | INTEL_PTE_NX
	    | INTEL_PTE_WRITE;

	physmap_base = KVADDR(KERNEL_PHYSMAP_PML4_INDEX, phys_random_L3, 0, 0);
	physmap_max = physmap_base + NPHYSMAP * GB;
	DBG("Physical address map base: 0x%qx\n", physmap_base);
	DBG("Physical map idlepml4[%d]: 0x%llx\n",
	    KERNEL_PHYSMAP_PML4_INDEX, IdlePML4[KERNEL_PHYSMAP_PML4_INDEX]);
}

/*
 * Alias the master GDT and IDT at their fixed MASTER_*_ALIAS addresses
 * by pointing the corresponding KPTphys entries at the descriptors'
 * physical pages.  The GDT alias is writable; the IDT alias is
 * read-only.  Both aliases are mapped NX.
 */
static void
descriptor_alias_init()
{
	vm_offset_t	master_gdt_phys;
	vm_offset_t	master_gdt_alias_phys;
	vm_offset_t	master_idt_phys;
	vm_offset_t	master_idt_alias_phys;

	/* Both descriptor tables must be page-aligned to be remapped. */
	assert(((vm_offset_t)master_gdt & PAGE_MASK) == 0);
	assert(((vm_offset_t)master_idt64 & PAGE_MASK) == 0);

	master_gdt_phys       = (vm_offset_t) ID_MAP_VTOP(master_gdt);
	master_idt_phys       = (vm_offset_t) ID_MAP_VTOP(master_idt64);
	master_gdt_alias_phys = (vm_offset_t) ID_MAP_VTOP(MASTER_GDT_ALIAS);
	master_idt_alias_phys = (vm_offset_t) ID_MAP_VTOP(MASTER_IDT_ALIAS);

	DBG("master_gdt_phys:       %p\n", (void *) master_gdt_phys);
	DBG("master_idt_phys:       %p\n", (void *) master_idt_phys);
	DBG("master_gdt_alias_phys: %p\n", (void *) master_gdt_alias_phys);
	DBG("master_idt_alias_phys: %p\n", (void *) master_idt_alias_phys);

	KPTphys[atop_kernel(master_gdt_alias_phys)] = master_gdt_phys |
		INTEL_PTE_VALID | INTEL_PTE_NX | INTEL_PTE_WRITE;
	KPTphys[atop_kernel(master_idt_alias_phys)] = master_idt_phys |
		INTEL_PTE_VALID | INTEL_PTE_NX;	/* read-only */
}

/*
 * Allocate and populate the kernel's own 4-level, 4K-page "idle"
 * tables: map [0, physfree) at the lowest level, chain the levels
 * together, install the physmap and descriptor aliases, then switch
 * CR3 onto the new hierarchy.  Ordering matters: the tables must be
 * complete before the CR3 write.
 */
static void
Idle_PTs_init(void)
{
	/* Allocate the "idle" kernel page tables: */
	KPTphys  = ALLOCPAGES(NKPT);		/* level 1 */
	IdlePTD  = ALLOCPAGES(NPGPTD);		/* level 2 */
	IdlePDPT = ALLOCPAGES(1);		/* level 3 */
	IdlePML4 = ALLOCPAGES(1);		/* level 4 */

	// Fill the lowest level with everything up to physfree
	fillkpt(KPTphys,
		INTEL_PTE_WRITE, 0, 0, (int)(((uintptr_t)physfree) >> PAGE_SHIFT));

	/* IdlePTD */
	fillkpt(IdlePTD,
		INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(KPTphys), 0, NKPT);

	// IdlePDPT entries
	fillkpt(IdlePDPT,
		INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePTD), 0, NPGPTD);

	// IdlePML4 single entry for kernel space.
	fillkpt(IdlePML4 + KERNEL_PML4_INDEX,
		INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePDPT), 0, 1);

	postcode(VSTART_PHYSMAP_INIT);

	physmap_init();

	postcode(VSTART_DESC_ALIAS_INIT);

	descriptor_alias_init();

	postcode(VSTART_SET_CR3);

	// Switch to the page tables..
	set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));

}

#else /* __x86_64__ */

/*
 * 32-bit variant: build the 2-level (plus PDPT) kernel page tables,
 * including the recursive PTE mapping and the commpage, then reload
 * CR3 with its current value to flush the TLB.
 */
static void
Idle_PTs_init(void)
{
	/* Allocate the "idle" kernel page tables: */
	KPTphys = ALLOCPAGES(NKPT);		/* level 1 */
	IdlePTD = ALLOCPAGES(NPGPTD);		/* level 2 */

	IdlePDPT64 = ALLOCPAGES(1);

	// Recursive mapping of PTEs
	fillkpt(IdlePTD, INTEL_PTE_WRITE, (uintptr_t)IdlePTD, PTDPTDI, NPGPTD);
	// commpage
	fillkpt(IdlePTD, INTEL_PTE_WRITE|INTEL_PTE_USER, (uintptr_t)ALLOCPAGES(1), _COMM_PAGE32_BASE_ADDRESS >> PDESHIFT,1);

	// Fill the lowest level with everything up to physfree
	fillkpt(KPTphys,
		INTEL_PTE_WRITE, 0, 0, (int)(((uintptr_t)physfree) >> PAGE_SHIFT));

	// Rewrite the 2nd-lowest level  to point to pages of KPTphys.
	// This was previously filled statically by idle_pt.c, and thus
	// must be done after the KPTphys fill since IdlePTD is in use
	fillkpt(IdlePTD,
		INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(KPTphys), 0, NKPT);

	// IdlePDPT entries
	fillkpt(IdlePDPT, 0, (uintptr_t)IdlePTD, 0, NPGPTD);

	postcode(VSTART_SET_CR3);

	// Flush the TLB now we're done rewriting the page tables..
	set_cr3_raw(get_cr3_raw());
}
#endif

/*
 * vstart() is called in the natural mode (64bit for K64, 32 for K32)
 * on a set of bootstrap pagetables which use large, 2MB pages to map
 * all of physical memory in both. See idle_pt.c for details.
 *
 * In K64 this identity mapping is mirrored in the top and bottom 512GB
 * slots of PML4.
 *
 * The bootstrap processor is called with argument boot_args_start pointing
 * to the boot-args block. The kernel's (4K page) page tables are allocated
 * and initialized before switching to these.
 *
 * Non-bootstrap processors are called with argument boot_args_start NULL.
 * These processors switch immediately to the existing kernel page tables.
 */
void
vstart(vm_offset_t boot_args_start)
{
	boolean_t	is_boot_cpu = !(boot_args_start == 0);
	int		cpu;
	uint32_t	lphysfree;

	postcode(VSTART_ENTRY);

	if (is_boot_cpu) {
		/*
		 * Get startup parameters.
		 */
		kernelBootArgs = (boot_args *)boot_args_start;
		lphysfree = kernelBootArgs->kaddr + kernelBootArgs->ksize;
		/* Round the first free physical address up to a page boundary. */
		physfree = (void *)(uintptr_t)((lphysfree + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1));
#if DEBUG
		pal_serial_init();
#endif
		DBG("revision      0x%x\n", kernelBootArgs->Revision);
		DBG("version      0x%x\n", kernelBootArgs->Version);
		DBG("command line %s\n", kernelBootArgs->CommandLine);
		DBG("memory map   0x%x\n", kernelBootArgs->MemoryMap);
		DBG("memory map sz 0x%x\n", kernelBootArgs->MemoryMapSize);
		DBG("kaddr        0x%x\n", kernelBootArgs->kaddr);
		DBG("ksize        0x%x\n", kernelBootArgs->ksize);
		DBG("physfree     %p\n", physfree);
		DBG("bootargs: %p, &ksize: %p &kaddr: %p\n",
			kernelBootArgs,
			&kernelBootArgs->ksize,
			&kernelBootArgs->kaddr);

		postcode(VSTART_IDLE_PTS_INIT);

		/* Build the kernel page tables and switch onto them. */
		Idle_PTs_init();

		first_avail = (vm_offset_t)ID_MAP_VTOP(physfree);

		cpu = 0;
		cpu_data_alloc(TRUE);


		/*
		 * Setup boot args given the physical start address.
		 */
		kernelBootArgs = (boot_args *)
		    ml_static_ptovirt(boot_args_start);
		DBG("i386_init(0x%lx) kernelBootArgs=%p\n",
		    (unsigned long)boot_args_start, kernelBootArgs);

		PE_init_platform(FALSE, kernelBootArgs);
		postcode(PE_INIT_PLATFORM_D);
	} else {
#ifdef __x86_64__
		/* Switch to kernel's page tables (from the Boot PTs) */
		set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4));
#endif
		/* Find our logical cpu number */
		cpu = lapic_to_cpu[(LAPIC_READ(ID)>>LAPIC_ID_SHIFT) & LAPIC_ID_MASK];
		DBG("CPU: %d, GSBASE initial value: 0x%llx\n", cpu, rdmsr64(MSR_IA32_GS_BASE));
	}

	postcode(VSTART_CPU_DESC_INIT);
#ifdef __x86_64__
	if(is_boot_cpu)
		cpu_desc_init64(cpu_datap(cpu));
	cpu_desc_load64(cpu_datap(cpu));
#else
	if(is_boot_cpu)
		cpu_desc_init(cpu_datap(cpu));
	cpu_desc_load(cpu_datap(cpu));
#endif
	postcode(VSTART_CPU_MODE_INIT);
	if (is_boot_cpu)
		cpu_mode_init(current_cpu_datap()); /* cpu_mode_init() will be
						     * invoked on the APs
						     * via i386_init_slave()
						     */
	postcode(VSTART_EXIT);
#ifdef __i386__
	/* Enable NX/XD via EFER when the CPU supports it. */
	if (cpuid_extfeatures() & CPUID_EXTFEATURE_XD) {
		wrmsr64(MSR_IA32_EFER, rdmsr64(MSR_IA32_EFER) | MSR_IA32_EFER_NXE);
		DBG("vstart() NX/XD enabled, i386\n");
	}

	if (is_boot_cpu)
		i386_init();
	else
		i386_init_slave();
	/*NOTREACHED*/
#else
	x86_init_wrapper(is_boot_cpu ? (uintptr_t) i386_init
				     : (uintptr_t) i386_init_slave,
			 cpu_datap(cpu)->cpu_int_stack_top);
#endif
}

/*
 * Cpu initialization. Running virtual, but without MACH VM
 * set up.
 */
void
i386_init(void)
{
	unsigned int	maxmem;
	uint64_t	maxmemtouse;
	unsigned int	cpus = 0;
	boolean_t	fidn;
	boolean_t	IA32e = TRUE;

	postcode(I386_INIT_ENTRY);

	pal_i386_init();

#if CONFIG_MCA
	/* Initialize machine-check handling */
	mca_cpu_init();
#endif


	kernel_early_bootstrap();

	master_cpu = 0;
	cpu_init();

	postcode(CPU_INIT_D);

	printf_init();			/* Init this in case we need debugger */
	panic_init();			/* Init this in case we need debugger */

	/* setup debugging output if one has been chosen */
	PE_init_kprintf(FALSE);

	if (!PE_parse_boot_argn("diag", &dgWork.dgFlags, sizeof (dgWork.dgFlags)))
		dgWork.dgFlags = 0;

	serialmode = 0;
	if(PE_parse_boot_argn("serial", &serialmode, sizeof (serialmode))) {
		/* We want a serial keyboard and/or console */
		kprintf("Serial mode specified: %08X\n", serialmode);
	}
	if(serialmode & 1) {
		(void)switch_to_serial_console();
		disableConsoleOutput = FALSE;	/* Allow printfs to happen */
	}

	/* setup console output */
	PE_init_printf(FALSE);

	kprintf("version_variant = %s\n", version_variant);
	kprintf("version = %s\n", version);

	/* "maxmem" boot-arg limits usable memory (given in MB). */
	if (!PE_parse_boot_argn("maxmem", &maxmem, sizeof (maxmem)))
		maxmemtouse = 0;
	else
		maxmemtouse = ((uint64_t)maxmem) * MB;

	/* "cpus" boot-arg limits the number of CPUs brought up. */
	if (PE_parse_boot_argn("cpus", &cpus, sizeof (cpus))) {
		if ((0 < cpus) && (cpus < max_ncpus))
			max_ncpus = cpus;
	}

	/*
	 * debug support for > 4G systems
	 */
	if (!PE_parse_boot_argn("himemory_mode", &vm_himemory_mode, sizeof (vm_himemory_mode)))
		vm_himemory_mode = 0;

	if (!PE_parse_boot_argn("immediate_NMI", &fidn, sizeof (fidn)))
		force_immediate_debugger_NMI = FALSE;
	else
		force_immediate_debugger_NMI = fidn;

#if DEBUG
	nanoseconds_to_absolutetime(URGENCY_NOTIFICATION_ASSERT_NS, &urgency_notification_assert_abstime_threshold);
#endif
	PE_parse_boot_argn("urgency_notification_abstime",
	    &urgency_notification_assert_abstime_threshold,
	    sizeof(urgency_notification_assert_abstime_threshold));

#if CONFIG_YONAH
	/*
	 * At this point we check whether we are a 64-bit processor
	 * and that we're not restricted to legacy mode, 32-bit operation.
	 */
	if (cpuid_extfeatures() & CPUID_EXTFEATURE_EM64T) {
		boolean_t legacy_mode;
		kprintf("EM64T supported");
		if (PE_parse_boot_argn("-legacy", &legacy_mode, sizeof (legacy_mode))) {
			kprintf(" but legacy mode forced\n");
			IA32e = FALSE;
		} else {
			kprintf(" and will be enabled\n");
		}
	} else
		IA32e = FALSE;
#endif

	/* Disable NX enforcement if the CPU lacks XD support. */
	if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
		nx_enabled = 0;

	/*
	 * VM initialization, after this we're using page tables...
	 * The maximum number of cpus must be set beforehand.
	 */
	i386_vm_init(maxmemtouse, IA32e, kernelBootArgs);

	/* create the console for verbose or pretty mode */
	/* Note: doing this prior to tsc_init() allows for graceful panic! */
	PE_init_platform(TRUE, kernelBootArgs);
	PE_create_console();

	tsc_init();
	power_management_init();
	processor_bootstrap();
	thread_bootstrap();

	machine_startup();
}

/*
 * Common slave-CPU bring-up path.  When fast_restart is FALSE this is
 * a full cold start (caching, LAPIC, FPU, MTRR, microcode); when TRUE
 * most hardware state is assumed intact and only the scheduler-visible
 * state is re-established.
 */
static void
do_init_slave(boolean_t fast_restart)
{
	void	*init_param = FULL_SLAVE_INIT;

	postcode(I386_INIT_SLAVE);

	if (!fast_restart) {
		/* Ensure that caching and write-through are enabled */
		set_cr0(get_cr0() & ~(CR0_NW|CR0_CD));

		DBG("i386_init_slave() CPU%d: phys (%d) active.\n",
		    get_cpu_number(), get_cpu_phys_number());

		assert(!ml_get_interrupts_enabled());

		cpu_mode_init(current_cpu_datap());
		pmap_cpu_init();

#if CONFIG_MCA
		mca_cpu_init();
#endif

		LAPIC_INIT();
		lapic_configure();
		LAPIC_DUMP();
		LAPIC_CPU_MAP_DUMP();

		init_fpu();

#if CONFIG_MTRR
		mtrr_update_cpu();
#endif
		/* update CPU microcode */
		ucode_update_wake();
	} else
		init_param = FAST_SLAVE_INIT;

#if CONFIG_VMX
	/* resume VT operation */
	vmx_resume();
#endif

#if CONFIG_MTRR
	if (!fast_restart)
		pat_init();
#endif

	cpu_thread_init();	/* not strictly necessary */

	cpu_init();	/* Sets cpu_running which starter cpu waits for */

	slave_main(init_param);

	panic("do_init_slave() returned from slave_main()");
}

/*
 * i386_init_slave() is called from pstart.
 * We're in the cpu's interrupt stack with interrupts disabled.
 * At this point we are in legacy mode. We need to switch on IA32e
 * if the mode is set to 64-bits.
 */
void
i386_init_slave(void)
{
	do_init_slave(FALSE);
}

/*
 * i386_init_slave_fast() is called from pmCPUHalt.
 * We're running on the idle thread and need to fix up
 * some accounting and get it so that the scheduler sees this
 * CPU again.
 */
void
i386_init_slave_fast(void)
{
	do_init_slave(TRUE);
}
