1/* 2 * Copyright (c) 2000-2012 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 29#include <i386/machine_routines.h> 30#include <i386/io_map_entries.h> 31#include <i386/cpuid.h> 32#include <i386/fpu.h> 33#include <mach/processor.h> 34#include <kern/processor.h> 35#include <kern/machine.h> 36#include <kern/cpu_data.h> 37#include <kern/cpu_number.h> 38#include <kern/thread.h> 39#include <kern/thread_call.h> 40#include <prng/random.h> 41#include <i386/machine_cpu.h> 42#include <i386/lapic.h> 43#include <i386/bit_routines.h> 44#include <i386/mp_events.h> 45#include <i386/pmCPU.h> 46#include <i386/trap.h> 47#include <i386/tsc.h> 48#include <i386/cpu_threads.h> 49#include <i386/proc_reg.h> 50#include <mach/vm_param.h> 51#include <i386/pmap.h> 52#include <i386/pmap_internal.h> 53#include <i386/misc_protos.h> 54#include <kern/timer_queue.h> 55#if KPC 56#include <kern/kpc.h> 57#endif 58#include <architecture/i386/pio.h> 59 60#if DEBUG 61#define DBG(x...) kprintf("DBG: " x) 62#else 63#define DBG(x...) 64#endif 65 66extern void wakeup(void *); 67 68static int max_cpus_initialized = 0; 69 70unsigned int LockTimeOut; 71unsigned int TLBTimeOut; 72unsigned int LockTimeOutTSC; 73unsigned int MutexSpin; 74uint64_t LastDebuggerEntryAllowance; 75uint64_t delay_spin_threshold; 76 77extern uint64_t panic_restart_timeout; 78 79boolean_t virtualized = FALSE; 80 81decl_simple_lock_data(static, ml_timer_evaluation_slock); 82uint32_t ml_timer_eager_evaluations; 83uint64_t ml_timer_eager_evaluation_max; 84static boolean_t ml_timer_evaluation_in_progress = FALSE; 85 86 87#define MAX_CPUS_SET 0x1 88#define MAX_CPUS_WAIT 0x2 89 90/* IO memory map services */ 91 92/* Map memory map IO space */ 93vm_offset_t ml_io_map( 94 vm_offset_t phys_addr, 95 vm_size_t size) 96{ 97 return(io_map(phys_addr,size,VM_WIMG_IO)); 98} 99 100/* boot memory allocation */ 101vm_offset_t ml_static_malloc( 102 __unused vm_size_t size) 103{ 104 return((vm_offset_t)NULL); 105} 106 107 108void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size) 109{ 110 *phys_addr = 0; 111 *size = 0; 112} 113 114 115vm_offset_t 116ml_static_ptovirt( 117 vm_offset_t paddr) 118{ 119#if defined(__x86_64__) 120 return (vm_offset_t)(((unsigned long) paddr) | VM_MIN_KERNEL_ADDRESS); 121#else 122 return (vm_offset_t)((paddr) | LINEAR_KERNEL_ADDRESS); 123#endif 124} 125 126 127/* 128 * Routine: ml_static_mfree 129 * Function: 130 */ 131void 132ml_static_mfree( 133 vm_offset_t vaddr, 134 vm_size_t size) 135{ 136 addr64_t vaddr_cur; 137 ppnum_t ppn; 138 uint32_t freed_pages = 0; 139 assert(vaddr >= VM_MIN_KERNEL_ADDRESS); 140 141 assert((vaddr & (PAGE_SIZE-1)) == 0); /* must be page aligned */ 142 143 for (vaddr_cur = vaddr; 144 vaddr_cur < round_page_64(vaddr+size); 145 vaddr_cur += PAGE_SIZE) { 146 ppn = pmap_find_phys(kernel_pmap, vaddr_cur); 147 if (ppn != (vm_offset_t)NULL) { 148 kernel_pmap->stats.resident_count++; 149 if (kernel_pmap->stats.resident_count > 150 kernel_pmap->stats.resident_max) { 151 kernel_pmap->stats.resident_max = 152 kernel_pmap->stats.resident_count; 153 } 154 pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE); 155 assert(pmap_valid_page(ppn)); 156 157 if (IS_MANAGED_PAGE(ppn)) { 158 vm_page_create(ppn,(ppn+1)); 159 vm_page_wire_count--; 160 freed_pages++; 161 } 162 } 163 } 164#if DEBUG 165 kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn); 166#endif 167} 168 169 170/* virtual to physical on wired pages */ 171vm_offset_t ml_vtophys( 172 vm_offset_t vaddr) 173{ 174 return (vm_offset_t)kvtophys(vaddr); 175} 176 177/* 178 * Routine: ml_nofault_copy 179 * Function: Perform a physical mode copy if the source and 180 * destination have valid translations in the kernel pmap. 181 * If translations are present, they are assumed to 182 * be wired; i.e. no attempt is made to guarantee that the 183 * translations obtained remained valid for 184 * the duration of the copy process. 185 */ 186 187vm_size_t ml_nofault_copy( 188 vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size) 189{ 190 addr64_t cur_phys_dst, cur_phys_src; 191 uint32_t count, nbytes = 0; 192 193 while (size > 0) { 194 if (!(cur_phys_src = kvtophys(virtsrc))) 195 break; 196 if (!(cur_phys_dst = kvtophys(virtdst))) 197 break; 198 if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src))) 199 break; 200 count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK)); 201 if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) 202 count = (uint32_t)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK)); 203 if (count > size) 204 count = (uint32_t)size; 205 206 bcopy_phys(cur_phys_src, cur_phys_dst, count); 207 208 nbytes += count; 209 virtsrc += count; 210 virtdst += count; 211 size -= count; 212 } 213 214 return nbytes; 215} 216 217/* 218 * Routine: ml_validate_nofault 219 * Function: Validate that ths address range has a valid translations 220 * in the kernel pmap. If translations are present, they are 221 * assumed to be wired; i.e. no attempt is made to guarantee 222 * that the translation persist after the check. 223 * Returns: TRUE if the range is mapped and will not cause a fault, 224 * FALSE otherwise. 225 */ 226 227boolean_t ml_validate_nofault( 228 vm_offset_t virtsrc, vm_size_t size) 229{ 230 addr64_t cur_phys_src; 231 uint32_t count; 232 233 while (size > 0) { 234 if (!(cur_phys_src = kvtophys(virtsrc))) 235 return FALSE; 236 if (!pmap_valid_page(i386_btop(cur_phys_src))) 237 return FALSE; 238 count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK)); 239 if (count > size) 240 count = (uint32_t)size; 241 242 virtsrc += count; 243 size -= count; 244 } 245 246 return TRUE; 247} 248 249/* Interrupt handling */ 250 251/* Initialize Interrupts */ 252void ml_init_interrupt(void) 253{ 254 (void) ml_set_interrupts_enabled(TRUE); 255} 256 257 258/* Get Interrupts Enabled */ 259boolean_t ml_get_interrupts_enabled(void) 260{ 261 unsigned long flags; 262 263 __asm__ volatile("pushf; pop %0" : "=r" (flags)); 264 return (flags & EFL_IF) != 0; 265} 266 267/* Set Interrupts Enabled */ 268boolean_t ml_set_interrupts_enabled(boolean_t enable) 269{ 270 unsigned long flags; 271 boolean_t istate; 272 273 __asm__ volatile("pushf; pop %0" : "=r" (flags)); 274 275 assert(get_interrupt_level() ? (enable == FALSE) : TRUE); 276 277 istate = ((flags & EFL_IF) != 0); 278 279 if (enable) { 280 __asm__ volatile("sti;nop"); 281 282 if ((get_preemption_level() == 0) && (*ast_pending() & AST_URGENT)) 283 __asm__ volatile ("int %0" :: "N" (T_PREEMPT)); 284 } 285 else { 286 if (istate) 287 __asm__ volatile("cli"); 288 } 289 290 return istate; 291} 292 293/* Check if running at interrupt context */ 294boolean_t ml_at_interrupt_context(void) 295{ 296 return get_interrupt_level() != 0; 297} 298 299void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) { 300 *icp = (get_interrupt_level() != 0); 301 /* These will be technically inaccurate for interrupts that occur 302 * successively within a single "idle exit" event, but shouldn't 303 * matter statistically. 304 */ 305 *pidlep = (current_cpu_datap()->lcpu.package->num_idle == topoParms.nLThreadsPerPackage); 306} 307 308/* Generate a fake interrupt */ 309void ml_cause_interrupt(void) 310{ 311 panic("ml_cause_interrupt not defined yet on Intel"); 312} 313 314/* 315 * TODO: transition users of this to kernel_thread_start_priority 316 * ml_thread_policy is an unsupported KPI 317 */ 318void ml_thread_policy( 319 thread_t thread, 320__unused unsigned policy_id, 321 unsigned policy_info) 322{ 323 if (policy_info & MACHINE_NETWORK_WORKLOOP) { 324 thread_precedence_policy_data_t info; 325 __assert_only kern_return_t kret; 326 327 info.importance = 1; 328 329 kret = thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY, 330 (thread_policy_t)&info, 331 THREAD_PRECEDENCE_POLICY_COUNT); 332 assert(kret == KERN_SUCCESS); 333 } 334} 335 336/* Initialize Interrupts */ 337void ml_install_interrupt_handler( 338 void *nub, 339 int source, 340 void *target, 341 IOInterruptHandler handler, 342 void *refCon) 343{ 344 boolean_t current_state; 345 346 current_state = ml_get_interrupts_enabled(); 347 348 PE_install_interrupt_handler(nub, source, target, 349 (IOInterruptHandler) handler, refCon); 350 351 (void) ml_set_interrupts_enabled(current_state); 352 353 initialize_screen(NULL, kPEAcquireScreen); 354} 355 356 357void 358machine_signal_idle( 359 processor_t processor) 360{ 361 cpu_interrupt(processor->cpu_id); 362} 363 364static kern_return_t 365register_cpu( 366 uint32_t lapic_id, 367 processor_t *processor_out, 368 boolean_t boot_cpu ) 369{ 370 int target_cpu; 371 cpu_data_t *this_cpu_datap; 372 373 this_cpu_datap = cpu_data_alloc(boot_cpu); 374 if (this_cpu_datap == NULL) { 375 return KERN_FAILURE; 376 } 377 target_cpu = this_cpu_datap->cpu_number; 378 assert((boot_cpu && (target_cpu == 0)) || 379 (!boot_cpu && (target_cpu != 0))); 380 381 lapic_cpu_map(lapic_id, target_cpu); 382 383 /* The cpu_id is not known at registration phase. Just do 384 * lapic_id for now 385 */ 386 this_cpu_datap->cpu_phys_number = lapic_id; 387 388 this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu); 389 if (this_cpu_datap->cpu_console_buf == NULL) 390 goto failed; 391 392 this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu); 393 if (this_cpu_datap->cpu_chud == NULL) 394 goto failed; 395 396#if KPC 397 this_cpu_datap->cpu_kpc_buf[0] = kpc_counterbuf_alloc(); 398 if(this_cpu_datap->cpu_kpc_buf[0] == NULL ) 399 goto failed; 400 this_cpu_datap->cpu_kpc_buf[1] = kpc_counterbuf_alloc(); 401 if(this_cpu_datap->cpu_kpc_buf[1] == NULL ) 402 goto failed; 403 404 this_cpu_datap->cpu_kpc_shadow = kpc_counterbuf_alloc(); 405 if(this_cpu_datap->cpu_kpc_shadow == NULL ) 406 goto failed; 407 408 this_cpu_datap->cpu_kpc_reload = kpc_counterbuf_alloc(); 409 if(this_cpu_datap->cpu_kpc_reload == NULL ) 410 goto failed; 411#endif 412 413 if (!boot_cpu) { 414 cpu_thread_alloc(this_cpu_datap->cpu_number); 415 if (this_cpu_datap->lcpu.core == NULL) 416 goto failed; 417 418#if NCOPY_WINDOWS > 0 419 this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu); 420 if (this_cpu_datap->cpu_pmap == NULL) 421 goto failed; 422#endif 423 424 this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu); 425 if (this_cpu_datap->cpu_processor == NULL) 426 goto failed; 427 /* 428 * processor_init() deferred to topology start 429 * because "slot numbers" a.k.a. logical processor numbers 430 * are not yet finalized. 431 */ 432 } 433 434 *processor_out = this_cpu_datap->cpu_processor; 435 436 return KERN_SUCCESS; 437 438failed: 439 cpu_processor_free(this_cpu_datap->cpu_processor); 440#if NCOPY_WINDOWS > 0 441 pmap_cpu_free(this_cpu_datap->cpu_pmap); 442#endif 443 chudxnu_cpu_free(this_cpu_datap->cpu_chud); 444 console_cpu_free(this_cpu_datap->cpu_console_buf); 445#if KPC 446 kpc_counterbuf_free(this_cpu_datap->cpu_kpc_buf[0]); 447 kpc_counterbuf_free(this_cpu_datap->cpu_kpc_buf[1]); 448 kpc_counterbuf_free(this_cpu_datap->cpu_kpc_shadow); 449 kpc_counterbuf_free(this_cpu_datap->cpu_kpc_reload); 450#endif 451 452 return KERN_FAILURE; 453} 454 455 456kern_return_t 457ml_processor_register( 458 cpu_id_t cpu_id, 459 uint32_t lapic_id, 460 processor_t *processor_out, 461 boolean_t boot_cpu, 462 boolean_t start ) 463{ 464 static boolean_t done_topo_sort = FALSE; 465 static uint32_t num_registered = 0; 466 467 /* Register all CPUs first, and track max */ 468 if( start == FALSE ) 469 { 470 num_registered++; 471 472 DBG( "registering CPU lapic id %d\n", lapic_id ); 473 474 return register_cpu( lapic_id, processor_out, boot_cpu ); 475 } 476 477 /* Sort by topology before we start anything */ 478 if( !done_topo_sort ) 479 { 480 DBG( "about to start CPUs. %d registered\n", num_registered ); 481 482 cpu_topology_sort( num_registered ); 483 done_topo_sort = TRUE; 484 } 485 486 /* Assign the cpu ID */ 487 uint32_t cpunum = -1; 488 cpu_data_t *this_cpu_datap = NULL; 489 490 /* find cpu num and pointer */ 491 cpunum = ml_get_cpuid( lapic_id ); 492 493 if( cpunum == 0xFFFFFFFF ) /* never heard of it? */ 494 panic( "trying to start invalid/unregistered CPU %d\n", lapic_id ); 495 496 this_cpu_datap = cpu_datap(cpunum); 497 498 /* fix the CPU id */ 499 this_cpu_datap->cpu_id = cpu_id; 500 501 /* allocate and initialize other per-cpu structures */ 502 if (!boot_cpu) { 503 mp_cpus_call_cpu_init(cpunum); 504 prng_cpu_init(cpunum); 505 } 506 507 /* output arg */ 508 *processor_out = this_cpu_datap->cpu_processor; 509 510 /* OK, try and start this CPU */ 511 return cpu_topology_start_cpu( cpunum ); 512} 513 514 515void 516ml_cpu_get_info(ml_cpu_info_t *cpu_infop) 517{ 518 boolean_t os_supports_sse; 519 i386_cpu_info_t *cpuid_infop; 520 521 if (cpu_infop == NULL) 522 return; 523 524 /* 525 * Are we supporting MMX/SSE/SSE2/SSE3? 526 * As distinct from whether the cpu has these capabilities. 527 */ 528 os_supports_sse = !!(get_cr4() & CR4_OSXMM); 529 530 if (ml_fpu_avx_enabled()) 531 cpu_infop->vector_unit = 9; 532 else if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse) 533 cpu_infop->vector_unit = 8; 534 else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse) 535 cpu_infop->vector_unit = 7; 536 else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse) 537 cpu_infop->vector_unit = 6; 538 else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse) 539 cpu_infop->vector_unit = 5; 540 else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse) 541 cpu_infop->vector_unit = 4; 542 else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse) 543 cpu_infop->vector_unit = 3; 544 else if (cpuid_features() & CPUID_FEATURE_MMX) 545 cpu_infop->vector_unit = 2; 546 else 547 cpu_infop->vector_unit = 0; 548 549 cpuid_infop = cpuid_info(); 550 551 cpu_infop->cache_line_size = cpuid_infop->cache_linesize; 552 553 cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I]; 554 cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D]; 555 556 if (cpuid_infop->cache_size[L2U] > 0) { 557 cpu_infop->l2_settings = 1; 558 cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U]; 559 } else { 560 cpu_infop->l2_settings = 0; 561 cpu_infop->l2_cache_size = 0xFFFFFFFF; 562 } 563 564 if (cpuid_infop->cache_size[L3U] > 0) { 565 cpu_infop->l3_settings = 1; 566 cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U]; 567 } else { 568 cpu_infop->l3_settings = 0; 569 cpu_infop->l3_cache_size = 0xFFFFFFFF; 570 } 571} 572 573void 574ml_init_max_cpus(unsigned long max_cpus) 575{ 576 boolean_t current_state; 577 578 current_state = ml_set_interrupts_enabled(FALSE); 579 if (max_cpus_initialized != MAX_CPUS_SET) { 580 if (max_cpus > 0 && max_cpus <= MAX_CPUS) { 581 /* 582 * Note: max_cpus is the number of enabled processors 583 * that ACPI found; max_ncpus is the maximum number 584 * that the kernel supports or that the "cpus=" 585 * boot-arg has set. Here we take int minimum. 586 */ 587 machine_info.max_cpus = (integer_t)MIN(max_cpus, max_ncpus); 588 } 589 if (max_cpus_initialized == MAX_CPUS_WAIT) 590 wakeup((event_t)&max_cpus_initialized); 591 max_cpus_initialized = MAX_CPUS_SET; 592 } 593 (void) ml_set_interrupts_enabled(current_state); 594} 595 596int 597ml_get_max_cpus(void) 598{ 599 boolean_t current_state; 600 601 current_state = ml_set_interrupts_enabled(FALSE); 602 if (max_cpus_initialized != MAX_CPUS_SET) { 603 max_cpus_initialized = MAX_CPUS_WAIT; 604 assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT); 605 (void)thread_block(THREAD_CONTINUE_NULL); 606 } 607 (void) ml_set_interrupts_enabled(current_state); 608 return(machine_info.max_cpus); 609} 610 611/* 612 * Routine: ml_init_lock_timeout 613 * Function: 614 */ 615void 616ml_init_lock_timeout(void) 617{ 618 uint64_t abstime; 619 uint32_t mtxspin; 620#if DEVELOPMENT || DEBUG 621 uint64_t default_timeout_ns = NSEC_PER_SEC>>2; 622#else 623 uint64_t default_timeout_ns = NSEC_PER_SEC>>1; 624#endif 625 uint32_t slto; 626 uint32_t prt; 627 628 if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto))) 629 default_timeout_ns = slto * NSEC_PER_USEC; 630 631 /* LockTimeOut is absolutetime, LockTimeOutTSC is in TSC ticks */ 632 nanoseconds_to_absolutetime(default_timeout_ns, &abstime); 633 LockTimeOut = (uint32_t) abstime; 634 LockTimeOutTSC = (uint32_t) tmrCvt(abstime, tscFCvtn2t); 635 636 /* 637 * TLBTimeOut dictates the TLB flush timeout period. It defaults to 638 * LockTimeOut but can be overriden separately. In particular, a 639 * zero value inhibits the timeout-panic and cuts a trace evnt instead 640 * - see pmap_flush_tlbs(). 641 */ 642 if (PE_parse_boot_argn("tlbto_us", &slto, sizeof (slto))) { 643 default_timeout_ns = slto * NSEC_PER_USEC; 644 nanoseconds_to_absolutetime(default_timeout_ns, &abstime); 645 TLBTimeOut = (uint32_t) abstime; 646 } else { 647 TLBTimeOut = LockTimeOut; 648 } 649 650 if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) { 651 if (mtxspin > USEC_PER_SEC>>4) 652 mtxspin = USEC_PER_SEC>>4; 653 nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime); 654 } else { 655 nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime); 656 } 657 MutexSpin = (unsigned int)abstime; 658 659 nanoseconds_to_absolutetime(4ULL * NSEC_PER_SEC, &LastDebuggerEntryAllowance); 660 if (PE_parse_boot_argn("panic_restart_timeout", &prt, sizeof (prt))) 661 nanoseconds_to_absolutetime(prt * NSEC_PER_SEC, &panic_restart_timeout); 662 virtualized = ((cpuid_features() & CPUID_FEATURE_VMM) != 0); 663 interrupt_latency_tracker_setup(); 664 simple_lock_init(&ml_timer_evaluation_slock, 0); 665} 666 667/* 668 * Threshold above which we should attempt to block 669 * instead of spinning for clock_delay_until(). 670 */ 671 672void 673ml_init_delay_spin_threshold(int threshold_us) 674{ 675 nanoseconds_to_absolutetime(threshold_us * NSEC_PER_USEC, &delay_spin_threshold); 676} 677 678boolean_t 679ml_delay_should_spin(uint64_t interval) 680{ 681 return (interval < delay_spin_threshold) ? TRUE : FALSE; 682} 683 684/* 685 * This is called from the machine-independent layer 686 * to perform machine-dependent info updates. Defer to cpu_thread_init(). 687 */ 688void 689ml_cpu_up(void) 690{ 691 return; 692} 693 694/* 695 * This is called from the machine-independent layer 696 * to perform machine-dependent info updates. 697 */ 698void 699ml_cpu_down(void) 700{ 701 i386_deactivate_cpu(); 702 703 return; 704} 705 706/* 707 * The following are required for parts of the kernel 708 * that cannot resolve these functions as inlines: 709 */ 710extern thread_t current_act(void); 711thread_t 712current_act(void) 713{ 714 return(current_thread_fast()); 715} 716 717#undef current_thread 718extern thread_t current_thread(void); 719thread_t 720current_thread(void) 721{ 722 return(current_thread_fast()); 723} 724 725 726boolean_t ml_is64bit(void) { 727 728 return (cpu_mode_is64bit()); 729} 730 731 732boolean_t ml_thread_is64bit(thread_t thread) { 733 734 return (thread_is_64bit(thread)); 735} 736 737 738boolean_t ml_state_is64bit(void *saved_state) { 739 740 return is_saved_state64(saved_state); 741} 742 743void ml_cpu_set_ldt(int selector) 744{ 745 /* 746 * Avoid loading the LDT 747 * if we're setting the KERNEL LDT and it's already set. 748 */ 749 if (selector == KERNEL_LDT && 750 current_cpu_datap()->cpu_ldt == KERNEL_LDT) 751 return; 752 753 lldt(selector); 754 current_cpu_datap()->cpu_ldt = selector; 755} 756 757void ml_fp_setvalid(boolean_t value) 758{ 759 fp_setvalid(value); 760} 761 762uint64_t ml_cpu_int_event_time(void) 763{ 764 return current_cpu_datap()->cpu_int_event_time; 765} 766 767vm_offset_t ml_stack_remaining(void) 768{ 769 uintptr_t local = (uintptr_t) &local; 770 771 if (ml_at_interrupt_context() != 0) { 772 return (local - (current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE)); 773 } else { 774 return (local - current_thread()->kernel_stack); 775 } 776} 777 778void 779kernel_preempt_check(void) 780{ 781 boolean_t intr; 782 unsigned long flags; 783 784 assert(get_preemption_level() == 0); 785 786 __asm__ volatile("pushf; pop %0" : "=r" (flags)); 787 788 intr = ((flags & EFL_IF) != 0); 789 790 if ((*ast_pending() & AST_URGENT) && intr == TRUE) { 791 /* 792 * can handle interrupts and preemptions 793 * at this point 794 */ 795 796 /* 797 * now cause the PRE-EMPTION trap 798 */ 799 __asm__ volatile ("int %0" :: "N" (T_PREEMPT)); 800 } 801} 802 803boolean_t machine_timeout_suspended(void) { 804 return (virtualized || pmap_tlb_flush_timeout || spinlock_timed_out || panic_active() || mp_recent_debugger_activity() || ml_recent_wake()); 805} 806 807/* Eagerly evaluate all pending timer and thread callouts 808 */ 809void ml_timer_evaluate(void) { 810 KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_START, 0, 0, 0, 0, 0); 811 812 uint64_t te_end, te_start = mach_absolute_time(); 813 simple_lock(&ml_timer_evaluation_slock); 814 ml_timer_evaluation_in_progress = TRUE; 815 thread_call_delayed_timer_rescan_all(); 816 mp_cpus_call(CPUMASK_ALL, ASYNC, timer_queue_expire_rescan, NULL); 817 ml_timer_evaluation_in_progress = FALSE; 818 ml_timer_eager_evaluations++; 819 te_end = mach_absolute_time(); 820 ml_timer_eager_evaluation_max = MAX(ml_timer_eager_evaluation_max, (te_end - te_start)); 821 simple_unlock(&ml_timer_evaluation_slock); 822 823 KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_END, 0, 0, 0, 0, 0); 824} 825 826boolean_t 827ml_timer_forced_evaluation(void) { 828 return ml_timer_evaluation_in_progress; 829} 830 831/* 32-bit right-rotate n bits */ 832static inline uint32_t ror32(uint32_t val, const unsigned int n) 833{ 834 __asm__ volatile("rorl %%cl,%0" : "=r" (val) : "0" (val), "c" (n)); 835 return val; 836} 837 838void 839ml_entropy_collect(void) 840{ 841 uint32_t tsc_lo, tsc_hi; 842 uint32_t *ep; 843 844 assert(cpu_number() == master_cpu); 845 846 /* update buffer pointer cyclically */ 847 if (EntropyData.index_ptr - EntropyData.buffer == ENTROPY_BUFFER_SIZE) 848 ep = EntropyData.index_ptr = EntropyData.buffer; 849 else 850 ep = EntropyData.index_ptr++; 851 852 rdtsc_nofence(tsc_lo, tsc_hi); 853 *ep = ror32(*ep, 9) ^ tsc_lo; 854} 855 856void 857ml_gpu_stat_update(uint64_t gpu_ns_delta) { 858 current_thread()->machine.thread_gpu_ns += gpu_ns_delta; 859} 860 861uint64_t 862ml_gpu_stat(thread_t t) { 863 return t->machine.thread_gpu_ns; 864} 865