/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>		/* LAPIC_PMC_SWI_VECTOR */

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */

#if KPC
#include <kern/kpc.h>
#endif

#if KPERF
#include <kperf/kperf.h>
#endif

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */
	0,
	x86_THREAD_STATE32_COUNT,
	x86_FLOAT_STATE32_COUNT,
	x86_EXCEPTION_STATE32_COUNT,
	x86_THREAD_STATE64_COUNT,
	x86_FLOAT_STATE64_COUNT,
	x86_EXCEPTION_STATE64_COUNT,
	x86_THREAD_STATE_COUNT,
	x86_FLOAT_STATE_COUNT,
	x86_EXCEPTION_STATE_COUNT,
	0,
	x86_SAVED_STATE32_COUNT,
	x86_SAVED_STATE64_COUNT,
	x86_DEBUG_STATE32_COUNT,
	x86_DEBUG_STATE64_COUNT,
	x86_DEBUG_STATE_COUNT
};

zone_t		iss_zone;	/* zone for saved_state area */
zone_t		ids_zone;	/* zone for debug_state area */

/* Forward */

extern void		Thread_continue(void);
extern void		Load_context(
				thread_t	thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);

#if CONFIG_COUNTERS
static inline void
machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);

static inline void
pmc_swi(thread_t /* old */, thread_t /* new */);

static inline void
pmc_swi(thread_t old, thread_t new) {
	current_cpu_datap()->csw_old_thread = old;
	current_cpu_datap()->csw_new_thread = new;
	pal_pmc_swi();
}

static inline void
machine_pmc_cswitch(thread_t old, thread_t new) {
	if (pmc_thread_eligible(old) || pmc_thread_eligible(new)) {
		pmc_swi(old, new);
	}
}

void ml_get_csw_threads(thread_t *old, thread_t *new) {
	*old = current_cpu_datap()->csw_old_thread;
	*new = current_cpu_datap()->csw_new_thread;
}

#endif /* CONFIG_COUNTERS */

#if KPC
static inline void
ml_kpc_cswitch(thread_t old, thread_t new)
{
	if (!kpc_threads_counting)
		return;

	/* call the kpc function */
	kpc_switch_context(old, new);
}
#endif
#if KPERF
static inline void
ml_kperf_cswitch(thread_t old, thread_t new)
{
	if (!kperf_cswitch_hook)
		return;

	/* call the kperf function */
	kperf_switch_context(old, new);
}
#endif

/*
 * Don't let an illegal value for dr7 get set.  Specifically,
 * check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate I/O reads and writes.
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
		     i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
		    ((((*dr7 >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7 |= 0x1 << 10;	/* set bit 10 to 1 */
	*dr7 &= ~(0x1 << 11);	/* set bit 11 to 0 */
	*dr7 &= ~(0x1 << 12);	/* set bit 12 to 0 */
	*dr7 &= ~(0x1 << 14);	/* set bit 14 to 0 */
	*dr7 &= ~(0x1 << 15);	/* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */
	if (*dr7 & 0x2)
		return (FALSE);

	if (*dr7 & (0x2<<2))
		return (FALSE);

	if (*dr7 & (0x2<<4))
		return (FALSE);

	if (*dr7 & (0x2<<6))
		return (FALSE);

	return (TRUE);
}

static inline void
set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
{
	__asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
	__asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
	__asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
	__asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
	cdp->cpu_dr7 = ds->dr7;
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

static inline void
set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
{
	/*
	 * We need to enter 64-bit mode in order to set the full
	 * width of these registers
	 */
	set_64bit_debug_regs(ds);
	cdp->cpu_dr7 = ds->dr7;
}

boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7_is_valid(&ds->dr7))
		return FALSE;

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7_is_valid((uint32_t *)&ds->dr7))
		return FALSE;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	return TRUE;
}

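/*
 * For reference, the DR7 fields that dr7_is_valid() inspects are laid
 * out as follows (Intel SDM, "Debug Registers"): bits 0-7 hold the
 * local/global enable pairs for breakpoints 0-3, and each breakpoint n
 * owns a 4-bit field at bit 16 + n*4 holding its 2-bit R/W condition
 * and 2-bit length.  The sketch below (not compiled; illustrative only,
 * with a hypothetical helper name) assembles a dr7 value that arms a
 * 4-byte data-write watchpoint in DR0 and passes the checks above.
 */
#if 0	/* illustrative only */
static uint32_t
dr7_example_local_watchpoint0(void)
{
	uint32_t dr7 = 0;

	dr7 |= 0x1 << 0;	/* L0: local enable for breakpoint 0 */
	dr7 |= 0x1 << 16;	/* R/W0 = 01B: break on data writes */
	dr7 |= 0x3 << 18;	/* LEN0 = 11B: 4-byte length */

	return dr7;	/* no global-enable bits, so dr7_is_valid() accepts it */
}
#endif
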
static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			/*
			 * Lost the race: free our allocation and write
			 * through the winner's, not the freed element.
			 */
			x86_debug_state32_t *unused_ids = ids;

			ids = pcb->ids;
			simple_unlock(&pcb->lock);
			zfree(ids_zone, unused_ids);
		}
	}

	copy_debug_state32(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			/*
			 * Lost the race: free our allocation and write
			 * through the winner's, not the freed element.
			 */
			x86_debug_state64_t *unused_ids = ids;

			ids = pcb->ids;
			simple_unlock(&pcb->lock);
			zfree(ids_zone, unused_ids);
		}
	}

	copy_debug_state64(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t	new)
{
#if CONFIG_COUNTERS
	machine_pmc_cswitch(NULL, new);
#endif
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);
	Load_context(new);
}

/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
#if MACH_RT
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif
#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif
#if KPC
	ml_kpc_cswitch(old, new);
#endif
#if KPERF
	ml_kperf_cswitch(old, new);
#endif
	/*
	 * Save FP registers if in use.
	 */
	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	PMAP_SWITCH_CONTEXT(old, new, cpu_number());

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(old, new);

	return(Switch_context(old, continuation, new));
}

thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
#if CONFIG_VMX
	vmx_suspend();
#endif
	fpu_save_context(thread);
	PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
	return(Shutdown_context(thread, doshutdown, processor));
}


/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're the target thread, re-arm the no-fpu trap.
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread())
			clear_fpu();
	}

	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}

	return KERN_SUCCESS;
}

uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->cpu = saved_state->isf.cpu;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->cpu = saved_state->cpu;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}

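/*
 * To make the flavor table above concrete: a userspace debugger that
 * wants the general registers without first checking the bitness of
 * the target asks for the combined x86_THREAD_STATE flavor and then
 * branches on the returned header.  A minimal sketch follows
 * (illustrative only, not part of the kernel build; userspace headers
 * may expose the register fields with double-underscore prefixes,
 * e.g. __rip).
 */
#if 0	/* illustrative userspace sketch */
#include <stdio.h>
#include <mach/mach.h>

static void
print_pc(thread_act_t target)
{
	x86_thread_state_t	state;
	mach_msg_type_number_t	count = x86_THREAD_STATE_COUNT;

	if (thread_get_state(target, x86_THREAD_STATE,
	    (thread_state_t)&state, &count) != KERN_SUCCESS)
		return;

	if (state.tsh.flavor == x86_THREAD_STATE64)
		printf("rip = 0x%llx\n", state.uts.ts64.rip);
	else
		printf("eip = 0x%x\n", state.uts.ts32.eip);
}
#endif
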
static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return(KERN_SUCCESS);
}

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}


/*
 * machine_thread_set_state:
 *
 * Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
						  state->ss,
						  state->ds,
						  state->es,
						  state->fs,
						  state->gs))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

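	/*
	 * Note on the EFL_TF handling above: the sysenter return path
	 * restores user EFLAGS before its final transfer back to user
	 * mode, so with TF set a trace trap could be taken while still
	 * in the kernel trampoline.  Tagging the code selector as
	 * SYSENTER_TF_CS steers the exit path onto 'iret', which reloads
	 * EFLAGS, CS and EIP atomically, so the first single-step trap
	 * fires on the first user-mode instruction.  (Descriptive note;
	 * the authoritative behaviour is in the assembly exit path.)
	 */
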
	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset on the return
		 * to user mode if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (count != x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (count != x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 &&
		    state->fsh.count  == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 &&
		    state->fsh.count  == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_AVX_STATE32:
	{
		if (count != x86_AVX_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (count != x86_AVX_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	{
		x86_avx_state_t		*state;

		if (count != x86_AVX_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		/*
		 * The embedded header must carry the matching AVX
		 * flavor and count, so the YMM state is included.
		 */
		state = (x86_avx_state_t *)tstate;
		if (state->ash.flavor == x86_AVX_STATE64 &&
		    state->ash.count  == x86_AVX_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_AVX_STATE64);
		}
		if (state->ash.flavor == x86_AVX_STATE32 &&
		    state->ash.count  == x86_AVX_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_AVX_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count  == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count  == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);

		break;
	}

	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t	*state;
		kern_return_t		ret;

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}

	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t	*state;
		kern_return_t		ret;

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}

	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;
		kern_return_t		ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count  == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
			   state->dsh.count  == x86_DEBUG_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}

	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}



/*
 * machine_thread_get_state:
 *
 * Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor) {

	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}

	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}

	case x86_AVX_STATE32:
	{
		if (*count != x86_AVX_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (*count != x86_AVX_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	{
		x86_avx_state_t		*state;
		kern_return_t		kret;

		if (*count < x86_AVX_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;

		/*
		 * Unlike the float states above, the 32- and 64-bit AVX
		 * counts differ, so the unused tail of the combined
		 * buffer must be cleared.
		 */
		bzero((char *)state, sizeof(x86_avx_state_t));
		if (thread_is_64bit(thr_act)) {
			state->ash.flavor = x86_AVX_STATE64;
			state->ash.count  = x86_AVX_STATE64_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_AVX_STATE64);
		} else {
			state->ash.flavor = x86_AVX_STATE32;
			state->ash.count  = x86_AVX_STATE32_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_AVX_STATE32);
		}
		*count = x86_AVX_STATE_COUNT;

		return(kret);
	}

	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}

	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}

	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}

	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}

	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}

	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}

kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE64: {
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE: {
		x86_thread_state_t	*state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}


void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas. As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, thread->task);

	/*
	 * If we're switching ourselves, reset the pcb addresses etc.
	 */
	if (thread == current_thread()) {
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}



/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}


/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	iss_zone = zinit(sizeof(x86_saved_state_t),
			 thread_max * sizeof(x86_saved_state_t),
			 THREAD_CHUNK * sizeof(x86_saved_state_t),
			 "x86_64 saved state");

	ids_zone = zinit(sizeof(x86_debug_state64_t),
			 thread_max * sizeof(x86_debug_state64_t),
			 THREAD_CHUNK * sizeof(x86_debug_state64_t),
			 "x86_64 debug state");

	fpu_module_init();
}



user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0,
		     0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t	thread,
	vm_offset_t	stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;

	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
#endif

	return;
}

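/*
 * Note: machine_stack_attach() above primes the saved kernel state so
 * that the first dispatch onto this stack "returns" into the assembly
 * stub Thread_continue with the C continuation (thread_continue) held
 * in %rbx/%ebx; the stub then calls that continuation on behalf of the
 * new thread.  (Descriptive note; the precise register contract lives
 * with Thread_continue in the context-switch assembly.)
 */
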
/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t stack;

	assert(new);
	assert(old);

#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif
#if KPC
	ml_kpc_cswitch(old, new);
#endif
#if KPERF
	ml_kperf_cswitch(old, new);
#endif

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because the old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	PMAP_SWITCH_CONTEXT(old, new, cpu_number());
	act_machine_switch_pcb(old, new);

	machine_set_current_thread(new);

	return;
}



struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};



void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		return(ic64);

	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		return(ic32);
	}
}

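/*
 * act_thread_catt() below is the inverse of act_thread_csave(): it
 * reinstalls a context captured above and frees the container.  Callers
 * treat the pointer returned by act_thread_csave() as opaque, so the
 * two routines must agree on the x86_act_context layout.  Note that the
 * debug state captured in 'ds' is saved but not currently reinstalled.
 */
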
void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}


void act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}

/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
	x86_debug_state32_t	*src,
	x86_debug_state32_t	*target,
	boolean_t		all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
	x86_debug_state64_t	*src,
	x86_debug_state64_t	*target,
	boolean_t		all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}