/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h> /* LAPIC_PMC_SWI_VECTOR */

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */

#if KPC
#include <kern/kpc.h>
#endif

#if KPERF
#include <kperf/kperf.h>
#endif

#if HYPERVISOR
#include <kern/hv_support.h>
#endif

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */
	0,
	x86_THREAD_STATE32_COUNT,
	x86_FLOAT_STATE32_COUNT,
	x86_EXCEPTION_STATE32_COUNT,
	x86_THREAD_STATE64_COUNT,
	x86_FLOAT_STATE64_COUNT,
	x86_EXCEPTION_STATE64_COUNT,
	x86_THREAD_STATE_COUNT,
	x86_FLOAT_STATE_COUNT,
	x86_EXCEPTION_STATE_COUNT,
	0,
	x86_SAVED_STATE32_COUNT,
	x86_SAVED_STATE64_COUNT,
	x86_DEBUG_STATE32_COUNT,
	x86_DEBUG_STATE64_COUNT,
	x86_DEBUG_STATE_COUNT
};

zone_t		iss_zone;		/* zone for saved_state area */
zone_t		ids_zone;		/* zone for debug_state area */

/* Forward */

extern void		Thread_continue(void);
extern void		Load_context(
				thread_t	thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);

#if CONFIG_COUNTERS
static inline void
machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);

static inline void
pmc_swi(thread_t /* old */, thread_t /* new */);

static inline void
pmc_swi(thread_t old, thread_t new) {
	current_cpu_datap()->csw_old_thread = old;
	current_cpu_datap()->csw_new_thread = new;
	pal_pmc_swi();
}

static inline void
machine_pmc_cswitch(thread_t old, thread_t new) {
	if (pmc_thread_eligible(old) || pmc_thread_eligible(new)) {
		pmc_swi(old, new);
	}
}

void ml_get_csw_threads(thread_t *old, thread_t *new) {
	*old = current_cpu_datap()->csw_old_thread;
	*new = current_cpu_datap()->csw_new_thread;
}

#endif /* CONFIG_COUNTERS */

#if KPC
static inline void
ml_kpc_cswitch(thread_t old, thread_t new)
{
	if (!kpc_threads_counting)
		return;

	/* call the kpc function */
	kpc_switch_context(old, new);
}
#endif

#if KPERF
static inline void
ml_kperf_cswitch(thread_t old, thread_t new)
{
	if (!kperf_cswitch_hook)
		return;

	/* call the kperf function */
	kperf_switch_context(old, new);
}
#endif

#if HYPERVISOR
static inline void
ml_hv_cswitch(thread_t old, thread_t new)
{
	if (old->hv_thread_target)
		hv_callbacks.preempt(old->hv_thread_target);

	if (new->hv_thread_target)
		hv_callbacks.dispatch(new->hv_thread_target);
}
#endif

/*
 * Don't let an illegal value for dr7 get set. Specifically,
 * check for undefined settings. Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
 */
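/*
 * For reference, the DR7 fields checked below (per the Intel SDM,
 * "Debug Registers"):
 *
 *	bits  0- 7	L0/G0 .. L3/G3	local/global enables for DR0-DR3
 *	bits  8- 9	LE/GE		exact breakpoint enables (legacy)
 *	bit     10	reserved, must be 1
 *	bits 11-12	reserved, must be 0
 *	bit     13	GD		general detect enable
 *	bits 14-15	reserved, must be 0
 *	bits 16-17	R/W0		breakpoint condition for DR0
 *	bits 18-19	LEN0		breakpoint length for DR0
 *	... and so on, 4 bits per debug register up through R/W3/LEN3.
 *
 * R/W encodings: 00 = instruction execution, 01 = data writes,
 * 10 = i/o reads/writes (only valid when CR4.DE is set),
 * 11 = data reads/writes.
 */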
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate i/o reads and writes
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
				i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++)
		if (((((*dr7 >> (16 + i*4))) & 0x3) == 0) &&
		    ((((*dr7 >> (18 + i*4))) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7 |= 0x1 << 10;		/* set bit 10 to 1 */
	*dr7 &= ~(0x1 << 11);		/* set bit 11 to 0 */
	*dr7 &= ~(0x1 << 12);		/* set bit 12 to 0 */
	*dr7 &= ~(0x1 << 14);		/* set bit 14 to 0 */
	*dr7 &= ~(0x1 << 15);		/* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */

	if (*dr7 & 0x2)
		return (FALSE);

	if (*dr7 & (0x2<<2))
		return (FALSE);

	if (*dr7 & (0x2<<4))
		return (FALSE);

	if (*dr7 & (0x2<<6))
		return (FALSE);

	return (TRUE);
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7_is_valid(&ds->dr7))
		return FALSE;

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7_is_valid((uint32_t *)&ds->dr7))
		return FALSE;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	return TRUE;
}


static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
		}
	}

	copy_debug_state32(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

#if HYPERVISOR
		if (thread->hv_thread_target) {
			hv_callbacks.volatile_state(thread->hv_thread_target,
				HV_DEBUG_STATE);
		}
#endif

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
		}
	}

	copy_debug_state64(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}
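/*
 * Illustrative only: a debugger wanting a 4-byte write watchpoint at
 * `addr' on DR0 would hand thread_set_state() an x86_DEBUG_STATE64
 * shaped roughly like this (values per the DR7 layout noted above;
 * user-level field names may differ slightly by SDK):
 *
 *	x86_debug_state64_t ds = { 0 };
 *	ds.dr0 = addr;			// linear address to watch
 *	ds.dr7 = 0x1			// L0: locally enable DR0
 *	       | (0x1 << 16)		// R/W0 = 01B: break on data writes
 *	       | (0x3 << 18);		// LEN0 = 11B: 4-byte watchpoint
 *
 * Such a state passes dr7_is_valid()/debug_state_is_valid64() and is
 * installed into the PCB by set_debug_state64() above.
 */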
/*
 * consider_machine_collect:
 *
 * Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
 */
void
machine_load_context(
	thread_t	new)
{
#if CONFIG_COUNTERS
	machine_pmc_cswitch(NULL, new);
#endif
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);
	Load_context(new);
}

/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
#if MACH_RT
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif
#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif
#if KPC
	ml_kpc_cswitch(old, new);
#endif
#if KPERF
	ml_kperf_cswitch(old, new);
#endif
	/*
	 * Save FP registers if in use.
	 */
	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	PMAP_SWITCH_CONTEXT(old, new, cpu_number());

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	return(Switch_context(old, continuation, new));
}

thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
#if CONFIG_VMX
	vmx_suspend();
#endif
	fpu_save_context(thread);
	PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
	return(Shutdown_context(thread, doshutdown, processor));
}
/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're the target, re-arm the no-fpu trap.
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread())
			clear_fpu();
	}

	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}

	return KERN_SUCCESS;
}

uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */
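/*
 * A sketch of the caller's side (user space), assuming a thread port
 * `th' for the target; the generic x86_THREAD_STATE flavor returns a
 * tagged union whose header says which variant was filled in:
 *
 *	x86_thread_state_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
 *	kern_return_t kr = thread_get_state(th, x86_THREAD_STATE,
 *					    (thread_state_t)&ts, &count);
 *	if (kr == KERN_SUCCESS && ts.tsh.flavor == x86_THREAD_STATE64)
 *		... use ts.uts.ts64 ...
 *
 * (Field names in the user-level headers may carry underscore prefixes
 * depending on the SDK.)
 */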
static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->cpu = saved_state->isf.cpu;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->cpu = saved_state->cpu;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}


static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed. This also
	 * means that we don't support setting the data segments
	 * manually any more.
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;

	/* Set GS to CTHREAD only if it's been established */
	ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG;

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 * (the special cs value tells the exit path to return with
	 * iret, which restores EFLAGS including TF, rather than the
	 * fast sysexit path)
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

	/* Reject non-canonical pc/stack values (x86-64 requires the
	 * upper address bits to be a sign extension of bit 47) */
	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return(KERN_SUCCESS);
}
static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}


static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}


/*
 * act_machine_set_state:
 *
 * Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
						  state->ss,
						  state->ds,
						  state->es,
						  state->fs,
						  state->gs))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sys'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (count != x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (count != x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_AVX_STATE32:
	{
		if (count != x86_AVX_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (count != x86_AVX_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}
	case x86_AVX_STATE:
	{
		x86_avx_state_t	*state;

		if (count != x86_AVX_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		/*
		 * Dispatch on the embedded header; the count and flavor
		 * handed to fpu_set_fxstate() must be the AVX variants
		 * (the original code passed the FLOAT variants here,
		 * which was a copy-paste slip).
		 */
		state = (x86_avx_state_t *)tstate;
		if (state->ash.flavor == x86_AVX_STATE64 &&
		    state->ash.count  == x86_AVX_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_AVX_STATE64);
		}
		if (state->ash.flavor == x86_AVX_STATE32 &&
		    state->ash.count  == x86_AVX_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_AVX_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);

	}
	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);

		break;
	}
	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t *state;
		kern_return_t ret;

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t *state;
		kern_return_t ret;

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;
		kern_return_t ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
			   state->dsh.count == x86_DEBUG_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
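/*
 * A sketch of the caller's side (user space) for the setter, e.g.
 * redirecting a suspended thread to a new pc; names are illustrative:
 *
 *	x86_thread_state64_t ts;
 *	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *	thread_get_state(th, x86_THREAD_STATE64, (thread_state_t)&ts, &count);
 *	ts.rip = (uint64_t)new_pc;	// must be a canonical address
 *	thread_set_state(th, x86_THREAD_STATE64, (thread_state_t)&ts, count);
 *
 * The kernel side lands in machine_thread_set_state() above, which
 * scrubs the code selector and rflags before touching the saved state.
 */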
/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 */

kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{

	switch (flavor)  {

	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}

	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}
	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}

	case x86_AVX_STATE32:
	{
		if (*count != x86_AVX_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (*count != x86_AVX_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	{
		x86_avx_state_t		*state;
		kern_return_t		kret;

		if (*count < x86_AVX_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;

		bzero((char *)state, sizeof(x86_avx_state_t));
		if (thread_is_64bit(thr_act)) {
			state->ash.flavor = x86_AVX_STATE64;
			state->ash.count  = x86_AVX_STATE64_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_AVX_STATE64);
		} else {
			state->ash.flavor = x86_AVX_STATE32;
			state->ash.count  = x86_AVX_STATE32_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_AVX_STATE32);
		}
		*count = x86_AVX_STATE_COUNT;

		return(kret);
	}

	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}
	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}
	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}
	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}
	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}

kern_return_t
machine_thread_get_kern_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case x86_THREAD_STATE32: {
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE64: {
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}
	case x86_THREAD_STATE: {
		x86_thread_state_t *state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}


void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas. As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, thread->task);

	/* If we're switching ourselves, reset the pcb addresses etc. */
	if (thread == current_thread()) {
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}
/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}


/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	/* zinit args: element size, max zone memory, allocation chunk, name */
	iss_zone = zinit(sizeof(x86_saved_state_t),
			thread_max * sizeof(x86_saved_state_t),
			THREAD_CHUNK * sizeof(x86_saved_state_t),
			"x86_64 saved state");

	ids_zone = zinit(sizeof(x86_debug_state64_t),
			thread_max * sizeof(x86_debug_state64_t),
			THREAD_CHUNK * sizeof(x86_debug_state64_t),
			"x86_64 debug state");

	fpu_module_init();
}



user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t	stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t	thread,
	vm_offset_t	stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;

	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
#endif

	return;
}
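/*
 * Roughly how the pieces above fit together: the context-switch
 * assembly saves into and restores from the x86_kernel_state at the
 * top of the kernel stack. For a freshly attached stack, k_rip/k_eip
 * points at Thread_continue, so the first time the thread is
 * dispatched it resumes there and runs the thread_continue
 * continuation that machine_stack_attach() recorded, with the stack
 * pointer set just below the saved-state area.
 */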
/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t	stack;

	assert(new);
	assert(old);

#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif
#if KPC
	ml_kpc_cswitch(old, new);
#endif
#if KPERF
	ml_kperf_cswitch(old, new);
#endif

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because old stack is already initialized.
	 */
	new->kernel_stack = stack;

	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	PMAP_SWITCH_CONTEXT(old, new, cpu_number());
	act_machine_switch_pcb(old, new);

#if HYPERVISOR
	ml_hv_cswitch(old, new);
#endif

	machine_set_current_thread(new);

	return;
}




struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};



void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		return(ic64);

	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		return(ic32);
	}
}


void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}


void act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}

/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
		x86_debug_state32_t *src,
		x86_debug_state32_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
		x86_debug_state64_t *src,
		x86_debug_state64_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}