/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <sys/kdebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/kalloc.h>
#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>

#include <i386/cpu_data.h>
#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h>
#include <i386/mp_desc.h>
#include <i386/thread.h>
#include <i386/machine_routines.h>
#include <i386/lapic.h>		/* LAPIC_PMC_SWI_VECTOR */

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */

/*
 * Maps state flavor to number of words in the state:
 */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */
	0,
	x86_THREAD_STATE32_COUNT,
	x86_FLOAT_STATE32_COUNT,
	x86_EXCEPTION_STATE32_COUNT,
	x86_THREAD_STATE64_COUNT,
	x86_FLOAT_STATE64_COUNT,
	x86_EXCEPTION_STATE64_COUNT,
	x86_THREAD_STATE_COUNT,
	x86_FLOAT_STATE_COUNT,
	x86_EXCEPTION_STATE_COUNT,
	0,
	x86_SAVED_STATE32_COUNT,
	x86_SAVED_STATE64_COUNT,
	x86_DEBUG_STATE32_COUNT,
	x86_DEBUG_STATE64_COUNT,
	x86_DEBUG_STATE_COUNT
};

zone_t		iss_zone;	/* zone for saved_state area */
zone_t		ids_zone;	/* zone for debug_state area */

/* Forward */

extern void	Thread_continue(void);
extern void	Load_context(
			thread_t	thread);

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es);

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es);

static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts);

static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts);

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts);

#if CONFIG_COUNTERS
static inline void
machine_pmc_cswitch(thread_t /* old */, thread_t /* new */);

static inline void
pmc_swi(thread_t /* old */, thread_t /* new */);

static inline void
pmc_swi(thread_t old, thread_t new) {
	current_cpu_datap()->csw_old_thread = old;
	current_cpu_datap()->csw_new_thread = new;
	pal_pmc_swi();
}

static inline void
machine_pmc_cswitch(thread_t old, thread_t new) {
	if (pmc_thread_eligible(old) || pmc_thread_eligible(new)) {
		pmc_swi(old, new);
	}
}

void ml_get_csw_threads(thread_t *old, thread_t *new) {
	*old = current_cpu_datap()->csw_old_thread;
	*new = current_cpu_datap()->csw_new_thread;
}

#endif /* CONFIG_COUNTERS */

/*
 * Don't let an illegal value for dr7 get set.  Specifically,
 * check for undefined settings.  Setting these bit patterns
 * results in undefined behaviour and can lead to an unexpected
 * TRCTRAP.
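 *
 * For reference, the dr7 fields checked below are laid out as:
 *   bits  0-7  : L0/G0 .. L3/G3, per-breakpoint local/global enables
 *   bits 16-31 : R/W0,LEN0 .. R/W3,LEN3, two-bit condition and length
 *                fields for each of DR0-DR3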
 */
static boolean_t
dr7_is_valid(uint32_t *dr7)
{
	int i;
	uint32_t mask1, mask2;

	/*
	 * If the DE bit is set in CR4, R/W0-3 can be pattern
	 * "10B" to indicate I/O reads and writes
	 */
	if (!(get_cr4() & CR4_DE))
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4;
		     i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * len0-3 pattern "10B" is ok for len on Merom and newer processors
	 * (it signifies an 8-byte wide region). We use the 64bit capability
	 * of the processor in lieu of the more laborious model/family checks
	 * as all 64-bit capable processors so far support this.
	 * Reject an attempt to use this on 64-bit incapable processors.
	 */
	if (current_cpu_datap()->cpu_is64bit == FALSE)
		for (i = 0, mask1 = 0x3<<18, mask2 = 0x2<<18; i < 4;
		     i++, mask1 <<= 4, mask2 <<= 4)
			if ((*dr7 & mask1) == mask2)
				return (FALSE);

	/*
	 * if we are doing an instruction execution break (indicated
	 * by r/w[x] being "00B"), then the len[x] must also be set
	 * to "00B"
	 */
	for (i = 0; i < 4; i++)
		if ((((*dr7 >> (16 + i*4)) & 0x3) == 0) &&
		    (((*dr7 >> (18 + i*4)) & 0x3) != 0))
			return (FALSE);

	/*
	 * Intel docs have these bits fixed.
	 */
	*dr7 |= 0x1 << 10;	/* set bit 10 to 1 */
	*dr7 &= ~(0x1 << 11);	/* set bit 11 to 0 */
	*dr7 &= ~(0x1 << 12);	/* set bit 12 to 0 */
	*dr7 &= ~(0x1 << 14);	/* set bit 14 to 0 */
	*dr7 &= ~(0x1 << 15);	/* set bit 15 to 0 */

	/*
	 * We don't allow anything to set the global breakpoints.
	 */
	if (*dr7 & 0x2)
		return (FALSE);

	if (*dr7 & (0x2<<2))
		return (FALSE);

	if (*dr7 & (0x2<<4))
		return (FALSE);

	if (*dr7 & (0x2<<6))
		return (FALSE);

	return (TRUE);
}

static inline void
set_live_debug_state32(cpu_data_t *cdp, x86_debug_state32_t *ds)
{
	__asm__ volatile ("movl %0,%%db0" : :"r" (ds->dr0));
	__asm__ volatile ("movl %0,%%db1" : :"r" (ds->dr1));
	__asm__ volatile ("movl %0,%%db2" : :"r" (ds->dr2));
	__asm__ volatile ("movl %0,%%db3" : :"r" (ds->dr3));
	if (cpu_mode_is64bit())
		cdp->cpu_dr7 = ds->dr7;
}

extern void set_64bit_debug_regs(x86_debug_state64_t *ds);

static inline void
set_live_debug_state64(cpu_data_t *cdp, x86_debug_state64_t *ds)
{
	/*
	 * We need to enter 64-bit mode in order to set the full
	 * width of these registers
	 */
	set_64bit_debug_regs(ds);
	cdp->cpu_dr7 = ds->dr7;
}

boolean_t
debug_state_is_valid32(x86_debug_state32_t *ds)
{
	if (!dr7_is_valid(&ds->dr7))
		return FALSE;

#if defined(__i386__)
	/*
	 * Only allow local breakpoints and make sure they are not
	 * in the trampoline code.
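	 * (A breakpoint address at or above HIGH_MEM_BASE would fall
	 * within the high shared pages where the trampoline code is
	 * mapped.)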
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= (unsigned long)HIGH_MEM_BASE)
			return FALSE;
#endif

	return TRUE;
}

boolean_t
debug_state_is_valid64(x86_debug_state64_t *ds)
{
	if (!dr7_is_valid((uint32_t *)&ds->dr7))
		return FALSE;

	/*
	 * Don't allow the user to set debug addresses above their max
	 * value
	 */
	if (ds->dr7 & 0x1)
		if (ds->dr0 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<2))
		if (ds->dr1 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<4))
		if (ds->dr2 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	if (ds->dr7 & (0x1<<6))
		if (ds->dr3 >= VM_MAX_PAGE_ADDRESS)
			return FALSE;

	return TRUE;
}


static kern_return_t
set_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid32(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
			/* a racing setter won; copy into its allocation */
			ids = pcb->ids;
		}
	}

	copy_debug_state32(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static kern_return_t
set_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *ids;
	pcb_t pcb;

	pcb = THREAD_TO_PCB(thread);
	ids = pcb->ids;

	if (debug_state_is_valid64(ds) != TRUE) {
		return KERN_INVALID_ARGUMENT;
	}

	if (ids == NULL) {
		ids = zalloc(ids_zone);
		bzero(ids, sizeof *ids);

		simple_lock(&pcb->lock);
		/* make sure it wasn't already alloc()'d elsewhere */
		if (pcb->ids == NULL) {
			pcb->ids = ids;
			simple_unlock(&pcb->lock);
		} else {
			simple_unlock(&pcb->lock);
			zfree(ids_zone, ids);
			/* a racing setter won; copy into its allocation */
			ids = pcb->ids;
		}
	}

	copy_debug_state64(ds, ids, FALSE);

	return (KERN_SUCCESS);
}

static void
get_debug_state32(thread_t thread, x86_debug_state32_t *ds)
{
	x86_debug_state32_t *saved_state;

	saved_state = thread->machine.ids;

	if (saved_state) {
		copy_debug_state32(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

static void
get_debug_state64(thread_t thread, x86_debug_state64_t *ds)
{
	x86_debug_state64_t *saved_state;

	saved_state = (x86_debug_state64_t *)thread->machine.ids;

	if (saved_state) {
		copy_debug_state64(saved_state, ds, TRUE);
	} else
		bzero(ds, sizeof *ds);
}

/*
 * consider_machine_collect:
 *
 *	Try to collect machine-dependent pages
 */
void
consider_machine_collect(void)
{
}

void
consider_machine_adjust(void)
{
}

/*
 * Switch to the first thread on a CPU.
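 * There is no previous thread to save: the processor is picking up
 * its very first thread, so we just mark it on-processor and load
 * its register state.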
 */
void
machine_load_context(
	thread_t	new)
{
#if CONFIG_COUNTERS
	machine_pmc_cswitch(NULL, new);
#endif
	new->machine.specFlags |= OnProc;
	act_machine_switch_pcb(NULL, new);
	Load_context(new);
}

/*
 * Switch to a new thread.
 * Save the old thread's kernel state or continuation,
 * and return it.
 */
thread_t
machine_switch_context(
	thread_t		old,
	thread_continue_t	continuation,
	thread_t		new)
{
#if MACH_RT
	assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack);
#endif
#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif
	/*
	 * Save FP registers if in use.
	 */
	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	/*
	 * Monitor the stack depth and report new max,
	 * not worrying about races.
	 */
	vm_offset_t	depth = current_stack_depth();
	if (depth > kernel_stack_depth_max) {
		kernel_stack_depth_max = depth;
		KERNEL_DEBUG_CONSTANT(
			MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
			(long) depth, 0, 0, 0, 0);
	}

	/*
	 * Switch address maps if need be, even if not switching tasks.
	 * (A server activation may be "borrowing" a client map.)
	 */
	PMAP_SWITCH_CONTEXT(old, new, cpu_number());

	/*
	 * Load the rest of the user state for the new thread
	 */
	act_machine_switch_pcb(old, new);

	return(Switch_context(old, continuation, new));
}

thread_t
machine_processor_shutdown(
	thread_t	thread,
	void		(*doshutdown)(processor_t),
	processor_t	processor)
{
#if CONFIG_VMX
	vmx_suspend();
#endif
	fpu_save_context(thread);
	PMAP_SWITCH_CONTEXT(thread, processor->idle_thread, cpu_number());
	return(Shutdown_context(thread, doshutdown, processor));
}


/*
 * This is where registers that are not normally specified by the mach-o
 * file on an execve would be nullified, perhaps to avoid a covert channel.
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	/*
	 * If there's an fpu save area, free it.
	 * The initialized state will then be lazily faulted-in, if required.
	 * And if we're the target, re-arm the no-fpu trap.
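	 * (clear_fpu() re-sets CR0.TS, so the thread's next
	 * floating-point instruction traps and lazily faults the
	 * fresh initial state back in.)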
	 */
	if (thread->machine.ifps) {
		(void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64);

		if (thread == current_thread())
			clear_fpu();
	}

	if (thread->machine.ids) {
		zfree(ids_zone, thread->machine.ids);
		thread->machine.ids = NULL;
	}

	return KERN_SUCCESS;
}

uint32_t
get_eflags_exportmask(void)
{
	return EFL_USER_SET;
}

/*
 * x86_SAVED_STATE32	 - internal save/restore general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_SAVED_STATE64	 - internal save/restore general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_THREAD_STATE32	 - external set/get general register state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_THREAD_STATE64	 - external set/get general register state on 64 bit processors
 *			   for 64bit tasks only
 * x86_SAVED_STATE	 - external set/get general register state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_FLOAT_STATE32	 - internal/external save/restore float and xmm state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_FLOAT_STATE64	 - internal/external save/restore float and xmm state on 64 bit processors
 *			   for 64bit tasks only
 * x86_FLOAT_STATE	 - external save/restore float and xmm state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 * x86_EXCEPTION_STATE32 - external get exception state on 32/64 bit processors
 *			   for 32bit tasks only
 * x86_EXCEPTION_STATE64 - external get exception state on 64 bit processors
 *			   for 64bit tasks only
 * x86_EXCEPTION_STATE	 - external get exception state on 32/64 bit processors
 *			   for either 32bit or 64bit tasks
 */

static void
get_exception_state64(thread_t thread, x86_exception_state64_t *es)
{
	x86_saved_state64_t *saved_state;

	saved_state = USER_REGS64(thread);

	es->trapno = saved_state->isf.trapno;
	es->cpu = saved_state->isf.cpu;
	es->err = (typeof(es->err))saved_state->isf.err;
	es->faultvaddr = saved_state->cr2;
}

static void
get_exception_state32(thread_t thread, x86_exception_state32_t *es)
{
	x86_saved_state32_t *saved_state;

	saved_state = USER_REGS32(thread);

	es->trapno = saved_state->trapno;
	es->cpu = saved_state->cpu;
	es->err = saved_state->err;
	es->faultvaddr = saved_state->cr2;
}


static int
set_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS32(thread);

	/*
	 * Scrub segment selector values:
	 */
	ts->cs = USER_CS;
#ifdef __i386__
	if (ts->ss == 0) ts->ss = USER_DS;
	if (ts->ds == 0) ts->ds = USER_DS;
	if (ts->es == 0) ts->es = USER_DS;
#else /* __x86_64__ */
	/*
	 * On a 64 bit kernel, we always override the data segments,
	 * as the actual selector numbers have changed.  This also
	 * means that we don't support setting the data segments
	 * manually any more.
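	 * (fs and gs are left as supplied by the caller and are
	 * validated together with the other selectors just below.)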
	 */
	ts->ss = USER_DS;
	ts->ds = USER_DS;
	ts->es = USER_DS;
#endif

	/* Check segment selectors are safe */
	if (!valid_user_segment_selectors(ts->cs,
					  ts->ss,
					  ts->ds,
					  ts->es,
					  ts->fs,
					  ts->gs))
		return(KERN_INVALID_ARGUMENT);

	saved_state->eax = ts->eax;
	saved_state->ebx = ts->ebx;
	saved_state->ecx = ts->ecx;
	saved_state->edx = ts->edx;
	saved_state->edi = ts->edi;
	saved_state->esi = ts->esi;
	saved_state->ebp = ts->ebp;
	saved_state->uesp = ts->esp;
	saved_state->efl = (ts->eflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->eip = ts->eip;
	saved_state->cs = ts->cs;
	saved_state->ss = ts->ss;
	saved_state->ds = ts->ds;
	saved_state->es = ts->es;
	saved_state->fs = ts->fs;
	saved_state->gs = ts->gs;

	/*
	 * If the trace trap bit is being set,
	 * ensure that the user returns via iret
	 * - which is signaled thusly:
	 */
	if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS)
		saved_state->cs = SYSENTER_TF_CS;

	return(KERN_SUCCESS);
}

static int
set_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, DIRTY);

	saved_state = USER_REGS64(thread);

	if (!IS_USERADDR64_CANONICAL(ts->rsp) ||
	    !IS_USERADDR64_CANONICAL(ts->rip))
		return(KERN_INVALID_ARGUMENT);

	saved_state->r8 = ts->r8;
	saved_state->r9 = ts->r9;
	saved_state->r10 = ts->r10;
	saved_state->r11 = ts->r11;
	saved_state->r12 = ts->r12;
	saved_state->r13 = ts->r13;
	saved_state->r14 = ts->r14;
	saved_state->r15 = ts->r15;
	saved_state->rax = ts->rax;
	saved_state->rbx = ts->rbx;
	saved_state->rcx = ts->rcx;
	saved_state->rdx = ts->rdx;
	saved_state->rdi = ts->rdi;
	saved_state->rsi = ts->rsi;
	saved_state->rbp = ts->rbp;
	saved_state->isf.rsp = ts->rsp;
	saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;
	saved_state->isf.rip = ts->rip;
	saved_state->isf.cs = USER64_CS;
	saved_state->fs = (uint32_t)ts->fs;
	saved_state->gs = (uint32_t)ts->gs;

	return(KERN_SUCCESS);
}



static void
get_thread_state32(thread_t thread, x86_thread_state32_t *ts)
{
	x86_saved_state32_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS32(thread);

	ts->eax = saved_state->eax;
	ts->ebx = saved_state->ebx;
	ts->ecx = saved_state->ecx;
	ts->edx = saved_state->edx;
	ts->edi = saved_state->edi;
	ts->esi = saved_state->esi;
	ts->ebp = saved_state->ebp;
	ts->esp = saved_state->uesp;
	ts->eflags = saved_state->efl;
	ts->eip = saved_state->eip;
	ts->cs = saved_state->cs;
	ts->ss = saved_state->ss;
	ts->ds = saved_state->ds;
	ts->es = saved_state->es;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}


static void
get_thread_state64(thread_t thread, x86_thread_state64_t *ts)
{
	x86_saved_state64_t	*saved_state;

	pal_register_cache_state(thread, VALID);

	saved_state = USER_REGS64(thread);

	ts->r8 = saved_state->r8;
	ts->r9 = saved_state->r9;
	ts->r10 = saved_state->r10;
	ts->r11 = saved_state->r11;
	ts->r12 = saved_state->r12;
	ts->r13 = saved_state->r13;
	ts->r14 = saved_state->r14;
	ts->r15 = saved_state->r15;
	ts->rax = saved_state->rax;
	ts->rbx = saved_state->rbx;
	ts->rcx = saved_state->rcx;
	ts->rdx = saved_state->rdx;
	ts->rdi = saved_state->rdi;
	ts->rsi = saved_state->rsi;
	ts->rbp = saved_state->rbp;
	ts->rsp = saved_state->isf.rsp;
	ts->rflags = saved_state->isf.rflags;
	ts->rip = saved_state->isf.rip;
	ts->cs = saved_state->isf.cs;
	ts->fs = saved_state->fs;
	ts->gs = saved_state->gs;
}


/*
 *	act_machine_set_state:
 *
 *	Set the status of the specified thread.
 */

kern_return_t
machine_thread_set_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	switch (flavor) {
	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;

		/* Check segment selectors are safe */
		if (!valid_user_segment_selectors(state->cs,
						  state->ss,
						  state->ds,
						  state->es,
						  state->fs,
						  state->gs))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS32(thr_act);

		/*
		 * General registers
		 */
		saved_state->edi = state->edi;
		saved_state->esi = state->esi;
		saved_state->ebp = state->ebp;
		saved_state->uesp = state->uesp;
		saved_state->ebx = state->ebx;
		saved_state->edx = state->edx;
		saved_state->ecx = state->ecx;
		saved_state->eax = state->eax;
		saved_state->eip = state->eip;

		saved_state->efl = (state->efl & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * If the trace trap bit is being set,
		 * ensure that the user returns via iret
		 * - which is signaled thusly:
		 */
		if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS)
			state->cs = SYSENTER_TF_CS;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'iret'
		 * if they are not valid.
		 */
		saved_state->cs = state->cs;
		saved_state->ss = state->ss;
		saved_state->ds = state->ds;
		saved_state->es = state->es;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *) tstate;

		/* Verify that the supplied code segment selector is
		 * valid. In 64-bit mode, the FS and GS segment overrides
		 * use the FS.base and GS.base MSRs to calculate
		 * base addresses, and the trampolines don't directly
		 * restore the segment registers--hence they are no
		 * longer relevant for validation.
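		 * The rsp and rip values are checked for canonical
		 * form below, since returning to user mode with a
		 * non-canonical address loaded would fault.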
		 */
		if (!valid_user_code_selector(state->isf.cs))
			return KERN_INVALID_ARGUMENT;

		/* Check pc and stack are canonical addresses */
		if (!IS_USERADDR64_CANONICAL(state->isf.rsp) ||
		    !IS_USERADDR64_CANONICAL(state->isf.rip))
			return KERN_INVALID_ARGUMENT;

		pal_register_cache_state(thr_act, DIRTY);

		saved_state = USER_REGS64(thr_act);

		/*
		 * General registers
		 */
		saved_state->r8 = state->r8;
		saved_state->r9 = state->r9;
		saved_state->r10 = state->r10;
		saved_state->r11 = state->r11;
		saved_state->r12 = state->r12;
		saved_state->r13 = state->r13;
		saved_state->r14 = state->r14;
		saved_state->r15 = state->r15;
		saved_state->rdi = state->rdi;
		saved_state->rsi = state->rsi;
		saved_state->rbp = state->rbp;
		saved_state->rbx = state->rbx;
		saved_state->rdx = state->rdx;
		saved_state->rcx = state->rcx;
		saved_state->rax = state->rax;
		saved_state->isf.rsp = state->isf.rsp;
		saved_state->isf.rip = state->isf.rip;

		saved_state->isf.rflags = (state->isf.rflags & ~EFL_USER_CLEAR) | EFL_USER_SET;

		/*
		 * User setting segment registers.
		 * Code and stack selectors have already been
		 * checked.  Others will be reset by 'sysret'
		 * if they are not valid.
		 */
		saved_state->isf.cs = state->isf.cs;
		saved_state->isf.ss = state->isf.ss;
		saved_state->fs = state->fs;
		saved_state->gs = state->gs;

		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (count != x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (count != x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;

		if (count != x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;
		if (state->fsh.flavor == x86_FLOAT_STATE64 &&
		    state->fsh.count == x86_FLOAT_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		}
		if (state->fsh.flavor == x86_FLOAT_STATE32 &&
		    state->fsh.count == x86_FLOAT_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_AVX_STATE32:
	{
		if (count != x86_AVX_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (count != x86_AVX_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return fpu_set_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	{
		x86_avx_state_t	*state;

		if (count != x86_AVX_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;
		if (state->ash.flavor == x86_AVX_STATE64 &&
		    state->ash.count  == x86_AVX_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_AVX_STATE64);
		}
		if (state->ash.flavor == x86_AVX_STATE32 &&
		    state->ash.count  == x86_AVX_STATE32_COUNT &&
		    !thread_is_64bit(thr_act)) {
			return fpu_set_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_AVX_STATE32);
		}
		return(KERN_INVALID_ARGUMENT);
	}

	case x86_THREAD_STATE32:
	{
		if (count != x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
	}

	case x86_THREAD_STATE64:
	{
		if (count != x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (count != x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		if (state->tsh.flavor == x86_THREAD_STATE64 &&
		    state->tsh.count == x86_THREAD_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			return set_thread_state64(thr_act, &state->uts.ts64);
		} else if (state->tsh.flavor == x86_THREAD_STATE32 &&
			   state->tsh.count == x86_THREAD_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			return set_thread_state32(thr_act, &state->uts.ts32);
		} else
			return(KERN_INVALID_ARGUMENT);
	}

	case x86_DEBUG_STATE32:
	{
		x86_debug_state32_t *state;
		kern_return_t ret;

		if (count != x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state32_t *)tstate;

		ret = set_debug_state32(thr_act, state);

		return ret;
	}

	case x86_DEBUG_STATE64:
	{
		x86_debug_state64_t *state;
		kern_return_t ret;

		if (count != x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state64_t *)tstate;

		ret = set_debug_state64(thr_act, state);

		return ret;
	}

	case x86_DEBUG_STATE:
	{
		x86_debug_state_t *state;
		kern_return_t ret = KERN_INVALID_ARGUMENT;

		if (count != x86_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;
		if (state->dsh.flavor == x86_DEBUG_STATE64 &&
		    state->dsh.count == x86_DEBUG_STATE64_COUNT &&
		    thread_is_64bit(thr_act)) {
			ret = set_debug_state64(thr_act, &state->uds.ds64);
		} else if (state->dsh.flavor == x86_DEBUG_STATE32 &&
			   state->dsh.count == x86_DEBUG_STATE32_COUNT &&
			   !thread_is_64bit(thr_act)) {
			ret = set_debug_state32(thr_act, &state->uds.ds32);
		}
		return ret;
	}

	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}



/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
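 *	The caller supplies in *count the maximum number of state
 *	words it can accept; on success *count is updated to the
 *	number of words actually returned for the flavor.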
 */

kern_return_t
machine_thread_get_state(
	thread_t thr_act,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor) {

	case THREAD_STATE_FLAVOR_LIST:
	{
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = i386_THREAD_STATE;
		tstate[1] = i386_FLOAT_STATE;
		tstate[2] = i386_EXCEPTION_STATE;

		*count = 3;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_NEW:
	{
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;

		*count = 4;
		break;
	}

	case THREAD_STATE_FLAVOR_LIST_10_9:
	{
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = x86_THREAD_STATE;
		tstate[1] = x86_FLOAT_STATE;
		tstate[2] = x86_EXCEPTION_STATE;
		tstate[3] = x86_DEBUG_STATE;
		tstate[4] = x86_AVX_STATE;

		*count = 5;
		break;
	}

	case x86_SAVED_STATE32:
	{
		x86_saved_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (*count < x86_SAVED_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state32_t *) tstate;
		saved_state = USER_REGS32(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE32_COUNT;
		break;
	}

	case x86_SAVED_STATE64:
	{
		x86_saved_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (*count < x86_SAVED_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		state = (x86_saved_state64_t *)tstate;
		saved_state = USER_REGS64(thr_act);

		/*
		 * First, copy everything:
		 */
		*state = *saved_state;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_SAVED_STATE64_COUNT;
		break;
	}

	case x86_FLOAT_STATE32:
	{
		if (*count < x86_FLOAT_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE64:
	{
		if (*count < x86_FLOAT_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_FLOAT_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_FLOAT_STATE:
	{
		x86_float_state_t	*state;
		kern_return_t		kret;

		if (*count < x86_FLOAT_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_float_state_t *)tstate;

		/*
		 * no need to bzero... currently
		 * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT
		 */
		if (thread_is_64bit(thr_act)) {
			state->fsh.flavor = x86_FLOAT_STATE64;
			state->fsh.count  = x86_FLOAT_STATE64_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64);
		} else {
			state->fsh.flavor = x86_FLOAT_STATE32;
			state->fsh.count  = x86_FLOAT_STATE32_COUNT;

			kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32);
		}
		*count = x86_FLOAT_STATE_COUNT;

		return(kret);
	}

	case x86_AVX_STATE32:
	{
		if (*count != x86_AVX_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE32_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE64:
	{
		if (*count != x86_AVX_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_AVX_STATE64_COUNT;

		return fpu_get_fxstate(thr_act, tstate, flavor);
	}

	case x86_AVX_STATE:
	{
		x86_avx_state_t		*state;
		kern_return_t		kret;

		if (*count < x86_AVX_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_avx_state_t *)tstate;

		bzero((char *)state, sizeof(x86_avx_state_t));
		if (thread_is_64bit(thr_act)) {
			state->ash.flavor = x86_AVX_STATE64;
			state->ash.count  = x86_AVX_STATE64_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as64,
					       x86_AVX_STATE64);
		} else {
			state->ash.flavor = x86_AVX_STATE32;
			state->ash.count  = x86_AVX_STATE32_COUNT;
			kret = fpu_get_fxstate(thr_act,
					       (thread_state_t)&state->ufs.as32,
					       x86_AVX_STATE32);
		}
		*count = x86_AVX_STATE_COUNT;

		return(kret);
	}

	case x86_THREAD_STATE32:
	{
		if (*count < x86_THREAD_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE32_COUNT;

		get_thread_state32(thr_act, (x86_thread_state32_t *)tstate);
		break;
	}

	case x86_THREAD_STATE64:
	{
		if (*count < x86_THREAD_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_THREAD_STATE64_COUNT;

		get_thread_state64(thr_act, (x86_thread_state64_t *)tstate);
		break;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state;

		if (*count < x86_THREAD_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *)tstate;

		bzero((char *)state, sizeof(x86_thread_state_t));

		if (thread_is_64bit(thr_act)) {
			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count  = x86_THREAD_STATE64_COUNT;

			get_thread_state64(thr_act, &state->uts.ts64);
		} else {
			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count  = x86_THREAD_STATE32_COUNT;

			get_thread_state32(thr_act, &state->uts.ts32);
		}
		*count = x86_THREAD_STATE_COUNT;

		break;
	}


	case x86_EXCEPTION_STATE32:
	{
		if (*count < x86_EXCEPTION_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE32_COUNT;

		get_exception_state32(thr_act, (x86_exception_state32_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state32_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE64:
	{
		if (*count < x86_EXCEPTION_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		*count = x86_EXCEPTION_STATE64_COUNT;

		get_exception_state64(thr_act, (x86_exception_state64_t *)tstate);
		/*
		 * Suppress the cpu number for binary compatibility
		 * of this deprecated state.
		 */
		((x86_exception_state64_t *)tstate)->cpu = 0;
		break;
	}

	case x86_EXCEPTION_STATE:
	{
		x86_exception_state_t	*state;

		if (*count < x86_EXCEPTION_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_exception_state_t *)tstate;

		bzero((char *)state, sizeof(x86_exception_state_t));

		if (thread_is_64bit(thr_act)) {
			state->esh.flavor = x86_EXCEPTION_STATE64;
			state->esh.count  = x86_EXCEPTION_STATE64_COUNT;

			get_exception_state64(thr_act, &state->ues.es64);
		} else {
			state->esh.flavor = x86_EXCEPTION_STATE32;
			state->esh.count  = x86_EXCEPTION_STATE32_COUNT;

			get_exception_state32(thr_act, &state->ues.es32);
		}
		*count = x86_EXCEPTION_STATE_COUNT;

		break;
	}

	case x86_DEBUG_STATE32:
	{
		if (*count < x86_DEBUG_STATE32_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state32(thr_act, (x86_debug_state32_t *)tstate);

		*count = x86_DEBUG_STATE32_COUNT;

		break;
	}

	case x86_DEBUG_STATE64:
	{
		if (*count < x86_DEBUG_STATE64_COUNT)
			return(KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thr_act))
			return(KERN_INVALID_ARGUMENT);

		get_debug_state64(thr_act, (x86_debug_state64_t *)tstate);

		*count = x86_DEBUG_STATE64_COUNT;

		break;
	}

	case x86_DEBUG_STATE:
	{
		x86_debug_state_t	*state;

		if (*count < x86_DEBUG_STATE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		state = (x86_debug_state_t *)tstate;

		bzero(state, sizeof *state);

		if (thread_is_64bit(thr_act)) {
			state->dsh.flavor = x86_DEBUG_STATE64;
			state->dsh.count  = x86_DEBUG_STATE64_COUNT;

			get_debug_state64(thr_act, &state->uds.ds64);
		} else {
			state->dsh.flavor = x86_DEBUG_STATE32;
			state->dsh.count  = x86_DEBUG_STATE32_COUNT;

			get_debug_state32(thr_act, &state->uds.ds32);
		}
		*count = x86_DEBUG_STATE_COUNT;
		break;
	}

	default:
		return(KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}

kern_return_t
machine_thread_get_kern_state(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_state_t		tstate,
	mach_msg_type_number_t	*count)
{
	x86_saved_state_t	*int_state = current_cpu_datap()->cpu_int_state;

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case x86_THREAD_STATE32:
	{
		x86_thread_state32_t	*state;
		x86_saved_state32_t	*saved_state;

		if (!is_saved_state32(int_state) ||
		    *count < x86_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state32_t *) tstate;

		saved_state = saved_state32(int_state);
		/*
		 * General registers.
		 */
		state->eax = saved_state->eax;
		state->ebx = saved_state->ebx;
		state->ecx = saved_state->ecx;
		state->edx = saved_state->edx;
		state->edi = saved_state->edi;
		state->esi = saved_state->esi;
		state->ebp = saved_state->ebp;
		state->esp = saved_state->uesp;
		state->eflags = saved_state->efl;
		state->eip = saved_state->eip;
		state->cs = saved_state->cs;
		state->ss = saved_state->ss;
		state->ds = saved_state->ds & 0xffff;
		state->es = saved_state->es & 0xffff;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;

		*count = x86_THREAD_STATE32_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE64:
	{
		x86_thread_state64_t	*state;
		x86_saved_state64_t	*saved_state;

		if (!is_saved_state64(int_state) ||
		    *count < x86_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state64_t *) tstate;

		saved_state = saved_state64(int_state);
		/*
		 * General registers.
		 */
		state->rax = saved_state->rax;
		state->rbx = saved_state->rbx;
		state->rcx = saved_state->rcx;
		state->rdx = saved_state->rdx;
		state->rdi = saved_state->rdi;
		state->rsi = saved_state->rsi;
		state->rbp = saved_state->rbp;
		state->rsp = saved_state->isf.rsp;
		state->r8 = saved_state->r8;
		state->r9 = saved_state->r9;
		state->r10 = saved_state->r10;
		state->r11 = saved_state->r11;
		state->r12 = saved_state->r12;
		state->r13 = saved_state->r13;
		state->r14 = saved_state->r14;
		state->r15 = saved_state->r15;

		state->rip = saved_state->isf.rip;
		state->rflags = saved_state->isf.rflags;
		state->cs = saved_state->isf.cs;
		state->fs = saved_state->fs & 0xffff;
		state->gs = saved_state->gs & 0xffff;
		*count = x86_THREAD_STATE64_COUNT;

		return KERN_SUCCESS;
	}

	case x86_THREAD_STATE:
	{
		x86_thread_state_t	*state = NULL;

		if (*count < x86_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (x86_thread_state_t *) tstate;

		if (is_saved_state32(int_state)) {
			x86_saved_state32_t *saved_state = saved_state32(int_state);

			state->tsh.flavor = x86_THREAD_STATE32;
			state->tsh.count = x86_THREAD_STATE32_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts32.eax = saved_state->eax;
			state->uts.ts32.ebx = saved_state->ebx;
			state->uts.ts32.ecx = saved_state->ecx;
			state->uts.ts32.edx = saved_state->edx;
			state->uts.ts32.edi = saved_state->edi;
			state->uts.ts32.esi = saved_state->esi;
			state->uts.ts32.ebp = saved_state->ebp;
			state->uts.ts32.esp = saved_state->uesp;
			state->uts.ts32.eflags = saved_state->efl;
			state->uts.ts32.eip = saved_state->eip;
			state->uts.ts32.cs = saved_state->cs;
			state->uts.ts32.ss = saved_state->ss;
			state->uts.ts32.ds = saved_state->ds & 0xffff;
			state->uts.ts32.es = saved_state->es & 0xffff;
			state->uts.ts32.fs = saved_state->fs & 0xffff;
			state->uts.ts32.gs = saved_state->gs & 0xffff;
		} else if (is_saved_state64(int_state)) {
			x86_saved_state64_t *saved_state = saved_state64(int_state);

			state->tsh.flavor = x86_THREAD_STATE64;
			state->tsh.count = x86_THREAD_STATE64_COUNT;

			/*
			 * General registers.
			 */
			state->uts.ts64.rax = saved_state->rax;
			state->uts.ts64.rbx = saved_state->rbx;
			state->uts.ts64.rcx = saved_state->rcx;
			state->uts.ts64.rdx = saved_state->rdx;
			state->uts.ts64.rdi = saved_state->rdi;
			state->uts.ts64.rsi = saved_state->rsi;
			state->uts.ts64.rbp = saved_state->rbp;
			state->uts.ts64.rsp = saved_state->isf.rsp;
			state->uts.ts64.r8 = saved_state->r8;
			state->uts.ts64.r9 = saved_state->r9;
			state->uts.ts64.r10 = saved_state->r10;
			state->uts.ts64.r11 = saved_state->r11;
			state->uts.ts64.r12 = saved_state->r12;
			state->uts.ts64.r13 = saved_state->r13;
			state->uts.ts64.r14 = saved_state->r14;
			state->uts.ts64.r15 = saved_state->r15;

			state->uts.ts64.rip = saved_state->isf.rip;
			state->uts.ts64.rflags = saved_state->isf.rflags;
			state->uts.ts64.cs = saved_state->isf.cs;
			state->uts.ts64.fs = saved_state->fs & 0xffff;
			state->uts.ts64.gs = saved_state->gs & 0xffff;
		} else {
			panic("unknown thread state");
		}

		*count = x86_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}
	}
	return KERN_FAILURE;
}


void
machine_thread_switch_addrmode(thread_t thread)
{
	/*
	 * We don't want to be preempted until we're done
	 * - particularly if we're switching the current thread
	 */
	disable_preemption();

	/*
	 * Reset the state saveareas.  As we're resetting, we anticipate no
	 * memory allocations in this path.
	 */
	machine_thread_create(thread, thread->task);

	/* If we're switching ourselves, reset the pcb addresses etc.
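	 * Interrupts are disabled across the pcb switch so that an
	 * interrupt can't observe the descriptors in a half-updated
	 * state.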
	 */
	if (thread == current_thread()) {
		boolean_t istate = ml_set_interrupts_enabled(FALSE);
#if defined(__i386__)
		if (current_cpu_datap()->cpu_active_cr3 != kernel_pmap->pm_cr3)
			pmap_load_kernel_cr3();
#endif /* defined(__i386__) */
		act_machine_switch_pcb(NULL, thread);
		ml_set_interrupts_enabled(istate);
	}
	enable_preemption();
}



/*
 * This is used to set the current thr_act/thread
 * when starting up a new processor
 */
void
machine_set_current_thread(thread_t thread)
{
	current_cpu_datap()->cpu_active_thread = thread;
}


/*
 * Perform machine-dependent per-thread initializations
 */
void
machine_thread_init(void)
{
	if (cpu_mode_is64bit()) {
		assert(sizeof(x86_sframe_compat32_t) % 16 == 0);
		iss_zone = zinit(sizeof(x86_sframe64_t),
				thread_max * sizeof(x86_sframe64_t),
				THREAD_CHUNK * sizeof(x86_sframe64_t),
				"x86_64 saved state");

		ids_zone = zinit(sizeof(x86_debug_state64_t),
				thread_max * sizeof(x86_debug_state64_t),
				THREAD_CHUNK * sizeof(x86_debug_state64_t),
				"x86_64 debug state");

	} else {
		iss_zone = zinit(sizeof(x86_sframe32_t),
				thread_max * sizeof(x86_sframe32_t),
				THREAD_CHUNK * sizeof(x86_sframe32_t),
				"x86 saved state");
		ids_zone = zinit(sizeof(x86_debug_state32_t),
				thread_max * sizeof(x86_debug_state32_t),
				THREAD_CHUNK * sizeof(x86_debug_state32_t),
				"x86 debug state");
	}
	fpu_module_init();
}


#if defined(__i386__)
/*
 * Some routines for debugging activation code
 */
static void	dump_handlers(thread_t);
void		dump_regs(thread_t);
int		dump_act(thread_t thr_act);

static void
dump_handlers(thread_t thr_act)
{
	ReturnHandler	*rhp = thr_act->handlers;
	int		counter = 0;

	printf("\t");
	while (rhp) {
		if (rhp == &thr_act->special_handler) {
			if (rhp->next)
				printf("[NON-Zero next ptr(%p)]", rhp->next);
			printf("special_handler()->");
			break;
		}
		printf("hdlr_%d(%p)->", counter, rhp->handler);
		rhp = rhp->next;
		if (++counter > 32) {
			printf("Aborting: HUGE handler chain\n");
			break;
		}
	}
	printf("HLDR_NULL\n");
}

void
dump_regs(thread_t thr_act)
{
	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*ssp;

		ssp = USER_REGS64(thr_act);

		panic("dump_regs: 64bit tasks not yet supported");

	} else {
		x86_saved_state32_t	*ssp;

		ssp = USER_REGS32(thr_act);

		/*
		 * Print out user register state
		 */
		printf("\tRegs:\tedi=%x esi=%x ebp=%x ebx=%x edx=%x\n",
		    ssp->edi, ssp->esi, ssp->ebp, ssp->ebx, ssp->edx);

		printf("\t\tecx=%x eax=%x eip=%x efl=%x uesp=%x\n",
		    ssp->ecx, ssp->eax, ssp->eip, ssp->efl, ssp->uesp);

		printf("\t\tcs=%x ss=%x\n", ssp->cs, ssp->ss);
	}
}

int
dump_act(thread_t thr_act)
{
	if (!thr_act)
		return(0);

	printf("thread(%p)(%d): task=%p(%d)\n",
	       thr_act, thr_act->ref_count,
	       thr_act->task,
	       thr_act->task ? thr_act->task->ref_count : 0);

	printf("\tsusp=%d user_stop=%d active=%x ast=%x\n",
	       thr_act->suspend_count, thr_act->user_stop_count,
	       thr_act->active, thr_act->ast);
	printf("\tpcb=%p\n", &thr_act->machine);

	if (thr_act->kernel_stack) {
		vm_offset_t stack = thr_act->kernel_stack;

		printf("\tk_stk %lx  eip %x ebx %x esp %x iss %p\n",
		    (long)stack, STACK_IKS(stack)->k_eip, STACK_IKS(stack)->k_ebx,
		    STACK_IKS(stack)->k_esp, thr_act->machine.iss);
	}

	dump_handlers(thr_act);
	dump_regs(thr_act);
	return((int)thr_act);
}
#endif

user_addr_t
get_useraddr(void)
{
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t	*iss64;

		iss64 = USER_REGS64(thr_act);

		return(iss64->isf.rip);
	} else {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thr_act);

		return(iss32->eip);
	}
}

/*
 * detach and return a kernel stack from a thread
 */

vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;

	return (stack);
}

/*
 * attach a kernel stack to a thread and initialize it
 */

void
machine_stack_attach(
	thread_t	thread,
	vm_offset_t	stack)
{
	struct x86_kernel_state *statep;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
		     (uintptr_t)thread_tid(thread), thread->priority,
		     thread->sched_pri, 0, 0);

	assert(stack);
	thread->kernel_stack = stack;

	statep = STACK_IKS(stack);
#if defined(__x86_64__)
	statep->k_rip = (unsigned long) Thread_continue;
	statep->k_rbx = (unsigned long) thread_continue;
	statep->k_rsp = (unsigned long) (STACK_IKS(stack) - 1);
#else
	statep->k_eip = (unsigned long) Thread_continue;
	statep->k_ebx = (unsigned long) thread_continue;
	statep->k_esp = (unsigned long) (STACK_IKS(stack) - 1);
#endif

	return;
}

/*
 * move a stack from old to new thread
 */

void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t stack;

	assert(new);
	assert(old);

#if CONFIG_COUNTERS
	machine_pmc_cswitch(old, new);
#endif

	stack = old->kernel_stack;
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}
	old->kernel_stack = 0;
	/*
	 * A full call to machine_stack_attach() is unnecessary
	 * because the old stack is already initialized.
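	 * Only ownership changes hands here; the new thread picks up
	 * the stack contents as they stand.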
	 */
	new->kernel_stack = stack;

	fpu_save_context(old);

	old->machine.specFlags &= ~OnProc;
	new->machine.specFlags |= OnProc;

	PMAP_SWITCH_CONTEXT(old, new, cpu_number());
	act_machine_switch_pcb(old, new);

	machine_set_current_thread(new);

	return;
}




struct x86_act_context32 {
	x86_saved_state32_t ss;
	x86_float_state32_t fs;
	x86_debug_state32_t ds;
};

struct x86_act_context64 {
	x86_saved_state64_t ss;
	x86_float_state64_t fs;
	x86_debug_state64_t ds;
};



void *
act_thread_csave(void)
{
	kern_return_t kret;
	mach_msg_type_number_t val;
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64));

		if (ic64 == (struct x86_act_context64 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		val = x86_FLOAT_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64,
				(thread_state_t) &ic64->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}

		val = x86_DEBUG_STATE64_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE64,
						(thread_state_t)&ic64->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic64, sizeof(struct x86_act_context64));
			return((void *)0);
		}
		return(ic64);

	} else {
		struct x86_act_context32 *ic32;

		ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32));

		if (ic32 == (struct x86_act_context32 *)NULL)
			return((void *)0);

		val = x86_SAVED_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		val = x86_FLOAT_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32,
				(thread_state_t) &ic32->fs, &val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}

		val = x86_DEBUG_STATE32_COUNT;
		kret = machine_thread_get_state(thr_act,
						x86_DEBUG_STATE32,
						(thread_state_t)&ic32->ds,
						&val);
		if (kret != KERN_SUCCESS) {
			kfree(ic32, sizeof(struct x86_act_context32));
			return((void *)0);
		}
		return(ic32);
	}
}


void
act_thread_catt(void *ctx)
{
	thread_t thr_act = current_thread();
	kern_return_t kret;

	if (ctx == (void *)NULL)
		return;

	if (thread_is_64bit(thr_act)) {
		struct x86_act_context64 *ic64;

		ic64 = (struct x86_act_context64 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64,
				(thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT);
		if (kret == KERN_SUCCESS) {
			machine_thread_set_state(thr_act, x86_FLOAT_STATE64,
					(thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT);
		}
		kfree(ic64, sizeof(struct x86_act_context64));
	} else {
		struct x86_act_context32 *ic32;

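		/* 32-bit context: restore the general registers first, then the FP state */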
		ic32 = (struct x86_act_context32 *)ctx;

		kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32,
				(thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT);
		if (kret == KERN_SUCCESS) {
			(void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32,
					(thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT);
		}
		kfree(ic32, sizeof(struct x86_act_context32));
	}
}


void act_thread_cfree(__unused void *ctx)
{
	/* XXX - Unused */
}

void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid);
void x86_toggle_sysenter_arg_store(thread_t thread, boolean_t valid) {
	thread->machine.arg_store_valid = valid;
}

boolean_t x86_sysenter_arg_store_isvalid(thread_t thread);
boolean_t x86_sysenter_arg_store_isvalid(thread_t thread) {
	return (thread->machine.arg_store_valid);
}

/*
 * Duplicate one x86_debug_state32_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state32(
		x86_debug_state32_t *src,
		x86_debug_state32_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

/*
 * Duplicate one x86_debug_state64_t to another.  "all" parameter
 * chooses whether dr4 and dr5 are copied (they are never meant
 * to be installed when we do machine_task_set_state() or
 * machine_thread_set_state()).
 */
void
copy_debug_state64(
		x86_debug_state64_t *src,
		x86_debug_state64_t *target,
		boolean_t all)
{
	if (all) {
		target->dr4 = src->dr4;
		target->dr5 = src->dr5;
	}

	target->dr0 = src->dr0;
	target->dr1 = src->dr1;
	target->dr2 = src->dr2;
	target->dr3 = src->dr3;
	target->dr6 = src->dr6;
	target->dr7 = src->dr7;
}

boolean_t is_useraddr64_canonical(uint64_t addr64);

boolean_t
is_useraddr64_canonical(uint64_t addr64)
{
	return IS_USERADDR64_CANONICAL(addr64);
}