1/* 2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28/* 29 * @OSF_COPYRIGHT@ 30 */ 31/* 32 * Mach Operating System 33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University 34 * All Rights Reserved. 35 * 36 * Permission to use, copy, modify and distribute this software and its 37 * documentation is hereby granted, provided that both the copyright 38 * notice and this permission notice appear in all copies of the 39 * software, derivative works or modified versions, and any portions 40 * thereof, and that both notices appear in supporting documentation. 41 * 42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 43 * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR 44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 45 * 46 * Carnegie Mellon requests users of this software to return to 47 * 48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 49 * School of Computer Science 50 * Carnegie Mellon University 51 * Pittsburgh PA 15213-3890 52 * 53 * any improvements or extensions that they make and grant Carnegie Mellon 54 * the rights to redistribute these changes. 55 */ 56/* 57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce 58 * support for mandatory and extensible security protections. This notice 59 * is included in support of clause 2.2 (b) of the Apple Public License, 60 * Version 2.0. 61 */ 62/* 63 */ 64 65/* 66 * File: ipc_tt.c 67 * Purpose: 68 * Task and thread related IPC functions. 69 */ 70 71#include <mach/mach_types.h> 72#include <mach/boolean.h> 73#include <mach/kern_return.h> 74#include <mach/mach_param.h> 75#include <mach/task_special_ports.h> 76#include <mach/thread_special_ports.h> 77#include <mach/thread_status.h> 78#include <mach/exception_types.h> 79#include <mach/memory_object_types.h> 80#include <mach/mach_traps.h> 81#include <mach/task_server.h> 82#include <mach/thread_act_server.h> 83#include <mach/mach_host_server.h> 84#include <mach/host_priv_server.h> 85#include <mach/vm_map_server.h> 86 87#include <kern/kern_types.h> 88#include <kern/host.h> 89#include <kern/ipc_kobject.h> 90#include <kern/ipc_tt.h> 91#include <kern/kalloc.h> 92#include <kern/thread.h> 93#include <kern/misc_protos.h> 94 95#include <vm/vm_map.h> 96#include <vm/vm_pageout.h> 97#include <vm/vm_protos.h> 98 99#include <security/mac_mach_internal.h> 100 101/* forward declarations */ 102task_t convert_port_to_locked_task(ipc_port_t port); 103 104 105/* 106 * Routine: ipc_task_init 107 * Purpose: 108 * Initialize a task's IPC state. 109 * 110 * If non-null, some state will be inherited from the parent. 
 *	The parent must be appropriately initialized.
 * Conditions:
 *	Nothing locked.
 */

void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	kern_return_t kr;
	int i;


	/* create the task's IPC space, sized by the first entry of
	 * the ipc_table_entries size ladder */
	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	space->is_task = task;

	/* kport is the task's kernel (control) port; nport is the task
	 * name port, which conveys identity but no control privilege */
	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_nself = nport;
	/* naked send right handed out as the task's user-visible self port */
	task->itk_sself = ipc_port_make_send(kport);
	task->itk_space = space;

#if CONFIG_MACF_MACH
	if (parent)
		mac_task_label_associate(parent, task, &parent->maclabel,
		    &task->maclabel, &kport->ip_label);
	else
		mac_task_label_associate_kernel(task, &task->maclabel, &kport->ip_label);
#endif

	if (parent == TASK_NULL) {
		/* no parent: null exception ports, host port from host_priv */
		ipc_port_t port;

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
				ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
				parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
				parent->exc_actions[i].privileged;
		}/* for */
		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
			ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
			ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
			ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}

/*
 *	Routine:	ipc_task_enable
 *	Purpose:
 *		Enable a task for IPC access.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_task_enable(
	task_t		task)
{
	ipc_port_t kport;
	ipc_port_t nport;

	/* attach the task to its kernel and name ports so messages
	 * sent to them are serviced against this task */
	itk_lock(task);
	kport = task->itk_self;
	if (kport != IP_NULL)
		ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
	nport = task->itk_nself;
	if (nport != IP_NULL)
		ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
	itk_unlock(task);
}

/*
 *	Routine:	ipc_task_disable
 *	Purpose:
 *		Disable IPC access to a task.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_task_disable(
	task_t		task)
{
	ipc_port_t kport;
	ipc_port_t nport;

	/* detach the kernel object from both ports; the ports survive
	 * but no longer name this task */
	itk_lock(task);
	kport = task->itk_self;
	if (kport != IP_NULL)
		ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
	nport = task->itk_nself;
	if (nport != IP_NULL)
		ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
	itk_unlock(task);
}

/*
 *	Routine:	ipc_task_terminate
 *	Purpose:
 *		Clean up and destroy a task's IPC state.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_terminate(
	task_t		task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	int i;

	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	task->itk_self = IP_NULL;

	nport = task->itk_nself;
	assert(nport != IP_NULL);
	task->itk_nself = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
	/* itk_self was cleared under the lock above, so no new rights
	 * should be handed out while the remaining ones are released */

	if (IP_VALID(task->itk_sself))
		ipc_port_release_send(task->itk_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
	}

	if (IP_VALID(task->itk_host))
		ipc_port_release_send(task->itk_host);

	if (IP_VALID(task->itk_bootstrap))
		ipc_port_release_send(task->itk_bootstrap);

	if (IP_VALID(task->itk_seatbelt))
		ipc_port_release_send(task->itk_seatbelt);

	if (IP_VALID(task->itk_gssd))
		ipc_port_release_send(task->itk_gssd);

	if (IP_VALID(task->itk_task_access))
		ipc_port_release_send(task->itk_task_access);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(task->itk_registered[i]))
			ipc_port_release_send(task->itk_registered[i]);

	/* destroy the kernel ports */
	ipc_port_dealloc_kernel(kport);
	ipc_port_dealloc_kernel(nport);

	itk_lock_destroy(task);
}

/*
 *	Routine:	ipc_task_reset
 *	Purpose:
 *		Reset a task's IPC state to protect it when
 *		it enters an elevated security context. The
 *		task name port can remain the same - since
 *		it represents no specific privilege.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_reset(
	task_t		task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

	/* allocate the replacement kernel port before taking the lock */
	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_port_dealloc_kernel(new_kport);
		return;
	}

	/* swap in the new kernel port and re-point the kobject binding */
	task->itk_self = new_kport;
	old_sself = task->itk_sself;
	task->itk_sself = ipc_port_make_send(new_kport);
	ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

	/* drop non-privileged exception ports; stash them for release
	 * after the lock is dropped */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (!task->exc_actions[i].privileged) {
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		} else {
			old_exc_actions[i] = IP_NULL;
		}
	}/* for */

	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}/* for */

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}

/*
 *	Routine:	ipc_thread_init
 *	Purpose:
 *		Initialize a thread's IPC state.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_thread_init(
	thread_t	thread)
{
	ipc_port_t kport;
	int i;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_thread_init");

	thread->ith_self = kport;
	/* naked send right handed out as the thread's user-visible self port */
	thread->ith_sself = ipc_port_make_send(kport);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		thread->exc_actions[i].port = IP_NULL;

	ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);

	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_rpc_reply = IP_NULL;
}

/*
 *	Routine:	ipc_thread_disable
 *	Purpose:
 *		Detach the kernel object from the thread's kernel port,
 *		so the port no longer names this thread.
 */
void
ipc_thread_disable(
	thread_t	thread)
{
	ipc_port_t kport = thread->ith_self;

	if (kport != IP_NULL)
		ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
}

/*
 *	Routine:	ipc_thread_terminate
 *	Purpose:
 *		Clean up and destroy a thread's IPC state.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_thread_terminate(
	thread_t	thread)
{
	ipc_port_t kport = thread->ith_self;

	if (kport != IP_NULL) {
		int i;

		if (IP_VALID(thread->ith_sself))
			ipc_port_release_send(thread->ith_sself);

		thread->ith_sself = thread->ith_self = IP_NULL;

		/* release exception-port send rights */
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
			if (IP_VALID(thread->exc_actions[i].port))
				ipc_port_release_send(thread->exc_actions[i].port);
		}

		ipc_port_dealloc_kernel(kport);
	}

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL)
		ipc_port_dealloc_reply(thread->ith_rpc_reply);

	thread->ith_rpc_reply = IP_NULL;
}

/*
 *	Routine:	ipc_thread_reset
 *	Purpose:
 *		Reset the IPC state for a given Mach thread when
 *		its task enters an elevated security context.
 *		Both the thread port and its exception ports have
 *		to be reset.  Its RPC reply port cannot have any
 *		rights outstanding, so it should be fine.
 *	Conditions:
 *		Nothing locked.
493 */ 494 495void 496ipc_thread_reset( 497 thread_t thread) 498{ 499 ipc_port_t old_kport, new_kport; 500 ipc_port_t old_sself; 501 ipc_port_t old_exc_actions[EXC_TYPES_COUNT]; 502 int i; 503 504 new_kport = ipc_port_alloc_kernel(); 505 if (new_kport == IP_NULL) 506 panic("ipc_task_reset"); 507 508 thread_mtx_lock(thread); 509 510 old_kport = thread->ith_self; 511 512 if (old_kport == IP_NULL) { 513 /* the is already terminated (can this happen?) */ 514 thread_mtx_unlock(thread); 515 ipc_port_dealloc_kernel(new_kport); 516 return; 517 } 518 519 thread->ith_self = new_kport; 520 old_sself = thread->ith_sself; 521 thread->ith_sself = ipc_port_make_send(new_kport); 522 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE); 523 ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD); 524 525 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { 526 if (!thread->exc_actions[i].privileged) { 527 old_exc_actions[i] = thread->exc_actions[i].port; 528 thread->exc_actions[i].port = IP_NULL; 529 } else { 530 old_exc_actions[i] = IP_NULL; 531 } 532 }/* for */ 533 534 thread_mtx_unlock(thread); 535 536 /* release the naked send rights */ 537 538 if (IP_VALID(old_sself)) 539 ipc_port_release_send(old_sself); 540 541 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { 542 if (IP_VALID(old_exc_actions[i])) { 543 ipc_port_release_send(old_exc_actions[i]); 544 } 545 }/* for */ 546 547 /* destroy the kernel port */ 548 ipc_port_dealloc_kernel(old_kport); 549} 550 551/* 552 * Routine: retrieve_task_self_fast 553 * Purpose: 554 * Optimized version of retrieve_task_self, 555 * that only works for the current task. 556 * 557 * Return a send right (possibly null/dead) 558 * for the task's user-visible self port. 559 * Conditions: 560 * Nothing locked. 
 */

ipc_port_t
retrieve_task_self_fast(
	register task_t		task)
{
	register ipc_port_t port;

	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

	if ((port = task->itk_sself) == task->itk_self) {
		/* no interposing */

		/* fast path: mint a send right directly on the kernel port */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else
		port = ipc_port_copy_send(port);
	itk_unlock(task);

	return port;
}

/*
 *	Routine:	retrieve_thread_self_fast
 *	Purpose:
 *		Return a send right (possibly null/dead)
 *		for the thread's user-visible self port.
 *
 *		Only works for the current thread.
 *
 *	Conditions:
 *		Nothing locked.
 */

ipc_port_t
retrieve_thread_self_fast(
	thread_t		thread)
{
	register ipc_port_t port;

	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(thread->ith_self != IP_NULL);

	if ((port = thread->ith_sself) == thread->ith_self) {
		/* no interposing */

		/* fast path: mint a send right directly on the kernel port */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	}
	else
		port = ipc_port_copy_send(port);

	thread_mtx_unlock(thread);

	return port;
}

/*
 *	Routine:	task_self_trap [mach trap]
 *	Purpose:
 *		Give the caller send rights for his own task port.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */

mach_port_name_t
task_self_trap(
	__unused struct task_self_trap_args *args)
{
	task_t task = current_task();
	ipc_port_t sright;
	mach_port_name_t name;

	sright = retrieve_task_self_fast(task);
	/* copy the right out into the caller's own IPC space */
	name = ipc_port_copyout_send(sright, task->itk_space);
	return name;
}

/*
 *	Routine:	thread_self_trap [mach trap]
 *	Purpose:
 *		Give the caller send rights for his own thread port.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */

mach_port_name_t
thread_self_trap(
	__unused struct thread_self_trap_args *args)
{
	thread_t thread = current_thread();
	task_t task = thread->task;
	ipc_port_t sright;
	mach_port_name_t name;

	sright = retrieve_thread_self_fast(thread);
	name = ipc_port_copyout_send(sright, task->itk_space);
	return name;

}

/*
 *	Routine:	mach_reply_port [mach trap]
 *	Purpose:
 *		Allocate a port for the caller.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */

mach_port_name_t
mach_reply_port(
	__unused struct mach_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;

	kr = ipc_port_alloc(current_task()->itk_space, &name, &port);

	if (kr == KERN_SUCCESS)
		ip_unlock(port);	/* ipc_port_alloc hands the port back locked */
	else
		name = MACH_PORT_NULL;
	return name;
}

/*
 *	Routine:	thread_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the thread's
 *		special ports.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_get_special_port(
	thread_t	thread,
	int		which,
	ipc_port_t	*portp)
{
	kern_return_t result = KERN_SUCCESS;
	ipc_port_t *whichp;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	/* validate the selector before taking the thread mutex */
	switch (which) {

	case THREAD_KERNEL_PORT:
		whichp = &thread->ith_sself;
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	thread_mtx_lock(thread);

	if (thread->active)
		*portp = ipc_port_copy_send(*whichp);
	else
		result = KERN_FAILURE;

	thread_mtx_unlock(thread);

	return (result);
}

/*
 *	Routine:	thread_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the thread's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null.
 *		KERN_FAILURE		The thread is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
thread_set_special_port(
	thread_t	thread,
	int		which,
	ipc_port_t	port)
{
	kern_return_t result = KERN_SUCCESS;
	ipc_port_t *whichp, old = IP_NULL;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (which) {

	case THREAD_KERNEL_PORT:
		whichp = &thread->ith_sself;
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		old = *whichp;
		*whichp = port;
	}
	else
		result = KERN_FAILURE;

	thread_mtx_unlock(thread);

	/* release the displaced right outside the lock */
	if (IP_VALID(old))
		ipc_port_release_send(old);

	return (result);
}

/*
 *	Routine:	task_get_special_port [kernel call]
 *	Purpose:
 *		Clones a send right for one of the task's
 *		special ports.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 */

kern_return_t
task_get_special_port(
	task_t		task,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t port;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task IPC state already torn down */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		port = ipc_port_copy_send(task->itk_sself);
		break;

	case TASK_NAME_PORT:
		/* the name port right is minted fresh, not copied */
		port = ipc_port_make_send(task->itk_nself);
		break;

	case TASK_HOST_PORT:
		port = ipc_port_copy_send(task->itk_host);
		break;

	case TASK_BOOTSTRAP_PORT:
		port = ipc_port_copy_send(task->itk_bootstrap);
		break;

	case TASK_SEATBELT_PORT:
		port = ipc_port_copy_send(task->itk_seatbelt);
		break;

	case TASK_ACCESS_PORT:
		port = ipc_port_copy_send(task->itk_task_access);
		break;

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	itk_unlock(task);

	*portp = port;
	return KERN_SUCCESS;
}

/*
 *	Routine:	task_set_special_port [kernel call]
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 *		KERN_NO_ACCESS		Attempted overwrite of seatbelt port.
 */

kern_return_t
task_set_special_port(
	task_t		task,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t *whichp;
	ipc_port_t old;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	/* resolve the selector to the target field before locking */
	switch (which) {
	case TASK_KERNEL_PORT:
		whichp = &task->itk_sself;
		break;

	case TASK_HOST_PORT:
		whichp = &task->itk_host;
		break;

	case TASK_BOOTSTRAP_PORT:
		whichp = &task->itk_bootstrap;
		break;

	case TASK_SEATBELT_PORT:
		whichp = &task->itk_seatbelt;
		break;

	case TASK_ACCESS_PORT:
		whichp = &task->itk_task_access;
		break;

	default:
		return KERN_INVALID_ARGUMENT;
	}/* switch */

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task IPC state already torn down */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	/* do not allow overwrite of seatbelt or task access ports */
	if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
	    && IP_VALID(*whichp)) {
		itk_unlock(task);
		return KERN_NO_ACCESS;
	}

#if CONFIG_MACF_MACH
	if (mac_task_check_service(current_task(), task, "set_special_port")) {
		itk_unlock(task);
		return KERN_NO_ACCESS;
	}
#endif

	old = *whichp;
	*whichp = port;
	itk_unlock(task);

	/* release the displaced right outside the lock */
	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}


/*
 *	Routine:	mach_ports_register [kernel call]
 *	Purpose:
 *		Stash a handful of port send rights in the task.
 *		Child tasks will inherit these rights, but they
 *		must use mach_ports_lookup to acquire them.
 *
 *		The rights are supplied in a (wired) kalloc'd segment.
 *		Rights which aren't supplied are assumed to be null.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Stashed the port rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
 */

kern_return_t
mach_ports_register(
	task_t			task,
	mach_port_array_t	memory,
	mach_msg_type_number_t	portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	unsigned int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX))
		return KERN_INVALID_ARGUMENT;

	/*
	 * Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++)
		ports[i] = memory[i];
	for (; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = IP_NULL;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task IPC state already torn down */
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Replace the old send rights with the new.
	 * Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;		/* ports[] now holds the displaced rights */
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(ports[i]))
			ipc_port_release_send(ports[i]);

	/*
	 * Now that the operation is known to be successful,
	 * we can free the memory.
	 */

	if (portsCnt != 0)
		kfree(memory,
		      (vm_size_t) (portsCnt * sizeof(mach_port_t)));

	return KERN_SUCCESS;
}

/*
 *	Routine:	mach_ports_lookup [kernel call]
 *	Purpose:
 *		Retrieves (clones) the stashed port send rights.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets
 *		rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Retrieved the send rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
mach_ports_lookup(
	task_t			task,
	mach_port_array_t	*portsp,
	mach_msg_type_number_t	*portsCnt)
{
	void *memory;
	vm_size_t size;
	ipc_port_t *ports;
	int i;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

	/* allocate the result buffer before taking the task lock */
	memory = kalloc(size);
	if (memory == 0)
		return KERN_RESOURCE_SHORTAGE;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		kfree(memory, size);
		return KERN_INVALID_ARGUMENT;
	}

	ports = (ipc_port_t *) memory;

	/*
	 * Clone port rights. Because kalloc'd memory
	 * is wired, we won't fault while holding the task lock.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = ipc_port_copy_send(task->itk_registered[i]);

	itk_unlock(task);

	*portsp = (mach_port_array_t) ports;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}

/*
 *	Routine:	convert_port_to_locked_task
 *	Purpose:
 *		Internal helper routine to convert from a port to a locked
 *		task.  Used by several routines that try to convert from a
 *		task port to a reference on some task related object.
 *	Conditions:
 *		Nothing locked, blocking OK.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
	int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			return(task);
		}
		try_failed_count++;

		/* back off and retry: avoids deadlock on the lock-order inversion */
		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}

/*
 *	Routine:	convert_port_to_task
 *	Purpose:
 *		Convert from a port to a task.
 *		Doesn't consume the port ref; produces a task ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
task_t
convert_port_to_task(
	ipc_port_t	port)
{
	task_t task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_TASK) {
			task = (task_t)port->ip_kobject;
			assert(task != TASK_NULL);

			task_reference_internal(task);
		}

		ip_unlock(port);
	}

	return (task);
}

/*
 *	Routine:	convert_port_to_task_name
 *	Purpose:
 *		Convert from a port to a task name.
 *		Doesn't consume the port ref; produces a task name ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
task_name_t
convert_port_to_task_name(
	ipc_port_t	port)
{
	task_name_t task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		/* a task control port also carries task-name identity */
		if (ip_active(port) &&
		    (ip_kotype(port) == IKOT_TASK ||
		     ip_kotype(port) == IKOT_TASK_NAME)) {
			task = (task_name_t)port->ip_kobject;
			assert(task != TASK_NAME_NULL);

			task_reference_internal(task);
		}

		ip_unlock(port);
	}

	return (task);
}

/*
 *	Routine:	convert_port_to_space
 *	Purpose:
 *		Convert from a port to a space.
 *		Doesn't consume the port ref; produces a space ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
ipc_space_t
convert_port_to_space(
	ipc_port_t	port)
{
	ipc_space_t space;
	task_t task;

	task = convert_port_to_locked_task(port);

	if (task == TASK_NULL)
		return IPC_SPACE_NULL;

	if (!task->active) {
		task_unlock(task);
		return IPC_SPACE_NULL;
	}

	/* take a space reference while the task is still locked */
	space = task->itk_space;
	is_reference(space);
	task_unlock(task);
	return (space);
}

/*
 *	Routine:	convert_port_to_map
 *	Purpose:
 *		Convert from a port to a map.
 *		Doesn't consume the port ref; produces a map ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */

vm_map_t
convert_port_to_map(
	ipc_port_t	port)
{
	task_t task;
	vm_map_t map;

	task = convert_port_to_locked_task(port);

	if (task == TASK_NULL)
		return VM_MAP_NULL;

	if (!task->active) {
		task_unlock(task);
		return VM_MAP_NULL;
	}

	/* take a map reference while the task is still locked */
	map = task->map;
	vm_map_reference_swap(map);
	task_unlock(task);
	return map;
}


/*
 *	Routine:	convert_port_to_thread
 *	Purpose:
 *		Convert from a port to a thread.
 *		Doesn't consume the port ref; produces an thread ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */

thread_t
convert_port_to_thread(
	ipc_port_t	port)
{
	thread_t thread = THREAD_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_THREAD) {
			thread = (thread_t)port->ip_kobject;
			assert(thread != THREAD_NULL);

			thread_reference_internal(thread);
		}

		ip_unlock(port);
	}

	return (thread);
}

/*
 *	Routine:	port_name_to_thread
 *	Purpose:
 *		Convert from a port name to an thread reference
 *		A name of MACH_PORT_NULL is valid for the null thread.
 *	Conditions:
 *		Nothing locked.
 */
thread_t
port_name_to_thread(
	mach_port_name_t	name)
{
	thread_t thread = THREAD_NULL;
	ipc_port_t kport;

	if (MACH_PORT_VALID(name)) {
		if (ipc_object_copyin(current_space(), name,
				      MACH_MSG_TYPE_COPY_SEND,
				      (ipc_object_t *)&kport) != KERN_SUCCESS)
			return (THREAD_NULL);

		thread = convert_port_to_thread(kport);

		/* drop the send right produced by the copyin */
		if (IP_VALID(kport))
			ipc_port_release_send(kport);
	}

	return (thread);
}

/*
 *	Routine:	port_name_to_task
 *	Purpose:
 *		Convert from a port name in the current space to a
 *		task reference, which may be null.
 *	Conditions:
 *		Nothing locked.
 */
task_t
port_name_to_task(
	mach_port_name_t name)
{
	ipc_port_t kern_port;
	kern_return_t kr;
	task_t task = TASK_NULL;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_object_copyin(current_space(), name,
				       MACH_MSG_TYPE_COPY_SEND,
				       (ipc_object_t *) &kern_port);
		if (kr != KERN_SUCCESS)
			return TASK_NULL;

		task = convert_port_to_task(kern_port);

		/* drop the send right produced by the copyin */
		if (IP_VALID(kern_port))
			ipc_port_release_send(kern_port);
	}
	return task;
}

/*
 *	Routine:	convert_task_to_port
 *	Purpose:
 *		Convert from a task to a port.
 *		Consumes a task ref; produces a naked send right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */

ipc_port_t
convert_task_to_port(
	task_t		task)
{
	ipc_port_t port;

	itk_lock(task);
	if (task->itk_self != IP_NULL)
		port = ipc_port_make_send(task->itk_self);
	else
		port = IP_NULL;
	itk_unlock(task);

	/* consume the caller's task reference */
	task_deallocate(task);
	return port;
}

/*
 *	Routine:	convert_task_name_to_port
 *	Purpose:
 *		Convert from a task name ref to a port.
 *		Consumes a task name ref; produces a naked send right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
1389 */ 1390 1391ipc_port_t 1392convert_task_name_to_port( 1393 task_name_t task_name) 1394{ 1395 ipc_port_t port; 1396 1397 itk_lock(task_name); 1398 if (task_name->itk_nself != IP_NULL) 1399 port = ipc_port_make_send(task_name->itk_nself); 1400 else 1401 port = IP_NULL; 1402 itk_unlock(task_name); 1403 1404 task_name_deallocate(task_name); 1405 return port; 1406} 1407 1408/* 1409 * Routine: convert_thread_to_port 1410 * Purpose: 1411 * Convert from a thread to a port. 1412 * Consumes an thread ref; produces a naked send right 1413 * which may be invalid. 1414 * Conditions: 1415 * Nothing locked. 1416 */ 1417 1418ipc_port_t 1419convert_thread_to_port( 1420 thread_t thread) 1421{ 1422 ipc_port_t port; 1423 1424 thread_mtx_lock(thread); 1425 1426 if (thread->ith_self != IP_NULL) 1427 port = ipc_port_make_send(thread->ith_self); 1428 else 1429 port = IP_NULL; 1430 1431 thread_mtx_unlock(thread); 1432 1433 thread_deallocate(thread); 1434 1435 return (port); 1436} 1437 1438/* 1439 * Routine: space_deallocate 1440 * Purpose: 1441 * Deallocate a space ref produced by convert_port_to_space. 1442 * Conditions: 1443 * Nothing locked. 1444 */ 1445 1446void 1447space_deallocate( 1448 ipc_space_t space) 1449{ 1450 if (space != IS_NULL) 1451 is_release(space); 1452} 1453 1454/* 1455 * Routine: thread/task_set_exception_ports [kernel call] 1456 * Purpose: 1457 * Sets the thread/task exception port, flavor and 1458 * behavior for the exception types specified by the mask. 1459 * There will be one send right per exception per valid 1460 * port. 1461 * Conditions: 1462 * Nothing locked. If successful, consumes 1463 * the supplied send right. 1464 * Returns: 1465 * KERN_SUCCESS Changed the special port. 1466 * KERN_INVALID_ARGUMENT The thread is null, 1467 * Illegal mask bit set. 1468 * Illegal exception behavior 1469 * KERN_FAILURE The thread is dead. 
 */

kern_return_t
thread_set_exception_ports(
	thread_t				thread,
	exception_mask_t		exception_mask,
	ipc_port_t				new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	ipc_port_t		old_port[EXC_TYPES_COUNT];
	/* handlers installed by a task with a root security token are privileged */
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	register int	i;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	/* only validate the behavior when a real port is being installed */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	/*
	 * Install a copy of the new send right for every masked exception,
	 * stashing the displaced ports so their rights can be released
	 * after the mutex is dropped.
	 */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	thread_mtx_unlock(thread);

	/* release the displaced rights outside the thread mutex */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return (KERN_SUCCESS);
}

kern_return_t
task_set_exception_ports(
	task_t					task,
	exception_mask_t		exception_mask,
	ipc_port_t				new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	ipc_port_t		old_port[EXC_TYPES_COUNT];
	/* handlers installed by a task with a root security token are privileged */
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	register int	i;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	/* only validate the behavior when a real port is being installed */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/*
	 * NOTE(review): unlike thread_set_exception_ports, new_flavor is not
	 * validated here with VALID_THREAD_STATE_FLAVOR -- presumably it is
	 * checked at exception-delivery time; confirm before relying on it.
	 */

	itk_lock(task);

	/* a null itk_self means the task has already terminated */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	/*
	 * Install a copy of the new send right for every masked exception,
	 * stashing the displaced ports so their rights can be released
	 * after the itk lock is dropped.
	 */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			old_port[i] = task->exc_actions[i].port;
			task->exc_actions[i].port =
				ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	itk_unlock(task);

	/* release the displaced rights outside the itk lock */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return (KERN_SUCCESS);
}

/*
 *	Routine:	thread/task_swap_exception_ports [kernel call]
 *	Purpose:
 *			Sets the thread/task exception port, flavor and
 *			behavior for the exception types specified by the
 *			mask.
 *
 *			The old ports, behavior and flavors are returned
 *			Count specifies the array sizes on input and
 *			the number of returned ports etc. on output.
The 1617 * arrays must be large enough to hold all the returned 1618 * data, MIG returnes an error otherwise. The masks 1619 * array specifies the corresponding exception type(s). 1620 * 1621 * Conditions: 1622 * Nothing locked. If successful, consumes 1623 * the supplied send right. 1624 * 1625 * Returns upto [in} CountCnt elements. 1626 * Returns: 1627 * KERN_SUCCESS Changed the special port. 1628 * KERN_INVALID_ARGUMENT The thread is null, 1629 * Illegal mask bit set. 1630 * Illegal exception behavior 1631 * KERN_FAILURE The thread is dead. 1632 */ 1633 1634kern_return_t 1635thread_swap_exception_ports( 1636 thread_t thread, 1637 exception_mask_t exception_mask, 1638 ipc_port_t new_port, 1639 exception_behavior_t new_behavior, 1640 thread_state_flavor_t new_flavor, 1641 exception_mask_array_t masks, 1642 mach_msg_type_number_t *CountCnt, 1643 exception_port_array_t ports, 1644 exception_behavior_array_t behaviors, 1645 thread_state_flavor_array_t flavors) 1646{ 1647 ipc_port_t old_port[EXC_TYPES_COUNT]; 1648 boolean_t privileged = current_task()->sec_token.val[0] == 0; 1649 unsigned int i, j, count; 1650 1651 if (thread == THREAD_NULL) 1652 return (KERN_INVALID_ARGUMENT); 1653 1654 if (exception_mask & ~EXC_MASK_VALID) 1655 return (KERN_INVALID_ARGUMENT); 1656 1657 if (IP_VALID(new_port)) { 1658 switch (new_behavior & ~MACH_EXCEPTION_CODES) { 1659 1660 case EXCEPTION_DEFAULT: 1661 case EXCEPTION_STATE: 1662 case EXCEPTION_STATE_IDENTITY: 1663 break; 1664 1665 default: 1666 return (KERN_INVALID_ARGUMENT); 1667 } 1668 } 1669 1670 thread_mtx_lock(thread); 1671 1672 if (!thread->active) { 1673 thread_mtx_unlock(thread); 1674 1675 return (KERN_FAILURE); 1676 } 1677 1678 count = 0; 1679 1680 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { 1681 if (exception_mask & (1 << i)) { 1682 for (j = 0; j < count; ++j) { 1683 /* 1684 * search for an identical entry, if found 1685 * set corresponding mask for this exception. 
1686 */ 1687 if ( thread->exc_actions[i].port == ports[j] && 1688 thread->exc_actions[i].behavior == behaviors[j] && 1689 thread->exc_actions[i].flavor == flavors[j] ) { 1690 masks[j] |= (1 << i); 1691 break; 1692 } 1693 } 1694 1695 if (j == count) { 1696 masks[j] = (1 << i); 1697 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port); 1698 1699 behaviors[j] = thread->exc_actions[i].behavior; 1700 flavors[j] = thread->exc_actions[i].flavor; 1701 ++count; 1702 } 1703 1704 old_port[i] = thread->exc_actions[i].port; 1705 thread->exc_actions[i].port = ipc_port_copy_send(new_port); 1706 thread->exc_actions[i].behavior = new_behavior; 1707 thread->exc_actions[i].flavor = new_flavor; 1708 thread->exc_actions[i].privileged = privileged; 1709 if (count > *CountCnt) 1710 break; 1711 } 1712 else 1713 old_port[i] = IP_NULL; 1714 } 1715 1716 thread_mtx_unlock(thread); 1717 1718 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) 1719 if (IP_VALID(old_port[i])) 1720 ipc_port_release_send(old_port[i]); 1721 1722 if (IP_VALID(new_port)) /* consume send right */ 1723 ipc_port_release_send(new_port); 1724 1725 *CountCnt = count; 1726 1727 return (KERN_SUCCESS); 1728} 1729 1730kern_return_t 1731task_swap_exception_ports( 1732 task_t task, 1733 exception_mask_t exception_mask, 1734 ipc_port_t new_port, 1735 exception_behavior_t new_behavior, 1736 thread_state_flavor_t new_flavor, 1737 exception_mask_array_t masks, 1738 mach_msg_type_number_t *CountCnt, 1739 exception_port_array_t ports, 1740 exception_behavior_array_t behaviors, 1741 thread_state_flavor_array_t flavors) 1742{ 1743 ipc_port_t old_port[EXC_TYPES_COUNT]; 1744 boolean_t privileged = current_task()->sec_token.val[0] == 0; 1745 unsigned int i, j, count; 1746 1747 if (task == TASK_NULL) 1748 return (KERN_INVALID_ARGUMENT); 1749 1750 if (exception_mask & ~EXC_MASK_VALID) 1751 return (KERN_INVALID_ARGUMENT); 1752 1753 if (IP_VALID(new_port)) { 1754 switch (new_behavior & ~MACH_EXCEPTION_CODES) { 1755 1756 case 
EXCEPTION_DEFAULT: 1757 case EXCEPTION_STATE: 1758 case EXCEPTION_STATE_IDENTITY: 1759 break; 1760 1761 default: 1762 return (KERN_INVALID_ARGUMENT); 1763 } 1764 } 1765 1766 itk_lock(task); 1767 1768 if (task->itk_self == IP_NULL) { 1769 itk_unlock(task); 1770 1771 return (KERN_FAILURE); 1772 } 1773 1774 count = 0; 1775 1776 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { 1777 if (exception_mask & (1 << i)) { 1778 for (j = 0; j < count; j++) { 1779 /* 1780 * search for an identical entry, if found 1781 * set corresponding mask for this exception. 1782 */ 1783 if ( task->exc_actions[i].port == ports[j] && 1784 task->exc_actions[i].behavior == behaviors[j] && 1785 task->exc_actions[i].flavor == flavors[j] ) { 1786 masks[j] |= (1 << i); 1787 break; 1788 } 1789 } 1790 1791 if (j == count) { 1792 masks[j] = (1 << i); 1793 ports[j] = ipc_port_copy_send(task->exc_actions[i].port); 1794 behaviors[j] = task->exc_actions[i].behavior; 1795 flavors[j] = task->exc_actions[i].flavor; 1796 ++count; 1797 } 1798 1799 old_port[i] = task->exc_actions[i].port; 1800 task->exc_actions[i].port = ipc_port_copy_send(new_port); 1801 task->exc_actions[i].behavior = new_behavior; 1802 task->exc_actions[i].flavor = new_flavor; 1803 task->exc_actions[i].privileged = privileged; 1804 if (count > *CountCnt) 1805 break; 1806 } 1807 else 1808 old_port[i] = IP_NULL; 1809 } 1810 1811 itk_unlock(task); 1812 1813 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) 1814 if (IP_VALID(old_port[i])) 1815 ipc_port_release_send(old_port[i]); 1816 1817 if (IP_VALID(new_port)) /* consume send right */ 1818 ipc_port_release_send(new_port); 1819 1820 *CountCnt = count; 1821 1822 return (KERN_SUCCESS); 1823} 1824 1825/* 1826 * Routine: thread/task_get_exception_ports [kernel call] 1827 * Purpose: 1828 * Clones a send right for each of the thread/task's exception 1829 * ports specified in the mask and returns the behaviour 1830 * and flavor of said port. 1831 * 1832 * Returns upto [in} CountCnt elements. 
1833 * 1834 * Conditions: 1835 * Nothing locked. 1836 * Returns: 1837 * KERN_SUCCESS Extracted a send right. 1838 * KERN_INVALID_ARGUMENT The thread is null, 1839 * Invalid special port, 1840 * Illegal mask bit set. 1841 * KERN_FAILURE The thread is dead. 1842 */ 1843 1844kern_return_t 1845thread_get_exception_ports( 1846 thread_t thread, 1847 exception_mask_t exception_mask, 1848 exception_mask_array_t masks, 1849 mach_msg_type_number_t *CountCnt, 1850 exception_port_array_t ports, 1851 exception_behavior_array_t behaviors, 1852 thread_state_flavor_array_t flavors) 1853{ 1854 unsigned int i, j, count; 1855 1856 if (thread == THREAD_NULL) 1857 return (KERN_INVALID_ARGUMENT); 1858 1859 if (exception_mask & ~EXC_MASK_VALID) 1860 return (KERN_INVALID_ARGUMENT); 1861 1862 thread_mtx_lock(thread); 1863 1864 if (!thread->active) { 1865 thread_mtx_unlock(thread); 1866 1867 return (KERN_FAILURE); 1868 } 1869 1870 count = 0; 1871 1872 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { 1873 if (exception_mask & (1 << i)) { 1874 for (j = 0; j < count; ++j) { 1875 /* 1876 * search for an identical entry, if found 1877 * set corresponding mask for this exception. 
1878 */ 1879 if ( thread->exc_actions[i].port == ports[j] && 1880 thread->exc_actions[i].behavior ==behaviors[j] && 1881 thread->exc_actions[i].flavor == flavors[j] ) { 1882 masks[j] |= (1 << i); 1883 break; 1884 } 1885 } 1886 1887 if (j == count) { 1888 masks[j] = (1 << i); 1889 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port); 1890 behaviors[j] = thread->exc_actions[i].behavior; 1891 flavors[j] = thread->exc_actions[i].flavor; 1892 ++count; 1893 if (count >= *CountCnt) 1894 break; 1895 } 1896 } 1897 } 1898 1899 thread_mtx_unlock(thread); 1900 1901 *CountCnt = count; 1902 1903 return (KERN_SUCCESS); 1904} 1905 1906kern_return_t 1907task_get_exception_ports( 1908 task_t task, 1909 exception_mask_t exception_mask, 1910 exception_mask_array_t masks, 1911 mach_msg_type_number_t *CountCnt, 1912 exception_port_array_t ports, 1913 exception_behavior_array_t behaviors, 1914 thread_state_flavor_array_t flavors) 1915{ 1916 unsigned int i, j, count; 1917 1918 if (task == TASK_NULL) 1919 return (KERN_INVALID_ARGUMENT); 1920 1921 if (exception_mask & ~EXC_MASK_VALID) 1922 return (KERN_INVALID_ARGUMENT); 1923 1924 itk_lock(task); 1925 1926 if (task->itk_self == IP_NULL) { 1927 itk_unlock(task); 1928 1929 return (KERN_FAILURE); 1930 } 1931 1932 count = 0; 1933 1934 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { 1935 if (exception_mask & (1 << i)) { 1936 for (j = 0; j < count; ++j) { 1937 /* 1938 * search for an identical entry, if found 1939 * set corresponding mask for this exception. 
1940 */ 1941 if ( task->exc_actions[i].port == ports[j] && 1942 task->exc_actions[i].behavior == behaviors[j] && 1943 task->exc_actions[i].flavor == flavors[j] ) { 1944 masks[j] |= (1 << i); 1945 break; 1946 } 1947 } 1948 1949 if (j == count) { 1950 masks[j] = (1 << i); 1951 ports[j] = ipc_port_copy_send(task->exc_actions[i].port); 1952 behaviors[j] = task->exc_actions[i].behavior; 1953 flavors[j] = task->exc_actions[i].flavor; 1954 ++count; 1955 if (count > *CountCnt) 1956 break; 1957 } 1958 } 1959 } 1960 1961 itk_unlock(task); 1962 1963 *CountCnt = count; 1964 1965 return (KERN_SUCCESS); 1966} 1967