/*
 * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/zalloc.h>
#include <kern/sched_prim.h>
#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>
#include <kern/ledger.h>

#include <vm/vm_pageout.h>

#include <kern/thread_call.h>
#include <kern/call_entry.h>
#include <kern/timer_call.h>

#include <libkern/OSAtomic.h>
#include <kern/timer_queue.h>

#include <sys/kdebug.h>
#if CONFIG_DTRACE
#include <mach/sdt.h>
#endif
#include <machine/machine_routines.h>

static zone_t thread_call_zone;
static struct wait_queue daemon_wqueue;

struct thread_call_group {
    queue_head_t pending_queue;
    uint32_t pending_count;

    queue_head_t delayed_queue;
    uint32_t delayed_count;

    timer_call_data_t delayed_timer;
    timer_call_data_t dealloc_timer;

    struct wait_queue idle_wqueue;
    uint32_t idle_count, active_count;

    integer_t pri;
    uint32_t target_thread_count;
    uint64_t idle_timestamp;

    uint32_t flags;
    sched_call_t sched_call;
};

typedef struct thread_call_group *thread_call_group_t;

#define TCG_PARALLEL        0x01
#define TCG_DEALLOC_ACTIVE  0x02

#define THREAD_CALL_GROUP_COUNT         4
#define THREAD_CALL_THREAD_MIN          4
#define INTERNAL_CALL_COUNT             768
#define THREAD_CALL_DEALLOC_INTERVAL_NS (5 * 1000 * 1000) /* 5 ms */
#define THREAD_CALL_ADD_RATIO           4
#define THREAD_CALL_MACH_FACTOR_CAP     3

static struct thread_call_group thread_call_groups[THREAD_CALL_GROUP_COUNT];
static boolean_t thread_call_daemon_awake;
static thread_call_data_t internal_call_storage[INTERNAL_CALL_COUNT];
static queue_head_t thread_call_internal_queue;
int thread_call_internal_queue_count = 0;
static uint64_t thread_call_dealloc_interval_abs;

static __inline__ thread_call_t _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0);
static __inline__ void _internal_call_release(thread_call_t call);
static __inline__ boolean_t _pending_call_enqueue(thread_call_t call, thread_call_group_t group);
static __inline__ boolean_t _delayed_call_enqueue(thread_call_t call, thread_call_group_t group, uint64_t deadline);
static __inline__ boolean_t _call_dequeue(thread_call_t call, thread_call_group_t group);
static __inline__ void thread_call_wake(thread_call_group_t group);
static __inline__ void _set_delayed_call_timer(thread_call_t call, thread_call_group_t group);
static boolean_t _remove_from_pending_queue(thread_call_func_t func, thread_call_param_t param0, boolean_t remove_all);
static boolean_t _remove_from_delayed_queue(thread_call_func_t func, thread_call_param_t param0, boolean_t remove_all);
static void thread_call_daemon(void *arg);
static void thread_call_thread(thread_call_group_t group, wait_result_t wres);
extern void thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1);
static void thread_call_dealloc_timer(timer_call_param_t p0, timer_call_param_t p1);
static void thread_call_group_setup(thread_call_group_t group, thread_call_priority_t pri, uint32_t target_thread_count, boolean_t parallel);
static void sched_call_thread(int type, thread_t thread);
static void thread_call_start_deallocate_timer(thread_call_group_t group);
static void thread_call_wait_locked(thread_call_t call);
static boolean_t thread_call_enter_delayed_internal(thread_call_t call,
    thread_call_func_t alt_func, thread_call_param_t alt_param0,
    thread_call_param_t param1, uint64_t deadline,
    uint64_t leeway, unsigned int flags);

#define qe(x) ((queue_entry_t)(x))
#define TC(x) ((thread_call_t)(x))

lck_grp_t thread_call_queues_lck_grp;
lck_grp_t thread_call_lck_grp;
lck_attr_t thread_call_lck_attr;
lck_grp_attr_t thread_call_lck_grp_attr;

#if defined(__i386__) || defined(__x86_64__)
lck_mtx_t thread_call_lock_data;
#else
lck_spin_t thread_call_lock_data;
#endif

#define thread_call_lock_spin() \
    lck_mtx_lock_spin_always(&thread_call_lock_data)

#define thread_call_unlock() \
    lck_mtx_unlock_always(&thread_call_lock_data)

extern boolean_t mach_timer_coalescing_enabled;

static inline spl_t
disable_ints_and_lock(void)
{
    spl_t s;

    s = splsched();
    thread_call_lock_spin();

    return s;
}

static inline void
enable_ints_and_unlock(spl_t s)
{
    thread_call_unlock();
    splx(s);
}

static inline boolean_t
group_isparallel(thread_call_group_t group)
{
    return ((group->flags & TCG_PARALLEL) != 0);
}

static boolean_t
thread_call_group_should_add_thread(thread_call_group_t group)
{
    uint32_t thread_count;

    if (!group_isparallel(group)) {
        if (group->pending_count > 0 && group->active_count == 0) {
            return TRUE;
        }

        return FALSE;
    }

    if (group->pending_count > 0) {
        if (group->idle_count > 0) {
            panic("Pending work, but threads are idle?");
        }

        thread_count = group->active_count;

        /*
         * Add a thread if either there are no threads,
         * the group has fewer than its target number of
         * threads, or the amount of work is large relative
         * to the number of threads. In the last case, pay attention
         * to the total load on the system, and back off if
         * it's high.
         */
        if ((thread_count == 0) ||
            (thread_count < group->target_thread_count) ||
            ((group->pending_count > THREAD_CALL_ADD_RATIO * thread_count) &&
             (sched_mach_factor < THREAD_CALL_MACH_FACTOR_CAP))) {
            return TRUE;
        }
    }

    return FALSE;
}

static inline integer_t
thread_call_priority_to_sched_pri(thread_call_priority_t pri)
{
    switch (pri) {
    case THREAD_CALL_PRIORITY_HIGH:
        return BASEPRI_PREEMPT;
    case THREAD_CALL_PRIORITY_KERNEL:
        return BASEPRI_KERNEL;
    case THREAD_CALL_PRIORITY_USER:
        return BASEPRI_DEFAULT;
    case THREAD_CALL_PRIORITY_LOW:
        return MAXPRI_THROTTLE;
    default:
        panic("Invalid priority.");
    }

    return 0;
}

/* Lock held */
static inline thread_call_group_t
thread_call_get_group(
    thread_call_t call)
{
    thread_call_priority_t pri = call->tc_pri;

    assert(pri == THREAD_CALL_PRIORITY_LOW ||
        pri == THREAD_CALL_PRIORITY_USER ||
        pri == THREAD_CALL_PRIORITY_KERNEL ||
        pri == THREAD_CALL_PRIORITY_HIGH);

    return &thread_call_groups[pri];
}

static void
thread_call_group_setup(
    thread_call_group_t    group,
    thread_call_priority_t pri,
    uint32_t               target_thread_count,
    boolean_t              parallel)
{
    queue_init(&group->pending_queue);
    queue_init(&group->delayed_queue);

    timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);
    timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group);

    wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);

    group->target_thread_count = target_thread_count;
    group->pri = thread_call_priority_to_sched_pri(pri);

    group->sched_call = sched_call_thread;
    if (parallel) {
        group->flags |= TCG_PARALLEL;
        group->sched_call = NULL;
    }
}

/*
 * Simple wrapper for creating threads bound to
 * thread call groups.
 */
static kern_return_t
thread_call_thread_create(
    thread_call_group_t group)
{
    thread_t thread;
    kern_return_t result;

    result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, group->pri, &thread);
    if (result != KERN_SUCCESS) {
        return result;
    }

    if (group->pri < BASEPRI_PREEMPT) {
        /*
         * New style doesn't get to run to completion in
         * kernel if there are higher priority threads
         * available.
         */
        thread_set_eager_preempt(thread);
    }

    thread_deallocate(thread);
    return KERN_SUCCESS;
}
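/*
 * Illustrative note (editorial, not code): thread_call_initialize() below
 * configures the four groups as follows.  LOW, USER and KERNEL are parallel
 * (TCG_PARALLEL) with target thread counts of 0, 0 and 1; HIGH is serial with
 * a target of THREAD_CALL_THREAD_MIN (4) threads.  For a parallel group that
 * is already at or above its target, the heuristic in
 * thread_call_group_should_add_thread() means, for example, that a group with
 * 3 active threads and THREAD_CALL_ADD_RATIO of 4 only grows when more than
 * 12 callouts are pending and sched_mach_factor is below
 * THREAD_CALL_MACH_FACTOR_CAP.
 */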
/*
 * thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
    thread_call_t call;
    kern_return_t result;
    thread_t thread;
    int i;
    spl_t s;

    i = sizeof (thread_call_data_t);
    thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
    zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
    zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

    lck_attr_setdefault(&thread_call_lck_attr);
    lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
    lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
    lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
    lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
    lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

    nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
    wait_queue_init(&daemon_wqueue, SYNC_POLICY_FIFO);

    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE);
    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE);
    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);

    s = disable_ints_and_lock();

    queue_init(&thread_call_internal_queue);
    for (
        call = internal_call_storage;
        call < &internal_call_storage[INTERNAL_CALL_COUNT];
        call++) {

        enqueue_tail(&thread_call_internal_queue, qe(call));
        thread_call_internal_queue_count++;
    }

    thread_call_daemon_awake = TRUE;

    enable_ints_and_unlock(s);

    result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
    if (result != KERN_SUCCESS)
        panic("thread_call_initialize");

    thread_deallocate(thread);
}

void
thread_call_setup(
    thread_call_t       call,
    thread_call_func_t  func,
    thread_call_param_t param0)
{
    bzero(call, sizeof(*call));
    call_entry_setup((call_entry_t)call, func, param0);
    call->tc_pri = THREAD_CALL_PRIORITY_HIGH; /* Default priority */
}

/*
 * _internal_call_allocate:
 *
 *	Allocate an internal callout entry.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ thread_call_t
_internal_call_allocate(thread_call_func_t func, thread_call_param_t param0)
{
    thread_call_t call;

    if (queue_empty(&thread_call_internal_queue))
        panic("_internal_call_allocate");

    call = TC(dequeue_head(&thread_call_internal_queue));
    thread_call_internal_queue_count--;

    thread_call_setup(call, func, param0);
    call->tc_refs = 0;
    call->tc_flags = 0; /* THREAD_CALL_ALLOC not set, do not free back to zone */

    return (call);
}
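/*
 * Example (illustrative only; identifiers prefixed my_ are hypothetical):
 * a client that owns its own storage can initialize a callout with
 * thread_call_setup() rather than allocating from the zone.  Such a call has
 * THREAD_CALL_ALLOC clear, so it must never be handed to thread_call_free()
 * or thread_call_cancel_wait().
 *
 *	static thread_call_data_t my_call_data;
 *
 *	static void
 *	my_func(thread_call_param_t p0, thread_call_param_t p1)
 *	{
 *		// runs on a thread call worker thread
 *	}
 *
 *	thread_call_setup(&my_call_data, my_func, NULL);
 *	(void) thread_call_enter(&my_call_data);
 */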
/*
 * _internal_call_release:
 *
 *	Release an internal callout entry which
 *	is no longer pending (or delayed). This is
 *	safe to call on a non-internal entry, in which
 *	case nothing happens.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_internal_call_release(
    thread_call_t call)
{
    if (call >= internal_call_storage &&
        call < &internal_call_storage[INTERNAL_CALL_COUNT]) {
        assert((call->tc_flags & THREAD_CALL_ALLOC) == 0);
        enqueue_head(&thread_call_internal_queue, qe(call));
        thread_call_internal_queue_count++;
    }
}

/*
 * _pending_call_enqueue:
 *
 *	Place an entry at the end of the
 *	pending queue, to be executed soon.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_pending_call_enqueue(
    thread_call_t       call,
    thread_call_group_t group)
{
    queue_head_t *old_queue;

    old_queue = call_entry_enqueue_tail(CE(call), &group->pending_queue);

    if (old_queue == NULL) {
        call->tc_submit_count++;
    }

    group->pending_count++;

    thread_call_wake(group);

    return (old_queue != NULL);
}

/*
 * _delayed_call_enqueue:
 *
 *	Place an entry on the delayed queue,
 *	after existing entries with an earlier
 *	(or identical) deadline.
 *
 *	Returns TRUE if the entry was already
 *	on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_delayed_call_enqueue(
    thread_call_t       call,
    thread_call_group_t group,
    uint64_t            deadline)
{
    queue_head_t *old_queue;

    old_queue = call_entry_enqueue_deadline(CE(call), &group->delayed_queue, deadline);

    if (old_queue == &group->pending_queue)
        group->pending_count--;
    else if (old_queue == NULL)
        call->tc_submit_count++;

    return (old_queue != NULL);
}

/*
 * _call_dequeue:
 *
 *	Remove an entry from a queue.
 *
 *	Returns TRUE if the entry was on a queue.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ boolean_t
_call_dequeue(
    thread_call_t       call,
    thread_call_group_t group)
{
    queue_head_t *old_queue;

    old_queue = call_entry_dequeue(CE(call));

    if (old_queue != NULL) {
        call->tc_finish_count++;
        if (old_queue == &group->pending_queue)
            group->pending_count--;
    }

    return (old_queue != NULL);
}

/*
 * _set_delayed_call_timer:
 *
 *	Reset the timer so that it
 *	next expires when the entry is due.
 *
 *	Called with thread_call_lock held.
 */
static __inline__ void
_set_delayed_call_timer(
    thread_call_t       call,
    thread_call_group_t group)
{
    uint64_t leeway;

    assert((call->tc_soft_deadline != 0) && ((call->tc_soft_deadline <= call->tc_call.deadline)));

    leeway = call->tc_call.deadline - call->tc_soft_deadline;
    timer_call_enter_with_leeway(&group->delayed_timer, NULL,
        call->tc_soft_deadline, leeway,
        TIMER_CALL_SYS_CRITICAL|TIMER_CALL_LEEWAY,
        ((call->tc_flags & THREAD_CALL_RATELIMITED) == THREAD_CALL_RATELIMITED));
}
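/*
 * Note on deadlines (illustrative): for a delayed call, tc_soft_deadline is
 * the time the client asked for and tc_call.deadline is that time plus any
 * coalescing slop, so the leeway passed to timer_call_enter_with_leeway()
 * above is simply (tc_call.deadline - tc_soft_deadline).  For example, a
 * soft deadline of T with 500 us of slop arms the group timer at T with
 * 500 us of leeway, and the callout may legitimately fire anywhere in
 * [T, T + 500 us].
 */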
/*
 * _remove_from_pending_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the pending queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_pending_queue(
    thread_call_func_t  func,
    thread_call_param_t param0,
    boolean_t           remove_all)
{
    boolean_t call_removed = FALSE;
    thread_call_t call;
    thread_call_group_t group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];

    call = TC(queue_first(&group->pending_queue));

    while (!queue_end(&group->pending_queue, qe(call))) {
        if (call->tc_call.func == func &&
            call->tc_call.param0 == param0) {
            thread_call_t next = TC(queue_next(qe(call)));

            _call_dequeue(call, group);

            _internal_call_release(call);

            call_removed = TRUE;
            if (!remove_all)
                break;

            call = next;
        }
        else
            call = TC(queue_next(qe(call)));
    }

    return (call_removed);
}

/*
 * _remove_from_delayed_queue:
 *
 *	Remove the first (or all) matching
 *	entries from the delayed queue.
 *
 *	Returns TRUE if any matching entries
 *	were found.
 *
 *	Called with thread_call_lock held.
 */
static boolean_t
_remove_from_delayed_queue(
    thread_call_func_t  func,
    thread_call_param_t param0,
    boolean_t           remove_all)
{
    boolean_t call_removed = FALSE;
    thread_call_t call;
    thread_call_group_t group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH];

    call = TC(queue_first(&group->delayed_queue));

    while (!queue_end(&group->delayed_queue, qe(call))) {
        if (call->tc_call.func == func &&
            call->tc_call.param0 == param0) {
            thread_call_t next = TC(queue_next(qe(call)));

            _call_dequeue(call, group);

            _internal_call_release(call);

            call_removed = TRUE;
            if (!remove_all)
                break;

            call = next;
        }
        else
            call = TC(queue_next(qe(call)));
    }

    return (call_removed);
}

/*
 * thread_call_func_delayed:
 *
 *	Enqueue a function callout to
 *	occur at the stated time.
 */
void
thread_call_func_delayed(
    thread_call_func_t  func,
    thread_call_param_t param,
    uint64_t            deadline)
{
    (void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, 0, 0);
}

/*
 * thread_call_func_delayed_with_leeway:
 *
 *	Same as thread_call_func_delayed(), but with
 *	leeway/flags threaded through.
 */
void
thread_call_func_delayed_with_leeway(
    thread_call_func_t  func,
    thread_call_param_t param,
    uint64_t            deadline,
    uint64_t            leeway,
    uint32_t            flags)
{
    (void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, leeway, flags);
}

/*
 * thread_call_func_cancel:
 *
 *	Dequeue a function callout.
 *
 *	Removes one (or all) { function, argument }
 *	instance(s) from either (or both)
 *	the pending and the delayed queue,
 *	in that order.
 *
 *	Returns TRUE if any calls were cancelled.
 */
boolean_t
thread_call_func_cancel(
    thread_call_func_t  func,
    thread_call_param_t param,
    boolean_t           cancel_all)
{
    boolean_t result;
    spl_t s;

    s = splsched();
    thread_call_lock_spin();

    if (cancel_all)
        result = _remove_from_pending_queue(func, param, cancel_all) |
            _remove_from_delayed_queue(func, param, cancel_all);
    else
        result = _remove_from_pending_queue(func, param, cancel_all) ||
            _remove_from_delayed_queue(func, param, cancel_all);

    thread_call_unlock();
    splx(s);

    return (result);
}
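/*
 * Example (illustrative; my_func/my_arg are hypothetical): the function-based
 * interface above borrows an entry from internal_call_storage, so the caller
 * never manages a thread_call_t.  A sketch of a one-shot use:
 *
 *	uint64_t deadline;
 *
 *	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *	thread_call_func_delayed(my_func, my_arg, deadline);
 *
 *	// ... later, if the work is no longer wanted:
 *	(void) thread_call_func_cancel(my_func, my_arg, FALSE);
 */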
/*
 * Allocate a thread call with a given priority.  Importances
 * other than THREAD_CALL_PRIORITY_HIGH will be run in threads
 * with eager preemption enabled (i.e. may be aggressively preempted
 * by higher-priority threads which are not in the normal "urgent" bands).
 */
thread_call_t
thread_call_allocate_with_priority(
    thread_call_func_t     func,
    thread_call_param_t    param0,
    thread_call_priority_t pri)
{
    thread_call_t call;

    if (pri > THREAD_CALL_PRIORITY_LOW) {
        panic("Invalid pri: %d\n", pri);
    }

    call = thread_call_allocate(func, param0);
    call->tc_pri = pri;

    return call;
}

/*
 * thread_call_allocate:
 *
 *	Allocate a callout entry.
 */
thread_call_t
thread_call_allocate(
    thread_call_func_t  func,
    thread_call_param_t param0)
{
    thread_call_t call = zalloc(thread_call_zone);

    thread_call_setup(call, func, param0);
    call->tc_refs = 1;
    call->tc_flags = THREAD_CALL_ALLOC;

    return (call);
}

/*
 * thread_call_free:
 *
 *	Release a callout.  If the callout is currently
 *	executing, it will be freed when all invocations
 *	finish.
 */
boolean_t
thread_call_free(
    thread_call_t call)
{
    spl_t s;
    int32_t refs;

    s = splsched();
    thread_call_lock_spin();

    if (call->tc_call.queue != NULL) {
        thread_call_unlock();
        splx(s);

        return (FALSE);
    }

    refs = --call->tc_refs;
    if (refs < 0) {
        panic("Refcount negative: %d\n", refs);
    }

    thread_call_unlock();
    splx(s);

    if (refs == 0) {
        zfree(thread_call_zone, call);
    }

    return (TRUE);
}

/*
 * thread_call_enter:
 *
 *	Enqueue a callout entry to occur "soon".
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter(
    thread_call_t call)
{
    boolean_t result = TRUE;
    thread_call_group_t group;
    spl_t s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    if (call->tc_call.queue != &group->pending_queue) {
        result = _pending_call_enqueue(call, group);
    }

    call->tc_call.param1 = 0;

    thread_call_unlock();
    splx(s);

    return (result);
}

boolean_t
thread_call_enter1(
    thread_call_t       call,
    thread_call_param_t param1)
{
    boolean_t result = TRUE;
    thread_call_group_t group;
    spl_t s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    if (call->tc_call.queue != &group->pending_queue) {
        result = _pending_call_enqueue(call, group);
    }

    call->tc_call.param1 = param1;

    thread_call_unlock();
    splx(s);

    return (result);
}
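/*
 * Example (illustrative; my_* identifiers are hypothetical): the usual
 * lifecycle of a zone-allocated callout.  Re-entering a call that is still
 * pending is reported by the TRUE return value and does not queue a second
 * invocation.
 *
 *	thread_call_t my_call;
 *
 *	my_call = thread_call_allocate(my_func, my_param0);
 *	(void) thread_call_enter1(my_call, my_param1);
 *
 *	// ... when the callout is no longer needed:
 *	if (!thread_call_free(my_call)) {
 *		// still enqueued; cancel first, then free
 *	}
 */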
/*
 * thread_call_enter_delayed:
 *
 *	Enqueue a callout entry to occur
 *	at the stated time.
 *
 *	Returns TRUE if the call was
 *	already on a queue.
 */
boolean_t
thread_call_enter_delayed(
    thread_call_t call,
    uint64_t      deadline)
{
    assert(call);
    return thread_call_enter_delayed_internal(call, NULL, 0, 0, deadline, 0, 0);
}

boolean_t
thread_call_enter1_delayed(
    thread_call_t       call,
    thread_call_param_t param1,
    uint64_t            deadline)
{
    assert(call);
    return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, 0, 0);
}

boolean_t
thread_call_enter_delayed_with_leeway(
    thread_call_t       call,
    thread_call_param_t param1,
    uint64_t            deadline,
    uint64_t            leeway,
    unsigned int        flags)
{
    assert(call);
    return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, leeway, flags);
}

/*
 * thread_call_enter_delayed_internal:
 *	enqueue a callout entry to occur at the stated time
 *
 *	Returns TRUE if the call was already on a queue
 *	params:
 *	call     - structure encapsulating state of the callout
 *	alt_func/alt_param0 - if call is NULL, allocate temporary storage using these parameters
 *	deadline - time deadline in nanoseconds
 *	leeway   - timer slack represented as delta of deadline.
 *	flags    - THREAD_CALL_DELAY_XXX : classification of caller's desires wrt timer coalescing.
 *	           THREAD_CALL_DELAY_LEEWAY : value in leeway is used for timer coalescing.
 */
boolean_t
thread_call_enter_delayed_internal(
    thread_call_t       call,
    thread_call_func_t  alt_func,
    thread_call_param_t alt_param0,
    thread_call_param_t param1,
    uint64_t            deadline,
    uint64_t            leeway,
    unsigned int        flags)
{
    boolean_t result = TRUE;
    thread_call_group_t group;
    spl_t s;
    uint64_t abstime, sdeadline, slop;
    uint32_t urgency;

    /* direct mapping between thread_call, timer_call, and timeout_urgency values */
    urgency = (flags & TIMEOUT_URGENCY_MASK);

    s = splsched();
    thread_call_lock_spin();

    if (call == NULL) {
        /* allocate a structure out of internal storage, as a convenience for BSD callers */
        call = _internal_call_allocate(alt_func, alt_param0);
    }

    group = thread_call_get_group(call);
    abstime = mach_absolute_time();

    call->tc_flags |= THREAD_CALL_DELAYED;

    call->tc_soft_deadline = sdeadline = deadline;

    boolean_t ratelimited = FALSE;
    slop = timer_call_slop(deadline, abstime, urgency, current_thread(), &ratelimited);

    if ((flags & THREAD_CALL_DELAY_LEEWAY) != 0 && leeway > slop)
        slop = leeway;

    if (UINT64_MAX - deadline <= slop)
        deadline = UINT64_MAX;
    else
        deadline += slop;

    if (ratelimited) {
        call->tc_flags |= TIMER_CALL_RATELIMITED;
    } else {
        call->tc_flags &= ~TIMER_CALL_RATELIMITED;
    }

    call->tc_call.param1 = param1;
    call->ttd = (sdeadline > abstime) ? (sdeadline - abstime) : 0;

    result = _delayed_call_enqueue(call, group, deadline);

    if (queue_first(&group->delayed_queue) == qe(call))
        _set_delayed_call_timer(call, group);

#if CONFIG_DTRACE
    DTRACE_TMR5(thread_callout__create, thread_call_func_t, call->tc_call.func, uint64_t, (deadline - sdeadline), uint64_t, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), call);
#endif
    thread_call_unlock();
    splx(s);

    return (result);
}
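/*
 * Example (illustrative; my_call is assumed to have been created with
 * thread_call_allocate()): requesting a callout roughly 50 ms from now and
 * advertising 5 ms of acceptable leeway so the timer may be coalesced.
 *
 *	uint64_t interval, leeway, deadline;
 *
 *	nanoseconds_to_absolutetime(50 * NSEC_PER_MSEC, &interval);
 *	nanoseconds_to_absolutetime(5 * NSEC_PER_MSEC, &leeway);
 *	deadline = mach_absolute_time() + interval;
 *
 *	(void) thread_call_enter_delayed_with_leeway(my_call, NULL,
 *	    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
 */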
/*
 * thread_call_cancel:
 *
 *	Dequeue a callout entry.
 *
 *	Returns TRUE if the call was
 *	on a queue.
 */
boolean_t
thread_call_cancel(
    thread_call_t call)
{
    boolean_t result, do_cancel_callout = FALSE;
    thread_call_group_t group;
    spl_t s;

    group = thread_call_get_group(call);

    s = splsched();
    thread_call_lock_spin();

    if ((call->tc_call.deadline != 0) &&
        (queue_first(&group->delayed_queue) == qe(call))) {
        assert(call->tc_call.queue == &group->delayed_queue);
        do_cancel_callout = TRUE;
    }

    result = _call_dequeue(call, group);

    if (do_cancel_callout) {
        timer_call_cancel(&group->delayed_timer);
        if (!queue_empty(&group->delayed_queue)) {
            _set_delayed_call_timer(TC(queue_first(&group->delayed_queue)), group);
        }
    }

    thread_call_unlock();
    splx(s);
#if CONFIG_DTRACE
    DTRACE_TMR4(thread_callout__cancel, thread_call_func_t, call->tc_call.func, 0, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF));
#endif

    return (result);
}

/*
 * Cancel a thread call.  If it cannot be cancelled (i.e.
 * is already in flight), waits for the most recent invocation
 * to finish.  Note that if clients re-submit this thread call,
 * it may still be pending or in flight when thread_call_cancel_wait
 * returns, but all requests to execute this work item prior
 * to the call to thread_call_cancel_wait will have finished.
 */
boolean_t
thread_call_cancel_wait(
    thread_call_t call)
{
    boolean_t result;
    thread_call_group_t group;

    if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) {
        panic("%s: Can't wait on thread call whose storage I don't own.", __FUNCTION__);
    }

    group = thread_call_get_group(call);

    (void) splsched();
    thread_call_lock_spin();

    result = _call_dequeue(call, group);
    if (result == FALSE) {
        thread_call_wait_locked(call);
    }

    thread_call_unlock();
    (void) spllo();

    return result;
}

/*
 * thread_call_wake:
 *
 *	Wake a call thread to service
 *	pending call entries.  May wake
 *	the daemon thread in order to
 *	create additional call threads.
 *
 *	Called with thread_call_lock held.
 *
 *	For high-priority group, only does wakeup/creation if there are no threads
 *	running.
 */
static __inline__ void
thread_call_wake(
    thread_call_group_t group)
{
    /*
     * New behavior: use threads if you've got 'em.
     * Traditional behavior: wake only if no threads running.
     */
    if (group_isparallel(group) || group->active_count == 0) {
        if (wait_queue_wakeup_one(&group->idle_wqueue, NO_EVENT, THREAD_AWAKENED, -1) == KERN_SUCCESS) {
            group->idle_count--; group->active_count++;

            if (group->idle_count == 0) {
                timer_call_cancel(&group->dealloc_timer);
                group->flags &= ~TCG_DEALLOC_ACTIVE; /* clear the flag, matching thread_call_dealloc_timer() */
            }
        } else {
            if (!thread_call_daemon_awake && thread_call_group_should_add_thread(group)) {
                thread_call_daemon_awake = TRUE;
                wait_queue_wakeup_one(&daemon_wqueue, NO_EVENT, THREAD_AWAKENED, -1);
            }
        }
    }
}
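/*
 * Example (illustrative): tearing down a callout whose handler must not be
 * left running.  thread_call_cancel_wait() requires THREAD_CALL_ALLOC, i.e.
 * a call created by thread_call_allocate(); it panics otherwise (see above).
 *
 *	(void) thread_call_cancel_wait(my_call);	// my_call is hypothetical
 *	(void) thread_call_free(my_call);
 */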
/*
 * sched_call_thread:
 *
 *	Call out invoked by the scheduler.  Used only for high-priority
 *	thread call group.
 */
static void
sched_call_thread(
    int               type,
    __unused thread_t thread)
{
    thread_call_group_t group;

    group = &thread_call_groups[THREAD_CALL_PRIORITY_HIGH]; /* XXX */

    thread_call_lock_spin();

    switch (type) {

    case SCHED_CALL_BLOCK:
        --group->active_count;
        if (group->pending_count > 0)
            thread_call_wake(group);
        break;

    case SCHED_CALL_UNBLOCK:
        group->active_count++;
        break;
    }

    thread_call_unlock();
}

/*
 * Interrupts disabled, lock held; returns the same way.
 * Only called on thread calls whose storage we own.  Wakes up
 * anyone who might be waiting on this work item and frees it
 * if the client has so requested.
 */
static void
thread_call_finish(thread_call_t call, spl_t *s)
{
    boolean_t dowake = FALSE;

    call->tc_finish_count++;
    call->tc_refs--;

    if ((call->tc_flags & THREAD_CALL_WAIT) != 0) {
        dowake = TRUE;
        call->tc_flags &= ~THREAD_CALL_WAIT;

        /*
         * Dropping lock here because the sched call for the
         * high-pri group can take the big lock from under
         * a thread lock.
         */
        thread_call_unlock();
        thread_wakeup((event_t)call);
        thread_call_lock_spin();
    }

    if (call->tc_refs == 0) {
        if (dowake) {
            panic("Someone waiting on a thread call that is scheduled for free: %p\n", call->tc_call.func);
        }

        enable_ints_and_unlock(*s);

        zfree(thread_call_zone, call);

        *s = disable_ints_and_lock();
    }
}

/*
 * thread_call_thread:
 */
static void
thread_call_thread(
    thread_call_group_t group,
    wait_result_t       wres)
{
    thread_t self = current_thread();
    boolean_t canwait;
    spl_t s;

    if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0)
        (void)thread_set_tag_internal(self, THREAD_TAG_CALLOUT);

    /*
     * A wakeup with THREAD_INTERRUPTED indicates that
     * we should terminate.
     */
    if (wres == THREAD_INTERRUPTED) {
        thread_terminate(self);

        /* NOTREACHED */
        panic("thread_terminate() returned?");
    }

    s = disable_ints_and_lock();

    thread_sched_call(self, group->sched_call);

    while (group->pending_count > 0) {
        thread_call_t call;
        thread_call_func_t func;
        thread_call_param_t param0, param1;

        call = TC(dequeue_head(&group->pending_queue));
        group->pending_count--;

        func = call->tc_call.func;
        param0 = call->tc_call.param0;
        param1 = call->tc_call.param1;

        call->tc_call.queue = NULL;

        _internal_call_release(call);

        /*
         * Can only do wakeups for thread calls whose storage
         * we control.
         */
        if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) {
            canwait = TRUE;
            call->tc_refs++;    /* Delay free until we're done */
        } else
            canwait = FALSE;

        enable_ints_and_unlock(s);

        KERNEL_DEBUG_CONSTANT(
            MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_NONE,
            VM_KERNEL_UNSLIDE(func), param0, param1, 0, 0);

#if CONFIG_DTRACE
        DTRACE_TMR6(thread_callout__start, thread_call_func_t, func, int, 0, int, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), (call->tc_flags & THREAD_CALL_DELAYED), call);
#endif

        (*func)(param0, param1);

#if CONFIG_DTRACE
        DTRACE_TMR6(thread_callout__end, thread_call_func_t, func, int, 0, int, (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF), (call->tc_flags & THREAD_CALL_DELAYED), call);
#endif

        if (get_preemption_level() != 0) {
            int pl = get_preemption_level();
            panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)",
                pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1);
        }

        s = disable_ints_and_lock();

        if (canwait) {
            /* Frees if so desired */
            thread_call_finish(call, &s);
        }
    }

    thread_sched_call(self, NULL);
    group->active_count--;

    if (self->callout_woken_from_icontext && !self->callout_woke_thread) {
        ledger_credit(self->t_ledger, task_ledgers.interrupt_wakeups, 1);
        if (self->callout_woken_from_platform_idle)
            ledger_credit(self->t_ledger, task_ledgers.platform_idle_wakeups, 1);
    }

    self->callout_woken_from_icontext = FALSE;
    self->callout_woken_from_platform_idle = FALSE;
    self->callout_woke_thread = FALSE;

    if (group_isparallel(group)) {
        /*
         * For new style of thread group, thread always blocks.
         * If we have more than the target number of threads,
         * and this is the first to block, and it isn't active
         * already, set a timer for deallocating a thread if we
         * continue to have a surplus.
         */
        group->idle_count++;

        if (group->idle_count == 1) {
            group->idle_timestamp = mach_absolute_time();
        }

        if (((group->flags & TCG_DEALLOC_ACTIVE) == 0) &&
            ((group->active_count + group->idle_count) > group->target_thread_count)) {
            group->flags |= TCG_DEALLOC_ACTIVE;
            thread_call_start_deallocate_timer(group);
        }

        /* Wait for more work (or termination) */
        wres = wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTIBLE, 0);
        if (wres != THREAD_WAITING) {
            panic("kcall worker unable to assert wait?");
        }

        enable_ints_and_unlock(s);

        thread_block_parameter((thread_continue_t)thread_call_thread, group);
    } else {
        if (group->idle_count < group->target_thread_count) {
            group->idle_count++;

            wait_queue_assert_wait(&group->idle_wqueue, NO_EVENT, THREAD_UNINT, 0); /* Interrupted means to exit */

            enable_ints_and_unlock(s);

            thread_block_parameter((thread_continue_t)thread_call_thread, group);
            /* NOTREACHED */
        }
    }

    enable_ints_and_unlock(s);

    thread_terminate(self);
    /* NOTREACHED */
}

/*
 * thread_call_daemon: walk list of groups, allocating
 * threads if appropriate (as determined by
 * thread_call_group_should_add_thread()).
 */
static void
thread_call_daemon_continue(__unused void *arg)
{
    int i;
    kern_return_t kr;
    thread_call_group_t group;
    spl_t s;

    s = disable_ints_and_lock();

    /* Starting at zero happens to be high-priority first. */
    for (i = 0; i < THREAD_CALL_GROUP_COUNT; i++) {
        group = &thread_call_groups[i];
        while (thread_call_group_should_add_thread(group)) {
            group->active_count++;

            enable_ints_and_unlock(s);

            kr = thread_call_thread_create(group);
            if (kr != KERN_SUCCESS) {
                /*
                 * On failure, just pause for a moment and give up.
                 * We can try again later.
                 */
                delay(10000); /* 10 ms */
                s = disable_ints_and_lock();
                goto out;
            }

            s = disable_ints_and_lock();
        }
    }

out:
    thread_call_daemon_awake = FALSE;
    wait_queue_assert_wait(&daemon_wqueue, NO_EVENT, THREAD_UNINT, 0);

    enable_ints_and_unlock(s);

    thread_block_parameter((thread_continue_t)thread_call_daemon_continue, NULL);
    /* NOTREACHED */
}

static void
thread_call_daemon(
    __unused void *arg)
{
    thread_t self = current_thread();

    self->options |= TH_OPT_VMPRIV;
    vm_page_free_reserve(2); /* XXX */

    thread_call_daemon_continue(NULL);
    /* NOTREACHED */
}

/*
 * Schedule timer to deallocate a worker thread if we have a surplus
 * of threads (in excess of the group's target) and at least one thread
 * is idle the whole time.
 */
static void
thread_call_start_deallocate_timer(
    thread_call_group_t group)
{
    uint64_t deadline;
    boolean_t onqueue;

    assert(group->idle_count > 0);

    group->flags |= TCG_DEALLOC_ACTIVE;
    deadline = group->idle_timestamp + thread_call_dealloc_interval_abs;
    onqueue = timer_call_enter(&group->dealloc_timer, deadline, 0);

    if (onqueue) {
        panic("Deallocate timer already active?");
    }
}

void
thread_call_delayed_timer(
    timer_call_param_t          p0,
    __unused timer_call_param_t p1)
{
    thread_call_t call;
    thread_call_group_t group = p0;
    uint64_t timestamp;

    thread_call_lock_spin();

    timestamp = mach_absolute_time();

    call = TC(queue_first(&group->delayed_queue));

    while (!queue_end(&group->delayed_queue, qe(call))) {
        if (call->tc_soft_deadline <= timestamp) {
            if ((call->tc_flags & THREAD_CALL_RATELIMITED) &&
                (CE(call)->deadline > timestamp) &&
                (ml_timer_forced_evaluation() == FALSE)) {
                break;
            }
            _pending_call_enqueue(call, group);
        } /* TODO, identify differentially coalesced timers */
        else
            break;

        call = TC(queue_first(&group->delayed_queue));
    }

    if (!queue_end(&group->delayed_queue, qe(call)))
        _set_delayed_call_timer(call, group);

    thread_call_unlock();
}

static void
thread_call_delayed_timer_rescan(timer_call_param_t          p0,
                                 __unused timer_call_param_t p1)
{
    thread_call_t call;
    thread_call_group_t group = p0;
    uint64_t timestamp;
    boolean_t istate;

    istate = ml_set_interrupts_enabled(FALSE);
    thread_call_lock_spin();

    assert(ml_timer_forced_evaluation() == TRUE);
    timestamp = mach_absolute_time();

    call = TC(queue_first(&group->delayed_queue));

    while (!queue_end(&group->delayed_queue, qe(call))) {
        if (call->tc_soft_deadline <= timestamp) {
            _pending_call_enqueue(call, group);
            call = TC(queue_first(&group->delayed_queue));
        }
        else {
            uint64_t skew = call->tc_call.deadline - call->tc_soft_deadline;
            assert(call->tc_call.deadline >= call->tc_soft_deadline);
            /*
             * On a latency quality-of-service level change,
             * re-sort potentially rate-limited callout. The platform
             * layer determines which timers require this.
             */
            if (timer_resort_threshold(skew)) {
                _call_dequeue(call, group);
                _delayed_call_enqueue(call, group, call->tc_soft_deadline);
            }
            call = TC(queue_next(qe(call)));
        }
    }

    if (!queue_empty(&group->delayed_queue))
        _set_delayed_call_timer(TC(queue_first(&group->delayed_queue)), group);
    thread_call_unlock();
    ml_set_interrupts_enabled(istate);
}

void
thread_call_delayed_timer_rescan_all(void)
{
    thread_call_delayed_timer_rescan((timer_call_param_t)&thread_call_groups[THREAD_CALL_PRIORITY_LOW], NULL);
    thread_call_delayed_timer_rescan((timer_call_param_t)&thread_call_groups[THREAD_CALL_PRIORITY_USER], NULL);
    thread_call_delayed_timer_rescan((timer_call_param_t)&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], NULL);
    thread_call_delayed_timer_rescan((timer_call_param_t)&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], NULL);
}

/*
 * Timer callback to tell a thread to terminate if
 * we have an excess of threads and at least one has been
 * idle for a long time.
 */
static void
thread_call_dealloc_timer(
    timer_call_param_t          p0,
    __unused timer_call_param_t p1)
{
    thread_call_group_t group = (thread_call_group_t)p0;
    uint64_t now;
    kern_return_t res;
    boolean_t terminated = FALSE;

    thread_call_lock_spin();

    now = mach_absolute_time();
    if (group->idle_count > 0) {
        if (now > group->idle_timestamp + thread_call_dealloc_interval_abs) {
            terminated = TRUE;
            group->idle_count--;
            res = wait_queue_wakeup_one(&group->idle_wqueue, NO_EVENT, THREAD_INTERRUPTED, -1);
            if (res != KERN_SUCCESS) {
                panic("Unable to wake up idle thread for termination?");
            }
        }
    }

    /*
     * If we still have an excess of threads, schedule another
     * invocation of this function.
     */
    if (group->idle_count > 0 && (group->idle_count + group->active_count > group->target_thread_count)) {
        /*
         * If we killed someone just now, push out the
         * next deadline.
         */
        if (terminated) {
            group->idle_timestamp = now;
        }

        thread_call_start_deallocate_timer(group);
    } else {
        group->flags &= ~TCG_DEALLOC_ACTIVE;
    }

    thread_call_unlock();
}

/*
 * Wait for all requested invocations of a thread call prior to now
 * to finish.  Can only be invoked on thread calls whose storage we manage.
 * Just waits for the finish count to catch up to the submit count we find
 * at the beginning of our wait.
 */
static void
thread_call_wait_locked(thread_call_t call)
{
    uint64_t submit_count;
    wait_result_t res;

    assert(call->tc_flags & THREAD_CALL_ALLOC);

    submit_count = call->tc_submit_count;

    while (call->tc_finish_count < submit_count) {
        call->tc_flags |= THREAD_CALL_WAIT;

        res = assert_wait(call, THREAD_UNINT);
        if (res != THREAD_WAITING) {
            panic("Unable to assert wait?");
        }

        thread_call_unlock();
        (void) spllo();

        res = thread_block(NULL);
        if (res != THREAD_AWAKENED) {
            panic("Awoken with %d?", res);
        }

        (void) splsched();
        thread_call_lock_spin();
    }
}

/*
 * Determine whether a thread call is either on a queue or
 * currently being executed.
 */
boolean_t
thread_call_isactive(thread_call_t call)
{
    boolean_t active;
    spl_t s;

    s = disable_ints_and_lock();
    active = (call->tc_submit_count > call->tc_finish_count);
    enable_ints_and_unlock(s);

    return active;
}
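/*
 * Example (illustrative only, not part of this file's interface): a
 * self-rearming periodic callout built from the interfaces above.  The
 * handler re-enters its own thread call, so each invocation schedules the
 * next one; teardown would use thread_call_cancel_wait() followed by
 * thread_call_free().  The my_* identifiers are hypothetical.
 *
 *	static thread_call_t my_periodic_call;
 *
 *	static void
 *	my_periodic_func(thread_call_param_t p0, thread_call_param_t p1)
 *	{
 *		uint64_t interval, deadline;
 *
 *		// ... do the periodic work ...
 *
 *		nanoseconds_to_absolutetime(NSEC_PER_SEC, &interval);
 *		deadline = mach_absolute_time() + interval;
 *		(void) thread_call_enter_delayed(my_periodic_call, deadline);
 *	}
 */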