Lines matching refs:thread in /macosx-10.10/xnu-2782.1.97/osfmk/kern/

100 #include <kern/thread.h>
214 static inline void runq_consider_incr_bound_count(processor_t processor, thread_t thread)
216 if (thread->bound_processor == PROCESSOR_NULL)
219 assert(thread->bound_processor == processor);
228 static inline void runq_consider_decr_bound_count(processor_t processor, thread_t thread)
230 if (thread->bound_processor == PROCESSOR_NULL)
233 assert(thread->bound_processor == processor);
258 thread_t thread,
264 thread_t thread,
269 thread_t thread,
293 thread_t thread,
299 thread_t thread,
305 thread_t thread);
374 thread_t thread);
815 * thread).
827 * For a given system load "i", the per-thread priority
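The truncated comment above describes how timeshare priority degrades as system load grows. As a rough illustration only, the sketch below uses the classic Mach/XNU timeshare model; the function name and the fields pri_shift and sched_usage are assumptions, not taken from the matched lines.

    /*
     * Hedged sketch (not from this file): load-dependent timeshare priority.
     * pri_shift is derived from system load; as load rises the shift shrinks,
     * so the same accumulated CPU usage costs more priority on a busy system.
     */
    static int
    timeshare_effective_pri(int base_pri, unsigned int sched_usage, unsigned int pri_shift)
    {
        return base_pri - (int)(sched_usage >> pri_shift);
    }
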
860 thread_t thread = p0;
864 thread_lock(thread);
865 if (--thread->wait_timer_active == 0) {
866 if (thread->wait_timer_is_set) {
867 thread->wait_timer_is_set = FALSE;
868 clear_wait_internal(thread, THREAD_TIMED_OUT);
871 thread_unlock(thread);
878 * Unblock thread on wake up.
880 * Returns TRUE if the thread is still running.
886 thread_t thread,
896 thread->wait_result = wresult;
901 if (thread->wait_timer_is_set) {
902 if (timer_call_cancel(&thread->wait_timer))
903 thread->wait_timer_active--;
904 thread->wait_timer_is_set = FALSE;
911 thread->state &= ~(TH_WAIT|TH_UNINT);
913 if (!(thread->state & TH_RUN)) {
914 thread->state |= TH_RUN;
916 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
921 new_run_count = sched_run_incr(thread);
922 if (thread->sched_mode == TH_MODE_TIMESHARE) {
923 sched_share_incr(thread);
925 if (thread->sched_flags & TH_SFLAG_THROTTLED)
926 sched_background_incr(thread);
934 if (thread->state & TH_IDLE) {
935 processor_t processor = thread->last_processor;
941 assert((thread->state & TH_IDLE) == 0);
951 if (thread->sched_mode == TH_MODE_REALTIME) {
955 thread->realtime.deadline = thread->realtime.constraint + ctime;
961 thread->quantum_remaining = 0;
962 thread->computation_metered = 0;
963 thread->reason = AST_NONE;
966 * We also account for "double hop" thread signaling via
967 * the thread callout infrastructure.
974 if (__improbable(aticontext && !(thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT))) {
975 ledger_credit(thread->t_ledger, task_ledgers.interrupt_wakeups, 1);
976 DTRACE_SCHED2(iwakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
982 thread->thread_timer_wakeups_bin_1++;
985 thread->thread_timer_wakeups_bin_2++;
989 ledger_credit(thread->t_ledger, task_ledgers.platform_idle_wakeups, 1);
994 ledger_credit(thread->t_ledger, task_ledgers.interrupt_wakeups, 1);
995 thread->thread_callout_interrupt_wakeups++;
997 ledger_credit(thread->t_ledger, task_ledgers.platform_idle_wakeups, 1);
998 thread->thread_callout_platform_idle_wakeups++;
1005 if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
1006 thread->callout_woken_from_icontext = aticontext;
1007 thread->callout_woken_from_platform_idle = pidle;
1008 thread->callout_woke_thread = FALSE;
1013 (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result, new_run_count, 0);
1015 DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
1023 * Unblock and dispatch thread.
1025 * thread lock held, IPC locks may be held.
1026 * thread must have been pulled from wait queue under same lock hold.
1033 thread_t thread,
1036 assert(thread->at_safe_point == FALSE);
1037 assert(thread->wait_event == NO_EVENT64);
1038 assert(thread->wait_queue == WAIT_QUEUE_NULL);
1040 if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) {
1041 if (!thread_unblock(thread, wresult))
1042 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
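Read together, the thread_go fragments above amount to roughly the following; this is a reconstruction from the matched lines plus the standard KERN_SUCCESS / KERN_NOT_WAITING return convention described for clear_wait further down, not a verbatim copy of the elided source.

    kern_return_t
    thread_go(thread_t thread, wait_result_t wresult)
    {
        assert(thread->at_safe_point == FALSE);
        assert(thread->wait_event == NO_EVENT64);
        assert(thread->wait_queue == WAIT_QUEUE_NULL);

        if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) {
            /* Unblock; if it was already counted as running, just requeue it. */
            if (!thread_unblock(thread, wresult))
                thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
            return (KERN_SUCCESS);
        }

        return (KERN_NOT_WAITING);
    }
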
1053 * Mark a thread as waiting. If, given the circumstances,
1057 * at splsched() and thread is locked.
1062 thread_t thread,
1067 assert(thread == current_thread());
1070 * The thread may have certain types of interrupts/aborts masked
1075 if (interruptible > (thread->options & TH_OPT_INTMASK))
1076 interruptible = thread->options & TH_OPT_INTMASK;
1081 !(thread->sched_flags & TH_SFLAG_ABORT) ||
1083 (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
1085 if ( !(thread->state & TH_TERMINATE))
1088 thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
1089 thread->at_safe_point = at_safe_point;
1090 return (thread->wait_result = THREAD_WAITING);
1093 if (thread->sched_flags & TH_SFLAG_ABORTSAFELY)
1094 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
1096 return (thread->wait_result = THREAD_INTERRUPTED);
1103 * current thread. The effective value of any
1110 * The old interrupt level for the thread.
1117 thread_t thread = current_thread();
1118 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
1120 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
1136 thread_t thread;
1142 thread = current_thread();
1144 return (thread == NULL || wait_queue_assert_possible(thread));
1150 * Assert that the current thread is about to go to
1180 thread_t thread = current_thread();
1193 thread_lock(thread);
1205 thread);
1207 thread_unlock(thread);
1223 thread_t thread = current_thread();
1245 thread_lock(thread);
1254 thread);
1256 thread_unlock(thread);
1269 thread_t thread = current_thread();
1279 thread_lock(thread);
1288 thread);
1290 thread_unlock(thread);
1305 thread_t thread = current_thread();
1317 thread_lock(thread);
1326 thread);
1328 thread_unlock(thread);
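These assert_wait variants are normally paired with thread_block() on the waiting side and thread_wakeup() on the waking side. A minimal, hypothetical caller-side sketch follows; my_io_done and the surrounding helper names are illustrative, not from this file.

    static int my_io_done;    /* hypothetical wait event */

    /* Waiting side: declare intent to wait on the event, then block. */
    static wait_result_t
    wait_for_io(void)
    {
        assert_wait((event_t)&my_io_done, THREAD_UNINT);
        /* ... re-check the condition here; clear the wait if already satisfied ... */
        return thread_block(THREAD_CONTINUE_NULL);
    }

    /* Waking side: make any waiters on the event runnable again. */
    static void
    io_completed(void)
    {
        thread_wakeup((event_t)&my_io_done);
    }
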
1338 * Return TRUE if a thread is running on a processor such that an AST
1341 * thread state to be serialized in the thread PCB.
1347 thread_isoncpu(thread_t thread)
1350 if (!(thread->state & TH_RUN))
1354 /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
1355 if (thread->runq != PROCESSOR_NULL)
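Taken together, the thread_isoncpu() fragments amount to roughly the sketch below. The final kernel-stack check is an assumption filled in for completeness (a thread without a kernel stack cannot actually be executing) and may differ from the elided source.

    static boolean_t
    thread_isoncpu(thread_t thread)
    {
        /* Not running or runnable at all */
        if (!(thread->state & TH_RUN))
            return (FALSE);

        /* Sitting on a run queue, so not currently on a CPU */
        /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */
        if (thread->runq != PROCESSOR_NULL)
            return (FALSE);

        /* Assumed check, not among the matched lines: no stack means not on a CPU */
        if (!thread->kernel_stack)
            return (FALSE);

        return (TRUE);
    }
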
1362 * to guarantee that the thread is kicked out
1372 * Force a preemption point for a thread and wait
1378 * The thread must enter a wait state and stop via a
1385 thread_t thread,
1392 wake_lock(thread);
1393 thread_lock(thread);
1395 while (thread->state & TH_SUSP) {
1396 thread->wake_active = TRUE;
1397 thread_unlock(thread);
1399 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1400 wake_unlock(thread);
1410 wake_lock(thread);
1411 thread_lock(thread);
1414 thread->state |= TH_SUSP;
1416 while ((oncpu = thread_isoncpu(thread)) ||
1417 (until_not_runnable && (thread->state & TH_RUN))) {
1421 assert(thread->state & TH_RUN);
1422 processor = thread->chosen_processor;
1426 thread->wake_active = TRUE;
1427 thread_unlock(thread);
1429 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1430 wake_unlock(thread);
1437 thread_unstop(thread);
1442 wake_lock(thread);
1443 thread_lock(thread);
1446 thread_unlock(thread);
1447 wake_unlock(thread);
1451 * We return with the thread unlocked. To prevent it from
1453 * being on the CPU), the caller must ensure the thread
1464 * the thread running if appropriate.
1470 thread_t thread)
1474 wake_lock(thread);
1475 thread_lock(thread);
1477 if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) == TH_SUSP) {
1478 thread->state &= ~TH_SUSP;
1479 thread_unblock(thread, THREAD_AWAKENED);
1481 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
1484 if (thread->state & TH_SUSP) {
1485 thread->state &= ~TH_SUSP;
1487 if (thread->wake_active) {
1488 thread->wake_active = FALSE;
1489 thread_unlock(thread);
1491 thread_wakeup(&thread->wake_active);
1492 wake_unlock(thread);
1499 thread_unlock(thread);
1500 wake_unlock(thread);
1507 * Wait for a thread to stop running. (non-interruptible)
1512 thread_t thread,
1520 wake_lock(thread);
1521 thread_lock(thread);
1525 * desired, wait until not runnable. Assumption: if thread is
1530 while ((oncpu = thread_isoncpu(thread)) ||
1531 (until_not_runnable && (thread->state & TH_RUN))) {
1534 assert(thread->state & TH_RUN);
1535 processor = thread->chosen_processor;
1539 thread->wake_active = TRUE;
1540 thread_unlock(thread);
1542 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1543 wake_unlock(thread);
1550 wake_lock(thread);
1551 thread_lock(thread);
1554 thread_unlock(thread);
1555 wake_unlock(thread);
1562 * Clear the wait condition for the specified thread.
1563 * Start the thread executing if that is appropriate.
1565 * thread thread to awaken
1566 * result Wakeup result the thread should see
1569 * the thread is locked.
1571 * KERN_SUCCESS thread was rousted out a wait
1572 * KERN_FAILURE thread was waiting but could not be rousted
1573 * KERN_NOT_WAITING thread was not waiting
1577 thread_t thread,
1580 wait_queue_t wq = thread->wait_queue;
1584 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
1589 wait_queue_pull_thread_locked(wq, thread, TRUE);
1590 /* wait queue unlocked, thread still locked */
1593 thread_unlock(thread);
1596 thread_lock(thread);
1597 if (wq != thread->wait_queue)
1604 return (thread_go(thread, wresult));
1607 panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
1608 thread, wq, cpu_number());
1617 * Clear the wait condition for the specified thread. Start the thread
1621 * thread thread to awaken
1622 * result Wakeup result the thread should see
1626 thread_t thread,
1633 thread_lock(thread);
1634 ret = clear_wait_internal(thread, result);
1635 thread_unlock(thread);
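The clear_wait() wrapper fragments above correspond to roughly this shape; the splsched()/splx() bracketing is an assumption based on the stated requirement that the thread be locked at splsched.

    kern_return_t
    clear_wait(thread_t thread, wait_result_t result)
    {
        kern_return_t ret;
        spl_t s;

        s = splsched();
        thread_lock(thread);
        ret = clear_wait_internal(thread, result);
        thread_unlock(thread);
        splx(s);

        return (ret);
    }
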
1682 * Force the current thread to execute on the specified processor.
1776 * Select a new thread for the current processor to execute.
1778 * May select the current thread, which must be locked.
1782 thread_t thread,
1795 if (SCHED(can_update_priority)(thread))
1796 SCHED(update_priority)(thread);
1798 processor->current_pri = thread->sched_pri;
1799 processor->current_thmode = thread->sched_mode;
1800 processor->current_sfi_class = thread->sfi_class;
1823 * Test to see if the current thread should continue
1828 if (((thread->state & ~TH_SUSP) == TH_RUN) &&
1829 (thread->sched_pri >= BASEPRI_RTQUEUES || processor->processor_primary == processor) &&
1830 (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor) &&
1831 (thread->affinity_set == AFFINITY_SET_NULL || thread->affinity_set->aset_pset == pset)) {
1832 if (thread->sched_pri >= BASEPRI_RTQUEUES && first_timeslice(processor)) {
1839 thread = (thread_t)dequeue_head(&rt_runq.queue);
1840 thread->runq = PROCESSOR_NULL;
1848 processor->deadline = thread->realtime.deadline;
1852 return (thread);
1855 if ((thread->sched_mode != TH_MODE_FAIRSHARE || SCHED(fairshare_runq_count)() == 0) && (rt_runq.count == 0 || BASEPRI_RTQUEUES < thread->sched_pri) && (new_thread = SCHED(choose_thread)(processor, thread->sched_mode == TH_MODE_FAIRSHARE ? MINPRI : thread->sched_pri, reason)) == THREAD_NULL) {
1859 /* This thread is still the highest priority runnable (non-idle) thread */
1865 return (thread);
1884 thread = (thread_t)dequeue_head(&rt_runq.queue);
1886 thread->runq = PROCESSOR_NULL;
1892 processor->deadline = thread->realtime.deadline;
1895 return (thread);
1954 * Choose idle thread if fast idle is not possible.
1959 if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES)
1964 * context switch. Return dispatched thread,
1965 * else check again for a runnable thread.
1967 new_thread = thread_select_idle(thread, processor);
1973 * thread can start running on another processor without
1989 * Idle the processor using the current thread context.
1991 * Called with thread locked, then dropped and relocked.
1995 thread_t thread,
2002 if (thread->sched_mode == TH_MODE_TIMESHARE) {
2003 if (thread->sched_flags & TH_SFLAG_THROTTLED)
2004 sched_background_decr(thread);
2006 sched_share_decr(thread);
2008 sched_run_decr(thread);
2010 thread->state |= TH_IDLE;
2015 /* Reload precise timing global policy to thread-local policy */
2016 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
2018 thread_unlock(thread);
2021 * Switch execution timing to processor idle thread.
2029 thread->last_run_time = processor->last_dispatch;
2039 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2047 spllo(); new_thread = processor_idle(thread, processor);
2052 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
2054 thread_lock(thread);
2057 * If awakened, switch to thread timer and start a new quantum.
2058 * Otherwise skip; we will context switch to another thread or return here.
2060 if (!(thread->state & TH_WAIT)) {
2062 thread_timer_event(processor->last_dispatch, &thread->system_timer);
2063 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2065 thread_quantum_init(thread);
2066 processor->quantum_end = processor->last_dispatch + thread->quantum_remaining;
2067 timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL);
2070 thread->computation_epoch = processor->last_dispatch;
2073 thread->state &= ~TH_IDLE;
2077 * to the original priority of the thread so that the
2079 * switch to the idle thread.
2082 urgency = thread_get_urgency(thread, &arg1, &arg2);
2086 sched_run_incr(thread);
2087 if (thread->sched_mode == TH_MODE_TIMESHARE) {
2088 sched_share_incr(thread);
2090 if (thread->sched_flags & TH_SFLAG_THROTTLED)
2091 sched_background_incr(thread);
2105 thread_t thread;
2107 thread = choose_thread_from_runq(processor, runq_for_processor(processor), priority);
2108 if (thread != THREAD_NULL) {
2109 runq_consider_decr_bound_count(processor, thread);
2112 return thread;
2122 * Locate a thread to execute from the processor run queue
2123 * and return it. Only choose a thread with greater or equal
2137 thread_t thread;
2140 thread = (thread_t)queue_first(queue);
2141 while (!queue_end(queue, (queue_entry_t)thread)) {
2142 if (thread->bound_processor == PROCESSOR_NULL ||
2143 thread->bound_processor == processor) {
2144 remqueue((queue_entry_t)thread);
2146 thread->runq = PROCESSOR_NULL;
2158 return (thread);
2162 thread = (thread_t)queue_next((queue_entry_t)thread);
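Read in sequence, the scan above walks the chosen priority queue looking for a thread this processor is allowed to run. A hedged reconstruction of that loop follows; the run-queue bookkeeping between dequeue and return did not match the search and is summarized as a comment.

    thread = (thread_t)queue_first(queue);
    while (!queue_end(queue, (queue_entry_t)thread)) {
        if (thread->bound_processor == PROCESSOR_NULL ||
            thread->bound_processor == processor) {
            remqueue((queue_entry_t)thread);

            thread->runq = PROCESSOR_NULL;
            /* ... update rq->count, rq->urgency, bitmap and highq here ... */
            return (thread);
        }

        /* Bound to some other processor: skip it and keep scanning */
        thread = (thread_t)queue_next((queue_entry_t)thread);
    }
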
2174 * Perform a context switch and start executing the new thread.
2176 * Returns FALSE on failure, and the thread is re-dispatched.
2185 * "thread" is the new thread to context switch to
2186 * (which may be the same thread in some cases)
2191 thread_t thread,
2218 * Mark thread interruptible.
2220 thread_lock(thread);
2221 thread->state &= ~TH_UNINT;
2223 assert(thread_runnable(thread));
2224 assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor());
2225 assert(thread->runq == PROCESSOR_NULL);
2227 /* Reload precise timing global policy to thread-local policy */
2228 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
2231 thread->sfi_class = sfi_thread_classify(thread);
2241 if (!thread->kernel_stack) {
2245 * that of the other thread.
2247 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
2253 continuation = thread->continuation;
2254 parameter = thread->parameter;
2257 processor->active_thread = thread;
2258 processor->current_pri = thread->sched_pri;
2259 processor->current_thmode = thread->sched_mode;
2260 processor->current_sfi_class = thread->sfi_class;
2261 if (thread->last_processor != processor && thread->last_processor != NULL) {
2262 if (thread->last_processor->processor_set != processor->processor_set)
2263 thread->ps_switch++;
2264 thread->p_switch++;
2266 thread->last_processor = processor;
2267 thread->c_switch++;
2268 ast_context(thread);
2269 thread_unlock(thread);
2275 thread_timer_event(ctime, &thread->system_timer);
2276 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2282 if (!thread->precise_user_kernel_time) {
2290 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2292 if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) {
2294 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2297 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2299 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2302 stack_handoff(self, thread);
2306 thread_dispatch(self, thread);
2308 thread->continuation = thread->parameter = NULL;
2315 call_continuation(continuation, parameter, thread->wait_result);
2318 else if (thread == self) {
2319 /* same thread but with continuation */
2326 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2338 * Check that the other thread has a stack
2340 if (!thread->kernel_stack) {
2342 if (!stack_alloc_try(thread)) {
2344 thread_unlock(thread);
2345 thread_stack_enqueue(thread);
2349 else if (thread == self) {
2356 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2366 processor->active_thread = thread;
2367 processor->current_pri = thread->sched_pri;
2368 processor->current_thmode = thread->sched_mode;
2369 processor->current_sfi_class = thread->sfi_class;
2370 if (thread->last_processor != processor && thread->last_processor != NULL) {
2371 if (thread->last_processor->processor_set != processor->processor_set)
2372 thread->ps_switch++;
2373 thread->p_switch++;
2375 thread->last_processor = processor;
2376 thread->c_switch++;
2377 ast_context(thread);
2378 thread_unlock(thread);
2387 thread_timer_event(ctime, &thread->system_timer);
2388 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2394 if (!thread->precise_user_kernel_time) {
2402 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2404 if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
2406 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2409 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2411 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2419 thread = machine_switch_context(self, continuation, thread);
2421 TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
2428 thread_dispatch(thread, self);
2445 * Handle threads at context switch. Re-dispatch other thread
2447 * special actions. Update quantum for other thread and begin
2450 * "self" is our new current thread that we have context switched
2451 * to, "thread" is the old thread that we have switched away from.
2457 thread_t thread,
2462 if (thread != THREAD_NULL) {
2467 if (thread->continuation != NULL && thread->kernel_stack != 0)
2468 stack_free(thread);
2470 if (!(thread->state & TH_IDLE)) {
2478 consumed = thread->quantum_remaining - remainder;
2480 if ((thread->reason & AST_LEDGER) == 0) {
2483 * the individual thread.
2485 ledger_credit(thread->t_ledger,
2487 ledger_credit(thread->t_threadledger,
2490 if (thread->t_bankledger) {
2491 ledger_credit(thread->t_bankledger,
2493 (consumed - thread->t_deduct_bank_ledger_time));
2496 thread->t_deduct_bank_ledger_time = 0;
2500 wake_lock(thread);
2501 thread_lock(thread);
2508 thread->quantum_remaining = (uint32_t)remainder;
2510 thread->quantum_remaining = 0;
2512 if (thread->sched_mode == TH_MODE_REALTIME) {
2514 * Cancel the deadline if the thread has
2517 if (thread->quantum_remaining == 0) {
2518 thread->realtime.deadline = UINT64_MAX;
2527 if (thread->quantum_remaining < min_std_quantum) {
2528 thread->reason |= AST_QUANTUM;
2529 thread->quantum_remaining += SCHED(initial_quantum_size)(thread);
2538 if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
2539 self->quantum_remaining = thread->quantum_remaining;
2540 thread->reason |= AST_QUANTUM;
2541 thread->quantum_remaining = 0;
2544 if (sched_groups_enabled && thread->sched_group == self->sched_group) {
2548 self->reason, (uintptr_t)thread_tid(thread),
2549 self->quantum_remaining, thread->quantum_remaining, 0);
2551 self->quantum_remaining = thread->quantum_remaining;
2552 thread->quantum_remaining = 0;
2558 thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
2560 if ((thread->rwlock_count != 0) && !(LcksOpts & disLkRWPrio)) {
2563 priority = thread->sched_pri;
2565 if (priority < thread->priority)
2566 priority = thread->priority;
2570 if ((thread->sched_pri < priority) || !(thread->sched_flags & TH_SFLAG_RW_PROMOTED)) {
2573 (uintptr_t)thread_tid(thread), thread->sched_pri, thread->priority, priority, 0);
2575 thread->sched_flags |= TH_SFLAG_RW_PROMOTED;
2577 if (thread->sched_pri < priority)
2578 set_sched_pri(thread, priority);
2582 if (!(thread->state & TH_WAIT)) {
2586 if (thread->reason & AST_QUANTUM)
2587 thread_setrun(thread, SCHED_TAILQ);
2589 if (thread->reason & AST_PREEMPT)
2590 thread_setrun(thread, SCHED_HEADQ);
2592 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
2594 thread->reason = AST_NONE;
2598 (uintptr_t)thread_tid(thread), thread->reason, thread->state, sched_run_count, 0);
2600 if (thread->wake_active) {
2601 thread->wake_active = FALSE;
2602 thread_unlock(thread);
2604 thread_wakeup(&thread->wake_active);
2607 thread_unlock(thread);
2609 wake_unlock(thread);
2620 * the thread to the termination queue
2622 if ((thread->state & (TH_TERMINATE|TH_TERMINATE2)) == TH_TERMINATE) {
2624 thread->state |= TH_TERMINATE2;
2627 thread->state &= ~TH_RUN;
2628 thread->chosen_processor = PROCESSOR_NULL;
2630 if (thread->sched_mode == TH_MODE_TIMESHARE) {
2631 if (thread->sched_flags & TH_SFLAG_THROTTLED)
2632 sched_background_decr(thread);
2634 sched_share_decr(thread);
2636 new_run_count = sched_run_decr(thread);
2638 if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) {
2639 if (thread->reason & AST_SFI) {
2640 thread->wait_sfi_begin_time = processor->last_dispatch;
2646 (uintptr_t)thread_tid(thread), thread->reason, thread->state, new_run_count, 0);
2648 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2650 if (thread->wake_active) {
2651 thread->wake_active = FALSE;
2652 thread_unlock(thread);
2654 thread_wakeup(&thread->wake_active);
2657 thread_unlock(thread);
2659 wake_unlock(thread);
2662 thread_terminate_enqueue(thread);
2716 * attempt to discard the thread's kernel stack. When the
2717 * thread resumes, it will execute the continuation function
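Continuations let a blocked thread give up its kernel stack: instead of unwinding back through the blocked call chain, the thread restarts in the continuation function when it wakes. A hypothetical usage sketch follows; my_stage1, my_stage2, and my_io_done are illustrative names, not from this file.

    static int my_io_done;    /* hypothetical wait event */

    static void
    my_stage2(void *param, wait_result_t wresult)
    {
        /* Runs on a fresh kernel stack after wakeup; the stack frames of
         * my_stage1() are gone.  'wresult' says why the thread woke. */
        /* ... finish the operation using the state passed in 'param' ... */
    }

    static void
    my_stage1(void *state)
    {
        assert_wait((event_t)&my_io_done, THREAD_UNINT);
        /* Does not return: the kernel stack may be discarded while blocked. */
        thread_block_parameter(my_stage2, state);
        /* NOTREACHED */
    }
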
2769 * Block the current thread if a wait has been asserted.
2789 * Switch directly from the current thread to the
2790 * new thread, handing off our quantum if appropriate.
2792 * New thread must be runnable, and not on a run queue.
2823 * Called at splsched when a thread first receives
2828 register thread_t thread)
2839 thread_dispatch(thread, self);
2843 if (thread != THREAD_NULL)
2852 thread_quantum_init(thread_t thread)
2854 if (thread->sched_mode == TH_MODE_REALTIME) {
2855 thread->quantum_remaining = thread->realtime.computation;
2857 thread->quantum_remaining = SCHED(initial_quantum_size)(thread);
2864 sched_traditional_initial_quantum_size(thread_t thread)
2866 if ((thread == THREAD_NULL) || !(thread->sched_flags & TH_SFLAG_THROTTLED))
2921 sched_traditional_fairshare_enqueue(thread_t thread)
2927 enqueue_tail(queue, (queue_entry_t)thread);
2929 thread->runq = FS_RUNQ;
2939 thread_t thread;
2943 thread = (thread_t)dequeue_head(&fs_runq.queue);
2945 thread->runq = PROCESSOR_NULL;
2951 return (thread);
2959 sched_traditional_fairshare_queue_remove(thread_t thread)
2966 if (FS_RUNQ == thread->runq) {
2967 remqueue((queue_entry_t)thread);
2971 thread->runq = PROCESSOR_NULL;
2977 * The thread left the run queue before we could
2980 assert(thread->runq == PROCESSOR_NULL);
2992 * and return the resulting thread.
3002 thread_t thread;
3006 thread = (thread_t)dequeue_head(queue);
3009 thread = (thread_t)dequeue_tail(queue);
3012 thread->runq = PROCESSOR_NULL;
3024 return (thread);
3038 thread_t thread,
3041 queue_t queue = rq->queues + thread->sched_pri;
3045 enqueue_tail(queue, (queue_entry_t)thread);
3047 setbit(MAXPRI - thread->sched_pri, rq->bitmap);
3048 if (thread->sched_pri > rq->highq) {
3049 rq->highq = thread->sched_pri;
3054 enqueue_tail(queue, (queue_entry_t)thread);
3056 enqueue_head(queue, (queue_entry_t)thread);
3058 if (SCHED(priority_is_urgent)(thread->sched_pri))
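The enqueue fragments above follow the usual per-priority run-queue pattern; a hedged reconstruction is below. The boolean return convention (TRUE when the enqueue raised the queue's highest priority) and the count/urgency bookkeeping are partly assumptions, since those lines did not match the search.

    static boolean_t
    run_queue_enqueue(run_queue_t rq, thread_t thread, integer_t options)
    {
        queue_t queue = rq->queues + thread->sched_pri;
        boolean_t result = FALSE;

        if (queue_empty(queue)) {
            enqueue_tail(queue, (queue_entry_t)thread);

            setbit(MAXPRI - thread->sched_pri, rq->bitmap);
            if (thread->sched_pri > rq->highq) {
                rq->highq = thread->sched_pri;
                result = TRUE;
            }
        } else {
            if (options & SCHED_TAILQ)
                enqueue_tail(queue, (queue_entry_t)thread);
            else
                enqueue_head(queue, (queue_entry_t)thread);
        }

        if (SCHED(priority_is_urgent)(thread->sched_pri))
            rq->urgency++;
        rq->count++;

        return (result);
    }
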
3070 * Remove a specific thread from a runqueue.
3077 thread_t thread)
3080 remqueue((queue_entry_t)thread);
3083 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
3087 if (queue_empty(rq->queues + thread->sched_pri)) {
3089 if (thread->sched_pri != IDLEPRI)
3090 clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
3094 thread->runq = PROCESSOR_NULL;
3100 * Dispatch a thread for round-robin execution.
3108 thread_t thread)
3112 thread->chosen_processor = processor;
3114 SCHED(fairshare_enqueue)(thread);
3127 * Enqueue a thread for realtime execution.
3131 thread_t thread)
3134 uint64_t deadline = thread->realtime.deadline;
3140 enqueue_tail(queue, (queue_entry_t)thread);
3159 insque((queue_entry_t)thread, (queue_entry_t)entry);
3162 thread->runq = RT_RUNQ;
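Real-time threads are kept in deadline order (earliest deadline at the head), so the insertion above searches for the first entry with a later deadline. A simplified sketch of that search follows; the scan direction in the elided source may differ.

    if (queue_empty(queue)) {
        enqueue_tail(queue, (queue_entry_t)thread);
    } else {
        queue_entry_t entry = queue_first(queue);

        /* Skip every entry that must run no later than we do */
        while (!queue_end(queue, entry) &&
               ((thread_t)entry)->realtime.deadline <= deadline)
            entry = queue_next(entry);

        /* Insert before 'entry', i.e. after its predecessor */
        insque((queue_entry_t)thread, queue_prev(entry));
    }
    thread->runq = RT_RUNQ;
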
3174 * Dispatch a thread for realtime execution.
3182 thread_t thread)
3189 thread->chosen_processor = processor;
3192 assert(thread->bound_processor == PROCESSOR_NULL);
3197 if ( (thread->bound_processor == processor)
3202 processor->next_thread = thread;
3203 processor->current_pri = thread->sched_pri;
3204 processor->current_thmode = thread->sched_mode;
3205 processor->current_sfi_class = thread->sfi_class;
3206 processor->deadline = thread->realtime.deadline;
3226 else if (thread->realtime.deadline < processor->deadline)
3231 realtime_queue_insert(thread);
3238 processor->current_pri = thread->sched_pri;
3239 processor->current_thmode = thread->sched_mode;
3240 processor->current_sfi_class = thread->sfi_class;
3241 processor->deadline = thread->realtime.deadline;
3253 if ((processor->next_thread == THREAD_NULL) && ((processor->current_pri < thread->sched_pri) || (processor->deadline > thread->realtime.deadline))) {
3254 processor->current_pri = thread->sched_pri;
3255 processor->current_thmode = thread->sched_mode;
3256 processor->current_sfi_class = thread->sfi_class;
3257 processor->deadline = thread->realtime.deadline;
3271 /* Selected processor was too busy, just keep thread enqueued and let other processors drain it naturally. */
3298 * Enqueue thread on a processor run queue. Thread must be locked,
3310 thread_t thread,
3316 result = run_queue_enqueue(rq, thread, options);
3317 thread->runq = processor;
3318 runq_consider_incr_bound_count(processor, thread);
3328 * Dispatch a thread for execution on a
3337 thread_t thread,
3346 thread->chosen_processor = processor;
3352 thread->bound_processor == processor)
3357 processor->next_thread = thread;
3358 processor->current_pri = thread->sched_pri;
3359 processor->current_thmode = thread->sched_mode;
3360 processor->current_sfi_class = thread->sfi_class;
3381 if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri)
3385 else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->priority)) {
3386 if(SCHED(priority_is_urgent)(thread->priority) && thread->sched_pri > processor->current_pri) {
3394 SCHED(processor_enqueue)(processor, thread, options);
3401 processor->current_pri = thread->sched_pri;
3402 processor->current_thmode = thread->sched_mode;
3403 processor->current_sfi_class = thread->sfi_class;
3409 if ((processor->next_thread == THREAD_NULL) && (processor->current_pri < thread->sched_pri)) {
3410 processor->current_pri = thread->sched_pri;
3411 processor->current_thmode = thread->sched_mode;
3412 processor->current_sfi_class = thread->sfi_class;
3417 (thread->sched_pri >= processor->current_pri ||
3423 * New thread is not important enough to preempt what is running, but
3427 thread->sched_pri >= processor->current_pri ) {
3434 processor->current_pri = thread->sched_pri;
3435 processor->current_thmode = thread->sched_mode;
3436 processor->current_sfi_class = thread->sfi_class;
3501 * is not eligible to execute the thread. So we only
3505 * a runnable thread bound to a different processor in the
3614 * Choose a processor for the thread, beginning at
3620 * The thread must be locked. The pset must be locked,
3627 thread_t thread)
3687 if ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
3798 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
3807 if (thread->sched_pri > lowest_unpaired_primary_priority) {
3808 /* Move to end of active queue so that the next thread doesn't also pick it */
3813 if (thread->sched_pri > lowest_priority) {
3814 /* Move to end of active queue so that the next thread doesn't also pick it */
3819 if (thread->realtime.deadline < furthest_deadline)
3830 if (thread->sched_pri > lowest_unpaired_primary_priority) {
3831 /* Move to end of active queue so that the next thread doesn't also pick it */
3836 if (thread->sched_pri > lowest_priority) {
3837 /* Move to end of active queue so that the next thread doesn't also pick it */
3845 * priority thread, move on to next pset. Only when we have
3870 * thread, so the only options left are enqueuing on
3905 * a running thread on it during processor shutdown (for sleep),
3906 * and that thread needs to be enqueued on its runqueue to run
3920 * Dispatch thread for execution, onto an idle
3928 thread_t thread,
3934 assert(thread_runnable(thread));
3939 if (SCHED(can_update_priority)(thread))
3940 SCHED(update_priority)(thread);
3942 thread->sfi_class = sfi_thread_classify(thread);
3944 assert(thread->runq == PROCESSOR_NULL);
3946 if (thread->bound_processor == PROCESSOR_NULL) {
3950 if (thread->affinity_set != AFFINITY_SET_NULL) {
3954 pset = thread->affinity_set->aset_pset;
3957 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
3960 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
3963 if (thread->last_processor != PROCESSOR_NULL) {
3967 processor = thread->last_processor;
3970 processor = SCHED(choose_processor)(pset, processor, thread);
3973 (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0);
3982 task_t task = thread->task;
3991 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
3995 (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0);
4004 processor = thread->bound_processor;
4009 (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0);
4013 * Dispatch the thread on the chosen processor.
4016 if (thread->sched_pri >= BASEPRI_RTQUEUES)
4017 realtime_setrun(processor, thread);
4018 else if (thread->sched_mode == TH_MODE_FAIRSHARE)
4019 fairshare_setrun(processor, thread);
4021 processor_setrun(processor, thread, options);
4055 thread_t next, thread;
4061 thread = (thread_t)queue_first(queue);
4062 while (!queue_end(queue, (queue_entry_t)thread)) {
4063 next = (thread_t)queue_next((queue_entry_t)thread);
4065 if (thread->bound_processor == PROCESSOR_NULL) {
4066 remqueue((queue_entry_t)thread);
4068 thread->runq = PROCESSOR_NULL;
4070 runq_consider_decr_bound_count(processor, thread);
4081 enqueue_tail(&tqueue, (queue_entry_t)thread);
4085 thread = next;
4093 while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
4094 thread_lock(thread);
4096 thread_setrun(thread, SCHED_TAILQ);
4098 thread_unlock(thread);
4108 * Called at splsched with thread locked.
4132 * pset and thread locked
4141 thread_t thread = processor->active_thread;
4163 if (thread->state & TH_SUSP)
4167 * Current thread may not need to be preempted, but maybe needs
4170 result = sfi_thread_needs_ast(thread, NULL);
4180 * Set the scheduled priority of the specified thread.
4182 * This may cause the thread to change queues.
4188 thread_t thread,
4191 boolean_t removed = thread_run_queue_remove(thread);
4196 if (thread == cthread) {
4197 curgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
4200 thread->sched_pri = priority;
4202 if (thread == cthread) {
4203 nurgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
4210 thread_tell_urgency(nurgency, urgency_param1, urgency_param2, thread);
4215 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
4217 if (thread->state & TH_RUN) {
4218 processor_t processor = thread->last_processor;
4220 if (thread == current_thread()) {
4224 processor->current_thmode = thread->sched_mode;
4225 processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);
4231 processor->active_thread == thread )
4241 thread_t thread)
4246 if (rq != thread->runq)
4247 panic("run_queue_check: thread runq");
4249 if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
4250 panic("run_queue_check: thread sched_pri");
4252 q = &rq->queues[thread->sched_pri];
4255 if (qe == (queue_entry_t)thread)
4276 thread_t thread)
4285 if (processor == thread->runq) {
4290 runq_consider_decr_bound_count(processor, thread);
4291 run_queue_remove(rq, thread);
4295 * The thread left the run queue before we could
4298 assert(thread->runq == PROCESSOR_NULL);
4313 * Remove a thread from its current run queue and
4318 * If thread->runq is PROCESSOR_NULL, the thread will not re-enter the
4319 * run queues because the caller locked the thread. Otherwise
4320 * the thread is on a run queue, but could be chosen for dispatch
4322 * will set thread->runq to PROCESSOR_NULL.
4324 * Hence the thread select path must not rely on anything that could
4325 * be changed under the thread lock after calling this function,
4326 * most importantly thread->sched_pri.
4330 thread_t thread)
4333 processor_t processor = thread->runq;
4335 if ((thread->state & (TH_RUN|TH_WAIT)) == TH_WAIT) {
4337 assert(thread->runq == PROCESSOR_NULL);
4343 * The thread is either not on the runq,
4346 * runq is set to NULL under the pset lock, not the thread
4347 * lock, so the thread may still be in the process of being dequeued
4348 * from the runq. It will wait in invoke for the thread lock to be
4355 if (thread->sched_mode == TH_MODE_FAIRSHARE) {
4356 return SCHED(fairshare_queue_remove)(thread);
4359 if (thread->sched_pri < BASEPRI_RTQUEUES) {
4360 return SCHED(processor_queue_remove)(processor, thread);
4365 if (thread->runq != PROCESSOR_NULL) {
4371 assert(thread->runq == RT_RUNQ);
4373 remqueue((queue_entry_t)thread);
4377 thread->runq = PROCESSOR_NULL;
4392 * Locate a thread to steal from the processor and
4405 thread_t thread;
4408 thread = (thread_t)queue_first(queue);
4409 while (!queue_end(queue, (queue_entry_t)thread)) {
4410 if (thread->bound_processor == PROCESSOR_NULL) {
4411 remqueue((queue_entry_t)thread);
4413 thread->runq = PROCESSOR_NULL;
4415 runq_consider_decr_bound_count(processor, thread);
4426 return (thread);
4430 thread = (thread_t)queue_next((queue_entry_t)thread);
4440 * Locate and steal a thread, beginning
4446 * Returns the stolen thread, or THREAD_NULL on
4455 thread_t thread;
4461 thread = steal_processor_thread(processor);
4462 if (thread != THREAD_NULL) {
4468 return (thread);
4511 thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2)
4513 if (thread == NULL || (thread->state & TH_IDLE)) {
4518 } else if (thread->sched_mode == TH_MODE_REALTIME) {
4519 *arg1 = thread->realtime.period;
4520 *arg2 = thread->realtime.deadline;
4524 ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->priority <= MAXPRI_THROTTLE))) {
4526 * Background urgency applied when thread priority is MAXPRI_THROTTLE or lower and thread is not promoted
4529 *arg1 = thread->sched_pri;
4530 *arg2 = thread->priority;
4537 *arg1 = thread->effective_policy.t_through_qos;
4538 *arg2 = thread->task->effective_policy.t_through_qos;
4548 * current thread to idle without an asserted wait state.
4550 * Returns the next thread to execute if dispatched directly.
4561 thread_t thread,
4571 (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
4587 if (thread != THREAD_NULL) {
4588 /* Did idle-in-place thread wake up */
4589 if ((thread->state & (TH_WAIT|TH_SUSP)) != TH_WAIT || thread->wake_active)
4595 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -1, 0);
4606 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -2, 0);
4637 /* Something higher priority has popped up on the runqueue - redispatch this thread elsewhere */
4652 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
4661 (uintptr_t)thread_tid(thread), state, (uintptr_t)thread_tid(new_thread), 0, 0);
4697 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
4707 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
4713 * Each processor has a dedicated thread which
4738 thread_t thread;
4741 result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
4746 thread_lock(thread);
4747 thread->bound_processor = processor;
4748 processor->idle_thread = thread;
4749 thread->sched_pri = thread->priority = IDLEPRI;
4750 thread->state = (TH_RUN | TH_IDLE);
4751 thread->options |= TH_OPT_IDLE_THREAD;
4752 thread_unlock(thread);
4755 thread_deallocate(thread);
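The fragments above come from the idle-thread setup path; reassembled, the function looks roughly like this. The error check after kernel_thread_create() and the splsched()/splx() bracketing are assumptions filled in around the matched lines.

    kern_return_t
    idle_thread_create(processor_t processor)
    {
        kern_return_t result;
        thread_t thread;
        spl_t s;

        result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
        if (result != KERN_SUCCESS)
            return (result);

        s = splsched();
        thread_lock(thread);
        thread->bound_processor = processor;
        processor->idle_thread = thread;
        thread->sched_pri = thread->priority = IDLEPRI;
        thread->state = (TH_RUN | TH_IDLE);
        thread->options |= TH_OPT_IDLE_THREAD;
        thread_unlock(thread);
        splx(s);

        /* Drop the create reference; the processor now holds the thread */
        thread_deallocate(thread);

        return (KERN_SUCCESS);
    }
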
4771 thread_t thread;
4774 (void *)SCHED(maintenance_continuation), MAXPRI_KERNEL, &thread);
4778 thread_deallocate(thread);
4782 * initialize our own thread after being switched
4785 * The current thread is the only other thread
4880 * maintenance thread on deadline expiration. Must be invoked at an interval
4950 * the candidate scan, but the thread is locked for the update.
4961 /* Returns TRUE if thread was added, FALSE if thread_update_array is full */
4963 thread_update_add_thread(thread_t thread)
4968 thread_update_array[thread_update_count++] = thread;
4969 thread_reference_internal(thread);
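As described above, this helper batches threads whose priorities need recomputation during the run-queue scan. Reconstructed from the matched lines, with the capacity constant THREAD_UPDATE_SIZE assumed:

    /* Returns TRUE if thread was added, FALSE if thread_update_array is full */
    static boolean_t
    thread_update_add_thread(thread_t thread)
    {
        if (thread_update_count == THREAD_UPDATE_SIZE)
            return (FALSE);

        thread_update_array[thread_update_count++] = thread;
        thread_reference_internal(thread);

        return (TRUE);
    }
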
4978 thread_t thread = thread_update_array[--thread_update_count];
4982 thread_lock(thread);
4983 if (!(thread->state & (TH_WAIT)) && (SCHED(can_update_priority)(thread))) {
4984 SCHED(update_priority)(thread);
4986 thread_unlock(thread);
4989 thread_deallocate(thread);
5004 register thread_t thread;
5009 queue_iterate(q, thread, thread_t, links) {
5010 if ( thread->sched_stamp != sched_tick &&
5011 (thread->sched_mode == TH_MODE_TIMESHARE) ) {
5012 if (thread_update_add_thread(thread) == FALSE)
5036 thread_t thread;
5058 thread = processor->idle_thread;
5059 if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
5060 if (thread_update_add_thread(thread) == FALSE) {
5075 thread_eager_preemption(thread_t thread)
5077 return ((thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0);
5081 thread_set_eager_preempt(thread_t thread)
5090 thread_lock(thread);
5091 thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
5093 if (thread == current_thread()) {
5096 thread_unlock(thread);
5101 p = thread->last_processor;
5104 p->active_thread == thread) {
5108 thread_unlock(thread);
5115 thread_clear_eager_preempt(thread_t thread)
5120 thread_lock(thread);
5122 thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
5124 thread_unlock(thread);
5190 thread_t thread)
5192 return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN);