Lines Matching refs:thread

96 #include <kern/thread.h>
186 static inline void runq_consider_incr_bound_count(processor_t processor, thread_t thread)
188 if (thread->bound_processor == PROCESSOR_NULL)
191 assert(thread->bound_processor == processor);
200 static inline void runq_consider_decr_bound_count(processor_t processor, thread_t thread)
202 if (thread->bound_processor == PROCESSOR_NULL)
205 assert(thread->bound_processor == processor);
230 thread_t thread,
235 thread_t thread,
240 thread_t thread,
259 thread_t thread,
265 thread_t thread,
271 thread_t thread);
328 sched_traditional_initial_quantum_size(thread_t thread);
353 thread_t thread);
776 thread_t thread = p0;
780 thread_lock(thread);
781 if (--thread->wait_timer_active == 0) {
782 if (thread->wait_timer_is_set) {
783 thread->wait_timer_is_set = FALSE;
784 clear_wait_internal(thread, THREAD_TIMED_OUT);
787 thread_unlock(thread);
796 * Set a timer for the current thread, if the thread
805 thread_t thread = current_thread();
810 thread_lock(thread);
811 if ((thread->state & TH_WAIT) != 0) {
813 if (!timer_call_enter(&thread->wait_timer, deadline, thread->sched_pri >= BASEPRI_RTQUEUES ? TIMER_CALL_CRITICAL : 0))
814 thread->wait_timer_active++;
815 thread->wait_timer_is_set = TRUE;
817 thread_unlock(thread);
825 thread_t thread = current_thread();
829 thread_lock(thread);
830 if ((thread->state & TH_WAIT) != 0) {
831 if (!timer_call_enter(&thread->wait_timer, deadline, thread->sched_pri >= BASEPRI_RTQUEUES ? TIMER_CALL_CRITICAL : 0))
832 thread->wait_timer_active++;
833 thread->wait_timer_is_set = TRUE;
835 thread_unlock(thread);
842 thread_t thread = current_thread();
846 thread_lock(thread);
847 if (thread->wait_timer_is_set) {
848 if (timer_call_cancel(&thread->wait_timer))
849 thread->wait_timer_active--;
850 thread->wait_timer_is_set = FALSE;
852 thread_unlock(thread);
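
A note on the pattern above: wait_timer_is_set marks the timer armed, while wait_timer_active counts outstanding expiry callouts, so a cancel that loses the race with an in-flight callout (lines 842-852 vs. 776-787) still leaves the bookkeeping consistent. A minimal user-space sketch of the same idea, with hypothetical names and a pthread mutex standing in for thread_lock():

    #include <stdbool.h>
    #include <pthread.h>

    struct wait_timer_state {
        pthread_mutex_t lock;   /* stands in for thread_lock() */
        int  active;            /* expiry callouts still outstanding */
        bool is_set;            /* armed and not yet cancelled */
    };

    /* Arm the timer.  (The kernel increments active only when
     * timer_call_enter() reports a fresh enqueue; simplified here.) */
    static void timer_arm(struct wait_timer_state *st)
    {
        pthread_mutex_lock(&st->lock);
        st->active++;
        st->is_set = true;
        pthread_mutex_unlock(&st->lock);
    }

    /* Cancel: drop the count only if the callout was dequeued in time,
     * mirroring "if (timer_call_cancel(...)) wait_timer_active--". */
    static void timer_cancel(struct wait_timer_state *st, bool dequeued)
    {
        pthread_mutex_lock(&st->lock);
        if (st->is_set) {
            if (dequeued)
                st->active--;
            st->is_set = false;  /* a live callout will see this and bail */
        }
        pthread_mutex_unlock(&st->lock);
    }

    /* Expiry callout: only the last one out, and only if still set,
     * delivers the timeout -- the shape of thread_timer_expire(). */
    static void timer_expire(struct wait_timer_state *st)
    {
        pthread_mutex_lock(&st->lock);
        if (--st->active == 0 && st->is_set) {
            st->is_set = false;
            /* deliver THREAD_TIMED_OUT here */
        }
        pthread_mutex_unlock(&st->lock);
    }
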
861 * Unblock thread on wake up.
863 * Returns TRUE if the thread is still running.
869 thread_t thread,
878 thread->wait_result = wresult;
883 if (thread->wait_timer_is_set) {
884 if (timer_call_cancel(&thread->wait_timer))
885 thread->wait_timer_active--;
886 thread->wait_timer_is_set = FALSE;
893 thread->state &= ~(TH_WAIT|TH_UNINT);
895 if (!(thread->state & TH_RUN)) {
896 thread->state |= TH_RUN;
898 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
904 if (thread->sched_mode == TH_MODE_TIMESHARE)
912 if (thread->state & TH_IDLE) {
913 processor_t processor = thread->last_processor;
919 assert((thread->state & TH_IDLE) == 0);
928 if (thread->sched_mode == TH_MODE_REALTIME) {
929 thread->realtime.deadline = mach_absolute_time();
930 thread->realtime.deadline += thread->realtime.constraint;
936 thread->current_quantum = 0;
937 thread->computation_metered = 0;
938 thread->reason = AST_NONE;
941 * We also account for "double hop" thread signaling via
942 * the thread callout infrastructure.
949 ledger_credit(thread->t_ledger, task_ledgers.interrupt_wakeups, 1);
953 thread->thread_timer_wakeups_bin_1++;
956 thread->thread_timer_wakeups_bin_2++;
959 ledger_credit(thread->t_ledger, task_ledgers.platform_idle_wakeups, 1);
963 ledger_credit(thread->t_ledger, task_ledgers.interrupt_wakeups, 1);
964 thread->thread_callout_interrupt_wakeups++;
966 ledger_credit(thread->t_ledger, task_ledgers.platform_idle_wakeups, 1);
967 thread->thread_callout_platform_idle_wakeups++;
972 if (thread_get_tag_internal(thread) & THREAD_TAG_CALLOUT) {
973 thread->callout_woken_from_icontext = aticontext;
974 thread->callout_woken_from_platform_idle = pidle;
977 /* Event should only be triggered if thread is not already running */
981 (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result, 0, 0);
984 DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info);
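
Lines 928-930 show the realtime wakeup rule: the deadline is re-derived from "now" plus the thread's constraint rather than carried over from before the wait. A sketch of that arithmetic with POSIX time standing in for mach_absolute_time() (field names are hypothetical):

    #include <stdint.h>
    #include <time.h>

    struct rt_params {
        uint64_t constraint_ns;  /* latency budget granted per wakeup */
        uint64_t deadline_ns;    /* absolute deadline, re-armed on unblock */
    };

    static void rt_rearm_deadline(struct rt_params *rt)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        uint64_t now = (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
        rt->deadline_ns = now + rt->constraint_ns;  /* deadline = now + constraint */
    }
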
992 * Unblock and dispatch thread.
994 * thread lock held, IPC locks may be held.
995 * thread must have been pulled from wait queue under same lock hold.
1002 thread_t thread,
1005 assert(thread->at_safe_point == FALSE);
1006 assert(thread->wait_event == NO_EVENT64);
1007 assert(thread->wait_queue == WAIT_QUEUE_NULL);
1009 if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) {
1010 if (!thread_unblock(thread, wresult))
1011 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
1022 * Mark a thread as waiting. If, given the circumstances,
1026 * at splsched() and thread is locked.
1031 thread_t thread,
1036 assert(thread == current_thread());
1039 * The thread may have certain types of interrupts/aborts masked
1044 if (interruptible > (thread->options & TH_OPT_INTMASK))
1045 interruptible = thread->options & TH_OPT_INTMASK;
1050 !(thread->sched_flags & TH_SFLAG_ABORT) ||
1052 (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) {
1054 if ( !(thread->state & TH_TERMINATE))
1057 thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
1058 thread->at_safe_point = at_safe_point;
1059 return (thread->wait_result = THREAD_WAITING);
1062 if (thread->sched_flags & TH_SFLAG_ABORTSAFELY)
1063 thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
1065 return (thread->wait_result = THREAD_INTERRUPTED);
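
Lines 1044-1045 clamp the caller's requested interruptibility against the thread's TH_OPT_INTMASK, so a wait can never be more interruptible than the thread permits. The rule reduces to taking a minimum, sketched here with hypothetical enum names:

    /* Ordered least to most interruptible, mirroring
     * THREAD_UNINT < THREAD_INTERRUPTIBLE < THREAD_ABORTSAFE. */
    enum wait_interrupt { WAIT_UNINT, WAIT_INTERRUPTIBLE, WAIT_ABORTSAFE };

    static enum wait_interrupt
    clamp_interruptible(enum wait_interrupt requested, enum wait_interrupt mask)
    {
        return (requested > mask) ? mask : requested;
    }
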
1072 * current thread. The effective value of any
1079 * The old interrupt level for the thread.
1086 thread_t thread = current_thread();
1087 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
1089 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
1105 thread_t thread;
1111 thread = current_thread();
1113 return (thread == NULL || wait_queue_assert_possible(thread));
1119 * Assert that the current thread is about to go to
1148 thread_t thread = current_thread();
1159 thread_lock(thread);
1168 interruptible, deadline, thread);
1170 thread_unlock(thread);
1183 thread_t thread = current_thread();
1193 thread_lock(thread);
1200 interruptible, deadline, thread);
1202 thread_unlock(thread);
1212 * Cause the current thread to wait until the specified event
1240 * Cause the current thread to wait until the specified event
1267 * Cause the current thread to wait until the specified event
1291 * Force a preemption point for a thread and wait
1295 * The thread must enter a wait state and stop via a
1302 thread_t thread)
1307 wake_lock(thread);
1308 thread_lock(thread);
1310 while (thread->state & TH_SUSP) {
1311 thread->wake_active = TRUE;
1312 thread_unlock(thread);
1314 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1315 wake_unlock(thread);
1325 wake_lock(thread);
1326 thread_lock(thread);
1329 thread->state |= TH_SUSP;
1331 while (thread->state & TH_RUN) {
1332 processor_t processor = thread->last_processor;
1334 if (processor != PROCESSOR_NULL && processor->active_thread == thread)
1337 thread->wake_active = TRUE;
1338 thread_unlock(thread);
1340 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
1341 wake_unlock(thread);
1348 thread_unstop(thread);
1353 wake_lock(thread);
1354 thread_lock(thread);
1357 thread_unlock(thread);
1358 wake_unlock(thread);
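
The thread_stop() fragments (1310-1341) form a retry handshake: while the target is still running, raise its wake_active flag, sleep on that flag's address, and re-test after every wakeup. A condition-variable analogue of the loop, with hypothetical names:

    #include <stdbool.h>
    #include <pthread.h>

    struct target {
        pthread_mutex_t lock;        /* stands in for wake_lock + thread_lock */
        pthread_cond_t  stopped_cv;  /* stands in for thread_wakeup(&wake_active) */
        bool running;                /* stands in for TH_RUN */
        bool wake_active;            /* a waiter wants a state-change notice */
    };

    /* Waiter side: the while loop re-checks after each wakeup, just as
     * thread_stop() re-takes its locks and re-tests TH_RUN. */
    static void wait_until_stopped(struct target *t)
    {
        pthread_mutex_lock(&t->lock);
        while (t->running) {
            t->wake_active = true;
            pthread_cond_wait(&t->stopped_cv, &t->lock);
        }
        pthread_mutex_unlock(&t->lock);
    }

    /* Target side: on blocking, notify any waiter exactly once. */
    static void notify_stopped(struct target *t)
    {
        pthread_mutex_lock(&t->lock);
        t->running = false;
        if (t->wake_active) {
            t->wake_active = false;
            pthread_cond_broadcast(&t->stopped_cv);
        }
        pthread_mutex_unlock(&t->lock);
    }
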
1368 * the thread running if appropriate.
1374 thread_t thread)
1378 wake_lock(thread);
1379 thread_lock(thread);
1381 if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) == TH_SUSP) {
1382 thread->state &= ~TH_SUSP;
1383 thread_unblock(thread, THREAD_AWAKENED);
1385 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
1388 if (thread->state & TH_SUSP) {
1389 thread->state &= ~TH_SUSP;
1391 if (thread->wake_active) {
1392 thread->wake_active = FALSE;
1393 thread_unlock(thread);
1395 thread_wakeup(&thread->wake_active);
1396 wake_unlock(thread);
1403 thread_unlock(thread);
1404 wake_unlock(thread);
1412 thread_isoncpu(thread_t thread)
1414 processor_t processor = thread->last_processor;
1416 return ((processor != PROCESSOR_NULL) && (processor->active_thread == thread));
1421 * Wait for a thread to stop running. (non-interruptible)
1426 thread_t thread,
1434 wake_lock(thread);
1435 thread_lock(thread);
1439 * desired, wait until not runnable. Assumption: if thread is
1444 while ((oncpu = thread_isoncpu(thread)) ||
1445 (until_not_runnable && (thread->state & TH_RUN))) {
1448 assert(thread->state & TH_RUN);
1449 processor = thread->last_processor;
1453 thread->wake_active = TRUE;
1454 thread_unlock(thread);
1456 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1457 wake_unlock(thread);
1464 wake_lock(thread);
1465 thread_lock(thread);
1468 thread_unlock(thread);
1469 wake_unlock(thread);
1476 * Clear the wait condition for the specified thread.
1477 * Start the thread executing if that is appropriate.
1479 * thread thread to awaken
1480 * result Wakeup result the thread should see
1483 * the thread is locked.
1485 * KERN_SUCCESS thread was rousted out of a wait
1486 * KERN_FAILURE thread was waiting but could not be rousted
1487 * KERN_NOT_WAITING thread was not waiting
1491 thread_t thread,
1494 wait_queue_t wq = thread->wait_queue;
1498 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
1503 wait_queue_pull_thread_locked(wq, thread, TRUE);
1504 /* wait queue unlocked, thread still locked */
1507 thread_unlock(thread);
1510 thread_lock(thread);
1511 if (wq != thread->wait_queue)
1518 return (thread_go(thread, wresult));
1521 panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
1522 thread, wq, cpu_number());
1531 * Clear the wait condition for the specified thread. Start the thread
1535 * thread thread to awaken
1536 * result Wakeup result the thread should see
1540 thread_t thread,
1547 thread_lock(thread);
1548 ret = clear_wait_internal(thread, result);
1549 thread_unlock(thread);
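
clear_wait_internal() arrives holding the thread lock but needs the wait-queue lock, which is ordered first; lines 1503-1522 resolve the inversion with a try-lock, a back-off that drops and re-takes the thread lock, a re-read of thread->wait_queue, and finally a panic when the retries exhaust. A user-space sketch of that shape (the retry bound and names are hypothetical):

    #include <sched.h>
    #include <stddef.h>
    #include <pthread.h>

    #define CLEAR_WAIT_ATTEMPTS 1024    /* plays the role of LockTimeOut */

    static int clear_wait_sketch(pthread_mutex_t *thread_lock,
                                 pthread_mutex_t *volatile *wqp)
    {
        for (int i = 0; i < CLEAR_WAIT_ATTEMPTS; i++) {
            pthread_mutex_t *wq = *wqp;         /* thread->wait_queue */
            if (wq == NULL)
                return 0;                       /* no longer waiting */
            if (pthread_mutex_trylock(wq) == 0) {
                /* both locks held despite the inverted order:
                 * pull the thread from the queue here */
                pthread_mutex_unlock(wq);
                return 1;
            }
            pthread_mutex_unlock(thread_lock);  /* let the other side finish */
            sched_yield();
            pthread_mutex_lock(thread_lock);
            /* *wqp may have changed while unlocked; loop re-reads it */
        }
        return -1;                              /* the kernel panics here */
    }
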
1593 * Force the current thread to execute on the specified processor.
1623 * Select a new thread for the current processor to execute.
1625 * May select the current thread, which must be locked.
1629 thread_t thread,
1642 if (SCHED(can_update_priority)(thread))
1643 SCHED(update_priority)(thread);
1645 processor->current_pri = thread->sched_pri;
1646 processor->current_thmode = thread->sched_mode;
1658 * Test to see if the current thread should continue
1663 if ( ((thread->state & ~TH_SUSP) == TH_RUN) &&
1664 (thread->sched_pri >= BASEPRI_RTQUEUES ||
1667 (thread->bound_processor == PROCESSOR_NULL ||
1668 thread->bound_processor == processor) &&
1669 (thread->affinity_set == AFFINITY_SET_NULL ||
1670 thread->affinity_set->aset_pset == pset) ) {
1671 if ( thread->sched_pri >= BASEPRI_RTQUEUES &&
1679 thread = (thread_t)dequeue_head(q);
1680 thread->runq = PROCESSOR_NULL;
1688 processor->deadline = thread->realtime.deadline;
1692 return (thread);
1695 if (!inactive_state && (thread->sched_mode != TH_MODE_FAIRSHARE || SCHED(fairshare_runq_count)() == 0) && (rt_runq.count == 0 || BASEPRI_RTQUEUES < thread->sched_pri) &&
1696 (new_thread = SCHED(choose_thread)(processor, thread->sched_mode == TH_MODE_FAIRSHARE ? MINPRI : thread->sched_pri)) == THREAD_NULL) {
1700 /* I am the highest priority runnable (non-idle) thread */
1710 return (thread);
1732 thread = (thread_t)dequeue_head(&rt_runq.queue);
1734 thread->runq = PROCESSOR_NULL;
1740 processor->deadline = thread->realtime.deadline;
1743 return (thread);
1821 * Choose idle thread if fast idle is not possible.
1823 if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES)
1828 * context switch. Return dispatched thread,
1829 * else check again for a runnable thread.
1831 new_thread = thread_select_idle(thread, processor);
1837 * thread can start running on another processor without
1853 * Idle the processor using the current thread context.
1855 * Called with thread locked, then dropped and relocked.
1859 thread_t thread,
1864 if (thread->sched_mode == TH_MODE_TIMESHARE)
1868 thread->state |= TH_IDLE;
1872 /* Reload precise timing global policy to thread-local policy */
1873 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
1875 thread_unlock(thread);
1878 * Switch execution timing to processor idle thread.
1881 thread->last_run_time = processor->last_dispatch;
1891 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
1899 spllo(); new_thread = processor_idle(thread, processor);
1904 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
1906 thread_lock(thread);
1910 * to the original priority of the thread so that the
1912 * switch to the idle thread.
1914 if (thread->sched_mode == TH_MODE_REALTIME)
1915 thread_tell_urgency(THREAD_URGENCY_REAL_TIME, thread->realtime.period, thread->realtime.deadline);
1919 else if ((thread->sched_pri <= MAXPRI_THROTTLE) &&
1920 (thread->priority <= MAXPRI_THROTTLE))
1921 thread_tell_urgency(THREAD_URGENCY_BACKGROUND, thread->sched_pri, thread->priority);
1923 thread_tell_urgency(THREAD_URGENCY_NORMAL, thread->sched_pri, thread->priority);
1926 * If awakened, switch to thread timer and start a new quantum.
1927 * Otherwise skip; we will context switch to another thread or return here.
1929 if (!(thread->state & TH_WAIT)) {
1931 thread_timer_event(processor->last_dispatch, &thread->system_timer);
1932 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
1934 thread_quantum_init(thread);
1935 thread->last_quantum_refill_time = processor->last_dispatch;
1937 processor->quantum_end = processor->last_dispatch + thread->current_quantum;
1938 timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end, TIMER_CALL_CRITICAL);
1941 thread->computation_epoch = processor->last_dispatch;
1944 thread->state &= ~TH_IDLE;
1947 if (thread->sched_mode == TH_MODE_TIMESHARE)
1960 thread_t thread;
1962 thread = choose_thread(processor, runq_for_processor(processor), priority);
1963 if (thread != THREAD_NULL) {
1964 runq_consider_decr_bound_count(processor, thread);
1967 return thread;
1977 * Locate a thread to execute from the processor run queue
1978 * and return it. Only choose a thread with greater or equal
1992 thread_t thread;
1995 thread = (thread_t)queue_first(queue);
1996 while (!queue_end(queue, (queue_entry_t)thread)) {
1997 if (thread->bound_processor == PROCESSOR_NULL ||
1998 thread->bound_processor == processor) {
1999 remqueue((queue_entry_t)thread);
2001 thread->runq = PROCESSOR_NULL;
2013 return (thread);
2017 thread = (thread_t)queue_next((queue_entry_t)thread);
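
The dequeue loop at 1995-2017 takes the first thread in the level that is either unbound or bound to this processor, skipping over threads bound elsewhere. The scan reduced to its essentials (hypothetical types):

    #include <stddef.h>

    struct qthread {
        struct qthread *next;
        int bound_cpu;            /* -1 when unbound */
    };

    /* First-eligible scan: bound-elsewhere threads stay queued. */
    static struct qthread *
    choose_eligible(struct qthread *head, int this_cpu)
    {
        for (struct qthread *t = head; t != NULL; t = t->next) {
            if (t->bound_cpu == -1 || t->bound_cpu == this_cpu)
                return t;         /* caller unlinks it, clears t->runq */
        }
        return NULL;
    }
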
2029 * Perform a context switch and start executing the new thread.
2031 * Returns FALSE on failure, and the thread is re-dispatched.
2036 #define funnel_release_check(thread, debug) \
2038 if ((thread)->funnel_state & TH_FN_OWNED) { \
2039 (thread)->funnel_state = TH_FN_REFUNNEL; \
2041 (thread)->funnel_lock, (debug), 0, 0, 0); \
2042 funnel_unlock((thread)->funnel_lock); \
2046 #define funnel_refunnel_check(thread, debug) \
2048 if ((thread)->funnel_state & TH_FN_REFUNNEL) { \
2049 kern_return_t result = (thread)->wait_result; \
2051 (thread)->funnel_state = 0; \
2053 (thread)->funnel_lock, (debug), 0, 0, 0); \
2054 funnel_lock((thread)->funnel_lock); \
2056 (thread)->funnel_lock, (debug), 0, 0, 0); \
2057 (thread)->funnel_state = TH_FN_OWNED; \
2058 (thread)->wait_result = result; \
2065 register thread_t thread,
2082 * Mark thread interruptible.
2084 thread_lock(thread);
2085 thread->state &= ~TH_UNINT;
2088 assert(thread_runnable(thread));
2091 /* Reload precise timing global policy to thread-local policy */
2092 thread->precise_user_kernel_time = use_precise_user_kernel_time(thread);
2102 if (!thread->kernel_stack) {
2106 * that of the other thread.
2108 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
2114 continuation = thread->continuation;
2115 parameter = thread->parameter;
2118 processor->active_thread = thread;
2119 processor->current_pri = thread->sched_pri;
2120 processor->current_thmode = thread->sched_mode;
2121 if (thread->last_processor != processor && thread->last_processor != NULL) {
2122 if (thread->last_processor->processor_set != processor->processor_set)
2123 thread->ps_switch++;
2124 thread->p_switch++;
2126 thread->last_processor = processor;
2127 thread->c_switch++;
2128 ast_context(thread);
2129 thread_unlock(thread);
2135 thread_timer_event(processor->last_dispatch, &thread->system_timer);
2136 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2142 if (!thread->precise_user_kernel_time) {
2150 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2152 if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
2154 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2157 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2159 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2162 stack_handoff(self, thread);
2166 thread_dispatch(self, thread);
2168 thread->continuation = thread->parameter = NULL;
2172 funnel_refunnel_check(thread, 2);
2176 call_continuation(continuation, parameter, thread->wait_result);
2179 else if (thread == self) {
2180 /* same thread but with continuation */
2187 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2200 * Check that the other thread has a stack
2202 if (!thread->kernel_stack) {
2204 if (!stack_alloc_try(thread)) {
2206 thread_unlock(thread);
2207 thread_stack_enqueue(thread);
2211 else if (thread == self) {
2218 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2228 processor->active_thread = thread;
2229 processor->current_pri = thread->sched_pri;
2230 processor->current_thmode = thread->sched_mode;
2231 if (thread->last_processor != processor && thread->last_processor != NULL) {
2232 if (thread->last_processor->processor_set != processor->processor_set)
2233 thread->ps_switch++;
2234 thread->p_switch++;
2236 thread->last_processor = processor;
2237 thread->c_switch++;
2238 ast_context(thread);
2239 thread_unlock(thread);
2248 thread_timer_event(processor->last_dispatch, &thread->system_timer);
2249 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
2255 if (!thread->precise_user_kernel_time) {
2264 self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0);
2266 if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) {
2268 (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0);
2271 DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info);
2273 SCHED_STATS_CSW(processor, self->reason, self->sched_pri, thread->sched_pri);
2281 thread = machine_switch_context(self, continuation, thread);
2283 TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
2290 thread_dispatch(thread, self);
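
thread_invoke() shows the continuation optimization: a thread that blocked with a continuation (2114-2115) needs no saved register state, so the switch can hand off the current stack and jump straight to the continuation (2168-2176) instead of doing a full machine_switch_context(). The core of the idea as a sketch, with hypothetical types:

    #include <stddef.h>

    typedef void (*thread_continue_fn)(void *parameter, int wait_result);

    struct cthread {
        thread_continue_fn continuation;  /* resume point; no stack image */
        void *parameter;
        int   wait_result;
    };

    /* Clear the fields first (a nested block may install new ones),
     * then call the continuation.  In the kernel, call_continuation()
     * never returns; here fn simply returns to the caller. */
    static void run_continuation(struct cthread *t)
    {
        thread_continue_fn fn = t->continuation;
        void *param = t->parameter;
        t->continuation = NULL;
        t->parameter = NULL;
        fn(param, t->wait_result);
    }
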
2308 * Handle threads at context switch. Re-dispatch other thread
2310 * special actions. Update quantum for other thread and begin
2317 thread_t thread,
2322 if (thread != THREAD_NULL) {
2327 if (thread->continuation != NULL && thread->kernel_stack != 0)
2328 stack_free(thread);
2330 if (!(thread->state & TH_IDLE)) {
2338 consumed = thread->current_quantum - remainder;
2340 if ((thread->reason & AST_LEDGER) == 0)
2342 * Bill CPU time to both the individual thread
2345 ledger_credit(thread->t_ledger,
2347 ledger_credit(thread->t_threadledger,
2350 wake_lock(thread);
2351 thread_lock(thread);
2358 thread->current_quantum = (uint32_t)remainder;
2360 thread->current_quantum = 0;
2362 if (thread->sched_mode == TH_MODE_REALTIME) {
2364 * Cancel the deadline if the thread has
2367 if (thread->current_quantum == 0) {
2368 thread->realtime.deadline = UINT64_MAX;
2369 thread->reason |= AST_QUANTUM;
2378 if (thread->current_quantum < min_std_quantum) {
2379 thread->reason |= AST_QUANTUM;
2380 thread->current_quantum += SCHED(initial_quantum_size)(thread);
2389 if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
2390 self->current_quantum = thread->current_quantum;
2391 thread->reason |= AST_QUANTUM;
2392 thread->current_quantum = 0;
2395 thread->computation_metered += (processor->last_dispatch - thread->computation_epoch);
2397 if (!(thread->state & TH_WAIT)) {
2401 if (thread->reason & AST_QUANTUM)
2402 thread_setrun(thread, SCHED_TAILQ);
2404 if (thread->reason & AST_PREEMPT)
2405 thread_setrun(thread, SCHED_HEADQ);
2407 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
2409 thread->reason = AST_NONE;
2411 if (thread->wake_active) {
2412 thread->wake_active = FALSE;
2413 thread_unlock(thread);
2415 thread_wakeup(&thread->wake_active);
2418 thread_unlock(thread);
2420 wake_unlock(thread);
2430 * the thread to the termination queue
2432 if ((thread->state & (TH_TERMINATE|TH_TERMINATE2)) == TH_TERMINATE) {
2434 thread->state |= TH_TERMINATE2;
2437 thread->state &= ~TH_RUN;
2439 if (thread->sched_mode == TH_MODE_TIMESHARE)
2443 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
2445 if (thread->wake_active) {
2446 thread->wake_active = FALSE;
2447 thread_unlock(thread);
2449 thread_wakeup(&thread->wake_active);
2452 thread_unlock(thread);
2454 wake_unlock(thread);
2457 thread_terminate_enqueue(thread);
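
The accounting in thread_dispatch() (2338-2380) works in units of quantum: compute what was consumed, keep the remainder for the next dispatch, and if the remainder is too small to be useful, tag the thread with AST_QUANTUM and stack a fresh quantum on top. A simplified sketch of that bookkeeping (constants are hypothetical):

    #include <stdint.h>

    #define MIN_STD_QUANTUM  1000u   /* smallest useful remainder, in ticks */
    #define STD_QUANTUM     10000u   /* refill size, in ticks */

    struct quantum_state {
        uint32_t current_quantum;    /* ticks left in this round */
        int      quantum_expired;    /* stands in for AST_QUANTUM */
    };

    static void quantum_account(struct quantum_state *q,
                                uint32_t granted, uint32_t remainder)
    {
        uint32_t consumed = granted - remainder;   /* billed to the ledgers */
        (void)consumed;
        q->current_quantum = remainder;
        if (q->current_quantum < MIN_STD_QUANTUM) {
            q->quantum_expired = 1;                /* requeue at the tail */
            q->current_quantum += STD_QUANTUM;
        }
    }
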
2512 * attempt to discard the thread's kernel stack. When the
2513 * thread resumes, it will execute the continuation function
2569 * Block the current thread if a wait has been asserted.
2589 * Switch directly from the current thread to the
2590 * new thread, handing off our quantum if appropriate.
2592 * New thread must be runnable, and not on a run queue.
2627 * Called at splsched when a thread first receives
2632 register thread_t thread)
2643 thread_dispatch(thread, self);
2649 if (thread != THREAD_NULL)
2658 thread_quantum_init(thread_t thread)
2660 if (thread->sched_mode == TH_MODE_REALTIME) {
2661 thread->current_quantum = thread->realtime.computation;
2663 thread->current_quantum = SCHED(initial_quantum_size)(thread);
2669 sched_traditional_initial_quantum_size(thread_t thread)
2671 if ((thread == THREAD_NULL) || thread->priority > MAXPRI_THROTTLE)
2728 sched_traditional_fairshare_enqueue(thread_t thread)
2734 enqueue_tail(queue, (queue_entry_t)thread);
2736 thread->runq = FS_RUNQ;
2746 thread_t thread;
2750 thread = (thread_t)dequeue_head(&fs_runq.queue);
2752 thread->runq = PROCESSOR_NULL;
2758 return (thread);
2766 sched_traditional_fairshare_queue_remove(thread_t thread)
2773 if (FS_RUNQ == thread->runq) {
2774 remqueue((queue_entry_t)thread);
2778 thread->runq = PROCESSOR_NULL;
2784 * The thread left the run queue before we could
2787 assert(thread->runq == PROCESSOR_NULL);
2799 * and return the resulting thread.
2809 thread_t thread;
2813 thread = (thread_t)dequeue_head(queue);
2816 thread = (thread_t)dequeue_tail(queue);
2819 thread->runq = PROCESSOR_NULL;
2831 return (thread);
2845 thread_t thread,
2848 queue_t queue = rq->queues + thread->sched_pri;
2852 enqueue_tail(queue, (queue_entry_t)thread);
2854 setbit(MAXPRI - thread->sched_pri, rq->bitmap);
2855 if (thread->sched_pri > rq->highq) {
2856 rq->highq = thread->sched_pri;
2862 enqueue_tail(queue, (queue_entry_t)thread);
2864 enqueue_head(queue, (queue_entry_t)thread);
2866 if (SCHED(priority_is_urgent)(thread->sched_pri))
2878 * Remove a specific thread from a runqueue.
2885 thread_t thread)
2888 remqueue((queue_entry_t)thread);
2891 if (SCHED(priority_is_urgent)(thread->sched_pri)) {
2895 if (queue_empty(rq->queues + thread->sched_pri)) {
2897 if (thread->sched_pri != IDLEPRI)
2898 clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
2902 thread->runq = PROCESSOR_NULL;
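
run_queue_enqueue()/run_queue_remove() (2845-2902) maintain a bitmap alongside the per-priority queues: bit MAXPRI - pri is set while level pri is non-empty, and highq caches the best level so dequeue stays cheap. A self-contained sketch of that structure (sizes and names are hypothetical; counters stand in for the queues):

    #include <stdint.h>
    #include <strings.h>                    /* ffs() */

    #define NPRI 128                        /* priority levels 0..127 */
    #define NWORDS ((NPRI + 31) / 32)

    struct bitmap_runq {
        uint32_t bitmap[NWORDS];            /* bit set => level non-empty */
        int highq;                          /* highest non-empty level, -1 if none */
        int count[NPRI];                    /* stand-in for the queues */
    };

    /* Bits are indexed (NPRI-1) - pri, so scanning words from index 0
     * and taking ffs() finds the highest priority first -- the same
     * trick as setbit(MAXPRI - pri) in the kernel. */
    static void rq_enqueue(struct bitmap_runq *rq, int pri)
    {
        if (rq->count[pri]++ == 0) {
            int bit = (NPRI - 1) - pri;
            rq->bitmap[bit / 32] |= 1u << (bit % 32);
        }
        if (pri > rq->highq)
            rq->highq = pri;
    }

    static void rq_remove(struct bitmap_runq *rq, int pri)
    {
        if (--rq->count[pri] != 0)
            return;
        int bit = (NPRI - 1) - pri;
        rq->bitmap[bit / 32] &= ~(1u << (bit % 32));
        if (pri != rq->highq)
            return;
        rq->highq = -1;                     /* rescan for the next level */
        for (int w = 0; w < NWORDS; w++) {
            int b = ffs((int)rq->bitmap[w]);
            if (b) {
                rq->highq = (NPRI - 1) - (w * 32 + (b - 1));
                break;
            }
        }
    }
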
2908 * Dispatch a thread for round-robin execution.
2916 thread_t thread)
2920 thread->chosen_processor = processor;
2922 SCHED(fairshare_enqueue)(thread);
2934 * Enqueue a thread for realtime execution.
2938 thread_t thread)
2941 uint64_t deadline = thread->realtime.deadline;
2947 enqueue_tail(queue, (queue_entry_t)thread);
2966 insque((queue_entry_t)thread, (queue_entry_t)entry);
2969 thread->runq = RT_RUNQ;
2981 * Dispatch a thread for realtime execution.
2989 thread_t thread)
2993 thread->chosen_processor = processor;
2998 if ( (thread->bound_processor == processor)
3003 processor->next_thread = thread;
3004 processor->deadline = thread->realtime.deadline;
3013 if (realtime_queue_insert(thread)) {
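
realtime_queue_insert() (2941-2969) keeps the realtime queue sorted by absolute deadline: an empty queue takes the thread at the tail, otherwise the scan stops at the first entry with a later deadline and insque() links the new thread in front of it. The same earliest-deadline-first insert over a singly linked list (hypothetical types):

    #include <stdint.h>
    #include <stddef.h>

    struct rt_thread {
        struct rt_thread *next;
        uint64_t deadline;          /* absolute; earliest runs first */
    };

    /* Ties go behind existing entries, preserving FIFO among equals. */
    static void edf_insert(struct rt_thread **head, struct rt_thread *t)
    {
        struct rt_thread **pp = head;
        while (*pp != NULL && (*pp)->deadline <= t->deadline)
            pp = &(*pp)->next;
        t->next = *pp;
        *pp = t;
    }
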
3037 * Enqueue thread on a processor run queue. Thread must be locked,
3049 thread_t thread,
3055 result = run_queue_enqueue(rq, thread, options);
3056 thread->runq = processor;
3057 runq_consider_incr_bound_count(processor, thread);
3067 * Dispatch a thread for execution on a
3076 thread_t thread,
3082 thread->chosen_processor = processor;
3088 thread->bound_processor == processor)
3093 processor->next_thread = thread;
3106 if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri)
3111 if ((thread->sched_mode == TH_MODE_TIMESHARE) && thread->sched_pri < thread->priority)
3116 if (!SCHED(processor_enqueue)(processor, thread, options))
3131 (thread->sched_pri >= processor->current_pri ||
3138 thread->sched_pri >= processor->current_pri ) {
3169 * is not eligible to execute the thread. So we only
3173 * a runnable thread bound to a different processor in the
3280 * Choose a processor for the thread, beginning at
3286 * The thread must be locked. The pset must be locked,
3293 thread_t thread)
3320 ((thread->sched_pri >= BASEPRI_RTQUEUES) &&
3336 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
3353 (lp_processor->current_pri < thread->sched_pri))
3359 * thread with the lowest priority within
3389 if (thread->sched_pri > lowest_unpaired)
3394 if (thread->sched_pri > lowest_priority)
3396 if (thread->realtime.deadline < furthest_deadline)
3408 (thread->sched_pri > BASEPRI_DEFAULT && cset->low_pri->current_pri < thread->sched_pri))) {
3414 (processor == PROCESSOR_NULL || (thread->sched_pri <= BASEPRI_DEFAULT &&
3513 * Dispatch thread for execution, onto an idle
3521 thread_t thread,
3528 assert(thread_runnable(thread));
3534 if (SCHED(can_update_priority)(thread))
3535 SCHED(update_priority)(thread);
3537 assert(thread->runq == PROCESSOR_NULL);
3539 if (thread->bound_processor == PROCESSOR_NULL) {
3543 if (thread->affinity_set != AFFINITY_SET_NULL) {
3547 pset = thread->affinity_set->aset_pset;
3550 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
3553 if (thread->last_processor != PROCESSOR_NULL) {
3557 processor = thread->last_processor;
3560 processor = SCHED(choose_processor)(pset, processor, thread);
3562 if ((thread->last_processor != processor) && (thread->last_processor != PROCESSOR_NULL)) {
3564 (uintptr_t)thread_tid(thread), (uintptr_t)thread->last_processor->cpu_id, (uintptr_t)processor->cpu_id, thread->last_processor->state, 0);
3575 task_t task = thread->task;
3584 processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread);
3594 processor = thread->bound_processor;
3600 * Dispatch the thread on the chosen processor.
3602 if (thread->sched_pri >= BASEPRI_RTQUEUES)
3603 realtime_setrun(processor, thread);
3604 else if (thread->sched_mode == TH_MODE_FAIRSHARE)
3605 fairshare_setrun(processor, thread);
3607 processor_setrun(processor, thread, options);
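
thread_setrun() (3539-3607) picks where to run in a fixed order: a hard binding wins outright; an affinity set narrows the search to its pset; otherwise the last processor is preferred for cache warmth before falling back to a full choose_processor() search. A rough sketch of that preference order (hypothetical fields; the real search weighs far more state):

    struct sr_thread {
        int bound_cpu;          /* >= 0 when hard-bound */
        int affinity_pset;      /* >= 0 when in an affinity set */
        int last_cpu;           /* soft affinity from the last run */
    };

    /* Returns a cpu hint, or -1 to search the (possibly affinity-
     * restricted) pset from scratch. */
    static int pick_processor_hint(const struct sr_thread *t)
    {
        if (t->bound_cpu >= 0)
            return t->bound_cpu;        /* no choice to make */
        if (t->affinity_pset >= 0)
            return -1;                  /* search within that pset */
        return t->last_cpu;             /* a hint, not a guarantee */
    }
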
3641 thread_t next, thread;
3647 thread = (thread_t)queue_first(queue);
3648 while (!queue_end(queue, (queue_entry_t)thread)) {
3649 next = (thread_t)queue_next((queue_entry_t)thread);
3651 if (thread->bound_processor == PROCESSOR_NULL) {
3652 remqueue((queue_entry_t)thread);
3654 thread->runq = PROCESSOR_NULL;
3656 runq_consider_decr_bound_count(processor, thread);
3667 enqueue_tail(&tqueue, (queue_entry_t)thread);
3671 thread = next;
3679 while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
3680 thread_lock(thread);
3682 thread_setrun(thread, SCHED_TAILQ);
3684 thread_unlock(thread);
3703 thread_t thread = processor->active_thread;
3724 if (thread->state & TH_SUSP)
3733 * Set the scheduled priority of the specified thread.
3735 * This may cause the thread to change queues.
3741 thread_t thread,
3744 boolean_t removed = thread_run_queue_remove(thread);
3746 thread->sched_pri = priority;
3748 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
3750 if (thread->state & TH_RUN) {
3751 processor_t processor = thread->last_processor;
3753 if (thread == current_thread()) {
3757 processor->current_thmode = thread->sched_mode;
3763 processor->active_thread == thread )
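
set_sched_pri() (3744-3748) cannot change the priority of an enqueued thread in place, since the run queue is indexed by priority: it dequeues first, updates sched_pri, then re-enqueues with preemption. The dequeue/mutate/requeue shape, sketched with hypothetical callbacks:

    struct sthread { int sched_pri; int on_runq; };

    static void set_pri(struct sthread *t, int new_pri,
                        void (*runq_remove)(struct sthread *),
                        void (*runq_insert)(struct sthread *))
    {
        int removed = t->on_runq;
        if (removed)
            runq_remove(t);     /* thread_run_queue_remove() */
        t->sched_pri = new_pri;
        if (removed)
            runq_insert(t);     /* SCHED_PREEMPT | SCHED_TAILQ */
    }
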
3773 thread_t thread)
3778 if (rq != thread->runq)
3779 panic("run_queue_check: thread runq");
3781 if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
3782 panic("run_queue_check: thread sched_pri");
3784 q = &rq->queues[thread->sched_pri];
3787 if (qe == (queue_entry_t)thread)
3805 thread_t thread)
3814 if (processor == thread->runq) {
3819 runq_consider_decr_bound_count(processor, thread);
3820 run_queue_remove(rq, thread);
3824 * The thread left the run queue before we could
3827 assert(thread->runq == PROCESSOR_NULL);
3841 * Remove a thread from its current run queue and
3848 thread_t thread)
3850 processor_t processor = thread->runq;
3853 * If processor is PROCESSOR_NULL, the thread will stay out of the
3854 * run queues because the caller locked the thread. Otherwise
3855 * the thread is on a run queue, but could be chosen for dispatch
3866 if (thread->sched_mode == TH_MODE_FAIRSHARE) {
3867 return SCHED(fairshare_queue_remove)(thread);
3870 if (thread->sched_pri < BASEPRI_RTQUEUES) {
3871 return SCHED(processor_queue_remove)(processor, thread);
3877 if (processor == thread->runq) {
3882 remqueue((queue_entry_t)thread);
3886 thread->runq = PROCESSOR_NULL;
3890 * The thread left the run queue before we could
3893 assert(thread->runq == PROCESSOR_NULL);
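
Both removal paths above re-check thread->runq after taking the queue lock (3814, 3877): the pointer read beforehand can go stale the instant another processor dequeues the thread for dispatch. The revalidate-under-lock pattern, sketched:

    #include <stdbool.h>
    #include <stddef.h>
    #include <pthread.h>

    struct rq_thread {
        void *runq;             /* owning run queue, NULL once dequeued */
    };

    static bool runq_remove_if_still_queued(struct rq_thread *t,
                                            void *rq,
                                            pthread_mutex_t *rq_lock)
    {
        bool removed = false;
        pthread_mutex_lock(rq_lock);
        if (t->runq == rq) {    /* still ours: safe to unlink */
            t->runq = NULL;     /* remqueue() would happen here */
            removed = true;
        }
        pthread_mutex_unlock(rq_lock);
        return removed;         /* false: it left the queue first */
    }
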
3908 * Locate a thread to steal from the processor and
3921 thread_t thread;
3924 thread = (thread_t)queue_first(queue);
3925 while (!queue_end(queue, (queue_entry_t)thread)) {
3926 if (thread->bound_processor == PROCESSOR_NULL) {
3927 remqueue((queue_entry_t)thread);
3929 thread->runq = PROCESSOR_NULL;
3931 runq_consider_decr_bound_count(processor, thread);
3942 return (thread);
3946 thread = (thread_t)queue_next((queue_entry_t)thread);
3956 * Locate and steal a thread, beginning
3962 * Returns the stolen thread, or THREAD_NULL on
3971 thread_t thread;
3977 thread = steal_processor_thread(processor);
3978 if (thread != THREAD_NULL) {
3984 return (thread);
4021 thread_t thread;
4025 thread = processor->next_thread;
4027 if (thread != NULL) {
4028 if (thread->sched_mode == TH_MODE_REALTIME) {
4031 *rt_period = thread->realtime.period;
4033 *rt_deadline = thread->realtime.deadline;
4035 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_REAL_TIME, thread->realtime.period,
4036 (thread->realtime.deadline >> 32), thread->realtime.deadline, 0);
4039 } else if ((thread->sched_pri <= MAXPRI_THROTTLE) &&
4040 (thread->priority <= MAXPRI_THROTTLE)) {
4041 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_GET_URGENCY), THREAD_URGENCY_BACKGROUND, thread->sched_pri, thread->priority, 0, 0);
4058 * current thread to idle without an asserted wait state.
4060 * Returns the next thread to execute if dispatched directly.
4071 thread_t thread,
4081 (uintptr_t)thread_tid(thread), 0, 0, 0, 0);
4090 (thread == THREAD_NULL || ((thread->state & (TH_WAIT|TH_SUSP)) == TH_WAIT && !thread->wake_active))) {
4092 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -1, 0);
4103 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq.count, SCHED(processor_runq_count)(processor), -2, 0);
4137 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
4146 (uintptr_t)thread_tid(thread), state, (uintptr_t)thread_tid(new_thread), 0, 0);
4180 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
4190 (uintptr_t)thread_tid(thread), state, 0, 0, 0);
4196 * Each processor has a dedicated thread which
4221 thread_t thread;
4224 result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
4229 thread_lock(thread);
4230 thread->bound_processor = processor;
4231 processor->idle_thread = thread;
4232 thread->sched_pri = thread->priority = IDLEPRI;
4233 thread->state = (TH_RUN | TH_IDLE);
4234 thread_unlock(thread);
4237 thread_deallocate(thread);
4253 thread_t thread;
4257 MAXPRI_KERNEL, &thread);
4261 thread_deallocate(thread);
4265 * initialize our own thread after being switched
4268 * The current thread is the only other thread
4337 * the candidate scan, but the thread is locked for the update.
4359 register thread_t thread;
4364 queue_iterate(q, thread, thread_t, links) {
4365 if ( thread->sched_stamp != sched_tick &&
4366 (thread->sched_mode == TH_MODE_TIMESHARE) ) {
4370 thread_update_array[thread_update_count++] = thread;
4371 thread_reference_internal(thread);
4390 thread_t thread;
4408 thread = processor->idle_thread;
4409 if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
4415 thread_update_array[thread_update_count++] = thread;
4416 thread_reference_internal(thread);
4424 thread = thread_update_array[--thread_update_count];
4428 thread_lock(thread);
4429 if ( !(thread->state & (TH_WAIT)) ) {
4430 if (SCHED(can_update_priority)(thread))
4431 SCHED(update_priority)(thread);
4433 thread_unlock(thread);
4436 thread_deallocate(thread);
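
thread_update_scan() (4364-4436) is two-phase: under the run-queue lock it only collects stale timeshare threads into thread_update_array, taking a reference on each; the priority recomputation happens afterwards with the lock dropped. A sketch of that collect-then-drain batching (array size hypothetical):

    #define THREAD_UPDATE_MAX 128

    struct upd_thread { int refs; int sched_stamp; };

    static struct upd_thread *update_array[THREAD_UPDATE_MAX];
    static int update_count;

    /* Phase 1 (queue locked): pin the thread so it cannot be freed
     * once the lock is dropped.  Returns 0 when nothing was queued
     * (thread already stamped this tick, or the batch is full). */
    static int collect_candidate(struct upd_thread *t, int sched_tick)
    {
        if (t->sched_stamp == sched_tick || update_count >= THREAD_UPDATE_MAX)
            return 0;
        t->refs++;                          /* thread_reference_internal() */
        update_array[update_count++] = t;
        return 1;
    }

    /* Phase 2 (unlocked): drain, update, and unpin. */
    static void drain_candidates(void (*update)(struct upd_thread *))
    {
        while (update_count > 0) {
            struct upd_thread *t = update_array[--update_count];
            update(t);                      /* SCHED(update_priority)() */
            t->refs--;                      /* thread_deallocate() */
        }
    }
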
4444 thread_eager_preemption(thread_t thread)
4446 return ((thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0);
4450 thread_set_eager_preempt(thread_t thread)
4459 thread_lock(thread);
4460 thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
4462 if (thread == current_thread()) {
4463 thread_unlock(thread);
4470 p = thread->last_processor;
4473 p->active_thread == thread) {
4477 thread_unlock(thread);
4484 thread_clear_eager_preempt(thread_t thread)
4489 thread_lock(thread);
4491 thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
4493 thread_unlock(thread);
4560 thread_t thread)
4562 return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN);