Lines Matching defs:thread in /macosx-10.5.8/xnu-1228.15.4/osfmk/kern/

97 #include <kern/thread.h>
158 thread_t thread,
162 thread_t thread,
183 thread_t thread);
216 * Each thread may be waiting for exactly one event; this event
217 * is set using assert_wait(). That thread may be awakened either
219 * or by directly waking that thread up with clear_wait().
227 * Locks on both the thread and on the hash buckets govern the
230 * bucket must be locked before any thread.
234 * thread or hash bucket locks.
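
The comments at 216-234 describe the event/wait protocol these primitives implement: a thread keys a wait on an event with assert_wait(), and is released either by a wakeup on that event or by clear_wait(). As a minimal sketch of the calling pattern (my_flag is a hypothetical wait channel; real callers close the check/wait race under a lock appropriate to their subsystem):

    #include <kern/thread.h>
    #include <kern/sched_prim.h>

    static int my_flag;                 /* hypothetical shared state */

    void
    waiter(void)
    {
        while (my_flag == 0) {
            /* Queue ourselves on the event first... */
            assert_wait((event_t)&my_flag, THREAD_UNINT);
            /* ...then re-check before committing to block. */
            if (my_flag != 0) {
                clear_wait(current_thread(), THREAD_AWAKENED);
                break;
            }
            thread_block(THREAD_CONTINUE_NULL);
        }
    }

    void
    waker(void)
    {
        my_flag = 1;
        thread_wakeup((event_t)&my_flag);   /* wake waiters on &my_flag */
    }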
369 thread_t thread = p0;
373 thread_lock(thread);
374 if (--thread->wait_timer_active == 0) {
375 if (thread->wait_timer_is_set) {
376 thread->wait_timer_is_set = FALSE;
377 clear_wait_internal(thread, THREAD_TIMED_OUT);
380 thread_unlock(thread);
387 * Set a timer for the current thread, if the thread
396 thread_t thread = current_thread();
401 thread_lock(thread);
402 if ((thread->state & TH_WAIT) != 0) {
404 if (!timer_call_enter(&thread->wait_timer, deadline))
405 thread->wait_timer_active++;
406 thread->wait_timer_is_set = TRUE;
408 thread_unlock(thread);
416 thread_t thread = current_thread();
420 thread_lock(thread);
421 if ((thread->state & TH_WAIT) != 0) {
422 if (!timer_call_enter(&thread->wait_timer, deadline))
423 thread->wait_timer_active++;
424 thread->wait_timer_is_set = TRUE;
426 thread_unlock(thread);
433 thread_t thread = current_thread();
437 thread_lock(thread);
438 if (thread->wait_timer_is_set) {
439 if (timer_call_cancel(&thread->wait_timer))
440 thread->wait_timer_active--;
441 thread->wait_timer_is_set = FALSE;
443 thread_unlock(thread);
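
The three fragments above (387-443) are the arm/arm-at-deadline/cancel triple for the per-thread wait timer; the expiry path at 369-380 is what clears the wait with THREAD_TIMED_OUT. The invariant, condensed from the lines shown (same field names; a paraphrase, not standalone code): arm only while the thread is actually in TH_WAIT, and take a reference only when timer_call_enter() newly enqueues the call:

    /* Arm (thread locked, at splsched): */
    if ((thread->state & TH_WAIT) != 0) {
        if (!timer_call_enter(&thread->wait_timer, deadline))
            thread->wait_timer_active++;    /* newly enqueued */
        thread->wait_timer_is_set = TRUE;
    }

    /* Disarm (thread locked, at splsched): */
    if (thread->wait_timer_is_set) {
        if (timer_call_cancel(&thread->wait_timer))
            thread->wait_timer_active--;    /* successfully dequeued */
        thread->wait_timer_is_set = FALSE;
    }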
450 * Unblock thread on wake up.
452 * Returns TRUE if the thread is still running.
458 thread_t thread,
466 thread->wait_result = wresult;
471 if (thread->wait_timer_is_set) {
472 if (timer_call_cancel(&thread->wait_timer))
473 thread->wait_timer_active--;
474 thread->wait_timer_is_set = FALSE;
481 thread->state &= ~(TH_WAIT|TH_UNINT);
483 if (!(thread->state & TH_RUN)) {
484 thread->state |= TH_RUN;
486 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
492 if (thread->sched_mode & TH_MODE_TIMESHARE)
499 if (thread->state & TH_IDLE) {
500 processor_t processor = thread->last_processor;
512 if (thread->sched_mode & TH_MODE_REALTIME) {
513 thread->realtime.deadline = mach_absolute_time();
514 thread->realtime.deadline += thread->realtime.constraint;
520 thread->current_quantum = 0;
521 thread->computation_metered = 0;
522 thread->reason = AST_NONE;
526 (int)thread, (int)thread->sched_pri, 0, 0, 0);
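
Note the realtime case at 512-514: each time a realtime thread unblocks, its deadline window restarts at mach_absolute_time() plus its constraint. The constraint itself is normally installed from user space with THREAD_TIME_CONSTRAINT_POLICY; a hedged user-level sketch (the helper name is hypothetical, all times in mach absolute-time units):

    #include <mach/mach.h>
    #include <mach/thread_policy.h>

    kern_return_t
    make_time_constrained(mach_port_t thread, uint32_t period,
        uint32_t computation, uint32_t constraint)
    {
        thread_time_constraint_policy_data_t policy = {
            .period      = period,       /* nominal interval between wakeups */
            .computation = computation,  /* expected CPU time per period */
            .constraint  = constraint,   /* max time from wakeup to completion */
            .preemptible = TRUE,
        };

        return thread_policy_set(thread, THREAD_TIME_CONSTRAINT_POLICY,
            (thread_policy_t)&policy, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
    }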
534 * Unblock and dispatch thread.
536 * thread lock held, IPC locks may be held.
537 * thread must have been pulled from wait queue under same lock hold.
544 thread_t thread,
547 assert(thread->at_safe_point == FALSE);
548 assert(thread->wait_event == NO_EVENT64);
549 assert(thread->wait_queue == WAIT_QUEUE_NULL);
551 if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) {
552 if (!thread_unblock(thread, wresult))
553 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
564 * Mark a thread as waiting. If, given the circumstances,
568 * at splsched() and thread is locked.
573 thread_t thread,
579 * The thread may have certain types of interrupts/aborts masked
584 if (interruptible > (thread->options & TH_OPT_INTMASK))
585 interruptible = thread->options & TH_OPT_INTMASK;
590 !(thread->sched_mode & TH_MODE_ABORT) ||
592 (thread->sched_mode & TH_MODE_ABORTSAFELY))) {
593 thread->state |= (interruptible) ? TH_WAIT : (TH_WAIT | TH_UNINT);
594 thread->at_safe_point = at_safe_point;
595 return (thread->wait_result = THREAD_WAITING);
598 if (thread->sched_mode & TH_MODE_ABORTSAFELY)
599 thread->sched_mode &= ~TH_MODE_ISABORTED;
601 return (thread->wait_result = THREAD_INTERRUPTED);
608 * current thread. The effective value of any
615 * The old interrupt level for the thread.
622 thread_t thread = current_thread();
623 wait_interrupt_t result = thread->options & TH_OPT_INTMASK;
625 thread->options = (thread->options & ~TH_OPT_INTMASK) | (new_level & TH_OPT_INTMASK);
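
thread_interrupt_level() (608-625) swaps the new mask into the thread's TH_OPT_INTMASK bits and hands back the old value, so the canonical use is a save/restore bracket (minimal sketch, kernel context assumed):

    #include <kern/sched_prim.h>

    void
    uninterruptible_section(void)
    {
        /* Force any waits below to be non-interruptible... */
        wait_interrupt_t saved = thread_interrupt_level(THREAD_UNINT);

        /* ... work that must not be aborted mid-wait ... */

        /* ...then restore the caller's level. */
        (void) thread_interrupt_level(saved);
    }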
641 thread_t thread;
647 thread = current_thread();
649 return (thread == NULL || wait_queue_assert_possible(thread));
655 * Assert that the current thread is about to go to
680 thread_t thread = current_thread();
691 thread_lock(thread);
695 interruptible, deadline, thread);
697 thread_unlock(thread);
710 thread_t thread = current_thread();
720 thread_lock(thread);
723 interruptible, deadline, thread);
725 thread_unlock(thread);
735 * Cause the current thread to wait until the specified event
763 * Cause the current thread to wait until the specified event
790 * Cause the current thread to wait until the specified event
816 * Cause the current thread to wait until the specified event
841 * Cause the current thread to wait until the specified event
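
The five near-identical comments at 735-841 head the assert_wait() family (plain, timeout, and deadline variants). A hedged sketch of the timeout form, paired with thread_block() as required (my_event is hypothetical; the interval/scale pair follows the usual xnu convention):

    #include <kern/sched_prim.h>
    #include <mach/clock_types.h>

    static int my_event;

    wait_result_t
    wait_up_to_100ms(void)
    {
        wait_result_t wres;

        wres = assert_wait_timeout((event_t)&my_event,
            THREAD_INTERRUPTIBLE, 100, NSEC_PER_MSEC);
        if (wres == THREAD_WAITING)
            wres = thread_block(THREAD_CONTINUE_NULL);

        return (wres);  /* THREAD_AWAKENED, THREAD_TIMED_OUT, ... */
    }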
865 * Force a preemption point for a thread and wait
869 * The thread must enter a wait state and stop via a
876 thread_t thread)
881 wake_lock(thread);
882 thread_lock(thread);
884 while (thread->state & TH_SUSP) {
885 thread->wake_active = TRUE;
886 thread_unlock(thread);
888 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
889 wake_unlock(thread);
899 wake_lock(thread);
900 thread_lock(thread);
903 thread->state |= TH_SUSP;
905 while (thread->state & TH_RUN) {
906 processor_t processor = thread->last_processor;
908 if (processor != PROCESSOR_NULL && processor->active_thread == thread)
911 thread->wake_active = TRUE;
912 thread_unlock(thread);
914 wresult = assert_wait(&thread->wake_active, THREAD_ABORTSAFE);
915 wake_unlock(thread);
922 thread_unstop(thread);
927 wake_lock(thread);
928 thread_lock(thread);
931 thread_unlock(thread);
932 wake_unlock(thread);
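
thread_stop() (865-932) loops until the target is off-CPU, using wake_active plus assert_wait() to sleep between checks, and leaves TH_SUSP set; every successful call must be paired with thread_unstop(). A minimal sketch of a caller (inspect_thread is a hypothetical name):

    #include <kern/sched_prim.h>

    boolean_t
    inspect_thread(thread_t target)
    {
        /* FALSE means our ABORTSAFE wait was interrupted and the
         * target was never stopped. */
        if (!thread_stop(target))
            return (FALSE);

        /* target is suspended here; examine its state safely. */

        thread_unstop(target);  /* clear TH_SUSP, requeue if needed */
        return (TRUE);
    }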
942 * the thread running if appropriate.
948 thread_t thread)
952 wake_lock(thread);
953 thread_lock(thread);
955 if ((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) == TH_SUSP) {
956 thread->state &= ~TH_SUSP;
957 thread_unblock(thread, THREAD_AWAKENED);
959 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
962 if (thread->state & TH_SUSP) {
963 thread->state &= ~TH_SUSP;
965 if (thread->wake_active) {
966 thread->wake_active = FALSE;
967 thread_unlock(thread);
969 thread_wakeup(&thread->wake_active);
970 wake_unlock(thread);
977 thread_unlock(thread);
978 wake_unlock(thread);
985 * Wait for a thread to stop running. (non-interruptible)
990 thread_t thread)
995 wake_lock(thread);
996 thread_lock(thread);
998 while (thread->state & TH_RUN) {
999 processor_t processor = thread->last_processor;
1001 if (processor != PROCESSOR_NULL && processor->active_thread == thread)
1004 thread->wake_active = TRUE;
1005 thread_unlock(thread);
1007 wresult = assert_wait(&thread->wake_active, THREAD_UNINT);
1008 wake_unlock(thread);
1015 wake_lock(thread);
1016 thread_lock(thread);
1019 thread_unlock(thread);
1020 wake_unlock(thread);
1027 * Clear the wait condition for the specified thread.
1028 * Start the thread executing if that is appropriate.
1030 * thread thread to awaken
1031 * result Wakeup result the thread should see
1034 * the thread is locked.
1036 KERN_SUCCESS thread was rousted out of a wait
1037 * KERN_FAILURE thread was waiting but could not be rousted
1038 * KERN_NOT_WAITING thread was not waiting
1042 thread_t thread,
1045 wait_queue_t wq = thread->wait_queue;
1049 if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT))
1054 wait_queue_pull_thread_locked(wq, thread, TRUE);
1055 /* wait queue unlocked, thread still locked */
1058 thread_unlock(thread);
1061 thread_lock(thread);
1062 if (wq != thread->wait_queue)
1069 return (thread_go(thread, wresult));
1072 panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n",
1073 thread, wq, cpu_number());
1082 * Clear the wait condition for the specified thread. Start the thread
1086 * thread thread to awaken
1087 * result Wakeup result the thread should see
1091 thread_t thread,
1098 thread_lock(thread);
1099 ret = clear_wait_internal(thread, result);
1100 thread_unlock(thread);
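
clear_wait() (1082-1100) is the exported wrapper: it takes splsched and the thread lock, then retries clear_wait_internal(), whose outcomes are the three codes documented at 1036-1038. A caller typically just dispatches on the result (abort_wait is a hypothetical name):

    #include <kern/sched_prim.h>

    void
    abort_wait(thread_t thread)
    {
        switch (clear_wait(thread, THREAD_INTERRUPTED)) {
        case KERN_SUCCESS:      /* rousted out of its wait */
            break;
        case KERN_FAILURE:      /* waiting, but uninterruptible */
            break;
        case KERN_NOT_WAITING:  /* was not waiting at all */
        default:
            break;
        }
    }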
1133 * Force the current thread to execute on the specified processor.
1163 * Select a new thread for the current processor to execute.
1165 * May select the current thread, which must be locked.
1169 thread_t thread,
1180 if (thread->sched_stamp != sched_tick)
1181 update_priority(thread);
1183 processor->current_pri = thread->sched_pri;
1197 * Test to see if the current thread should continue
1202 if ( thread->state == TH_RUN &&
1203 (thread->bound_processor == PROCESSOR_NULL ||
1204 thread->bound_processor == processor) &&
1205 (thread->affinity_set == AFFINITY_SET_NULL ||
1206 thread->affinity_set->aset_pset == pset) ) {
1207 if ( thread->sched_pri >= BASEPRI_RTQUEUES &&
1216 thread = (thread_t)q->next;
1217 ((queue_entry_t)thread)->next->prev = q;
1218 q->next = ((queue_entry_t)thread)->next;
1219 thread->runq = PROCESSOR_NULL;
1232 processor->deadline = thread->realtime.deadline;
1236 return (thread);
1241 (processor->runq.highq < thread->sched_pri &&
1242 rt_runq.highq < thread->sched_pri)) ) {
1246 /* I am the highest priority runnable (non-idle) thread */
1256 return (thread);
1264 thread = run_queue_dequeue(&processor->runq, SCHED_HEADQ);
1267 pset_pri_hint(pset, processor, thread->sched_pri);
1275 return (thread);
1278 thread = run_queue_dequeue(&rt_runq, SCHED_HEADQ);
1281 processor->deadline = thread->realtime.deadline;
1284 return (thread);
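
Condensed, the test at 1202-1242 keeps the current thread on this processor only when it is still runnable, not bound or affine elsewhere, and its priority beats both the local and the global realtime run queues; otherwise the highest-priority waiter is dequeued. A paraphrase of those fragments (eliding the realtime fast path; not standalone code):

    if (thread->state == TH_RUN &&
        (thread->bound_processor == PROCESSOR_NULL ||
         thread->bound_processor == processor) &&
        (thread->affinity_set == AFFINITY_SET_NULL ||
         thread->affinity_set->aset_pset == pset) &&
        processor->runq.highq < thread->sched_pri &&
        rt_runq.highq < thread->sched_pri) {
            /* keep running the current thread */
            return (thread);
    }

    /* otherwise: run_queue_dequeue() from processor->runq or rt_runq */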
1337 * Choose idle thread if fast idle is not possible.
1339 if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active)
1344 * context switch. Return dispatched thread,
1345 * else check again for a runnable thread.
1347 new_thread = thread_select_idle(thread, processor);
1357 * Idle the processor using the current thread context.
1359 * Called with thread locked, then dropped and relocked.
1363 thread_t thread,
1368 if (thread->sched_mode & TH_MODE_TIMESHARE)
1372 thread->state |= TH_IDLE;
1375 thread_unlock(thread);
1378 * Switch execution timing to processor idle thread.
1390 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
1396 spllo(); new_thread = processor_idle(thread, processor);
1401 (*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
1403 thread_lock(thread);
1406 * If awakened, switch to thread timer and start a new quantum.
1407 * Otherwise skip; we will context switch to another thread or return here.
1409 if (!(thread->state & TH_WAIT)) {
1411 thread_timer_event(processor->last_dispatch, &thread->system_timer);
1412 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
1414 thread_quantum_init(thread);
1416 processor->quantum_end = processor->last_dispatch + thread->current_quantum;
1417 timer_call_enter1(&processor->quantum_timer, thread, processor->quantum_end);
1420 thread->computation_epoch = processor->last_dispatch;
1423 thread->state &= ~TH_IDLE;
1426 if (thread->sched_mode & TH_MODE_TIMESHARE)
1433 * Perform a context switch and start executing the new thread.
1435 * Returns FALSE on failure, and the thread is re-dispatched.
1440 #define funnel_release_check(thread, debug) \
1442 if ((thread)->funnel_state & TH_FN_OWNED) { \
1443 (thread)->funnel_state = TH_FN_REFUNNEL; \
1445 (thread)->funnel_lock, (debug), 0, 0, 0); \
1446 funnel_unlock((thread)->funnel_lock); \
1450 #define funnel_refunnel_check(thread, debug) \
1452 if ((thread)->funnel_state & TH_FN_REFUNNEL) { \
1453 kern_return_t result = (thread)->wait_result; \
1455 (thread)->funnel_state = 0; \
1457 (thread)->funnel_lock, (debug), 0, 0, 0); \
1458 funnel_lock((thread)->funnel_lock); \
1460 (thread)->funnel_lock, (debug), 0, 0, 0); \
1461 (thread)->funnel_state = TH_FN_OWNED; \
1462 (thread)->wait_result = result; \
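
These two macros bracket every potential block in thread_invoke(): a thread that owns a funnel drops it before switching away (marking itself TH_FN_REFUNNEL) and re-takes it on resume, preserving wait_result across the re-lock so the wakeup result is not clobbered. Schematically (the numeric argument is only a trace code):

    funnel_release_check(self, trace_code);   /* drop funnel before blocking */
    /* ... context switch / block happens here ... */
    funnel_refunnel_check(self, trace_code);  /* re-acquire funnel on resume */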
1469 register thread_t thread,
1483 * Mark thread interruptible.
1485 thread_lock(thread);
1486 thread->state &= ~TH_UNINT;
1489 assert(thread_runnable(thread));
1500 if (!thread->kernel_stack) {
1504 * that of the other thread.
1506 if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack)
1512 continuation = thread->continuation;
1513 parameter = thread->parameter;
1516 processor->active_thread = thread;
1517 processor->current_pri = thread->sched_pri;
1518 if (thread->last_processor != processor && thread->last_processor != NULL) {
1519 if (thread->last_processor->processor_set != processor->processor_set)
1520 thread->ps_switch++;
1521 thread->p_switch++;
1523 thread->last_processor = processor;
1524 thread->c_switch++;
1525 ast_context(thread);
1526 thread_unlock(thread);
1531 thread_timer_event(processor->last_dispatch, &thread->system_timer);
1532 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
1535 self->reason, (int)thread, self->sched_pri, thread->sched_pri, 0);
1538 machine_stack_handoff(self, thread);
1540 thread_dispatch(self, thread);
1542 thread->continuation = thread->parameter = NULL;
1546 funnel_refunnel_check(thread, 2);
1550 call_continuation(continuation, parameter, thread->wait_result);
1553 else if (thread == self) {
1554 /* same thread but with continuation */
1570 * Check that the other thread has a stack
1572 if (!thread->kernel_stack) {
1574 if (!stack_alloc_try(thread)) {
1576 thread_unlock(thread);
1577 thread_stack_enqueue(thread);
1581 else if (thread == self) {
1593 processor->active_thread = thread;
1594 processor->current_pri = thread->sched_pri;
1595 if (thread->last_processor != processor && thread->last_processor != NULL) {
1596 if (thread->last_processor->processor_set != processor->processor_set)
1597 thread->ps_switch++;
1598 thread->p_switch++;
1600 thread->last_processor = processor;
1601 thread->c_switch++;
1602 ast_context(thread);
1603 thread_unlock(thread);
1611 thread_timer_event(processor->last_dispatch, &thread->system_timer);
1612 PROCESSOR_DATA(processor, kernel_timer) = &thread->system_timer;
1615 (int)self->reason, (int)thread, self->sched_pri, thread->sched_pri, 0);
1622 thread = machine_switch_context(self, continuation, thread);
1623 TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread);
1628 thread_dispatch(thread, self);
1646 * Handle threads at context switch. Re-dispatch other thread
1648 * special actions. Update quantum for other thread and begin
1655 thread_t thread,
1660 if (thread != THREAD_NULL) {
1665 if (thread->continuation != NULL && thread->kernel_stack != 0)
1666 stack_free(thread);
1668 if (!(thread->state & TH_IDLE)) {
1669 wake_lock(thread);
1670 thread_lock(thread);
1677 thread->current_quantum = (processor->quantum_end - processor->last_dispatch);
1679 thread->current_quantum = 0;
1681 if (thread->sched_mode & TH_MODE_REALTIME) {
1683 * Cancel the deadline if the thread has
1686 if (thread->current_quantum == 0) {
1687 thread->realtime.deadline = UINT64_MAX;
1688 thread->reason |= AST_QUANTUM;
1697 if (thread->current_quantum < min_std_quantum) {
1698 thread->reason |= AST_QUANTUM;
1699 thread->current_quantum += std_quantum;
1707 if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) {
1708 self->current_quantum = thread->current_quantum;
1709 thread->reason |= AST_QUANTUM;
1710 thread->current_quantum = 0;
1713 thread->last_switch = processor->last_dispatch;
1715 thread->computation_metered += (thread->last_switch - thread->computation_epoch);
1717 if (!(thread->state & TH_WAIT)) {
1721 if (thread->reason & AST_QUANTUM)
1722 thread_setrun(thread, SCHED_TAILQ);
1724 if (thread->reason & AST_PREEMPT)
1725 thread_setrun(thread, SCHED_HEADQ);
1727 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
1729 thread->reason = AST_NONE;
1731 thread_unlock(thread);
1732 wake_unlock(thread);
1738 thread->state &= ~TH_RUN;
1740 if (thread->sched_mode & TH_MODE_TIMESHARE)
1744 if (thread->wake_active) {
1745 thread->wake_active = FALSE;
1746 thread_unlock(thread);
1748 thread_wakeup(&thread->wake_active);
1751 thread_unlock(thread);
1753 wake_unlock(thread);
1755 (*thread->sched_call)(SCHED_CALL_BLOCK, thread);
1757 if (thread->state & TH_TERMINATE)
1758 thread_terminate_enqueue(thread);
1795 * attempt to discard the thread's kernel stack. When the
1796 * thread resumes, it will execute the continuation function
1846 * Block the current thread if a wait has been asserted.
1866 * Switch directly from the current thread to the
1867 * new thread, handing off our quantum if appropriate.
1869 * New thread must be runnable, and not on a run queue.
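
thread_block() (1846) parks the current thread once a wait has been asserted; supplying a continuation (see 1795-1796) lets the kernel discard the blocked thread's stack and resume it in a fresh function. A hedged sketch using the parameterized form (names are hypothetical):

    #include <kern/sched_prim.h>

    static int my_event;

    static void
    my_continuation(void *param, wait_result_t wres)
    {
        /* Resumed on a fresh stack after wakeup or timeout. */
    }

    void
    sleep_with_continuation(void *param)
    {
        assert_wait((event_t)&my_event, THREAD_UNINT);
        thread_block_parameter(my_continuation, param);
        /* NOTREACHED: with a continuation, control never returns here */
    }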
1904 * Called at splsched when a thread first receives
1909 register thread_t thread)
1918 thread_dispatch(thread, self);
1924 if (thread != THREAD_NULL)
1956 * and return the resulting thread.
1966 thread_t thread;
1970 thread = (thread_t)queue->next;
1971 ((queue_entry_t)thread)->next->prev = queue;
1972 queue->next = ((queue_entry_t)thread)->next;
1975 thread = (thread_t)queue->prev;
1976 ((queue_entry_t)thread)->prev->next = queue;
1977 queue->prev = ((queue_entry_t)thread)->prev;
1980 thread->runq = PROCESSOR_NULL;
1991 return (thread);
1997 * Enqueue a thread for realtime execution.
2001 thread_t thread)
2004 queue_t queue = rq->queues + thread->sched_pri;
2005 uint64_t deadline = thread->realtime.deadline;
2011 enqueue_tail(queue, (queue_entry_t)thread);
2013 setbit(MAXPRI - thread->sched_pri, rq->bitmap);
2014 if (thread->sched_pri > rq->highq)
2015 rq->highq = thread->sched_pri;
2034 insque((queue_entry_t)thread, (queue_entry_t)entry);
2037 thread->runq = RT_RUNQ;
2048 * Dispatch a thread for realtime execution.
2056 thread_t thread)
2067 processor->next_thread = thread;
2068 processor->deadline = thread->realtime.deadline;
2077 if (realtime_queue_insert(thread)) {
2090 * Enqueue thread on a processor run queue. Thread must be locked,
2102 thread_t thread,
2106 queue_t queue = rq->queues + thread->sched_pri;
2110 enqueue_tail(queue, (queue_entry_t)thread);
2112 setbit(MAXPRI - thread->sched_pri, rq->bitmap);
2113 if (thread->sched_pri > rq->highq) {
2114 rq->highq = thread->sched_pri;
2120 enqueue_tail(queue, (queue_entry_t)thread);
2122 enqueue_head(queue, (queue_entry_t)thread);
2124 thread->runq = processor;
2125 if (testbit(thread->sched_pri, sched_preempt_pri))
2135 * Dispatch a thread for execution on a
2144 thread_t thread,
2157 processor->next_thread = thread;
2170 if (testbit(thread->sched_pri, sched_preempt_pri))
2173 if (thread->sched_mode & TH_MODE_TIMESHARE && thread->sched_pri < thread->priority)
2178 if (!processor_enqueue(processor, thread, options))
2189 thread->sched_pri >= processor->current_pri ) {
2195 thread->sched_pri >= processor->current_pri ) {
2229 * Choose a processor for the thread, beginning at
2234 * The thread must be locked. The pset must be locked,
2240 thread_t thread)
2243 processor_t processor = thread->last_processor;
2253 if (processor->state == PROCESSOR_IDLE || ( thread->sched_pri > BASEPRI_DEFAULT && processor->current_pri < thread->sched_pri))
2268 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
2270 * For an RT thread, iterate through active processors, first fit.
2274 if (thread->sched_pri > processor->current_pri ||
2275 thread->realtime.deadline < processor->deadline)
2290 (thread->sched_pri > BASEPRI_DEFAULT && cset->low_pri->current_pri < thread->sched_pri))) {
2297 ( thread->sched_pri <= BASEPRI_DEFAULT && cset->low_count->runq.count < processor->runq.count))) {
2375 * Dispatch thread for execution, onto an idle
2383 thread_t thread,
2390 assert(thread_runnable(thread));
2396 if (thread->sched_stamp != sched_tick)
2397 update_priority(thread);
2399 assert(thread->runq == PROCESSOR_NULL);
2401 if (thread->bound_processor == PROCESSOR_NULL) {
2405 if (thread->affinity_set != AFFINITY_SET_NULL) {
2409 pset = thread->affinity_set->aset_pset;
2412 processor = choose_processor(pset, thread);
2415 if (thread->last_processor != PROCESSOR_NULL) {
2419 processor = thread->last_processor;
2426 if (thread->sched_pri >= BASEPRI_RTQUEUES) {
2428 * If the processor is executing an RT thread with
2431 if (thread->sched_pri <= processor->current_pri ||
2432 thread->realtime.deadline >= processor->deadline)
2433 processor = choose_processor(pset, thread);
2436 processor = choose_processor(pset, thread);
2445 task_t task = thread->task;
2454 processor = choose_processor(pset, thread);
2464 processor = thread->bound_processor;
2470 * Dispatch the thread on the chosen processor.
2472 if (thread->sched_pri >= BASEPRI_RTQUEUES)
2473 realtime_setrun(processor, thread);
2475 processor_setrun(processor, thread, options);
2495 thread_t next, thread;
2501 thread = (thread_t)queue_first(queue);
2502 while (!queue_end(queue, (queue_entry_t)thread)) {
2503 next = (thread_t)queue_next((queue_entry_t)thread);
2505 if (thread->bound_processor != processor) {
2506 remqueue(queue, (queue_entry_t)thread);
2508 thread->runq = PROCESSOR_NULL;
2519 enqueue_tail(&tqueue, (queue_entry_t)thread);
2523 thread = next;
2531 while ((thread = (thread_t)dequeue_head(&tqueue)) != THREAD_NULL) {
2532 thread_lock(thread);
2534 thread_setrun(thread, SCHED_TAILQ);
2536 thread_unlock(thread);
2606 * Set the scheduled priority of the specified thread.
2608 * This may cause the thread to change queues.
2614 thread_t thread,
2617 boolean_t removed = run_queue_remove(thread);
2619 thread->sched_pri = priority;
2621 thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ);
2623 if (thread->state & TH_RUN) {
2624 processor_t processor = thread->last_processor;
2626 if (thread == current_thread()) {
2635 processor->active_thread == thread )
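
set_sched_pri() (2606-2635) must be entered at splsched with the thread locked; it removes the thread from its run queue if queued, changes sched_pri, and requeues with preemption (or pokes the processor the thread is running on). A hedged sketch of a well-formed caller (boost_priority is a hypothetical name):

    #include <kern/sched_prim.h>

    void
    boost_priority(thread_t thread, int priority)
    {
        spl_t s = splsched();       /* interrupts off for thread_lock */
        thread_lock(thread);

        set_sched_pri(thread, priority);

        thread_unlock(thread);
        splx(s);
    }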
2645 thread_t thread)
2650 if (rq != thread->runq)
2651 panic("run_queue_check: thread runq");
2653 if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI)
2654 panic("run_queue_check: thread sched_pri");
2656 q = &rq->queues[thread->sched_pri];
2659 if (qe == (queue_entry_t)thread)
2673 * Remove a thread from a current run queue and
2680 thread_t thread)
2682 processor_t processor = thread->runq;
2685 * If processor is PROCESSOR_NULL, the thread will stay out of the
2686 * run queues because the caller locked the thread. Otherwise
2687 * the thread is on a run queue, but could be chosen for dispatch
2699 if (thread->sched_pri < BASEPRI_RTQUEUES) {
2709 if (processor == thread->runq) {
2714 remqueue(&rq->queues[0], (queue_entry_t)thread);
2716 if (testbit(thread->sched_pri, sched_preempt_pri)) {
2720 if (queue_empty(rq->queues + thread->sched_pri)) {
2722 if (thread->sched_pri != IDLEPRI)
2723 clrbit(MAXPRI - thread->sched_pri, rq->bitmap);
2727 thread->runq = PROCESSOR_NULL;
2731 * The thread left the run queue before we could
2734 assert(thread->runq == PROCESSOR_NULL);
2747 * Locate a thread to steal from the processor and
2760 thread_t thread;
2763 thread = (thread_t)queue_first(queue);
2764 while (!queue_end(queue, (queue_entry_t)thread)) {
2765 if (thread->bound_processor != processor) {
2766 remqueue(queue, (queue_entry_t)thread);
2768 thread->runq = PROCESSOR_NULL;
2779 return (thread);
2783 thread = (thread_t)queue_next((queue_entry_t)thread);
2793 * Locate and steal a thread, beginning
2799 * Returns the stolen thread, or THREAD_NULL on
2808 thread_t thread;
2814 thread = steal_processor_thread(processor);
2815 if (thread != THREAD_NULL) {
2821 return (thread);
2846 * current thread to idle without an asserted wait state.
2848 * Returns the next thread to execute if dispatched directly.
2852 thread_t thread,
2866 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START, (int)thread, 0, 0, 0, 0);
2873 (thread == THREAD_NULL || ((thread->state & (TH_WAIT|TH_SUSP)) == TH_WAIT && !thread->wake_active))) {
2912 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, (int)state, 0, 0, 0);
2920 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, (int)state, (int)new_thread, 0, 0);
2953 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, (int)state, 0, 0, 0);
2962 MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, (int)thread, (int)state, 0, 0, 0);
2968 * Each processor has a dedicated thread which
2993 thread_t thread;
2996 result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread);
3001 thread_lock(thread);
3002 thread->bound_processor = processor;
3003 processor->idle_thread = thread;
3004 thread->sched_pri = thread->priority = IDLEPRI;
3005 thread->state = (TH_RUN | TH_IDLE);
3006 thread_unlock(thread);
3009 thread_deallocate(thread);
3027 thread_t thread;
3029 result = kernel_thread_start_priority((thread_continue_t)sched_tick_thread, NULL, MAXPRI_KERNEL, &thread);
3033 thread_deallocate(thread);
3040 * The current thread is the only other thread
3138 * the candidate scan, but the thread is locked for the update.
3160 register thread_t thread;
3165 queue_iterate(q, thread, thread_t, links) {
3166 if ( thread->sched_stamp != sched_tick &&
3167 (thread->sched_mode & TH_MODE_TIMESHARE) ) {
3171 thread_update_array[thread_update_count++] = thread;
3172 thread_reference_internal(thread);
3191 thread_t thread;
3209 thread = processor->idle_thread;
3210 if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) {
3216 thread_update_array[thread_update_count++] = thread;
3217 thread_reference_internal(thread);
3225 thread = thread_update_array[--thread_update_count];
3229 thread_lock(thread);
3230 if ( !(thread->state & (TH_WAIT|TH_SUSP)) &&
3231 thread->sched_stamp != sched_tick )
3232 update_priority(thread);
3233 thread_unlock(thread);
3236 thread_deallocate(thread);
3265 thread_t thread)
3267 return ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN);
3286 iprintf("Idle thread:\n\thandoff %d block %d\n",
3289 iprintf("Sched thread blocks: %d\n", c_sched_thread_block);