Lines Matching refs:td

85 #define	TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
117 #define THREAD_CAN_SCHED(td, cpu) \
118 CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
133 static void sched_priority(struct thread *td, u_char prio);
135 static void maybe_resched(struct thread *td);
136 static void updatepri(struct thread *td);
137 static void resetpriority(struct thread *td);
138 static void resetpriority_thread(struct thread *td);
140 static int sched_pickcpu(struct thread *td);
300 maybe_resched(struct thread *td)
303 THREAD_LOCK_ASSERT(td, MA_OWNED);
304 if (td->td_priority < curthread->td_priority)
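
A minimal sketch of what the maybe_resched() fragments above imply: in this scheduler a numerically lower td_priority is the better priority, so a thread that now outranks the currently running thread should make it reschedule. The struct and the needs_resched field below are simplified stand-ins (the real code presumably sets TDF_NEEDRESCHED on curthread); only the comparison itself is taken from the listing.

    /* Simplified model: lower td_priority value = higher priority. */
    struct thread_model {
        int td_priority;
        int needs_resched;      /* stand-in for TDF_NEEDRESCHED */
    };

    static void
    maybe_resched_model(struct thread_model *td, struct thread_model *curthread)
    {
        /* Mirrors the visible test: only a strictly better priority
         * forces the running thread to reschedule. */
        if (td->td_priority < curthread->td_priority)
            curthread->needs_resched = 1;
    }
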
315 maybe_preempt(struct thread *td)
344 THREAD_LOCK_ASSERT(td, MA_OWNED);
345 KASSERT((td->td_inhibitors == 0),
347 pri = td->td_priority;
460 struct thread *td;
472 FOREACH_THREAD_IN_PROC(p, td) {
474 ts = td_get_sched(td);
475 thread_lock(td);
485 if (TD_ON_RUNQ(td)) {
487 td->td_flags &= ~TDF_DIDRUN;
488 } else if (TD_IS_RUNNING(td)) {
491 } else if (td->td_flags & TDF_DIDRUN) {
493 td->td_flags &= ~TDF_DIDRUN;
535 updatepri(td);
541 thread_unlock(td);
545 resetpriority(td);
546 resetpriority_thread(td);
547 thread_unlock(td);
573 updatepri(struct thread *td)
579 ts = td_get_sched(td);
598 resetpriority(struct thread *td)
602 if (td->td_pri_class != PRI_TIMESHARE)
605 td_get_sched(td)->ts_estcpu / INVERSE_ESTCPU_WEIGHT +
606 NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
609 sched_user_prio(td, newpriority);
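
The resetpriority() fragments above carry the classic 4BSD priority formula for timesharing threads: the user priority rises (gets numerically worse) with recent CPU usage (ts_estcpu) and with the process's nice value, and the result is handed to sched_user_prio(). A standalone sketch of that arithmetic follows; the base term, the weight/scale constants, and the clamping range are illustrative assumptions (suffixed _X), not values taken from the listing.

    /* Illustrative constants; the kernel's real values may differ. */
    #define INVERSE_ESTCPU_WEIGHT_X  8      /* assumed estcpu scaling */
    #define NICE_WEIGHT_X            1      /* assumed weight per nice step */
    #define PRIO_MIN_X             (-20)
    #define PRI_MIN_TS_X            88      /* assumed timeshare band */
    #define PRI_MAX_TS_X           223

    static int
    timeshare_priority(int base, int estcpu, int nice)
    {
        int newpriority;

        newpriority = base + estcpu / INVERSE_ESTCPU_WEIGHT_X +
            NICE_WEIGHT_X * (nice - PRIO_MIN_X);
        /* Clamping into the timeshare band is assumed, not shown above. */
        if (newpriority < PRI_MIN_TS_X)
            newpriority = PRI_MIN_TS_X;
        if (newpriority > PRI_MAX_TS_X)
            newpriority = PRI_MAX_TS_X;
        return (newpriority);
    }

More accumulated CPU time and a higher nice value both push the number up, i.e. toward a worse scheduling priority.
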
617 resetpriority_thread(struct thread *td)
621 if (td->td_priority < PRI_MIN_TIMESHARE ||
622 td->td_priority > PRI_MAX_TIMESHARE)
626 maybe_resched(td);
628 sched_prio(td, td->td_user_pri);
708 sched_clock(struct thread *td)
713 THREAD_LOCK_ASSERT(td, MA_OWNED);
714 ts = td_get_sched(td);
719 resetpriority(td);
720 resetpriority_thread(td);
727 if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
729 td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
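
The sched_clock() fragments show the per-tick work: the priority is recomputed from the updated estcpu (resetpriority/resetpriority_thread) and, for anything but the idle thread, the remaining slice is decremented; when it runs out, a reschedule is requested and the slice-end reason is recorded. A simplified model with stand-in flag names:

    #define FLAG_NEEDRESCHED 0x1    /* stand-in for TDF_NEEDRESCHED */
    #define FLAG_SLICEEND    0x2    /* stand-in for TDF_SLICEEND */

    struct tick_model {
        int ts_slice;   /* ticks left in the current slice */
        int flags;
        int is_idle;
    };

    static void
    clock_tick_model(struct tick_model *ts)
    {
        /* Mirrors the visible test: the idle thread never exhausts a slice. */
        if (!ts->is_idle && --ts->ts_slice <= 0)
            ts->flags |= FLAG_NEEDRESCHED | FLAG_SLICEEND;
    }
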
741 sched_exit(struct proc *p, struct thread *td)
744 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "proc exit",
745 "prio:%d", td->td_priority);
748 sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
752 sched_exit_thread(struct thread *td, struct thread *child)
757 thread_lock(td);
758 td_get_sched(td)->ts_estcpu = ESTCPULIM(td_get_sched(td)->ts_estcpu +
760 thread_unlock(td);
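
sched_exit_thread() folds the exiting child's estcpu back into the parent under the parent's thread lock, clamped by ESTCPULIM() so repeated child exits cannot push the estimate past its ceiling. A sketch of that accumulate-and-clamp step; the ceiling below is a placeholder, not the kernel's derived limit.

    #define ESTCPU_MAX_X 1000   /* placeholder ceiling */

    static unsigned int
    estcpu_inherit(unsigned int parent_estcpu, unsigned int child_estcpu)
    {
        unsigned int e = parent_estcpu + child_estcpu;

        /* Plays the role of ESTCPULIM(): never exceed the ceiling. */
        return (e > ESTCPU_MAX_X ? ESTCPU_MAX_X : e);
    }
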
768 sched_fork(struct thread *td, struct thread *childtd)
770 sched_fork_thread(td, childtd);
774 sched_fork_thread(struct thread *td, struct thread *childtd)
781 childtd->td_cpuset = cpuset_ref(td->td_cpuset);
785 tsc = td_get_sched(td);
794 struct thread *td;
798 FOREACH_THREAD_IN_PROC(p, td) {
799 thread_lock(td);
800 resetpriority(td);
801 resetpriority_thread(td);
802 thread_unlock(td);
807 sched_class(struct thread *td, int class)
809 THREAD_LOCK_ASSERT(td, MA_OWNED);
810 td->td_pri_class = class;
817 sched_priority(struct thread *td, u_char prio)
821 KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "priority change",
822 "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
824 SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio);
825 if (td != curthread && prio > td->td_priority) {
827 "lend prio", "prio:%d", td->td_priority, "new prio:%d",
828 prio, KTR_ATTR_LINKED, sched_tdname(td));
829 SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
832 THREAD_LOCK_ASSERT(td, MA_OWNED);
833 if (td->td_priority == prio)
835 td->td_priority = prio;
836 if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
837 sched_rem(td);
838 sched_add(td, SRQ_BORING);
847 sched_lend_prio(struct thread *td, u_char prio)
850 td->td_flags |= TDF_BORROWING;
851 sched_priority(td, prio);
863 sched_unlend_prio(struct thread *td, u_char prio)
867 if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
868 td->td_base_pri <= PRI_MAX_TIMESHARE)
869 base_pri = td->td_user_pri;
871 base_pri = td->td_base_pri;
873 td->td_flags &= ~TDF_BORROWING;
874 sched_prio(td, base_pri);
876 sched_lend_prio(td, prio);
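
sched_unlend_prio() decides whether a lent (borrowed) priority can be dropped: the thread's own priority is its user priority when td_base_pri lies in the timeshare band, otherwise td_base_pri itself; if the remaining lend is no better than that, TDF_BORROWING is cleared and sched_prio() restores the base, otherwise sched_lend_prio() keeps the lend in force. A sketch of that decision; the comparison direction is inferred from the structure, not visible in the listing.

    /* Lower value = higher priority (assumed convention).
     * base_is_timeshare corresponds to td_base_pri lying inside the
     * PRI_MIN_TIMESHARE..PRI_MAX_TIMESHARE band tested above. */
    static unsigned char
    unlend_prio_model(unsigned char base_pri, unsigned char user_pri,
        unsigned char remaining_lend, int base_is_timeshare, int *still_borrowing)
    {
        unsigned char own_pri = base_is_timeshare ? user_pri : base_pri;

        if (remaining_lend >= own_pri) {
            *still_borrowing = 0;       /* ~TDF_BORROWING, then sched_prio() */
            return (own_pri);
        }
        *still_borrowing = 1;           /* sched_lend_prio() path */
        return (remaining_lend);
    }
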
880 sched_prio(struct thread *td, u_char prio)
885 td->td_base_pri = prio;
891 if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
895 oldprio = td->td_priority;
896 sched_priority(td, prio);
902 if (TD_ON_LOCK(td) && oldprio != prio)
903 turnstile_adjust(td, oldprio);
907 sched_user_prio(struct thread *td, u_char prio)
910 THREAD_LOCK_ASSERT(td, MA_OWNED);
911 td->td_base_user_pri = prio;
912 if (td->td_lend_user_pri <= prio)
914 td->td_user_pri = prio;
918 sched_lend_user_prio(struct thread *td, u_char prio)
921 THREAD_LOCK_ASSERT(td, MA_OWNED);
922 td->td_lend_user_pri = prio;
923 td->td_user_pri = min(prio, td->td_base_user_pri);
924 if (td->td_priority > td->td_user_pri)
925 sched_prio(td, td->td_user_pri);
926 else if (td->td_priority != td->td_user_pri)
927 td->td_flags |= TDF_NEEDRESCHED;
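
sched_lend_user_prio() lends a user-level priority: the effective user priority becomes the better (lower) of the lent value and the base user priority, the kernel priority is pulled down immediately via sched_prio() if it currently lags, and otherwise a reschedule is flagged so the change takes effect at the next switch. Modeled below with simplified fields; the direct assignment stands in for the sched_prio() call.

    struct uprio_model {
        unsigned char td_priority;      /* current kernel priority */
        unsigned char td_user_pri;      /* effective user priority */
        unsigned char td_base_user_pri; /* user priority before lending */
        unsigned char td_lend_user_pri;
        int need_resched;               /* stand-in for TDF_NEEDRESCHED */
    };

    static void
    lend_user_prio_model(struct uprio_model *td, unsigned char prio)
    {
        td->td_lend_user_pri = prio;
        td->td_user_pri = prio < td->td_base_user_pri ?
            prio : td->td_base_user_pri;
        if (td->td_priority > td->td_user_pri)
            td->td_priority = td->td_user_pri;  /* sched_prio() in the real code */
        else if (td->td_priority != td->td_user_pri)
            td->need_resched = 1;
    }
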
931 sched_sleep(struct thread *td, int pri)
934 THREAD_LOCK_ASSERT(td, MA_OWNED);
935 td->td_slptick = ticks;
936 td_get_sched(td)->ts_slptime = 0;
937 if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
938 sched_prio(td, pri);
939 if (TD_IS_SUSPENDED(td) || pri >= PSOCK)
940 td->td_flags |= TDF_CANSWAP;
944 sched_switch(struct thread *td, struct thread *newtd, int flags)
952 ts = td_get_sched(td);
953 p = td->td_proc;
955 THREAD_LOCK_ASSERT(td, MA_OWNED);
962 if (td->td_lock != &sched_lock) {
964 tmtx = thread_lock_block(td);
967 if ((td->td_flags & TDF_NOLOAD) == 0)
970 td->td_lastcpu = td->td_oncpu;
971 preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
973 td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
974 td->td_owepreempt = 0;
975 td->td_oncpu = NOCPU;
983 if (td->td_flags & TDF_IDLETD) {
984 TD_SET_CAN_RUN(td);
989 if (TD_IS_RUNNING(td)) {
991 sched_add(td, preempted ?
1017 if (TD_IS_IDLETHREAD(td))
1018 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
1019 "prio:%d", td->td_priority);
1021 KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
1022 "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
1023 "lockname:\"%s\"", td->td_lockname);
1026 if (td != newtd) {
1028 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1029 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1046 cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
1068 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1069 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1074 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
1075 "prio:%d", td->td_priority);
1078 if (td->td_flags & TDF_IDLETD)
1081 sched_lock.mtx_lock = (uintptr_t)td;
1082 td->td_oncpu = PCPU_GET(cpuid);
1083 MPASS(td->td_lock == &sched_lock);
1087 sched_wakeup(struct thread *td)
1091 THREAD_LOCK_ASSERT(td, MA_OWNED);
1092 ts = td_get_sched(td);
1093 td->td_flags &= ~TDF_CANSWAP;
1095 updatepri(td);
1096 resetpriority(td);
1098 td->td_slptick = 0;
1101 sched_add(td, SRQ_BORING);
1228 sched_pickcpu(struct thread *td)
1234 if (td->td_lastcpu != NOCPU && THREAD_CAN_SCHED(td, td->td_lastcpu))
1235 best = td->td_lastcpu;
1239 if (!THREAD_CAN_SCHED(td, cpu))
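
sched_pickcpu() prefers the CPU the thread last ran on, as long as THREAD_CAN_SCHED() (the cpuset mask test defined near the top of the listing) still allows it, and skips every CPU that test rejects. The tie-break among the remaining CPUs is not visible here; the sketch assumes the shortest per-CPU run queue wins, which is a guess labeled as such.

    #define NCPU_X  8       /* illustrative CPU count */
    #define NOCPU_X (-1)

    /* can_sched[] plays the role of THREAD_CAN_SCHED(); runq_len[] is an
     * assumed per-CPU run-queue length used only for the tie-break. */
    static int
    pickcpu_model(int lastcpu, const int can_sched[NCPU_X],
        const int runq_len[NCPU_X])
    {
        int best = NOCPU_X, cpu;

        if (lastcpu != NOCPU_X && can_sched[lastcpu])
            best = lastcpu;
        for (cpu = 0; cpu < NCPU_X; cpu++) {
            if (!can_sched[cpu])
                continue;
            if (best == NOCPU_X || runq_len[cpu] < runq_len[best])
                best = cpu;
        }
        return (best);
    }
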
1254 sched_add(struct thread *td, int flags)
1263 ts = td_get_sched(td);
1264 THREAD_LOCK_ASSERT(td, MA_OWNED);
1265 KASSERT((td->td_inhibitors == 0),
1267 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1269 KASSERT(td->td_flags & TDF_INMEM,
1272 KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
1273 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1276 KTR_ATTR_LINKED, sched_tdname(td));
1277 SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
1285 if (td->td_lock != &sched_lock) {
1287 thread_lock_set(td, &sched_lock);
1289 TD_SET_RUNQ(td);
1300 if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND ||
1302 if (td->td_pinned != 0)
1303 cpu = td->td_lastcpu;
1304 else if (td->td_flags & TDF_BOUND) {
1311 cpu = sched_pickcpu(td);
1315 "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
1319 "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
1320 td);
1325 if ((td->td_flags & TDF_NOLOAD) == 0)
1327 runq_add(ts->ts_runq, td, flags);
1333 kick_other_cpu(td->td_priority, cpu);
1347 if (!maybe_preempt(td))
1348 maybe_resched(td);
1356 ts = td_get_sched(td);
1357 THREAD_LOCK_ASSERT(td, MA_OWNED);
1358 KASSERT((td->td_inhibitors == 0),
1360 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
1362 KASSERT(td->td_flags & TDF_INMEM,
1364 KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
1365 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1368 KTR_ATTR_LINKED, sched_tdname(td));
1369 SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
1376 if (td->td_lock != &sched_lock) {
1378 thread_lock_set(td, &sched_lock);
1380 TD_SET_RUNQ(td);
1381 CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
1384 if ((td->td_flags & TDF_NOLOAD) == 0)
1386 runq_add(ts->ts_runq, td, flags);
1387 if (!maybe_preempt(td))
1388 maybe_resched(td);
1393 sched_rem(struct thread *td)
1397 ts = td_get_sched(td);
1398 KASSERT(td->td_flags & TDF_INMEM,
1400 KASSERT(TD_ON_RUNQ(td),
1403 KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
1404 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1406 SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
1408 if ((td->td_flags & TDF_NOLOAD) == 0)
1414 runq_remove(ts->ts_runq, td);
1415 TD_SET_CAN_RUN(td);
1425 struct thread *td;
1433 td = runq_choose_fuzz(&runq, runq_fuzz);
1436 if (td == NULL ||
1438 tdcpu->td_priority < td->td_priority)) {
1439 CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
1441 td = tdcpu;
1444 CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
1449 td = runq_choose(&runq);
1452 if (td) {
1454 if (td == tdcpu)
1457 runq_remove(rq, td);
1458 td->td_flags |= TDF_DIDRUN;
1460 KASSERT(td->td_flags & TDF_INMEM,
1462 return (td);
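
The sched_choose() fragments (ending at the TDF_DIDRUN line) compare the best thread on the global run queue, picked with runq_choose_fuzz(), against the head of the current CPU's private queue, and take whichever has the numerically lower priority before removing it from its queue. The fallback when both queues are empty is assumed to be the idle thread, which is not shown in the listing.

    struct rq_head {
        int valid;          /* queue is non-empty */
        int td_priority;    /* lower value = higher priority */
    };

    /* Returns 1 to run the per-CPU candidate, 0 for the global one,
     * -1 when both queues are empty (assumed idle-thread fallback). */
    static int
    choose_model(struct rq_head global, struct rq_head percpu)
    {
        if (!global.valid && !percpu.valid)
            return (-1);
        if (!global.valid ||
            (percpu.valid && percpu.td_priority < global.td_priority))
            return (1);
        return (0);
    }
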
1468 sched_preempt(struct thread *td)
1471 SDT_PROBE2(sched, , , surrender, td, td->td_proc);
1472 thread_lock(td);
1473 if (td->td_critnest > 1)
1474 td->td_owepreempt = 1;
1477 thread_unlock(td);
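
sched_preempt() shows the critical-section rule: if the thread is nested more than one level deep in critical sections (td_critnest > 1 while its thread lock is held), it cannot be switched out now, so only td_owepreempt is set and the preemption is honored when the section ends; otherwise the switch presumably happens immediately (the mi_switch() call itself is not in the listing).

    struct preempt_model {
        int td_critnest;    /* critical-section nesting level */
        int td_owepreempt;  /* deferred preemption pending */
    };

    static void
    preempt_model(struct preempt_model *td, void (*do_switch)(void))
    {
        if (td->td_critnest > 1)
            td->td_owepreempt = 1;  /* honored at critical_exit() */
        else
            do_switch();            /* assumed: mi_switch() in the real code */
    }
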
1481 sched_userret(struct thread *td)
1492 KASSERT((td->td_flags & TDF_BORROWING) == 0,
1494 if (td->td_priority != td->td_user_pri) {
1495 thread_lock(td);
1496 td->td_priority = td->td_user_pri;
1497 td->td_base_pri = td->td_user_pri;
1498 thread_unlock(td);
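
sched_userret() runs on the way back to user mode: any borrowed priority must already be gone (the KASSERT on TDF_BORROWING), and if the kernel-side priority still differs from the user priority, both the current and the base priority are reset to the user value under the thread lock. The locking is elided in this sketch.

    #include <assert.h>

    struct userret_model {
        int borrowing;              /* TDF_BORROWING */
        unsigned char td_priority;
        unsigned char td_base_pri;
        unsigned char td_user_pri;
    };

    static void
    userret_model(struct userret_model *td)
    {
        assert(!td->borrowing);     /* mirrors the KASSERT above */
        if (td->td_priority != td->td_user_pri) {
            td->td_priority = td->td_user_pri;
            td->td_base_pri = td->td_user_pri;
        }
    }
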
1503 sched_bind(struct thread *td, int cpu)
1507 THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
1508 KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
1510 ts = td_get_sched(td);
1512 td->td_flags |= TDF_BOUND;
1523 sched_unbind(struct thread* td)
1525 THREAD_LOCK_ASSERT(td, MA_OWNED);
1526 KASSERT(td == curthread, ("sched_unbind: can only bind curthread"));
1527 td->td_flags &= ~TDF_BOUND;
1531 sched_is_bound(struct thread *td)
1533 THREAD_LOCK_ASSERT(td, MA_OWNED);
1534 return (td->td_flags & TDF_BOUND);
1538 sched_relinquish(struct thread *td)
1540 thread_lock(td);
1542 thread_unlock(td);
1564 sched_pctcpu(struct thread *td)
1568 THREAD_LOCK_ASSERT(td, MA_OWNED);
1569 ts = td_get_sched(td);
1579 sched_pctcpu_delta(struct thread *td)
1585 THREAD_LOCK_ASSERT(td, MA_OWNED);
1586 ts = td_get_sched(td);
1608 sched_estcpu(struct thread *td)
1611 return (td_get_sched(td)->ts_estcpu);
1642 sched_throw(struct thread *td)
1653 if (td == NULL) {
1660 MPASS(td->td_lock == &sched_lock);
1661 td->td_lastcpu = td->td_oncpu;
1662 td->td_oncpu = NOCPU;
1666 cpu_throw(td, choosethread()); /* doesn't return */
1670 sched_fork_exit(struct thread *td)
1677 td->td_oncpu = PCPU_GET(cpuid);
1678 sched_lock.mtx_lock = (uintptr_t)td;
1681 THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
1683 KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
1684 "prio:%d", td->td_priority);
1689 sched_tdname(struct thread *td)
1694 ts = td_get_sched(td);
1697 "%s tid %d", td->td_name, td->td_tid);
1700 return (td->td_name);
1706 sched_clear_tdname(struct thread *td)
1710 ts = td_get_sched(td);
1716 sched_affinity(struct thread *td)
1722 THREAD_LOCK_ASSERT(td, MA_OWNED);
1728 ts = td_get_sched(td);
1731 if (!THREAD_CAN_SCHED(td, cpu)) {
1744 if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
1747 switch (td->td_state) {
1754 THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
1758 sched_rem(td);
1759 sched_add(td, SRQ_BORING);
1766 if (THREAD_CAN_SCHED(td, td->td_oncpu))
1769 td->td_flags |= TDF_NEEDRESCHED;
1770 if (td != curthread)