sched_4bsd.c: side-by-side diff, revision 177426 (deleted side) vs. revision 177435 (added side)
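Editor's note: taken together, the hunks below slim struct td_sched down to per-thread accounting and move run-queue state onto struct thread itself. The ts_procq linkage and the ts_thread back-pointer disappear, the ts_flags alias and the TSF_DIDRUN/TSF_EXIT/TSF_BOUND definitions are removed in favor of TDF_DIDRUN and TDF_BOUND kept directly in td_flags, and the runq_*() calls now pass and return threads. The matching sys/runq.h declarations are not part of this diff; the sketch below is inferred from the call sites and should be read as an assumption, not the authoritative header.

	struct runq;
	struct thread;

	/* Assumed post-change shapes, inferred from the call sites in this file. */
	void		 runq_add(struct runq *, struct thread *, int flags);
	void		 runq_remove(struct runq *, struct thread *);
	struct thread	*runq_choose(struct runq *);
	struct thread	*runq_choose_fuzz(struct runq *, int fuzz);
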
1/*-
2 * Copyright (c) 1982, 1986, 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.

--- 19 unchanged lines hidden (view full) ---

28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 1982, 1986, 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.

--- 19 unchanged lines hidden (view full) ---

28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 177426 2008-03-20 03:06:33Z jeff $");
36__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 177435 2008-03-20 05:51:16Z jeff $");
37
38#include "opt_hwpmc_hooks.h"
39#include "opt_sched.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/cpuset.h>
44#include <sys/kernel.h>

--- 31 unchanged lines hidden (view full) ---

76#define NICE_WEIGHT 1 /* Priorities per nice level. */
77
78/*
79 * The schedulable entity that runs a context.
80 * This is an extension to the thread structure and is tailored to
81 * the requirements of this scheduler
82 */
83struct td_sched {
37
38#include "opt_hwpmc_hooks.h"
39#include "opt_sched.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/cpuset.h>
44#include <sys/kernel.h>

--- 31 unchanged lines hidden (view full) ---

76#define NICE_WEIGHT 1 /* Priorities per nice level. */
77
78/*
79 * The schedulable entity that runs a context.
80 * This is an extension to the thread structure and is tailored to
81 * the requirements of this scheduler
82 */
83struct td_sched {
84 TAILQ_ENTRY(td_sched) ts_procq; /* (j/z) Run queue. */
85 struct thread *ts_thread; /* (*) Active associated thread. */
86 fixpt_t ts_pctcpu; /* (j) %cpu during p_swtime. */
84 fixpt_t ts_pctcpu; /* (j) %cpu during p_swtime. */
87 u_char ts_rqindex; /* (j) Run queue index. */
88 int ts_cpticks; /* (j) Ticks of cpu time. */
89 int ts_slptime; /* (j) Seconds !RUNNING. */
90 struct runq *ts_runq; /* runq the thread is currently on */
91};
92
93/* flags kept in td_flags */
94#define TDF_DIDRUN TDF_SCHED0 /* thread actually ran. */
85 int ts_cpticks; /* (j) Ticks of cpu time. */
86 int ts_slptime; /* (j) Seconds !RUNNING. */
87 struct runq *ts_runq; /* runq the thread is currently on */
88};
89
90/* flags kept in td_flags */
91#define TDF_DIDRUN TDF_SCHED0 /* thread actually ran. */
95#define TDF_EXIT TDF_SCHED1 /* thread is being killed. */
96#define TDF_BOUND TDF_SCHED2
92#define TDF_BOUND TDF_SCHED1 /* Bound to one CPU. */
97
93
98#define ts_flags ts_thread->td_flags
99#define TSF_DIDRUN TDF_DIDRUN /* thread actually ran. */
100#define TSF_EXIT TDF_EXIT /* thread is being killed. */
101#define TSF_BOUND TDF_BOUND /* stuck to one CPU */
102
103#define SKE_RUNQ_PCPU(ts) \
104 ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
105
106static struct td_sched td_sched0;
107struct mtx sched_lock;
108
109static int sched_tdcnt; /* Total runnable threads in the system. */
110static int sched_quantum; /* Roundrobin scheduling quantum in ticks. */

--- 183 unchanged lines hidden (view full) ---

294 *
295 * If all of these conditions are false, but the current thread is in
296 * a nested critical section, then we have to defer the preemption
297 * until we exit the critical section. Otherwise, switch immediately
298 * to the new thread.
299 */
300 ctd = curthread;
301 THREAD_LOCK_ASSERT(td, MA_OWNED);
94#define SKE_RUNQ_PCPU(ts) \
95 ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
96
97static struct td_sched td_sched0;
98struct mtx sched_lock;
99
100static int sched_tdcnt; /* Total runnable threads in the system. */
101static int sched_quantum; /* Roundrobin scheduling quantum in ticks. */
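As a reading aid for the struct td_sched and flag hunks above, here is a minimal userland model of the resulting layout. It is illustrative only: struct thread is not shown in this diff, so apart from td_flags, td_priority and td_rqindex (which do appear in later hunks) the member set is an assumption, and the flag values are stand-ins for the real TDF_SCHED* bits.

	#include <stdio.h>

	struct runq;				/* defined elsewhere (runq.h) */

	#define	TDF_DIDRUN	0x01		/* stand-in for the real TDF_SCHED0 */
	#define	TDF_BOUND	0x02		/* stand-in for the real TDF_SCHED1 */

	struct thread {				/* only the members this sketch needs */
		unsigned char	td_rqindex;	/* run-queue index, was ts_rqindex */
		int		td_flags;	/* now holds DIDRUN/BOUND, was ts_flags/TSF_* */
		int		td_priority;
	};

	struct td_sched {			/* what remains: scheduler accounting */
		int		ts_cpticks;	/* ticks of CPU time */
		int		ts_slptime;	/* seconds not RUNNING */
		struct runq	*ts_runq;	/* run queue the thread is on */
	};

	int
	main(void)
	{
		struct thread td = { .td_flags = 0, .td_priority = 120 };

		td.td_flags |= TDF_BOUND;	/* was: ts->ts_flags |= TSF_BOUND */
		printf("bound=%d didrun=%d\n",
		    (td.td_flags & TDF_BOUND) != 0, (td.td_flags & TDF_DIDRUN) != 0);
		return (0);
	}
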

--- 183 unchanged lines hidden (view full) ---

285 *
286 * If all of these conditions are false, but the current thread is in
287 * a nested critical section, then we have to defer the preemption
288 * until we exit the critical section. Otherwise, switch immediately
289 * to the new thread.
290 */
291 ctd = curthread;
292 THREAD_LOCK_ASSERT(td, MA_OWNED);
302 KASSERT ((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
303 ("thread has no (or wrong) sched-private part."));
304 KASSERT((td->td_inhibitors == 0),
305 ("maybe_preempt: trying to run inhibited thread"));
306 pri = td->td_priority;
307 cpri = ctd->td_priority;
308 if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
309 TD_IS_INHIBITED(ctd))
310 return (0);
311#ifndef FULL_PREEMPTION

--- 145 unchanged lines hidden (view full) ---

457 */
458 /*
459 * The td_sched slptimes are not touched in wakeup
460 * because the thread may not HAVE everything in
461 * memory? XXX I think this is out of date.
462 */
463 if (TD_ON_RUNQ(td)) {
464 awake = 1;
293 KASSERT((td->td_inhibitors == 0),
294 ("maybe_preempt: trying to run inhibited thread"));
295 pri = td->td_priority;
296 cpri = ctd->td_priority;
297 if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
298 TD_IS_INHIBITED(ctd))
299 return (0);
300#ifndef FULL_PREEMPTION

--- 145 unchanged lines hidden (view full) ---

446 */
447 /*
448 * The td_sched slptimes are not touched in wakeup
449 * because the thread may not HAVE everything in
450 * memory? XXX I think this is out of date.
451 */
452 if (TD_ON_RUNQ(td)) {
453 awake = 1;
465 ts->ts_flags &= ~TSF_DIDRUN;
454 td->td_flags &= ~TDF_DIDRUN;
466 } else if (TD_IS_RUNNING(td)) {
467 awake = 1;
455 } else if (TD_IS_RUNNING(td)) {
456 awake = 1;
468 /* Do not clear TSF_DIDRUN */
469 } else if (ts->ts_flags & TSF_DIDRUN) {
457 /* Do not clear TDF_DIDRUN */
458 } else if (td->td_flags & TDF_DIDRUN) {
470 awake = 1;
459 awake = 1;
471 ts->ts_flags &= ~TSF_DIDRUN;
460 td->td_flags &= ~TDF_DIDRUN;
472 }
473
474 /*
475 * ts_pctcpu is only for ps and ttyinfo().
476 */
477 ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
478 /*
479 * If the td_sched has been idle the entire second,

--- 151 unchanged lines hidden (view full) ---

631schedinit(void)
632{
633 /*
634 * Set up the scheduler specific parts of proc0.
635 */
636 proc0.p_sched = NULL; /* XXX */
637 thread0.td_sched = &td_sched0;
638 thread0.td_lock = &sched_lock;
461 }
462
463 /*
464 * ts_pctcpu is only for ps and ttyinfo().
465 */
466 ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
467 /*
468 * If the td_sched has been idle the entire second,
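The decay statement a few lines above, ts_pctcpu = (ts_pctcpu * ccpu) >> FSHIFT, is the %CPU aging kept only for ps(1) and ttyinfo(); the surrounding function (schedcpu(), mostly hidden above) applies it about once a second. Neither ccpu nor FSHIFT is defined in the visible hunks; the worked example below assumes the stock values (FSHIFT of 11, ccpu close to exp(-1/20)) and only illustrates the fixed-point arithmetic.

	#include <stdio.h>
	#include <stdint.h>

	#define	FSHIFT	11				/* fixed-point fraction bits (assumed) */
	#define	FSCALE	(1 << FSHIFT)

	int
	main(void)
	{
		uint32_t ccpu = (uint32_t)(0.95122 * FSCALE);	/* ~exp(-1/20), about 1948 */
		uint32_t pctcpu = FSCALE;			/* start at "100% of a CPU" */
		int sec;

		for (sec = 1; sec <= 20; sec++)			/* one decay step per second */
			pctcpu = (pctcpu * ccpu) >> FSHIFT;
		/* After 20 idle seconds the value has fallen to roughly 1/e (~0.36). */
		printf("idle for 20s: %.3f of a CPU\n", (double)pctcpu / FSCALE);
		return (0);
	}
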

--- 151 unchanged lines hidden (view full) ---

620schedinit(void)
621{
622 /*
623 * Set up the scheduler specific parts of proc0.
624 */
625 proc0.p_sched = NULL; /* XXX */
626 thread0.td_sched = &td_sched0;
627 thread0.td_lock = &sched_lock;
639 td_sched0.ts_thread = &thread0;
640 mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
641}
642
643int
644sched_runnable(void)
645{
646#ifdef SMP
647 return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);

--- 87 unchanged lines hidden (view full) ---

735{
736 struct td_sched *ts;
737
738 childtd->td_estcpu = td->td_estcpu;
739 childtd->td_lock = &sched_lock;
740 childtd->td_cpuset = cpuset_ref(td->td_cpuset);
741 ts = childtd->td_sched;
742 bzero(ts, sizeof(*ts));
628 mtx_init(&sched_lock, "sched lock", NULL, MTX_SPIN | MTX_RECURSE);
629}
630
631int
632sched_runnable(void)
633{
634#ifdef SMP
635 return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);

--- 87 unchanged lines hidden (view full) ---

723{
724 struct td_sched *ts;
725
726 childtd->td_estcpu = td->td_estcpu;
727 childtd->td_lock = &sched_lock;
728 childtd->td_cpuset = cpuset_ref(td->td_cpuset);
729 ts = childtd->td_sched;
730 bzero(ts, sizeof(*ts));
743 ts->ts_thread = childtd;
744}
745
746void
747sched_nice(struct proc *p, int nice)
748{
749 struct thread *td;
750
751 PROC_LOCK_ASSERT(p, MA_OWNED);

--- 22 unchanged lines hidden (view full) ---

774 CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
775 td, td->td_name, td->td_priority, prio, curthread,
776 curthread->td_name);
777
778 THREAD_LOCK_ASSERT(td, MA_OWNED);
779 if (td->td_priority == prio)
780 return;
781 td->td_priority = prio;
731}
732
733void
734sched_nice(struct proc *p, int nice)
735{
736 struct thread *td;
737
738 PROC_LOCK_ASSERT(p, MA_OWNED);

--- 22 unchanged lines hidden (view full) ---

761 CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
762 td, td->td_name, td->td_priority, prio, curthread,
763 curthread->td_name);
764
765 THREAD_LOCK_ASSERT(td, MA_OWNED);
766 if (td->td_priority == prio)
767 return;
768 td->td_priority = prio;
782 if (TD_ON_RUNQ(td) &&
783 td->td_sched->ts_rqindex != (prio / RQ_PPQ)) {
769 if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
784 sched_rem(td);
785 sched_add(td, SRQ_BORING);
786 }
787}
788
789/*
790 * Update a thread's priority when it is lent another thread's
791 * priority.

--- 164 unchanged lines hidden (view full) ---

956 * as if it had been added to the run queue and selected.
957 * It came from:
958 * * A preemption
959 * * An upcall
960 * * A followon
961 */
962 KASSERT((newtd->td_inhibitors == 0),
963 ("trying to run inhibited thread"));
770 sched_rem(td);
771 sched_add(td, SRQ_BORING);
772 }
773}
774
775/*
776 * Update a thread's priority when it is lent another thread's
777 * priority.
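Regarding the sched_prio() hunk just above: the requeue test now reads td_rqindex from the thread instead of going through td_sched, and it only triggers a remove/re-add when the new priority lands in a different run-queue bucket, i.e. when prio / RQ_PPQ changes. RQ_PPQ comes from runq.h, not this file; the value 4 used below is the stock setting and should be treated as an assumption.

	#include <stdio.h>

	#define	RQ_PPQ	4			/* priorities per run queue (assumed) */

	int
	main(void)
	{
		int cur_rqindex = 130 / RQ_PPQ;	/* bucket the thread currently sits in */
		int newprio = 127;

		if (cur_rqindex != newprio / RQ_PPQ)
			printf("requeue: bucket %d -> %d\n", cur_rqindex, newprio / RQ_PPQ);
		else
			printf("same bucket %d, priority updated in place\n", cur_rqindex);
		return (0);
	}
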

--- 164 unchanged lines hidden (view full) ---

942 * as if it had been added to the run queue and selected.
943 * It came from:
944 * * A preemption
945 * * An upcall
946 * * A followon
947 */
948 KASSERT((newtd->td_inhibitors == 0),
949 ("trying to run inhibited thread"));
964 newtd->td_sched->ts_flags |= TSF_DIDRUN;
950 newtd->td_flags |= TDF_DIDRUN;
965 TD_SET_RUNNING(newtd);
966 if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
967 sched_load_add();
968 } else {
969 newtd = choosethread();
970 }
971 MPASS(newtd->td_lock == &sched_lock);
972

--- 208 unchanged lines hidden (view full) ---

1181 TD_SET_RUNQ(td);
1182
1183 if (td->td_pinned != 0) {
1184 cpu = td->td_lastcpu;
1185 ts->ts_runq = &runq_pcpu[cpu];
1186 single_cpu = 1;
1187 CTR3(KTR_RUNQ,
1188 "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
951 TD_SET_RUNNING(newtd);
952 if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
953 sched_load_add();
954 } else {
955 newtd = choosethread();
956 }
957 MPASS(newtd->td_lock == &sched_lock);
958

--- 208 unchanged lines hidden (view full) ---

1167 TD_SET_RUNQ(td);
1168
1169 if (td->td_pinned != 0) {
1170 cpu = td->td_lastcpu;
1171 ts->ts_runq = &runq_pcpu[cpu];
1172 single_cpu = 1;
1173 CTR3(KTR_RUNQ,
1174 "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
1189 } else if ((ts)->ts_flags & TSF_BOUND) {
1175 } else if ((td)->td_flags & TDF_BOUND) {
1190 /* Find CPU from bound runq */
1191 KASSERT(SKE_RUNQ_PCPU(ts),("sched_add: bound td_sched not on cpu runq"));
1192 cpu = ts->ts_runq - &runq_pcpu[0];
1193 single_cpu = 1;
1194 CTR3(KTR_RUNQ,
1195 "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
1196 } else {
1197 CTR2(KTR_RUNQ,

--- 20 unchanged lines hidden (view full) ---

1218 return;
1219 else
1220 maybe_resched(td);
1221 }
1222 }
1223
1224 if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1225 sched_load_add();
1176 /* Find CPU from bound runq */
1177 KASSERT(SKE_RUNQ_PCPU(ts),("sched_add: bound td_sched not on cpu runq"));
1178 cpu = ts->ts_runq - &runq_pcpu[0];
1179 single_cpu = 1;
1180 CTR3(KTR_RUNQ,
1181 "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
1182 } else {
1183 CTR2(KTR_RUNQ,

--- 20 unchanged lines hidden (view full) ---
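A side note on the TDF_BOUND branch of sched_add() above: the CPU a bound thread must run on is recovered from which per-CPU run queue ts_runq points into, using ordinary pointer subtraction (ts->ts_runq - &runq_pcpu[0] yields the array index). The runq_pcpu array itself is declared elsewhere in this file and its size is not shown here; the sketch below only demonstrates the idiom.

	#include <stdio.h>

	struct runq { int dummy; };

	static struct runq runq_pcpu[4];	/* stand-in for the per-CPU queues */

	int
	main(void)
	{
		struct runq *ts_runq = &runq_pcpu[2];	/* as set when binding or pinning */
		long cpu = ts_runq - &runq_pcpu[0];	/* element index == CPU number */

		printf("bound to cpu %ld\n", cpu);	/* prints 2 */
		return (0);
	}
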

1204 return;
1205 else
1206 maybe_resched(td);
1207 }
1208 }
1209
1210 if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1211 sched_load_add();
1226 runq_add(ts->ts_runq, ts, flags);
1212 runq_add(ts->ts_runq, td, flags);
1227}
1228#else /* SMP */
1229{
1230 struct td_sched *ts;
1231 ts = td->td_sched;
1232 THREAD_LOCK_ASSERT(td, MA_OWNED);
1233 KASSERT((td->td_inhibitors == 0),
1234 ("sched_add: trying to run inhibited thread"));

--- 28 unchanged lines hidden (view full) ---

1263 * which also only happens when we are about to yield.
1264 */
1265 if((flags & SRQ_YIELDING) == 0) {
1266 if (maybe_preempt(td))
1267 return;
1268 }
1269 if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1270 sched_load_add();
1213}
1214#else /* SMP */
1215{
1216 struct td_sched *ts;
1217 ts = td->td_sched;
1218 THREAD_LOCK_ASSERT(td, MA_OWNED);
1219 KASSERT((td->td_inhibitors == 0),
1220 ("sched_add: trying to run inhibited thread"));

--- 28 unchanged lines hidden (view full) ---

1249 * which also only happens when we are about to yield.
1250 */
1251 if((flags & SRQ_YIELDING) == 0) {
1252 if (maybe_preempt(td))
1253 return;
1254 }
1255 if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1256 sched_load_add();
1271 runq_add(ts->ts_runq, ts, flags);
1257 runq_add(ts->ts_runq, td, flags);
1272 maybe_resched(td);
1273}
1274#endif /* SMP */
1275
1276void
1277sched_rem(struct thread *td)
1278{
1279 struct td_sched *ts;

--- 5 unchanged lines hidden (view full) ---

1285 ("sched_rem: thread not on run queue"));
1286 mtx_assert(&sched_lock, MA_OWNED);
1287 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1288 td, td->td_name, td->td_priority, curthread,
1289 curthread->td_name);
1290
1291 if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1292 sched_load_rem();
1258 maybe_resched(td);
1259}
1260#endif /* SMP */
1261
1262void
1263sched_rem(struct thread *td)
1264{
1265 struct td_sched *ts;

--- 5 unchanged lines hidden (view full) ---

1271 ("sched_rem: thread not on run queue"));
1272 mtx_assert(&sched_lock, MA_OWNED);
1273 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1274 td, td->td_name, td->td_priority, curthread,
1275 curthread->td_name);
1276
1277 if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1278 sched_load_rem();
1293 runq_remove(ts->ts_runq, ts);
1279 runq_remove(ts->ts_runq, td);
1294 TD_SET_CAN_RUN(td);
1295}
1296
1297/*
1298 * Select threads to run.
1299 * Notice that the running threads still consume a slot.
1300 */
1301struct thread *
1302sched_choose(void)
1303{
1280 TD_SET_CAN_RUN(td);
1281}
1282
1283/*
1284 * Select threads to run.
1285 * Notice that the running threads still consume a slot.
1286 */
1287struct thread *
1288sched_choose(void)
1289{
1304 struct td_sched *ts;
1290 struct thread *td;
1305 struct runq *rq;
1306
1307 mtx_assert(&sched_lock, MA_OWNED);
1308#ifdef SMP
1291 struct runq *rq;
1292
1293 mtx_assert(&sched_lock, MA_OWNED);
1294#ifdef SMP
1309 struct td_sched *kecpu;
1295 struct thread *tdcpu;
1310
1311 rq = &runq;
1296
1297 rq = &runq;
1312 ts = runq_choose_fuzz(&runq, runq_fuzz);
1313 kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
1298 td = runq_choose_fuzz(&runq, runq_fuzz);
1299 tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
1314
1300
1315 if (ts == NULL ||
1316 (kecpu != NULL &&
1317 kecpu->ts_thread->td_priority < ts->ts_thread->td_priority)) {
1318 CTR2(KTR_RUNQ, "choosing td_sched %p from pcpu runq %d", kecpu,
1301 if (td == NULL ||
1302 (tdcpu != NULL &&
1303 tdcpu->td_priority < td->td_priority)) {
1304 CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
1319 PCPU_GET(cpuid));
1305 PCPU_GET(cpuid));
1320 ts = kecpu;
1306 td = tdcpu;
1321 rq = &runq_pcpu[PCPU_GET(cpuid)];
1322 } else {
1307 rq = &runq_pcpu[PCPU_GET(cpuid)];
1308 } else {
1323 CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", ts);
1309 CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
1324 }
1325
1326#else
1327 rq = &runq;
1310 }
1311
1312#else
1313 rq = &runq;
1328 ts = runq_choose(&runq);
1314 td = runq_choose(&runq);
1329#endif
1330
1315#endif
1316
1331 if (ts) {
1332 runq_remove(rq, ts);
1333 ts->ts_flags |= TSF_DIDRUN;
1317 if (td) {
1318 runq_remove(rq, td);
1319 td->td_flags |= TDF_DIDRUN;
1334
1320
1335 KASSERT(ts->ts_thread->td_flags & TDF_INMEM,
1321 KASSERT(td->td_flags & TDF_INMEM,
1336 ("sched_choose: thread swapped out"));
1322 ("sched_choose: thread swapped out"));
1337 return (ts->ts_thread);
1323 return (td);
1338 }
1339 return (PCPU_GET(idlethread));
1340}
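The rewritten sched_choose() above now deals in struct thread directly: it takes the head of the global queue (via runq_choose_fuzz()) and the head of this CPU's queue, prefers the per-CPU thread when it exists and has a numerically lower (better) priority, removes the winner from its queue, marks it TDF_DIDRUN, and falls back to the idle thread when both queues are empty. A condensed sketch of just the comparison, with a placeholder thread type rather than the kernel's struct thread:

	#include <stdio.h>

	struct thread { int td_priority; };

	/* Lower td_priority wins; ties go to the global queue; NULL from both
	 * sides means the caller falls back to the idle thread. */
	static struct thread *
	choose_sketch(struct thread *global, struct thread *percpu)
	{
		if (global == NULL ||
		    (percpu != NULL && percpu->td_priority < global->td_priority))
			return (percpu);
		return (global);
	}

	int
	main(void)
	{
		struct thread g = { 140 }, p = { 120 };

		printf("chose priority %d\n", choose_sketch(&g, &p)->td_priority);	/* 120 */
		return (0);
	}
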
1341
1342void
1343sched_preempt(struct thread *td)
1344{
1345 thread_lock(td);

--- 32 unchanged lines hidden (view full) ---

1378 struct td_sched *ts;
1379
1380 THREAD_LOCK_ASSERT(td, MA_OWNED);
1381 KASSERT(TD_IS_RUNNING(td),
1382 ("sched_bind: cannot bind non-running thread"));
1383
1384 ts = td->td_sched;
1385
1324 }
1325 return (PCPU_GET(idlethread));
1326}
1327
1328void
1329sched_preempt(struct thread *td)
1330{
1331 thread_lock(td);

--- 32 unchanged lines hidden (view full) ---

1364 struct td_sched *ts;
1365
1366 THREAD_LOCK_ASSERT(td, MA_OWNED);
1367 KASSERT(TD_IS_RUNNING(td),
1368 ("sched_bind: cannot bind non-running thread"));
1369
1370 ts = td->td_sched;
1371
1386 ts->ts_flags |= TSF_BOUND;
1372 td->td_flags |= TDF_BOUND;
1387#ifdef SMP
1388 ts->ts_runq = &runq_pcpu[cpu];
1389 if (PCPU_GET(cpuid) == cpu)
1390 return;
1391
1392 mi_switch(SW_VOL, NULL);
1393#endif
1394}
1395
1396void
1397sched_unbind(struct thread* td)
1398{
1399 THREAD_LOCK_ASSERT(td, MA_OWNED);
1373#ifdef SMP
1374 ts->ts_runq = &runq_pcpu[cpu];
1375 if (PCPU_GET(cpuid) == cpu)
1376 return;
1377
1378 mi_switch(SW_VOL, NULL);
1379#endif
1380}
1381
1382void
1383sched_unbind(struct thread* td)
1384{
1385 THREAD_LOCK_ASSERT(td, MA_OWNED);
1400 td->td_sched->ts_flags &= ~TSF_BOUND;
1386 td->td_flags &= ~TDF_BOUND;
1401}
1402
1403int
1404sched_is_bound(struct thread *td)
1405{
1406 THREAD_LOCK_ASSERT(td, MA_OWNED);
1387}
1388
1389int
1390sched_is_bound(struct thread *td)
1391{
1392 THREAD_LOCK_ASSERT(td, MA_OWNED);
1407 return (td->td_sched->ts_flags & TSF_BOUND);
1393 return (td->td_flags & TDF_BOUND);
1408}
1409
1410void
1411sched_relinquish(struct thread *td)
1412{
1413 thread_lock(td);
1414 SCHED_STAT_INC(switch_relinquish);
1415 mi_switch(SW_VOL, NULL);

--- 94 unchanged lines hidden (view full) ---

1510 0, 0, __FILE__, __LINE__);
1511 THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
1512}
1513
1514void
1515sched_affinity(struct thread *td)
1516{
1517}
1394}
1395
1396void
1397sched_relinquish(struct thread *td)
1398{
1399 thread_lock(td);
1400 SCHED_STAT_INC(switch_relinquish);
1401 mi_switch(SW_VOL, NULL);

--- 94 unchanged lines hidden (view full) ---

1496 0, 0, __FILE__, __LINE__);
1497 THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
1498}
1499
1500void
1501sched_affinity(struct thread *td)
1502{
1503}
1518
1519#define KERN_SWITCH_INCLUDE 1
1520#include "kern/kern_switch.c"