sched_4bsd.c: revision 165693 vs. revision 166188 (deleted lines marked "-", added lines marked "+")
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.

--- 19 unchanged lines hidden ---

 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 165693 2006-12-31 15:56:04Z rwatson $");
+__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 166188 2007-01-23 08:46:51Z jeff $");

#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>

--- 33 unchanged lines hidden ---

 * This is an extension to the thread structure and is tailored to
 * the requirements of this scheduler
 */
struct td_sched {
	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
	struct thread	*ts_thread;	/* (*) Active associated thread. */
	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
	u_char		ts_rqindex;	/* (j) Run queue index. */
-	enum {
-		TSS_THREAD = 0x0,	/* slaved to thread state */
-		TSS_ONRUNQ
-	} ts_state;			/* (j) TD_STAT in scheduler status. */
	int		ts_cpticks;	/* (j) Ticks of cpu time. */
	struct runq	*ts_runq;	/* runq the thread is currently on */
};
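
The deleted ts_state field kept a scheduler-private copy of information the kernel already tracks in the shared thread state (td_state). The TD_* macros used by the replacement code throughout this diff come from <sys/proc.h>; a simplified sketch of their shape, recalled rather than verbatim, so treat the details as approximate:

	/* Simplified sketch of the shared thread-state machinery (approximate). */
	enum td_states {
		TDS_INACTIVE, TDS_INHIBITED, TDS_CAN_RUN, TDS_RUNQ, TDS_RUNNING
	};

	#define	TD_ON_RUNQ(td)		((td)->td_state == TDS_RUNQ)
	#define	TD_IS_RUNNING(td)	((td)->td_state == TDS_RUNNING)
	#define	TD_CAN_RUN(td)		((td)->td_state == TDS_CAN_RUN)
	#define	TD_SET_RUNQ(td)		((td)->td_state = TDS_RUNQ)
	#define	TD_SET_CAN_RUN(td)	((td)->td_state = TDS_CAN_RUN)
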

/* flags kept in td_flags */
#define	TDF_DIDRUN	TDF_SCHED0	/* thread actually ran. */
#define	TDF_EXIT	TDF_SCHED1	/* thread is being killed. */
#define	TDF_BOUND	TDF_SCHED2

--- 9 unchanged lines hidden ---

static struct td_sched td_sched0;

static int	sched_tdcnt;	/* Total runnable threads in the system. */
static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static struct callout roundrobin_callout;

-static struct td_sched *sched_choose(void);
-
static void	setup_runqs(void);
static void	roundrobin(void *arg);
static void	schedcpu(void);
static void	schedcpu_thread(void);
static void	sched_priority(struct thread *td, u_char prio);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct thread *td);

--- 274 unchanged lines hidden ---

		 * Increment sleep time (if sleeping).  We
		 * ignore overflow, as above.
		 */
		/*
		 * The td_sched slptimes are not touched in wakeup
		 * because the thread may not HAVE everything in
		 * memory? XXX I think this is out of date.
		 */
-		if (ts->ts_state == TSS_ONRUNQ) {
+		if (TD_ON_RUNQ(td)) {
			awake = 1;
			ts->ts_flags &= ~TSF_DIDRUN;
-		} else if ((ts->ts_state == TSS_THREAD) &&
-		    (TD_IS_RUNNING(td))) {
+		} else if (TD_IS_RUNNING(td)) {
			awake = 1;
			/* Do not clear TSF_DIDRUN */
		} else if (ts->ts_flags & TSF_DIDRUN) {
			awake = 1;
			ts->ts_flags &= ~TSF_DIDRUN;
		}

		/*

--- 159 unchanged lines hidden ---

schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	td_sched0.ts_thread = &thread0;
-	td_sched0.ts_state = TSS_THREAD;
}

int
sched_runnable(void)
{
#ifdef SMP
	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
#else

--- 108 unchanged lines hidden ---

{
	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
	    curthread->td_proc->p_comm);

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority == prio)
		return;
-	if (TD_ON_RUNQ(td)) {
-		adjustrunqueue(td, prio);
-	} else {
-		td->td_priority = prio;
+	td->td_priority = prio;
+	if (TD_ON_RUNQ(td) &&
+	    td->td_sched->ts_rqindex != (prio / RQ_PPQ)) {
+		sched_rem(td);
+		sched_add(td, SRQ_BORING);
	}
}
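
The rewritten sched_prio() avoids a remove/re-add cycle when the new priority maps to the same run-queue bucket. FreeBSD's runq spreads the 256 priority levels over 64 queues, so RQ_PPQ works out to 4, and only a priority change that crosses a bucket boundary moves the thread; a small standalone illustration of that bucket test (user-space C, with RQ_PPQ hard-coded on that assumption):

	#include <stdio.h>

	#define	RQ_PPQ	4	/* priorities per queue; assumed from <sys/runq.h> */

	static int
	needs_requeue(unsigned char oldprio, unsigned char newprio)
	{
		/* Same test sched_prio() now applies via ts_rqindex. */
		return (oldprio / RQ_PPQ != newprio / RQ_PPQ);
	}

	int
	main(void)
	{
		printf("120 -> 122: %s\n", needs_requeue(120, 122) ? "requeue" : "in place");
		printf("120 -> 130: %s\n", needs_requeue(120, 130) ? "requeue" : "in place");
		return (0);
	}
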

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void

--- 149 unchanged lines hidden ---

	 * or stopped or any thing else similar.  We never put the idle
	 * threads on the run queue, however.
	 */
	if (td == PCPU_GET(idlethread))
		TD_SET_CAN_RUN(td);
	else {
		if (TD_IS_RUNNING(td)) {
			/* Put us back on the run queue. */
-			setrunqueue(td, (flags & SW_PREEMPT) ?
+			sched_add(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
		}
	}
	if (newtd) {
		/*
		 * The thread we are about to run needs to be counted
		 * as if it had been added to the run queue and selected.

--- 33 unchanged lines hidden ---

sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_slptime > 1) {
		updatepri(td);
		resetpriority(td);
	}
	td->td_slptime = 0;
-	setrunqueue(td, SRQ_BORING);
+	sched_add(td, SRQ_BORING);
}
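
Both this hunk and the one in sched_switch() above replace setrunqueue() with a direct sched_add() call. Judging from the changes visible here (sched_add() now performs the TD_SET_RUNQ() transition itself), the removed wrapper reduced to roughly the following; a hedged sketch, not the removed code:

	/* Approximate shape of the retired wrapper (illustrative only). */
	void
	setrunqueue(struct thread *td, int flags)
	{
		TD_SET_RUNQ(td);	/* transition now owned by sched_add() */
		sched_add(td, flags);
	}
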
#ifdef SMP
/* enable HTT_2 if you have a 2-way HTT cpu.*/
static int
forward_wakeup(int cpunum)
{
	cpumask_t map, me, dontuse;

--- 120 unchanged lines hidden ---

{
	struct td_sched *ts;
	int forwarded = 0;
	int cpu;
	int single_cpu = 0;

	ts = td->td_sched;
	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT(ts->ts_state != TSS_ONRUNQ,
-	    ("sched_add: td_sched %p (%s) already in run queue", ts,
-	    td->td_proc->p_comm));
+	KASSERT((td->td_inhibitors == 0),
+	    ("sched_add: trying to run inhibited thread"));
+	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
+	    ("sched_add: bad thread state"));
	KASSERT(td->td_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
+	TD_SET_RUNQ(td);

	if (td->td_pinned != 0) {
		cpu = td->td_lastcpu;
		ts->ts_runq = &runq_pcpu[cpu];
		single_cpu = 1;
		CTR3(KTR_RUNQ,
		    "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
	} else if ((ts)->ts_flags & TSF_BOUND) {
		/* Find CPU from bound runq */

--- 28 unchanged lines hidden ---

		else
			maybe_resched(td);
	}

	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_add();
	runq_add(ts->ts_runq, ts, flags);
-	ts->ts_state = TSS_ONRUNQ;
}
#else /* SMP */
{
	struct td_sched *ts;
	ts = td->td_sched;
	mtx_assert(&sched_lock, MA_OWNED);
-	KASSERT(ts->ts_state != TSS_ONRUNQ,
-	    ("sched_add: td_sched %p (%s) already in run queue", ts,
-	    td->td_proc->p_comm));
+	KASSERT((td->td_inhibitors == 0),
+	    ("sched_add: trying to run inhibited thread"));
+	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
+	    ("sched_add: bad thread state"));
	KASSERT(td->td_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
+	TD_SET_RUNQ(td);
	CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
	ts->ts_runq = &runq;

	/*
	 * If we are yielding (on the way out anyhow)
	 * or the thread being saved is US,
	 * then don't try be smart about preemption
	 * or kicking off another CPU

--- 5 unchanged lines hidden ---

	 */
	if((flags & SRQ_YIELDING) == 0) {
		if (maybe_preempt(td))
			return;
	}
	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_add();
	runq_add(ts->ts_runq, ts, flags);
-	ts->ts_state = TSS_ONRUNQ;
	maybe_resched(td);
}
#endif /* SMP */
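
In both the SMP and UP copies of sched_add(), the old "already in run queue" assertion gives way to checks phrased in shared state: td_inhibitors must be clear, and the thread must be in a runnable state. td_inhibitors accumulates the TDI_* reasons a thread may not run; the flag names below are recalled from <sys/proc.h> of this era and should be treated as illustrative:

	/* Illustrative TDI_* inhibitor bits (names recalled, not verbatim). */
	#define	TDI_SUSPENDED	0x0001	/* On suspension queue. */
	#define	TDI_SLEEPING	0x0002	/* Actually asleep. */
	#define	TDI_SWAPPED	0x0004	/* Stack not in memory. */
	#define	TDI_LOCK	0x0008	/* Stopped on a lock. */
	#define	TDI_IWAIT	0x0010	/* Awaiting interrupt. */
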
void
sched_rem(struct thread *td)
{
	struct td_sched *ts;

	ts = td->td_sched;
	KASSERT(td->td_proc->p_sflag & PS_INMEM,
	    ("sched_rem: process swapped out"));
-	KASSERT((ts->ts_state == TSS_ONRUNQ),
+	KASSERT(TD_ON_RUNQ(td),
	    ("sched_rem: thread not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);
	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);

	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
		sched_load_rem();
	runq_remove(ts->ts_runq, ts);
-
-	ts->ts_state = TSS_THREAD;
+	TD_SET_CAN_RUN(td);
}
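
Taken together, the sched_add()/sched_rem()/sched_choose() hunks leave the run-queue lifecycle expressed entirely in the shared thread state, with no scheduler-private shadow to keep in sync; schematically:

	/*
	 * Run-state transitions after this change (schematic summary):
	 *
	 *   sched_add():    TD_CAN_RUN or TD_IS_RUNNING -> TD_SET_RUNQ(),
	 *                   then runq_add()
	 *   sched_rem():    TD_ON_RUNQ -> runq_remove(), then TD_SET_CAN_RUN()
	 *   sched_choose(): runq_remove() only; the caller marks the returned
	 *                   thread running
	 */
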
/*
 * Select threads to run.
 * Notice that the running threads still consume a slot.
 */
-struct td_sched *
+struct thread *
sched_choose(void)
{
	struct td_sched *ts;
	struct runq *rq;

#ifdef SMP
	struct td_sched *kecpu;

--- 14 unchanged lines hidden ---

#else
	rq = &runq;
	ts = runq_choose(&runq);
#endif

	if (ts) {
		runq_remove(rq, ts);
-		ts->ts_state = TSS_THREAD;
+		ts->ts_flags |= TSF_DIDRUN;

		KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
		    ("sched_choose: process swapped out"));
-	}
-	return (ts);
+		return (ts->ts_thread);
+	}
+	return (PCPU_GET(idlethread));
}
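
With the PCPU_GET(idlethread) fallback, sched_choose() now returns a struct thread * and can never return NULL, so its caller no longer needs an idle fallback of its own. A hypothetical caller sketch of the new contract (the real consumer is choosethread() in kern_switch.c; TD_SET_RUNNING is assumed to exist alongside the TD_SET_* macros sketched earlier):

	/* Hypothetical caller, illustrating the never-NULL contract. */
	struct thread *
	choosethread_sketch(void)
	{
		struct thread *td;

		td = sched_choose();	/* worst case: the per-CPU idle thread */
		TD_SET_RUNNING(td);	/* assumed macro; see TD_SET_* sketch above */
		return (td);
	}
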
void
sched_userret(struct thread *td)
{
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an

--- 25 unchanged lines hidden ---

	ts = td->td_sched;

	ts->ts_flags |= TSF_BOUND;
#ifdef SMP
	ts->ts_runq = &runq_pcpu[cpu];
	if (PCPU_GET(cpuid) == cpu)
		return;

-	ts->ts_state = TSS_THREAD;
-
	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread* td)
{
	mtx_assert(&sched_lock, MA_OWNED);

--- 43 unchanged lines hidden ---

	ts = td->td_sched;
	return (ts->ts_pctcpu);
}

void
sched_tick(void)
{
}
+
+/*
+ * The actual idle process.
+ */
+void
+sched_idletd(void *dummy)
+{
+	struct proc *p;
+	struct thread *td;
+#ifdef SMP
+	cpumask_t mycpu;
+#endif
+
+	td = curthread;
+	p = td->td_proc;
+#ifdef SMP
+	mycpu = PCPU_GET(cpumask);
+	mtx_lock_spin(&sched_lock);
+	idle_cpus_mask |= mycpu;
+	mtx_unlock_spin(&sched_lock);
+#endif
+	for (;;) {
+		mtx_assert(&Giant, MA_NOTOWNED);
+
+		while (sched_runnable() == 0)
+			cpu_idle();
+
+		mtx_lock_spin(&sched_lock);
+#ifdef SMP
+		idle_cpus_mask &= ~mycpu;
+#endif
+		mi_switch(SW_VOL, NULL);
+#ifdef SMP
+		idle_cpus_mask |= mycpu;
+#endif
+		mtx_unlock_spin(&sched_lock);
+	}
+}
+
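sched_idletd() above is entirely new in this revision: the per-CPU idle loop now lives in the scheduler itself and pairs with the PCPU_GET(idlethread) fallback in sched_choose(), so when nothing is runnable the scheduler hands back a thread that spins in cpu_idle() until sched_runnable() reports work, then yields through mi_switch().
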
#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"