sched_4bsd.c: revision 113923 -> revision 114293
1/*-
2 * Copyright (c) 1982, 1986, 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
- 38 * $FreeBSD: head/sys/kern/sched_4bsd.c 113923 2003-04-23 18:51:05Z jhb $
+ 38 * $FreeBSD: head/sys/kern/sched_4bsd.c 114293 2003-04-30 12:57:40Z markm $
39 */
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/kernel.h>
44#include <sys/ktr.h>
45#include <sys/lock.h>
46#include <sys/mutex.h>
47#include <sys/proc.h>
48#include <sys/resourcevar.h>
49#include <sys/sched.h>
50#include <sys/smp.h>
51#include <sys/sysctl.h>
52#include <sys/sx.h>
53
54/*
55 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
56 * the range 100-256 Hz (approximately).
57 */
58#define ESTCPULIM(e) \
59 min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
60 RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
61#define INVERSE_ESTCPU_WEIGHT 8 /* 1 / (priorities per estcpu level). */
62#define NICE_WEIGHT 1 /* Priorities per nice level. */
63
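/*
 * Worked example of the clamp above (a sketch, assuming the stock
 * constants PRIO_MIN = -20 and PRIO_MAX = 20 from <sys/resource.h>,
 * and RQ_PPQ = 4 from <sys/runq.h>):
 *
 *	ESTCPULIM(e) = min((e), 8 * (1 * 40 - 4) + 8 - 1) = min((e), 295)
 *
 * so kg_estcpu saturates at 295, i.e. at most 36 priority steps once
 * it is divided by INVERSE_ESTCPU_WEIGHT in resetpriority().
 */
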
64struct ke_sched {
65 int ske_cpticks; /* (j) Ticks of cpu time. */
66};
67
- 68struct ke_sched ke_sched;
+ 68static struct ke_sched ke_sched;
69
70struct ke_sched *kse0_sched = &ke_sched;
71struct kg_sched *ksegrp0_sched = NULL;
72struct p_sched *proc0_sched = NULL;
73struct td_sched *thread0_sched = NULL;
74
75static int sched_quantum; /* Roundrobin scheduling quantum in ticks. */
76#define SCHED_QUANTUM (hz / 10) /* Default sched quantum */
77
78static struct callout schedcpu_callout;
79static struct callout roundrobin_callout;
80
81static void roundrobin(void *arg);
82static void schedcpu(void *arg);
83static void sched_setup(void *dummy);
84static void maybe_resched(struct thread *td);
85static void updatepri(struct ksegrp *kg);
86static void resetpriority(struct ksegrp *kg);
87
88SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)
89
90/*
91 * Global run queue.
92 */
93static struct runq runq;
94SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)
95
96static int
97sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
98{
99 int error, new_val;
100
101 new_val = sched_quantum * tick;
102 error = sysctl_handle_int(oidp, &new_val, 0, req);
103 if (error != 0 || req->newptr == NULL)
104 return (error);
105 if (new_val < tick)
106 return (EINVAL);
107 sched_quantum = new_val / tick;
108 hogticks = 2 * sched_quantum;
109 return (0);
110}
111
112SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
113 0, sizeof sched_quantum, sysctl_kern_quantum, "I",
114 "Roundrobin scheduling quantum in microseconds");
115
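/*
 * Usage sketch for the handler above, assuming hz = 100 so that
 * tick = 1000000 / hz = 10000 microseconds:
 *
 *	sysctl kern.quantum=100000
 *
 * stores 100000 / 10000 = 10 ticks in sched_quantum (the same value
 * as the default SCHED_QUANTUM = hz / 10) and sets hogticks to 20.
 * Requests smaller than one tick are rejected with EINVAL.
 */
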
116/*
117 * Arrange to reschedule if necessary, taking the priorities and
118 * schedulers into account.
119 */
120static void
121maybe_resched(struct thread *td)
122{
123
124 mtx_assert(&sched_lock, MA_OWNED);
125 if (td->td_priority < curthread->td_priority && curthread->td_kse)
126 curthread->td_flags |= TDF_NEEDRESCHED;
127}
128
129/*
130 * Force switch among equal priority processes every 100ms.
131 * We don't actually need to force a context switch of the current process.
132 * The act of firing the event triggers a context switch to softclock() and
133 * then switching back out again which is equivalent to a preemption, thus
134 * no further work is needed on the local CPU.
135 */
136/* ARGSUSED */
137static void
138roundrobin(void *arg)
139{
140
141#ifdef SMP
142 mtx_lock_spin(&sched_lock);
143 forward_roundrobin();
144 mtx_unlock_spin(&sched_lock);
145#endif
146
147 callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
148}
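
/*
 * With the default SCHED_QUANTUM of hz / 10, the callout above re-arms
 * every 10 ticks, which is where the 100ms figure in the comment comes
 * from (assuming the common hz = 100; hz is tunable, so the real period
 * is sched_quantum / hz seconds).
 */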
149
150/*
151 * Constants for digital decay and forget:
152 * 90% of (p_estcpu) usage in 5 * loadav time
153 * 95% of (p_pctcpu) usage in 60 seconds (load insensitive)
154 * Note that, as ps(1) mentions, this can let percentages
155 * total over 100% (I've seen 137.9% for 3 processes).
156 *
157 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
158 *
159 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
160 * That is, the system wants to compute a value of decay such
161 * that the following for loop:
162 * for (i = 0; i < (5 * loadavg); i++)
163 * p_estcpu *= decay;
164 * will compute
165 * p_estcpu *= 0.1;
166 * for all values of loadavg:
167 *
168 * Mathematically this loop can be expressed by saying:
169 * decay ** (5 * loadavg) ~= .1
170 *
171 * The system computes decay as:
172 * decay = (2 * loadavg) / (2 * loadavg + 1)
173 *
174 * We wish to prove that the system's computation of decay
175 * will always fulfill the equation:
176 * decay ** (5 * loadavg) ~= .1
177 *
178 * If we compute b as:
179 * b = 2 * loadavg
180 * then
181 * decay = b / (b + 1)
182 *
183 * We now need to prove two things:
184 * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
185 * 2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
186 *
187 * Facts:
188 * For x close to zero, exp(x) =~ 1 + x, since
 189 * exp(x) = x**0/0! + x**1/1! + x**2/2! + ... .
190 * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
191 * For x close to zero, ln(1+x) =~ x, since
192 * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1
193 * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
194 * ln(.1) =~ -2.30
195 *
196 * Proof of (1):
197 * Solve (factor)**(power) =~ .1 given power (5*loadav):
198 * solving for factor,
199 * ln(factor) =~ (-2.30/5*loadav), or
200 * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
201 * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED
202 *
203 * Proof of (2):
204 * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
205 * solving for power,
206 * power*ln(b/(b+1)) =~ -2.30, or
207 * power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav. QED
208 *
209 * Actual power values for the implemented algorithm are as follows:
210 * loadav: 1 2 3 4
211 * power: 5.68 10.32 14.94 19.55
212 */
213
214/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
215#define loadfactor(loadav) (2 * (loadav))
216#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))
217
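/*
 * Worked example of decay_cpu(), assuming FSHIFT = 11 so FSCALE = 2048:
 * a load average of 1.0 is stored as 1 * FSCALE, so
 *
 *	loadfac = 2 * FSCALE
 *	decay_cpu(loadfac, cpu) = (2 * FSCALE * (cpu)) / (3 * FSCALE)
 *	                        = (cpu) * 2 / 3
 *
 * and (2/3) ** 5.68 ~= .1, matching the loadav = 1 column of the table
 * above: roughly 90% of estcpu is forgotten after 5.68 passes of
 * schedcpu(), i.e. about 5 * loadavg seconds.
 */
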
218/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
219static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
220SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
221
222/*
223 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
224 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
225 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
226 *
227 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
228 * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
229 *
230 * If you don't want to bother with the faster/more-accurate formula, you
231 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
232 * (more general) method of calculating the %age of CPU used by a process.
233 */
234#define CCPU_SHIFT 11
235
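/*
 * Worked example for ccpu, again assuming FSHIFT = 11 (FSCALE = 2048):
 * ccpu ~= 0.95122 * 2048 ~= 1948, so each schedcpu() pass computes
 * pctcpu = (pctcpu * 1948) >> 11. After 60 one-second passes this is
 * exp(-1/20) ** 60 = exp(-3) ~= 0.05, i.e. 95% of ke_pctcpu has
 * decayed away, as promised above.
 */
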
236/*
237 * Recompute process priorities, every hz ticks.
238 * MP-safe, called without the Giant mutex.
239 */
240/* ARGSUSED */
241static void
242schedcpu(void *arg)
243{
244 register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
245 struct thread *td;
246 struct proc *p;
247 struct kse *ke;
248 struct ksegrp *kg;
249 int realstathz;
250 int awake;
251
252 realstathz = stathz ? stathz : hz;
253 sx_slock(&allproc_lock);
254 FOREACH_PROC_IN_SYSTEM(p) {
255 mtx_lock_spin(&sched_lock);
256 p->p_swtime++;
257 FOREACH_KSEGRP_IN_PROC(p, kg) {
258 awake = 0;
259 FOREACH_KSE_IN_GROUP(kg, ke) {
260 /*
261 * Increment time in/out of memory and sleep
262 * time (if sleeping). We ignore overflow;
263 * with 16-bit int's (remember them?)
264 * overflow takes 45 days.
265 */
266 /*
267 * The kse slptimes are not touched in wakeup
268 * because the thread may not HAVE a KSE.
269 */
270 if (ke->ke_state == KES_ONRUNQ) {
271 awake = 1;
272 ke->ke_flags &= ~KEF_DIDRUN;
273 } else if ((ke->ke_state == KES_THREAD) &&
274 (TD_IS_RUNNING(ke->ke_thread))) {
275 awake = 1;
276 /* Do not clear KEF_DIDRUN */
277 } else if (ke->ke_flags & KEF_DIDRUN) {
278 awake = 1;
279 ke->ke_flags &= ~KEF_DIDRUN;
280 }
281
282 /*
283 * pctcpu is only for ps?
284 * Do it per kse.. and add them up at the end?
285 * XXXKSE
286 */
287 ke->ke_pctcpu
288 = (ke->ke_pctcpu * ccpu) >>
289 FSHIFT;
290 /*
291 * If the kse has been idle the entire second,
292 * stop recalculating its priority until
293 * it wakes up.
294 */
295 if (ke->ke_sched->ske_cpticks == 0)
296 continue;
297#if (FSHIFT >= CCPU_SHIFT)
298 ke->ke_pctcpu += (realstathz == 100)
299 ? ((fixpt_t) ke->ke_sched->ske_cpticks) <<
300 (FSHIFT - CCPU_SHIFT) :
301 100 * (((fixpt_t) ke->ke_sched->ske_cpticks)
302 << (FSHIFT - CCPU_SHIFT)) / realstathz;
303#else
304 ke->ke_pctcpu += ((FSCALE - ccpu) *
305 (ke->ke_sched->ske_cpticks *
306 FSCALE / realstathz)) >> FSHIFT;
307#endif
308 ke->ke_sched->ske_cpticks = 0;
309 } /* end of kse loop */
310 /*
311 * If there are ANY running threads in this KSEGRP,
312 * then don't count it as sleeping.
313 */
314 if (awake) {
315 if (kg->kg_slptime > 1) {
316 /*
317 * In an ideal world, this should not
318 * happen, because whoever woke us
319 * up from the long sleep should have
320 * unwound the slptime and reset our
321 * priority before we run at the stale
322 * priority. Should KASSERT at some
323 * point when all the cases are fixed.
324 */
325 updatepri(kg);
326 }
327 kg->kg_slptime = 0;
328 } else {
329 kg->kg_slptime++;
330 }
331 if (kg->kg_slptime > 1)
332 continue;
333 kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
334 resetpriority(kg);
335 FOREACH_THREAD_IN_GROUP(kg, td) {
336 if (td->td_priority >= PUSER) {
337 sched_prio(td, kg->kg_user_pri);
338 }
339 }
340 } /* end of ksegrp loop */
341 mtx_unlock_spin(&sched_lock);
342 } /* end of process loop */
343 sx_sunlock(&allproc_lock);
344 callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
345}
346
347/*
348 * Recalculate the priority of a process after it has slept for a while.
349 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
350 * least six times the loadfactor will decay p_estcpu to zero.
351 */
352static void
353updatepri(struct ksegrp *kg)
354{
355 register unsigned int newcpu;
356 register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
357
358 newcpu = kg->kg_estcpu;
359 if (kg->kg_slptime > 5 * loadfac)
360 kg->kg_estcpu = 0;
361 else {
362 kg->kg_slptime--; /* the first time was done in schedcpu */
363 while (newcpu && --kg->kg_slptime)
364 newcpu = decay_cpu(loadfac, newcpu);
365 kg->kg_estcpu = newcpu;
366 }
367 resetpriority(kg);
368}
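
/*
 * Concrete case for updatepri(), assuming loadav = 1 (decay factor
 * 2/3, see decay_cpu() above): an estcpu at the ESTCPULIM cap of 295
 * truncates to zero after about ln(295) / ln(3/2) ~= 14 decay steps,
 * so a sleep of roughly 14 seconds is enough to fully forget a
 * saturated CPU history.
 */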
369
370/*
371 * Compute the priority of a process when running in user mode.
372 * Arrange to reschedule if the resulting priority is better
373 * than that of the current process.
374 */
375static void
376resetpriority(struct ksegrp *kg)
377{
378 register unsigned int newpriority;
379 struct thread *td;
380
381 if (kg->kg_pri_class == PRI_TIMESHARE) {
382 newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
383 NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
384 newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
385 PRI_MAX_TIMESHARE);
386 kg->kg_user_pri = newpriority;
387 }
388 FOREACH_THREAD_IN_GROUP(kg, td) {
389 maybe_resched(td); /* XXXKSE silly */
390 }
391}
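
/*
 * Worked example for resetpriority() (a sketch, assuming the stock
 * definitions PUSER = PRI_MIN_TIMESHARE and PRIO_MIN = -20): with
 * nice = 0 and kg_estcpu = 80,
 *
 *	newpriority = PUSER + 80 / 8 + 1 * (0 - (-20)) = PUSER + 30,
 *
 * well inside [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE], so the clamp is
 * a no-op and kg_user_pri lands 30 steps worse than the best
 * timesharing priority.
 */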
392
393/* ARGSUSED */
394static void
395sched_setup(void *dummy)
396{
397 if (sched_quantum == 0)
398 sched_quantum = SCHED_QUANTUM;
399 hogticks = 2 * sched_quantum;
400
401 callout_init(&schedcpu_callout, 1);
402 callout_init(&roundrobin_callout, 0);
403
404 /* Kick off timeout driven events by calling first time. */
405 roundrobin(NULL);
406 schedcpu(NULL);
407}
408
409/* External interfaces start here */
410int
411sched_runnable(void)
412{
413 return runq_check(&runq);
414}
415
416int
417sched_rr_interval(void)
418{
419 if (sched_quantum == 0)
420 sched_quantum = SCHED_QUANTUM;
421 return (sched_quantum);
422}
423
424/*
425 * We adjust the priority of the current process. The priority of
426 * a process gets worse as it accumulates CPU time. The cpu usage
427 * estimator (p_estcpu) is increased here. resetpriority() will
428 * compute a different priority each time p_estcpu increases by
429 * INVERSE_ESTCPU_WEIGHT
430 * (until MAXPRI is reached). The cpu usage estimator ramps up
431 * quite quickly when the process is running (linearly), and decays
432 * away exponentially, at a rate which is proportionally slower when
433 * the system is busy. The basic principle is that the system will
434 * 90% forget that the process used a lot of CPU time in 5 * loadav
435 * seconds. This causes the system to favor processes which haven't
436 * run much recently, and to round-robin among other processes.
437 */
438void
439sched_clock(struct kse *ke)
440{
441 struct ksegrp *kg;
442 struct thread *td;
443
444 mtx_assert(&sched_lock, MA_OWNED);
445 kg = ke->ke_ksegrp;
446 td = ke->ke_thread;
447
448 ke->ke_sched->ske_cpticks++;
449 kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
450 if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
451 resetpriority(kg);
452 if (td->td_priority >= PUSER)
453 td->td_priority = kg->kg_user_pri;
454 }
455}
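
/*
 * Ramp-rate sketch for sched_clock() above, assuming stathz = 128
 * (the usual statclock frequency): a thread that stays on a CPU gains
 * one estcpu per statclock tick, so resetpriority() fires every
 * INVERSE_ESTCPU_WEIGHT = 8 ticks, i.e. its user priority gets one
 * step worse every ~62ms until ESTCPULIM saturates and the decay in
 * schedcpu() balances the ramp.
 */
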
456/*
 457 * Charge the child's scheduling cpu usage to the parent.
458 *
 459 * XXXKSE: assume only one thread & kse & ksegrp; keep estcpu in each ksegrp.
 460 * Charge it to the ksegrp that did the wait; since process estcpu is the
 461 * sum of all ksegrps, this is strictly as expected. Assume that the child
 462 * process aggregated all of its estcpu into the 'built-in' ksegrp.
463 */
464void
465sched_exit(struct proc *p, struct proc *p1)
466{
467 sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
468 sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
469 sched_exit_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
470}
471
472void
473sched_exit_kse(struct kse *ke, struct kse *child)
474{
475}
476
477void
478sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
479{
480
481 mtx_assert(&sched_lock, MA_OWNED);
482 kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
483}
484
485void
486sched_exit_thread(struct thread *td, struct thread *child)
487{
488}
489
490void
491sched_fork(struct proc *p, struct proc *p1)
492{
493 sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
494 sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
495 sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
496}
497
498void
499sched_fork_kse(struct kse *ke, struct kse *child)
500{
501 child->ke_sched->ske_cpticks = 0;
502}
503
504void
505sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
506{
507 mtx_assert(&sched_lock, MA_OWNED);
508 child->kg_estcpu = kg->kg_estcpu;
509}
510
511void
512sched_fork_thread(struct thread *td, struct thread *child)
513{
514}
515
516void
517sched_nice(struct ksegrp *kg, int nice)
518{
519
520 PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
521 mtx_assert(&sched_lock, MA_OWNED);
522 kg->kg_nice = nice;
523 resetpriority(kg);
524}
525
526void
527sched_class(struct ksegrp *kg, int class)
528{
529 mtx_assert(&sched_lock, MA_OWNED);
530 kg->kg_pri_class = class;
531}
532
533/*
534 * Adjust the priority of a thread.
535 * This may include moving the thread within the KSEGRP,
536 * changing the assignment of a kse to the thread,
537 * and moving a KSE in the system run queue.
538 */
539void
540sched_prio(struct thread *td, u_char prio)
541{
542
543 mtx_assert(&sched_lock, MA_OWNED);
544 if (TD_ON_RUNQ(td)) {
545 adjustrunqueue(td, prio);
546 } else {
547 td->td_priority = prio;
548 }
549}
550
551void
552sched_sleep(struct thread *td, u_char prio)
553{
554
555 mtx_assert(&sched_lock, MA_OWNED);
556 td->td_ksegrp->kg_slptime = 0;
557 td->td_priority = prio;
558}
559
560void
561sched_switchin(struct thread *td)
562{
563
564 mtx_assert(&sched_lock, MA_OWNED);
565 td->td_oncpu = PCPU_GET(cpuid);
566}
567
568void
569sched_switchout(struct thread *td)
570{
571 struct kse *ke;
572 struct proc *p;
573
574 ke = td->td_kse;
575 p = td->td_proc;
576
577 mtx_assert(&sched_lock, MA_OWNED);
578 KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));
579
580 td->td_lastcpu = td->td_oncpu;
581 td->td_last_kse = ke;
582 td->td_oncpu = NOCPU;
583 td->td_flags &= ~TDF_NEEDRESCHED;
584 /*
585 * At the last moment, if this thread is still marked RUNNING,
586 * then put it back on the run queue as it has not been suspended
 587 * or stopped or anything else similar.
588 */
589 if (TD_IS_RUNNING(td)) {
590 /* Put us back on the run queue (kse and all). */
591 setrunqueue(td);
592 } else if (p->p_flag & P_THREADED) {
593 /*
594 * We will not be on the run queue. So we must be
595 * sleeping or similar. As it's available,
596 * someone else can use the KSE if they need it.
597 */
598 kse_reassign(ke);
599 }
600}
601
602void
603sched_wakeup(struct thread *td)
604{
605 struct ksegrp *kg;
606
607 mtx_assert(&sched_lock, MA_OWNED);
608 kg = td->td_ksegrp;
609 if (kg->kg_slptime > 1)
610 updatepri(kg);
611 kg->kg_slptime = 0;
612 setrunqueue(td);
613 maybe_resched(td);
614}
615
616void
617sched_add(struct kse *ke)
618{
619 mtx_assert(&sched_lock, MA_OWNED);
620 KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
621 KASSERT((ke->ke_thread->td_kse != NULL),
622 ("runq_add: No KSE on thread"));
623 KASSERT(ke->ke_state != KES_ONRUNQ,
624 ("runq_add: kse %p (%s) already in run queue", ke,
625 ke->ke_proc->p_comm));
626 KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
627 ("runq_add: process swapped out"));
628 ke->ke_ksegrp->kg_runq_kses++;
629 ke->ke_state = KES_ONRUNQ;
630
631 runq_add(&runq, ke);
632}
633
634void
635sched_rem(struct kse *ke)
636{
637 KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
638 ("runq_remove: process swapped out"));
639 KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
640 mtx_assert(&sched_lock, MA_OWNED);
641
642 runq_remove(&runq, ke);
643 ke->ke_state = KES_THREAD;
644 ke->ke_ksegrp->kg_runq_kses--;
645}
646
647struct kse *
648sched_choose(void)
649{
650 struct kse *ke;
651
652 ke = runq_choose(&runq);
653
654 if (ke != NULL) {
655 runq_remove(&runq, ke);
656 ke->ke_state = KES_THREAD;
657
658 KASSERT((ke->ke_thread != NULL),
659 ("runq_choose: No thread on KSE"));
660 KASSERT((ke->ke_thread->td_kse != NULL),
661 ("runq_choose: No KSE on thread"));
662 KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
663 ("runq_choose: process swapped out"));
664 }
665 return (ke);
666}
667
668void
669sched_userret(struct thread *td)
670{
671 struct ksegrp *kg;
672 /*
673 * XXX we cheat slightly on the locking here to avoid locking in
674 * the usual case. Setting td_priority here is essentially an
675 * incomplete workaround for not setting it properly elsewhere.
676 * Now that some interrupt handlers are threads, not setting it
677 * properly elsewhere can clobber it in the window between setting
678 * it here and returning to user mode, so don't waste time setting
679 * it perfectly here.
680 */
681 kg = td->td_ksegrp;
682 if (td->td_priority != kg->kg_user_pri) {
683 mtx_lock_spin(&sched_lock);
684 td->td_priority = kg->kg_user_pri;
685 mtx_unlock_spin(&sched_lock);
686 }
687}
688
689int
690sched_sizeof_kse(void)
691{
692 return (sizeof(struct kse) + sizeof(struct ke_sched));
693}
694int
695sched_sizeof_ksegrp(void)
696{
697 return (sizeof(struct ksegrp));
698}
699int
700sched_sizeof_proc(void)
701{
702 return (sizeof(struct proc));
703}
704int
705sched_sizeof_thread(void)
706{
707 return (sizeof(struct thread));
708}
709
710fixpt_t
711sched_pctcpu(struct kse *ke)
712{
713 return (ke->ke_pctcpu);
714}