sched_4bsd.c revision 107135
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/sched_4bsd.c 107135 2002-11-21 09:14:13Z jeff $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sx.h>

/*
 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
 * the range 100-256 Hz (approximately).
 */
#define	ESTCPULIM(e) \
    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
#define	NICE_WEIGHT		1	/* Priorities per nice level. */
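
/*
 * Worked example (a sketch, assuming the stock values PRIO_MIN == -20 and
 * PRIO_MAX == 20 from <sys/resource.h>, and RQ_PPQ == 4 from <sys/runq.h>):
 * ESTCPULIM clamps estcpu to
 *	8 * (1 * (20 - (-20)) - 4) + 8 - 1 = 8 * 36 + 7 = 295,
 * so the estcpu contribution to a user priority can never exceed the
 * timeshare nice/priority span less one run queue's worth of levels.
 */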

struct ke_sched *kse0_sched = NULL;
struct kg_sched *ksegrp0_sched = NULL;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = NULL;

static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */

static struct callout schedcpu_callout;
static struct callout roundrobin_callout;

static void	roundrobin(void *arg);
static void	schedcpu(void *arg);
static void	sched_setup(void *dummy);
static void	maybe_resched(struct thread *td);
static void	updatepri(struct ksegrp *kg);
static void	resetpriority(struct ksegrp *kg);

SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

/*
 * Global run queue.
 */
static struct runq runq;
SYSINIT(runq, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, runq_init, &runq)

static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = sched_quantum * tick;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < tick)
		return (EINVAL);
	sched_quantum = new_val / tick;
	hogticks = 2 * sched_quantum;
	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof sched_quantum, sysctl_kern_quantum, "I",
	"Roundrobin scheduling quantum in microseconds");
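
/*
 * Example (a sketch, assuming hz == 100, so tick == 10000 microseconds):
 * the default quantum is hz / 10 == 10 ticks, which kern.quantum reports
 * as 10 * tick == 100000 microseconds.  Writing 50000 stores
 * 50000 / tick == 5 ticks and sets hogticks to 10 ticks; a hypothetical
 * session:
 *	$ sysctl kern.quantum
 *	kern.quantum: 100000
 *	$ sysctl kern.quantum=50000
 */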

/*
 * Arrange to reschedule if necessary, taking the priorities and
 * schedulers into account.
 */
static void
maybe_resched(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority < curthread->td_priority)
		curthread->td_kse->ke_flags |= KEF_NEEDRESCHED;
}

/*
 * Force switch among equal priority processes every 100ms.
 * We don't actually need to force a context switch of the current process.
 * The act of firing the event triggers a context switch to softclock() and
 * then switching back out again which is equivalent to a preemption, thus
 * no further work is needed on the local CPU.
 */
/* ARGSUSED */
static void
roundrobin(void *arg)
{

#ifdef SMP
	mtx_lock_spin(&sched_lock);
	forward_roundrobin();
	mtx_unlock_spin(&sched_lock);
#endif

	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that schedclock() updates p_estcpu and p_cpticks asynchronously.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
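
/*
 * Worked example (a sketch, assuming FSHIFT == 11, so FSCALE == 2048):
 * with a load average of 1.0, ldavg[0] == FSCALE, so
 *	loadfac = 2 * FSCALE;
 *	decay_cpu(loadfac, cpu) = (2 * FSCALE * cpu) / (3 * FSCALE)
 *	                        = cpu * 2 / 3,
 * i.e. the per-second decay factor is b/(b+1) == 2/3 for b == 2 * loadavg,
 * exactly as derived in the comment above.
 */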

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
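
/*
 * Numeric check (a sketch, assuming FSCALE == 2048): ccpu is about
 * 0.95123 * 2048 ~= 1948, so each schedcpu() pass multiplies ke_pctcpu
 * by ~0.95123.  After 60 idle seconds that leaves
 *	0.95123 ** 60 = exp(-60/20) ~= 0.0498,
 * i.e. roughly 95% of the old %cpu has been forgotten, matching the
 * "decay 95% in 60 seconds" comment above.
 */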

/*
 * Recompute process priorities, every hz ticks.
 * MP-safe, called without the Giant mutex.
 */
/* ARGSUSED */
static void
schedcpu(void *arg)
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct thread *td;
	struct proc *p;
	struct kse *ke;
	struct ksegrp *kg;
	int realstathz;
	int awake;

	realstathz = stathz ? stathz : hz;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		mtx_lock_spin(&sched_lock);
		p->p_swtime++;
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			awake = 0;
			FOREACH_KSE_IN_GROUP(kg, ke) {
				/*
				 * Increment time in/out of memory and sleep
				 * time (if sleeping).  We ignore overflow;
				 * with 16-bit int's (remember them?)
				 * overflow takes 45 days.
				 */
				/*
				 * The kse slptimes are not touched in wakeup
				 * because the thread may not HAVE a KSE.
				 */
				if (ke->ke_state == KES_ONRUNQ) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				} else if ((ke->ke_state == KES_THREAD) &&
				    (TD_IS_RUNNING(ke->ke_thread))) {
					awake = 1;
					/* Do not clear KEF_DIDRUN */
				} else if (ke->ke_flags & KEF_DIDRUN) {
					awake = 1;
					ke->ke_flags &= ~KEF_DIDRUN;
				}

				/*
				 * pctcpu is only for ps?
				 * Do it per kse.. and add them up at the end?
				 * XXXKSE
				 */
				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
				    FSHIFT;
				/*
				 * If the kse has been idle the entire second,
				 * stop recalculating its priority until
				 * it wakes up.
				 */
				if (ke->ke_cpticks == 0)
					continue;
#if	(FSHIFT >= CCPU_SHIFT)
				ke->ke_pctcpu += (realstathz == 100) ?
				    ((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t) ke->ke_cpticks) <<
				    (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
				ke->ke_pctcpu += ((FSCALE - ccpu) *
				    (ke->ke_cpticks * FSCALE / realstathz)) >>
				    FSHIFT;
#endif
				ke->ke_cpticks = 0;
			} /* end of kse loop */
			/*
			 * If there are ANY running threads in this KSEGRP,
			 * then don't count it as sleeping.
			 */
			if (awake) {
				if (kg->kg_slptime > 1) {
					/*
					 * In an ideal world, this should not
					 * happen, because whoever woke us
					 * up from the long sleep should have
					 * unwound the slptime and reset our
					 * priority before we run at the stale
					 * priority.  Should KASSERT at some
					 * point when all the cases are fixed.
					 */
					updatepri(kg);
				}
				kg->kg_slptime = 0;
			} else {
				kg->kg_slptime++;
			}
			if (kg->kg_slptime > 1)
				continue;
			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
			resetpriority(kg);
			FOREACH_THREAD_IN_GROUP(kg, td) {
				if (td->td_priority >= PUSER) {
					sched_prio(td, kg->kg_user_pri);
				}
			}
		} /* end of ksegrp loop */
		mtx_unlock_spin(&sched_lock);
	} /* end of process loop */
	sx_sunlock(&allproc_lock);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
static void
updatepri(struct ksegrp *kg)
{
	register unsigned int newcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	newcpu = kg->kg_estcpu;
	if (kg->kg_slptime > 5 * loadfac)
		kg->kg_estcpu = 0;
	else {
		kg->kg_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --kg->kg_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		kg->kg_estcpu = newcpu;
	}
	resetpriority(kg);
}
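
/*
 * Worked example (a sketch): if a ksegrp wakes with kg_slptime == 4, the
 * decrement compensates for the decay schedcpu() already applied during
 * the first second of sleep (while kg_slptime was still <= 1), and the
 * while loop then applies decay_cpu() twice before --kg_slptime reaches
 * zero, decaying kg_estcpu once more for each additional second slept.
 */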

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
static void
resetpriority(struct ksegrp *kg)
{
	register unsigned int newpriority;
	struct thread *td;

	mtx_lock_spin(&sched_lock);
	if (kg->kg_pri_class == PRI_TIMESHARE) {
		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
		    PRI_MAX_TIMESHARE);
		kg->kg_user_pri = newpriority;
	}
	FOREACH_THREAD_IN_GROUP(kg, td) {
		maybe_resched(td);			/* XXXKSE silly */
	}
	mtx_unlock_spin(&sched_lock);
}
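
/*
 * Worked example (a sketch, assuming PUSER == PRI_MIN_TIMESHARE == 160,
 * PRIO_MIN == -20 and the timeshare range 160-223 from <sys/priority.h>):
 * a ksegrp with kg_nice == 0 and kg_estcpu == 40 gets
 *	newpriority = 160 + 40 / 8 + 1 * (0 - (-20)) = 185,
 * which the min/max pair then clamps into
 * [PRI_MIN_TIMESHARE, PRI_MAX_TIMESHARE].
 */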

/* ARGSUSED */
static void
sched_setup(void *dummy)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	hogticks = 2 * sched_quantum;

	callout_init(&schedcpu_callout, 1);
	callout_init(&roundrobin_callout, 0);

	/* Kick off timeout driven events by calling first time. */
	roundrobin(NULL);
	schedcpu(NULL);
}

/* External interfaces start here */
int
sched_runnable(void)
{
	return (runq_check(&runq));
}

int
sched_rr_interval(void)
{
	if (sched_quantum == 0)
		sched_quantum = SCHED_QUANTUM;
	return (sched_quantum);
}

/*
 * We adjust the priority of the current process.  The priority of
 * a process gets worse as it accumulates CPU time.  The cpu usage
 * estimator (p_estcpu) is increased here.  resetpriority() will
 * compute a different priority each time p_estcpu increases by
 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
 * estimator ramps up quite quickly when the process is running
 * (linearly), and decays away exponentially, at a rate which is
 * proportionally slower when the system is busy.  The basic principle
 * is that the system will 90% forget that the process used a lot of
 * CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin
 * among other processes.
 */
void
sched_clock(struct thread *td)
{
	struct kse *ke;
	struct ksegrp *kg;

	KASSERT((td != NULL), ("schedclock: null thread pointer"));
	ke = td->td_kse;
	kg = td->td_ksegrp;
	ke->ke_cpticks++;
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
		resetpriority(kg);
		if (td->td_priority >= PUSER)
			td->td_priority = kg->kg_user_pri;
	}
}
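
/*
 * Ramp-up example (a sketch, assuming stathz == 128): a CPU-bound thread
 * takes one sched_clock() call per statclock tick, so kg_estcpu grows by
 * 128 per second until ESTCPULIM saturates it, and its user priority is
 * recomputed (worsening by one level each time) every
 * INVERSE_ESTCPU_WEIGHT == 8 ticks, i.e. 16 times per second.
 */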

/*
 * Charge the child's scheduling CPU usage to the parent.
 *
 * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
 * Charge it to the ksegrp that did the wait; since process estcpu is the
 * sum over all ksegrps, this is strictly as expected.  Assume that the
 * child process aggregated all the estcpu into the 'built-in' ksegrp.
 */
void
sched_exit(struct ksegrp *kg, struct ksegrp *child)
{
	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + child->kg_estcpu);
}

void
sched_fork(struct ksegrp *kg, struct ksegrp *child)
{
	/*
	 * Set the priority of the child to be that of the parent.
	 * XXXKSE this needs redefining..
	 */
	child->kg_estcpu = kg->kg_estcpu;
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	kg->kg_nice = nice;
	resetpriority(kg);
}

/*
 * Adjust the priority of a thread.
 * This may include moving the thread within the KSEGRP,
 * changing the assignment of a kse to the thread,
 * and moving a KSE in the system run queue.
 */
void
sched_prio(struct thread *td, u_char prio)
{

	if (TD_ON_RUNQ(td)) {
		adjustrunqueue(td, prio);
	} else {
		td->td_priority = prio;
	}
}

void
sched_sleep(struct thread *td, u_char prio)
{
	td->td_ksegrp->kg_slptime = 0;
	td->td_priority = prio;
}

void
sched_switchin(struct thread *td)
{
	td->td_kse->ke_oncpu = PCPU_GET(cpuid);
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;
	struct proc *p;

	ke = td->td_kse;
	p = td->td_proc;

	KASSERT((ke->ke_state == KES_THREAD), ("mi_switch: kse state?"));

	td->td_lastcpu = ke->ke_oncpu;
	td->td_last_kse = ke;
	ke->ke_oncpu = NOCPU;
	ke->ke_flags &= ~KEF_NEEDRESCHED;
	/*
	 * At the last moment, if this thread is still marked RUNNING,
	 * then put it back on the run queue as it has not been suspended
	 * or stopped or anything else similar.
	 */
	if (TD_IS_RUNNING(td)) {
		/* Put us back on the run queue (kse and all). */
		setrunqueue(td);
	} else if (p->p_flag & P_KSES) {
		/*
		 * We will not be on the run queue, so we must be
		 * sleeping or similar.  As it's available,
		 * someone else can use the KSE if they need it.
		 * (If bound, LOANING can still occur.)
		 */
		kse_reassign(ke);
	}
}

void
sched_wakeup(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;
	if (kg->kg_slptime > 1)
		updatepri(kg);
	kg->kg_slptime = 0;
	setrunqueue(td);
	maybe_resched(td);
}

void
sched_add(struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("runq_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("runq_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("runq_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_add: process swapped out"));
	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(&runq, ke);
}

void
sched_rem(struct kse *ke)
{
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
	mtx_assert(&sched_lock, MA_OWNED);

	runq_remove(&runq, ke);
	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
}

struct kse *
sched_choose(void)
{
	struct kse *ke;

	ke = runq_choose(&runq);

	if (ke != NULL) {
		runq_remove(&runq, ke);
		ke->ke_state = KES_THREAD;

		KASSERT((ke->ke_thread != NULL),
		    ("runq_choose: No thread on KSE"));
		KASSERT((ke->ke_thread->td_kse != NULL),
		    ("runq_choose: No KSE on thread"));
		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		    ("runq_choose: process swapped out"));
	}
	return (ke);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	kg = td->td_ksegrp;
	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread));
}