sched_4bsd.c revision 145256
1/*-
2 * Copyright (c) 1982, 1986, 1990, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/kern/sched_4bsd.c 145256 2005-04-19 04:01:25Z jkoshy $");
37
38#define kse td_sched
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/kernel.h>
43#include <sys/ktr.h>
44#include <sys/lock.h>
45#include <sys/kthread.h>
46#include <sys/mutex.h>
47#include <sys/proc.h>
48#include <sys/resourcevar.h>
49#include <sys/sched.h>
50#include <sys/smp.h>
51#include <sys/sysctl.h>
52#include <sys/sx.h>
53#include <sys/turnstile.h>
54#include <machine/smp.h>
55
56#ifdef HWPMC_HOOKS
57#include <sys/pmckern.h>
58#endif
59
60/*
61 * INVERSE_ESTCPU_WEIGHT is only suitable for statclock() frequencies in
62 * the range 100-256 Hz (approximately).
63 */
64#define	ESTCPULIM(e) \
65    min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
66    RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
67#ifdef SMP
68#define	INVERSE_ESTCPU_WEIGHT	(8 * smp_cpus)
69#else
70#define	INVERSE_ESTCPU_WEIGHT	8	/* 1 / (priorities per estcpu level). */
71#endif
72#define	NICE_WEIGHT		1	/* Priorities per nice level. */
73
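/*
 * Illustrative note (editorial, hedged): on a UP kernel the clamp above
 * expands to min((e), 8 * (1 * (20 - (-20)) - RQ_PPQ) + 8 - 1), i.e.
 * min((e), 295) assuming the stock RQ_PPQ of 4 and PRIO_MIN/PRIO_MAX of
 * -20/20, so kg_estcpu can never push a timesharing priority past the
 * bottom of its range.
 */
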
74/*
75 * The schedulable entity that can be given a context to run.
76 * A process may have several of these. Probably one per processor
77 * but possibly a few more. In this universe they are grouped
78 * with a KSEG that contains the priority and niceness
79 * for the group.
80 */
81struct kse {
82	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
83	struct thread	*ke_thread;	/* (*) Active associated thread. */
84	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
85	char		ke_rqindex;	/* (j) Run queue index. */
86	enum {
87		KES_THREAD = 0x0,	/* slaved to thread state */
88		KES_ONRUNQ
89	} ke_state;			/* (j) KSE status. */
90	int		ke_cpticks;	/* (j) Ticks of cpu time. */
91	struct runq	*ke_runq;	/* runq the kse is currently on */
92};
93
94#define ke_proc		ke_thread->td_proc
95#define ke_ksegrp	ke_thread->td_ksegrp
96
97#define td_kse td_sched
98
99/* flags kept in td_flags */
100#define TDF_DIDRUN	TDF_SCHED0	/* KSE actually ran. */
101#define TDF_EXIT	TDF_SCHED1	/* KSE is being killed. */
102#define TDF_BOUND	TDF_SCHED2
103
104#define ke_flags	ke_thread->td_flags
105#define KEF_DIDRUN	TDF_DIDRUN /* KSE actually ran. */
106#define KEF_EXIT	TDF_EXIT /* KSE is being killed. */
107#define KEF_BOUND	TDF_BOUND /* stuck to one CPU */
108
109#define SKE_RUNQ_PCPU(ke)						\
110    ((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)
111
112struct kg_sched {
113	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
114					   /* the system scheduler. */
115	int	skg_avail_opennings;	/* (j) Num unfilled slots in group. */
116	int	skg_concurrency;	/* (j) Num KSEs requested in group. */
117};
118#define kg_last_assigned	kg_sched->skg_last_assigned
119#define kg_avail_opennings	kg_sched->skg_avail_opennings
120#define kg_concurrency		kg_sched->skg_concurrency
121
122#define SLOT_RELEASE(kg)						\
123do {									\
124	kg->kg_avail_opennings++; 					\
125	CTR3(KTR_RUNQ, "kg %p(%d) Slot released (->%d)",		\
126	kg,								\
127	kg->kg_concurrency,						\
128	 kg->kg_avail_opennings);					\
129/*	KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency),		\
130	    ("slots out of whack"));*/					\
131} while (0)
132
133#define SLOT_USE(kg)							\
134do {									\
135	kg->kg_avail_opennings--; 					\
136	CTR3(KTR_RUNQ, "kg %p(%d) Slot used (->%d)",			\
137	kg,								\
138	kg->kg_concurrency,						\
139	 kg->kg_avail_opennings);					\
140/*	KASSERT((kg->kg_avail_opennings >= 0),				\
141	    ("slots out of whack"));*/					\
142} while (0)
143
144/*
145 * KSE_CAN_MIGRATE macro returns true if the kse can migrate between
146 * cpus.
147 */
148#define KSE_CAN_MIGRATE(ke)						\
149    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
150
151static struct kse kse0;
152static struct kg_sched kg_sched0;
153
154static int	sched_tdcnt;	/* Total runnable threads in the system. */
155static int	sched_quantum;	/* Roundrobin scheduling quantum in ticks. */
156#define	SCHED_QUANTUM	(hz / 10)	/* Default sched quantum */
157
158static struct callout roundrobin_callout;
159
160static void	slot_fill(struct ksegrp *kg);
161static struct kse *sched_choose(void);		/* XXX Should be thread * */
162
163static void	setup_runqs(void);
164static void	roundrobin(void *arg);
165static void	schedcpu(void);
166static void	schedcpu_thread(void);
167static void	sched_priority(struct thread *td, u_char prio);
168static void	sched_setup(void *dummy);
169static void	maybe_resched(struct thread *td);
170static void	updatepri(struct ksegrp *kg);
171static void	resetpriority(struct ksegrp *kg);
172static void	resetpriority_thread(struct thread *td, struct ksegrp *kg);
173#ifdef SMP
174static int	forward_wakeup(int  cpunum);
175#endif
176
177static struct kproc_desc sched_kp = {
178        "schedcpu",
179        schedcpu_thread,
180        NULL
181};
182SYSINIT(schedcpu, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, kproc_start, &sched_kp)
183SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
184
185/*
186 * Global run queue.
187 */
188static struct runq runq;
189
190#ifdef SMP
191/*
192 * Per-CPU run queues
193 */
194static struct runq runq_pcpu[MAXCPU];
195#endif
196
197static void
198setup_runqs(void)
199{
200#ifdef SMP
201	int i;
202
203	for (i = 0; i < MAXCPU; ++i)
204		runq_init(&runq_pcpu[i]);
205#endif
206
207	runq_init(&runq);
208}
209
210static int
211sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
212{
213	int error, new_val;
214
215	new_val = sched_quantum * tick;
216	error = sysctl_handle_int(oidp, &new_val, 0, req);
217	if (error != 0 || req->newptr == NULL)
218		return (error);
219	if (new_val < tick)
220		return (EINVAL);
221	sched_quantum = new_val / tick;
222	hogticks = 2 * sched_quantum;
223	return (0);
224}
225
226SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RD, 0, "Scheduler");
227
228SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "4BSD", 0,
229    "Scheduler name");
230
231SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
232    0, sizeof sched_quantum, sysctl_kern_quantum, "I",
233    "Roundrobin scheduling quantum in microseconds");
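
/*
 * Worked example (editorial note): with hz = 1000, tick is 1000
 * microseconds, so the default quantum of hz / 10 = 100 ticks reads back
 * through this sysctl as 100000 microseconds.  A value written here is
 * rounded down to whole ticks by sysctl_kern_quantum(), anything below
 * one tick is rejected with EINVAL, and hogticks is kept at twice the
 * quantum.
 */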
234
235#ifdef SMP
236/* Enable forwarding of wakeups to all other cpus */
237SYSCTL_NODE(_kern_sched, OID_AUTO, ipiwakeup, CTLFLAG_RD, NULL, "Kernel SMP");
238
239static int forward_wakeup_enabled = 1;
240SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, enabled, CTLFLAG_RW,
241	   &forward_wakeup_enabled, 0,
242	   "Forwarding of wakeup to idle CPUs");
243
244static int forward_wakeups_requested = 0;
245SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, requested, CTLFLAG_RD,
246	   &forward_wakeups_requested, 0,
247	   "Requests for Forwarding of wakeup to idle CPUs");
248
249static int forward_wakeups_delivered = 0;
250SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, delivered, CTLFLAG_RD,
251	   &forward_wakeups_delivered, 0,
252	   "Completed Forwarding of wakeup to idle CPUs");
253
254static int forward_wakeup_use_mask = 1;
255SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, usemask, CTLFLAG_RW,
256	   &forward_wakeup_use_mask, 0,
257	   "Use the mask of idle cpus");
258
259static int forward_wakeup_use_loop = 0;
260SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, useloop, CTLFLAG_RW,
261	   &forward_wakeup_use_loop, 0,
262	   "Use a loop to find idle cpus");
263
264static int forward_wakeup_use_single = 0;
265SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, onecpu, CTLFLAG_RW,
266	   &forward_wakeup_use_single, 0,
267	   "Only signal one idle cpu");
268
269static int forward_wakeup_use_htt = 0;
270SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
271	   &forward_wakeup_use_htt, 0,
272	   "account for htt");
273
274#endif
275static int sched_followon = 0;
276SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
277	   &sched_followon, 0,
278	   "allow threads to share a quantum");
279
280static int sched_pfollowons = 0;
281SYSCTL_INT(_kern_sched, OID_AUTO, pfollowons, CTLFLAG_RD,
282	   &sched_pfollowons, 0,
283	   "number of followons done to a different ksegrp");
284
285static int sched_kgfollowons = 0;
286SYSCTL_INT(_kern_sched, OID_AUTO, kgfollowons, CTLFLAG_RD,
287	   &sched_kgfollowons, 0,
288	   "number of followons done in a ksegrp");
289
290static __inline void
291sched_load_add(void)
292{
293	sched_tdcnt++;
294	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
295}
296
297static __inline void
298sched_load_rem(void)
299{
300	sched_tdcnt--;
301	CTR1(KTR_SCHED, "global load: %d", sched_tdcnt);
302}
303/*
304 * Arrange to reschedule if necessary, taking the priorities and
305 * schedulers into account.
306 */
307static void
308maybe_resched(struct thread *td)
309{
310
311	mtx_assert(&sched_lock, MA_OWNED);
312	if (td->td_priority < curthread->td_priority)
313		curthread->td_flags |= TDF_NEEDRESCHED;
314}
315
316/*
317 * Force switch among equal priority processes every 100ms.
318 * We don't actually need to force a context switch of the current process.
319 * The act of firing the event triggers a context switch to softclock() and
320 * then switching back out again, which is equivalent to a preemption; thus
321 * no further work is needed on the local CPU.
322 */
323/* ARGSUSED */
324static void
325roundrobin(void *arg)
326{
327
328#ifdef SMP
329	mtx_lock_spin(&sched_lock);
330	forward_roundrobin();
331	mtx_unlock_spin(&sched_lock);
332#endif
333
334	callout_reset(&roundrobin_callout, sched_quantum, roundrobin, NULL);
335}
336
337/*
338 * Constants for digital decay and forget:
339 *	90% of (kg_estcpu) usage in 5 * loadav time
340 *	95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
341 *          Note that, as ps(1) mentions, this can let percentages
342 *          total over 100% (I've seen 137.9% for 3 processes).
343 *
344 * Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
345 *
346 * We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
347 * That is, the system wants to compute a value of decay such
348 * that the following for loop:
349 * 	for (i = 0; i < (5 * loadavg); i++)
350 * 		kg_estcpu *= decay;
351 * will compute
352 * 	kg_estcpu *= 0.1;
353 * for all values of loadavg:
354 *
355 * Mathematically this loop can be expressed by saying:
356 * 	decay ** (5 * loadavg) ~= .1
357 *
358 * The system computes decay as:
359 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
360 *
361 * We wish to prove that the system's computation of decay
362 * will always fulfill the equation:
363 * 	decay ** (5 * loadavg) ~= .1
364 *
365 * If we compute b as:
366 * 	b = 2 * loadavg
367 * then
368 * 	decay = b / (b + 1)
369 *
370 * We now need to prove two things:
371 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
372 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
373 *
374 * Facts:
375 *         For x close to zero, exp(x) =~ 1 + x, since
376 *              exp(x) = x**0/0! + x**1/1! + x**2/2! + ... .
377 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
378 *         For x close to zero, ln(1+x) =~ x, since
379 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
380 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
381 *         ln(.1) =~ -2.30
382 *
383 * Proof of (1):
384 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
385 *	solving for factor,
386 *      ln(factor) =~ (-2.30/5*loadav), or
387 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
388 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
389 *
390 * Proof of (2):
391 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
392 *	solving for power,
393 *      power*ln(b/(b+1)) =~ -2.30, or
394 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
395 *
396 * Actual power values for the implemented algorithm are as follows:
397 *      loadav: 1       2       3       4
398 *      power:  5.68    10.32   14.94   19.55
399 */
400
401/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
402#define	loadfactor(loadav)	(2 * (loadav))
403#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
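
/*
 * Worked example (editorial note): with a load average of 1.00 (i.e.
 * FSCALE in fixed point), loadfactor() yields 2 * FSCALE, so decay_cpu()
 * scales by 2 * FSCALE / (3 * FSCALE) = 2/3 per schedcpu() pass.  After
 * 5 * loadav = 5 passes, (2/3)^5 =~ 0.13 of kg_estcpu remains, close to
 * the 90% forgetting derived above (the table above gives 5.68 passes for
 * an exact factor of 0.1).
 */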
404
405/* decay 95% of `ke_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
406static fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;	/* exp(-1/20) */
407SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
408
409/*
410 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
411 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
412 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
413 *
414 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
415 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
416 *
417 * If you don't want to bother with the faster/more-accurate formula, you
418 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
419 * (more general) method of calculating the %age of CPU used by a process.
420 */
421#define	CCPU_SHIFT	11
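
/*
 * Sanity check (editorial note): ccpu is exp(-1/20) and schedcpu()
 * multiplies ke_pctcpu by it once per second, so after 60 seconds an idle
 * thread keeps exp(-60/20) = exp(-3) =~ 0.05 of its %cpu, which is the
 * "decay 95% in 60 seconds" figure quoted above.
 */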
422
423/*
424 * Recompute process priorities, every hz ticks.
425 * MP-safe, called without the Giant mutex.
426 */
427/* ARGSUSED */
428static void
429schedcpu(void)
430{
431	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
432	struct thread *td;
433	struct proc *p;
434	struct kse *ke;
435	struct ksegrp *kg;
436	int awake, realstathz;
437
438	realstathz = stathz ? stathz : hz;
439	sx_slock(&allproc_lock);
440	FOREACH_PROC_IN_SYSTEM(p) {
441		/*
442		 * Prevent state changes and protect run queue.
443		 */
444		mtx_lock_spin(&sched_lock);
445		/*
446		 * Increment time in/out of memory.  We ignore overflow; with
447		 * 16-bit int's (remember them?) overflow takes 45 days.
448		 */
449		p->p_swtime++;
450		FOREACH_KSEGRP_IN_PROC(p, kg) {
451			awake = 0;
452			FOREACH_THREAD_IN_GROUP(kg, td) {
453				ke = td->td_kse;
454				/*
455				 * Increment sleep time (if sleeping).  We
456				 * ignore overflow, as above.
457				 */
458				/*
459				 * The kse slptimes are not touched in wakeup
460				 * because the thread may not HAVE a KSE.
461				 */
462				if (ke->ke_state == KES_ONRUNQ) {
463					awake = 1;
464					ke->ke_flags &= ~KEF_DIDRUN;
465				} else if ((ke->ke_state == KES_THREAD) &&
466				    (TD_IS_RUNNING(td))) {
467					awake = 1;
468					/* Do not clear KEF_DIDRUN */
469				} else if (ke->ke_flags & KEF_DIDRUN) {
470					awake = 1;
471					ke->ke_flags &= ~KEF_DIDRUN;
472				}
473
474				/*
475				 * ke_pctcpu is only for ps and ttyinfo().
476				 * Do it per kse, and add them up at the end?
477				 * XXXKSE
478				 */
479				ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
480				    FSHIFT;
481				/*
482				 * If the kse has been idle the entire second,
483				 * stop recalculating its priority until
484				 * it wakes up.
485				 */
486				if (ke->ke_cpticks == 0)
487					continue;
488#if	(FSHIFT >= CCPU_SHIFT)
489				ke->ke_pctcpu += (realstathz == 100)
490				    ? ((fixpt_t) ke->ke_cpticks) <<
491				    (FSHIFT - CCPU_SHIFT) :
492				    100 * (((fixpt_t) ke->ke_cpticks)
493				    << (FSHIFT - CCPU_SHIFT)) / realstathz;
494#else
495				ke->ke_pctcpu += ((FSCALE - ccpu) *
496				    (ke->ke_cpticks *
497				    FSCALE / realstathz)) >> FSHIFT;
498#endif
499				ke->ke_cpticks = 0;
500			} /* end of kse loop */
501			/*
502			 * If there are ANY running threads in this KSEGRP,
503			 * then don't count it as sleeping.
504			 */
505			if (awake) {
506				if (kg->kg_slptime > 1) {
507					/*
508					 * In an ideal world, this should not
509					 * happen, because whoever woke us
510					 * up from the long sleep should have
511					 * unwound the slptime and reset our
512					 * priority before we run at the stale
513					 * priority.  Should KASSERT at some
514					 * point when all the cases are fixed.
515					 */
516					updatepri(kg);
517				}
518				kg->kg_slptime = 0;
519			} else
520				kg->kg_slptime++;
521			if (kg->kg_slptime > 1)
522				continue;
523			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
524			resetpriority(kg);
525			FOREACH_THREAD_IN_GROUP(kg, td) {
526				resetpriority_thread(td, kg);
527			}
528		} /* end of ksegrp loop */
529		mtx_unlock_spin(&sched_lock);
530	} /* end of process loop */
531	sx_sunlock(&allproc_lock);
532}
533
534/*
535 * Main loop for a kthread that executes schedcpu once a second.
536 */
537static void
538schedcpu_thread(void)
539{
540	int nowake;
541
542	for (;;) {
543		schedcpu();
544		tsleep(&nowake, curthread->td_priority, "-", hz);
545	}
546}
547
548/*
549 * Recalculate the priority of a process after it has slept for a while.
550 * For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
551 * least six times the loadfactor will decay kg_estcpu to zero.
552 */
553static void
554updatepri(struct ksegrp *kg)
555{
556	register fixpt_t loadfac;
557	register unsigned int newcpu;
558
559	loadfac = loadfactor(averunnable.ldavg[0]);
560	if (kg->kg_slptime > 5 * loadfac)
561		kg->kg_estcpu = 0;
562	else {
563		newcpu = kg->kg_estcpu;
564		kg->kg_slptime--;	/* was incremented in schedcpu() */
565		while (newcpu && --kg->kg_slptime)
566			newcpu = decay_cpu(loadfac, newcpu);
567		kg->kg_estcpu = newcpu;
568	}
569}
570
571/*
572 * Compute the priority of a process when running in user mode.
573 * Arrange to reschedule if the resulting priority is better
574 * than that of the current process.
575 */
576static void
577resetpriority(struct ksegrp *kg)
578{
579	register unsigned int newpriority;
580
581	if (kg->kg_pri_class == PRI_TIMESHARE) {
582		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
583		    NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
584		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
585		    PRI_MAX_TIMESHARE);
586		kg->kg_user_pri = newpriority;
587	}
588}
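
/*
 * Illustrative example (editorial note): on a UP kernel a timesharing
 * ksegrp with kg_estcpu = 80 and nice 0 computes
 * PUSER + 80 / 8 + 1 * (0 - (-20)) = PUSER + 30, which is then clamped to
 * the PRI_MIN_TIMESHARE..PRI_MAX_TIMESHARE range (assuming the stock
 * PRIO_MIN of -20).
 */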
589
590/*
591 * Update the thread's priority when the associated ksegroup's user
592 * priority changes.
593 */
594static void
595resetpriority_thread(struct thread *td, struct ksegrp *kg)
596{
597
598	/* Only change threads with a time sharing user priority. */
599	if (td->td_priority < PRI_MIN_TIMESHARE ||
600	    td->td_priority > PRI_MAX_TIMESHARE)
601		return;
602
603	/* XXX the whole needresched thing is broken, but not silly. */
604	maybe_resched(td);
605
606	sched_prio(td, kg->kg_user_pri);
607}
608
609/* ARGSUSED */
610static void
611sched_setup(void *dummy)
612{
613	setup_runqs();
614
615	if (sched_quantum == 0)
616		sched_quantum = SCHED_QUANTUM;
617	hogticks = 2 * sched_quantum;
618
619	callout_init(&roundrobin_callout, CALLOUT_MPSAFE);
620
621	/* Kick off timeout driven events by calling first time. */
622	roundrobin(NULL);
623
624	/* Account for thread0. */
625	sched_load_add();
626}
627
628/* External interfaces start here */
629/*
630 * Very early in the boot some setup of scheduler-specific
631 * parts of proc0 and of some scheduler resources needs to be done.
632 * Called from:
633 *  proc0_init()
634 */
635void
636schedinit(void)
637{
638	/*
639	 * Set up the scheduler specific parts of proc0.
640	 */
641	proc0.p_sched = NULL; /* XXX */
642	ksegrp0.kg_sched = &kg_sched0;
643	thread0.td_sched = &kse0;
644	kse0.ke_thread = &thread0;
645	kse0.ke_state = KES_THREAD;
646	kg_sched0.skg_concurrency = 1;
647	kg_sched0.skg_avail_opennings = 0; /* we are already running */
648}
649
650int
651sched_runnable(void)
652{
653#ifdef SMP
654	return runq_check(&runq) + runq_check(&runq_pcpu[PCPU_GET(cpuid)]);
655#else
656	return runq_check(&runq);
657#endif
658}
659
660int
661sched_rr_interval(void)
662{
663	if (sched_quantum == 0)
664		sched_quantum = SCHED_QUANTUM;
665	return (sched_quantum);
666}
667
668/*
669 * We adjust the priority of the current process.  The priority of
670 * a process gets worse as it accumulates CPU time.  The cpu usage
671 * estimator (kg_estcpu) is increased here.  resetpriority() will
672 * compute a different priority each time kg_estcpu increases by
673 * INVERSE_ESTCPU_WEIGHT (until MAXPRI is reached).  The cpu usage
674 * estimator ramps up
675 * quite quickly when the process is running (linearly), and decays
676 * away exponentially, at a rate which is proportionally slower when
677 * the system is busy.  The basic principle is that the system will
678 * 90% forget that the process used a lot of CPU time in 5 * loadav
679 * seconds.  This causes the system to favor processes which haven't
680 * run much recently, and to round-robin among other processes.
681 */
682void
683sched_clock(struct thread *td)
684{
685	struct ksegrp *kg;
686	struct kse *ke;
687
688	mtx_assert(&sched_lock, MA_OWNED);
689	kg = td->td_ksegrp;
690	ke = td->td_kse;
691
692	ke->ke_cpticks++;
693	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
694	if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
695		resetpriority(kg);
696		resetpriority_thread(td, kg);
697	}
698}
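
/*
 * Editorial note: kg_estcpu grows by one per statclock tick here, so the
 * resetpriority()/resetpriority_thread() pair above runs once every
 * INVERSE_ESTCPU_WEIGHT ticks (every 8 ticks on UP); a running thread
 * therefore drifts toward a worse priority one step at a time rather than
 * on every tick.
 */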
699
700/*
701 * Charge the child's scheduling cpu usage to the parent.
702 *
703 * XXXKSE assume only one thread & kse & ksegrp keep estcpu in each ksegrp.
704 * Charge it to the ksegrp that did the wait; since process estcpu is the sum
705 * of all ksegrps, this is strictly as expected.  Assume that the child process
706 * aggregated all the estcpu into the 'built-in' ksegrp.
707 */
708void
709sched_exit(struct proc *p, struct thread *td)
710{
711	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
712	sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
713}
714
715void
716sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd)
717{
718
719	mtx_assert(&sched_lock, MA_OWNED);
720	kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + childtd->td_ksegrp->kg_estcpu);
721}
722
723void
724sched_exit_thread(struct thread *td, struct thread *child)
725{
726	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
727	    child, child->td_proc->p_comm, child->td_priority);
728	if ((child->td_proc->p_flag & P_NOLOAD) == 0)
729		sched_load_rem();
730}
731
732void
733sched_fork(struct thread *td, struct thread *childtd)
734{
735	sched_fork_ksegrp(td, childtd->td_ksegrp);
736	sched_fork_thread(td, childtd);
737}
738
739void
740sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
741{
742	mtx_assert(&sched_lock, MA_OWNED);
743	child->kg_estcpu = td->td_ksegrp->kg_estcpu;
744}
745
746void
747sched_fork_thread(struct thread *td, struct thread *childtd)
748{
749	sched_newthread(childtd);
750}
751
752void
753sched_nice(struct proc *p, int nice)
754{
755	struct ksegrp *kg;
756	struct thread *td;
757
758	PROC_LOCK_ASSERT(p, MA_OWNED);
759	mtx_assert(&sched_lock, MA_OWNED);
760	p->p_nice = nice;
761	FOREACH_KSEGRP_IN_PROC(p, kg) {
762		resetpriority(kg);
763		FOREACH_THREAD_IN_GROUP(kg, td) {
764			resetpriority_thread(td, kg);
765		}
766	}
767}
768
769void
770sched_class(struct ksegrp *kg, int class)
771{
772	mtx_assert(&sched_lock, MA_OWNED);
773	kg->kg_pri_class = class;
774}
775
776/*
777 * Adjust the priority of a thread.
778 * This may include moving the thread within the KSEGRP,
779 * changing the assignment of a kse to the thread,
780 * and moving a KSE in the system run queue.
781 */
782static void
783sched_priority(struct thread *td, u_char prio)
784{
785	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
786	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
787	    curthread->td_proc->p_comm);
788
789	mtx_assert(&sched_lock, MA_OWNED);
790	if (td->td_priority == prio)
791		return;
792	if (TD_ON_RUNQ(td)) {
793		adjustrunqueue(td, prio);
794	} else {
795		td->td_priority = prio;
796	}
797}
798
799/*
800 * Update a thread's priority when it is lent another thread's
801 * priority.
802 */
803void
804sched_lend_prio(struct thread *td, u_char prio)
805{
806
807	td->td_flags |= TDF_BORROWING;
808	sched_priority(td, prio);
809}
810
811/*
812 * Restore a thread's priority when priority propagation is
813 * over.  The prio argument is the minimum priority the thread
814 * needs to have to satisfy other possible priority lending
815 * requests.  If the thread's regular priority is less
816 * important than prio, the thread will keep a priority boost
817 * of prio.
818 */
819void
820sched_unlend_prio(struct thread *td, u_char prio)
821{
822	u_char base_pri;
823
824	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
825	    td->td_base_pri <= PRI_MAX_TIMESHARE)
826		base_pri = td->td_ksegrp->kg_user_pri;
827	else
828		base_pri = td->td_base_pri;
829	if (prio >= base_pri) {
830		td->td_flags &= ~TDF_BORROWING;
831		sched_prio(td, base_pri);
832	} else
833		sched_lend_prio(td, prio);
834}
835
836void
837sched_prio(struct thread *td, u_char prio)
838{
839	u_char oldprio;
840
841	/* First, update the base priority. */
842	td->td_base_pri = prio;
843
844	/*
845	 * If the thread is borrowing another thread's priority, don't ever
846	 * lower the priority.
847	 */
848	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
849		return;
850
851	/* Change the real priority. */
852	oldprio = td->td_priority;
853	sched_priority(td, prio);
854
855	/*
856	 * If the thread is on a turnstile, then let the turnstile update
857	 * its state.
858	 */
859	if (TD_ON_LOCK(td) && oldprio != prio)
860		turnstile_adjust(td, oldprio);
861}
862
863void
864sched_sleep(struct thread *td)
865{
866
867	mtx_assert(&sched_lock, MA_OWNED);
868	td->td_ksegrp->kg_slptime = 0;
869}
870
871static void remrunqueue(struct thread *td);
872
873void
874sched_switch(struct thread *td, struct thread *newtd, int flags)
875{
876	struct kse *ke;
877	struct ksegrp *kg;
878	struct proc *p;
879
880	ke = td->td_kse;
881	p = td->td_proc;
882
883	mtx_assert(&sched_lock, MA_OWNED);
884
885	if ((p->p_flag & P_NOLOAD) == 0)
886		sched_load_rem();
887	/*
888	 * We are volunteering to switch out so we get to nominate
889	 * a successor for the rest of our quantum.
890	 * First try another thread in our ksegrp, and then look for
891	 * other ksegrps in our process.
892	 */
893	if (sched_followon &&
894	    (p->p_flag & P_HADTHREADS) &&
895	    (flags & SW_VOL) &&
896	    newtd == NULL) {
897		/* let's schedule another thread from this process */
898		kg = td->td_ksegrp;
899		if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
900			remrunqueue(newtd);
901			sched_kgfollowons++;
902		} else {
903			FOREACH_KSEGRP_IN_PROC(p, kg) {
904				if ((newtd = TAILQ_FIRST(&kg->kg_runq))) {
905					sched_pfollowons++;
906					remrunqueue(newtd);
907					break;
908				}
909			}
910		}
911	}
912
913	if (newtd)
914		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);
915
916	td->td_lastcpu = td->td_oncpu;
917	td->td_flags &= ~TDF_NEEDRESCHED;
918	td->td_owepreempt = 0;
919	td->td_oncpu = NOCPU;
920	/*
921	 * At the last moment, if this thread is still marked RUNNING,
922	 * then put it back on the run queue as it has not been suspended
923	 * or stopped or anything else similar.  We never put the idle
924	 * threads on the run queue, however.
925	 */
926	if (td == PCPU_GET(idlethread))
927		TD_SET_CAN_RUN(td);
928	else {
929		SLOT_RELEASE(td->td_ksegrp);
930		if (TD_IS_RUNNING(td)) {
931			/* Put us back on the run queue (kse and all). */
932			setrunqueue(td, (flags & SW_PREEMPT) ?
933			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
934			    SRQ_OURSELF|SRQ_YIELDING);
935		} else if (p->p_flag & P_HADTHREADS) {
936			/*
937			 * We will not be on the run queue. So we must be
938			 * sleeping or similar. As it's available,
939			 * someone else can use the KSE if they need it.
940			 * It's NOT available if we are about to need it.
941			 */
942			if (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)
943				slot_fill(td->td_ksegrp);
944		}
945	}
946	if (newtd) {
947		/*
948		 * The thread we are about to run needs to be counted
949		 * as if it had been added to the run queue and selected.
950		 * It came from:
951		 * * A preemption
952		 * * An upcall
953		 * * A followon
954		 */
955		KASSERT((newtd->td_inhibitors == 0),
956			("trying to run inhibited thread"));
957		SLOT_USE(newtd->td_ksegrp);
958		newtd->td_kse->ke_flags |= KEF_DIDRUN;
959		TD_SET_RUNNING(newtd);
960		if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
961			sched_load_add();
962	} else {
963		newtd = choosethread();
964	}
965
966	if (td != newtd) {
967#ifdef	HWPMC_HOOKS
968		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
969			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
970#endif
971		cpu_switch(td, newtd);
972#ifdef	HWPMC_HOOKS
973		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
974			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
975#endif
976	}
977
978	sched_lock.mtx_lock = (uintptr_t)td;
979	td->td_oncpu = PCPU_GET(cpuid);
980}
981
982void
983sched_wakeup(struct thread *td)
984{
985	struct ksegrp *kg;
986
987	mtx_assert(&sched_lock, MA_OWNED);
988	kg = td->td_ksegrp;
989	if (kg->kg_slptime > 1) {
990		updatepri(kg);
991		resetpriority(kg);
992	}
993	kg->kg_slptime = 0;
994	setrunqueue(td, SRQ_BORING);
995}
996
997#ifdef SMP
998/* enable HTT_2 if you have a 2-way HTT cpu.*/
999static int
1000forward_wakeup(int  cpunum)
1001{
1002	cpumask_t map, me, dontuse;
1003	cpumask_t map2;
1004	struct pcpu *pc;
1005	cpumask_t id, map3;
1006
1007	mtx_assert(&sched_lock, MA_OWNED);
1008
1009	CTR0(KTR_RUNQ, "forward_wakeup()");
1010
1011	if ((!forward_wakeup_enabled) ||
1012	     (forward_wakeup_use_mask == 0 && forward_wakeup_use_loop == 0))
1013		return (0);
1014	if (!smp_started || cold || panicstr)
1015		return (0);
1016
1017	forward_wakeups_requested++;
1018
1019/*
1020 * check the idle mask we received against what we calculated before
1021 * in the old version.
1022 */
1023	me = PCPU_GET(cpumask);
1024	/*
1025	 * don't bother if we should be doing it ourself..
1026	 * don't bother if we should be doing it ourselves.
1027	if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
1028		return (0);
1029
1030	dontuse = me | stopped_cpus | hlt_cpus_mask;
1031	map3 = 0;
1032	if (forward_wakeup_use_loop) {
1033		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
1034			id = pc->pc_cpumask;
1035			if ( (id & dontuse) == 0 &&
1036			    pc->pc_curthread == pc->pc_idlethread) {
1037				map3 |= id;
1038			}
1039		}
1040	}
1041
1042	if (forward_wakeup_use_mask) {
1043		map = 0;
1044		map = idle_cpus_mask & ~dontuse;
1045
1046		/* If they are both on, compare and use loop if different */
1047		if (forward_wakeup_use_loop) {
1048			if (map != map3) {
1049				printf("map (%02X) != map3 (%02X)\n",
1050						map, map3);
1051				map = map3;
1052			}
1053		}
1054	} else {
1055		map = map3;
1056	}
1057	/* If we only allow a specific CPU, then mask off all the others */
1058	if (cpunum != NOCPU) {
1059		KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
1060		map &= (1 << cpunum);
1061	} else {
1062		/* Try to choose an idle die. */
1063		if (forward_wakeup_use_htt) {
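			/*
			 * Editorial note: with 2-way HTT siblings on
			 * adjacent cpu ids, this keeps bit 2n only when
			 * both cpus 2n and 2n+1 are idle, so the IPI
			 * prefers a package with both logical cpus free.
			 */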
1064			map2 =  (map & (map >> 1)) & 0x5555;
1065			if (map2) {
1066				map = map2;
1067			}
1068		}
1069
1070		/* set only one bit (two's complement picks the lowest set bit) */
1071		if (forward_wakeup_use_single) {
1072			map = map & ((~map) + 1);
1073		}
1074	}
1075	if (map) {
1076		forward_wakeups_delivered++;
1077		ipi_selected(map, IPI_AST);
1078		return (1);
1079	}
1080	if (cpunum == NOCPU)
1081		printf("forward_wakeup: Idle processor not found\n");
1082	return (0);
1083}
1084#endif
1085
1086void
1087sched_add(struct thread *td, int flags)
1088{
1089	struct kse *ke;
1090#ifdef SMP
1091	int forwarded = 0;
1092	int cpu;
1093#endif
1094
1095	ke = td->td_kse;
1096	mtx_assert(&sched_lock, MA_OWNED);
1097	KASSERT(ke->ke_state != KES_ONRUNQ,
1098	    ("sched_add: kse %p (%s) already in run queue", ke,
1099	    ke->ke_proc->p_comm));
1100	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1101	    ("sched_add: process swapped out"));
1102	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1103	    td, td->td_proc->p_comm, td->td_priority, curthread,
1104	    curthread->td_proc->p_comm);
1105
1106#ifdef SMP
1107	if (KSE_CAN_MIGRATE(ke)) {
1108		CTR2(KTR_RUNQ,
1109		    "sched_add: adding kse:%p (td:%p) to gbl runq", ke, td);
1110		cpu = NOCPU;
1111		ke->ke_runq = &runq;
1112	} else {
1113		if (!SKE_RUNQ_PCPU(ke))
1114			ke->ke_runq = &runq_pcpu[(cpu = PCPU_GET(cpuid))];
1115		else
1116			cpu = td->td_lastcpu;
1117		CTR3(KTR_RUNQ,
1118		    "sched_add: Put kse:%p(td:%p) on cpu%d runq", ke, td, cpu);
1119	}
1120#else
1121	CTR2(KTR_RUNQ, "sched_add: adding kse:%p (td:%p) to runq", ke, td);
1122	ke->ke_runq = &runq;
1123
1124#endif
1125	/*
1126	 * If we are yielding (on the way out anyhow)
1127	 * or the thread being saved is US,
1128	 * then don't try to be smart about preemption
1129	 * or kicking off another CPU
1130	 * as it won't help and may hinder.
1131	 * In the YIELDING case, we are about to run whoever is
1132	 * being put in the queue anyhow, and in the
1133	 * OURSELF case, we are putting ourselves on the run queue
1134	 * which also only happens when we are about to yield.
1135	 */
1136	if ((flags & SRQ_YIELDING) == 0) {
1137#ifdef SMP
1138		cpumask_t me = PCPU_GET(cpumask);
1139		int idle = idle_cpus_mask & me;
1140		/*
1141		 * Only try to kick off another CPU if
1142		 * the thread is unpinned
1143		 * or pinned to another cpu,
1144		 * and there are other available and idle CPUs.
1145		 * if we are idle, or it's an interrupt,
1146		 * then skip straight to preemption.
1147		 */
1148		if ( (! idle) && ((flags & SRQ_INTR) == 0) &&
1149		    (idle_cpus_mask & ~(hlt_cpus_mask | me)) &&
1150		    ( KSE_CAN_MIGRATE(ke) ||
1151		      ke->ke_runq != &runq_pcpu[PCPU_GET(cpuid)])) {
1152			forwarded = forward_wakeup(cpu);
1153		}
1154		/*
1155		 * If we failed to kick off another cpu, then look to
1156		 * see if we should preempt this CPU. Only allow this
1157		 * if it is not pinned or IS pinned to this CPU.
1158		 * If we are the idle thread, we also try to preempt,
1159		 * as it will be quicker and, being idle, we won't
1160		 * lose in doing so.
1161		 */
1162		if ((!forwarded) &&
1163		    (ke->ke_runq == &runq ||
1164		     ke->ke_runq == &runq_pcpu[PCPU_GET(cpuid)]))
1165#endif
1166
1167		{
1168			if (maybe_preempt(td))
1169				return;
1170		}
1171	}
1172	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1173		sched_load_add();
1174	SLOT_USE(td->td_ksegrp);
1175	runq_add(ke->ke_runq, ke, flags);
1176	ke->ke_state = KES_ONRUNQ;
1177	maybe_resched(td);
1178}
1179
1180void
1181sched_rem(struct thread *td)
1182{
1183	struct kse *ke;
1184
1185	ke = td->td_kse;
1186	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1187	    ("sched_rem: process swapped out"));
1188	KASSERT((ke->ke_state == KES_ONRUNQ),
1189	    ("sched_rem: KSE not on run queue"));
1190	mtx_assert(&sched_lock, MA_OWNED);
1191	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1192	    td, td->td_proc->p_comm, td->td_priority, curthread,
1193	    curthread->td_proc->p_comm);
1194
1195	if ((td->td_proc->p_flag & P_NOLOAD) == 0)
1196		sched_load_rem();
1197	SLOT_RELEASE(td->td_ksegrp);
1198	runq_remove(ke->ke_runq, ke);
1199
1200	ke->ke_state = KES_THREAD;
1201}
1202
1203/*
1204 * Select threads to run.
1205 * Notice that the running threads still consume a slot.
1206 */
1207struct kse *
1208sched_choose(void)
1209{
1210	struct kse *ke;
1211	struct runq *rq;
1212
1213#ifdef SMP
1214	struct kse *kecpu;
1215
1216	rq = &runq;
1217	ke = runq_choose(&runq);
1218	kecpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
1219
1220	if (ke == NULL ||
1221	    (kecpu != NULL &&
1222	     kecpu->ke_thread->td_priority < ke->ke_thread->td_priority)) {
1223		CTR2(KTR_RUNQ, "choosing kse %p from pcpu runq %d", kecpu,
1224		     PCPU_GET(cpuid));
1225		ke = kecpu;
1226		rq = &runq_pcpu[PCPU_GET(cpuid)];
1227	} else {
1228		CTR1(KTR_RUNQ, "choosing kse %p from main runq", ke);
1229	}
1230
1231#else
1232	rq = &runq;
1233	ke = runq_choose(&runq);
1234#endif
1235
1236	if (ke != NULL) {
1237		runq_remove(rq, ke);
1238		ke->ke_state = KES_THREAD;
1239
1240		KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1241		    ("sched_choose: process swapped out"));
1242	}
1243	return (ke);
1244}
1245
1246void
1247sched_userret(struct thread *td)
1248{
1249	struct ksegrp *kg;
1250	/*
1251	 * XXX we cheat slightly on the locking here to avoid locking in
1252	 * the usual case.  Setting td_priority here is essentially an
1253	 * incomplete workaround for not setting it properly elsewhere.
1254	 * Now that some interrupt handlers are threads, not setting it
1255	 * properly elsewhere can clobber it in the window between setting
1256	 * it here and returning to user mode, so don't waste time setting
1257	 * it perfectly here.
1258	 */
1259	KASSERT((td->td_flags & TDF_BORROWING) == 0,
1260	    ("thread with borrowed priority returning to userland"));
1261	kg = td->td_ksegrp;
1262	if (td->td_priority != kg->kg_user_pri) {
1263		mtx_lock_spin(&sched_lock);
1264		td->td_priority = kg->kg_user_pri;
1265		td->td_base_pri = kg->kg_user_pri;
1266		mtx_unlock_spin(&sched_lock);
1267	}
1268}
1269
1270void
1271sched_bind(struct thread *td, int cpu)
1272{
1273	struct kse *ke;
1274
1275	mtx_assert(&sched_lock, MA_OWNED);
1276	KASSERT(TD_IS_RUNNING(td),
1277	    ("sched_bind: cannot bind non-running thread"));
1278
1279	ke = td->td_kse;
1280
1281	ke->ke_flags |= KEF_BOUND;
1282#ifdef SMP
1283	ke->ke_runq = &runq_pcpu[cpu];
1284	if (PCPU_GET(cpuid) == cpu)
1285		return;
1286
1287	ke->ke_state = KES_THREAD;
1288
1289	mi_switch(SW_VOL, NULL);
1290#endif
1291}
1292
1293void
1294sched_unbind(struct thread* td)
1295{
1296	mtx_assert(&sched_lock, MA_OWNED);
1297	td->td_kse->ke_flags &= ~KEF_BOUND;
1298}
1299
1300int
1301sched_is_bound(struct thread *td)
1302{
1303	mtx_assert(&sched_lock, MA_OWNED);
1304	return (td->td_kse->ke_flags & KEF_BOUND);
1305}
1306
1307int
1308sched_load(void)
1309{
1310	return (sched_tdcnt);
1311}
1312
1313int
1314sched_sizeof_ksegrp(void)
1315{
1316	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1317}
1318int
1319sched_sizeof_proc(void)
1320{
1321	return (sizeof(struct proc));
1322}
1323int
1324sched_sizeof_thread(void)
1325{
1326	return (sizeof(struct thread) + sizeof(struct kse));
1327}
1328
1329fixpt_t
1330sched_pctcpu(struct thread *td)
1331{
1332	struct kse *ke;
1333
1334	ke = td->td_kse;
1335	return (ke->ke_pctcpu);
1338}
1339#define KERN_SWITCH_INCLUDE 1
1340#include "kern/kern_switch.c"
1341