sched_ule.c revision 147565
/*-
 * Copyright (c) 2002-2005, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 147565 2005-06-24 00:16:57Z peter $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#define kse td_sched

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
    "Scheduler name");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

/*
 * The following data structures are allocated within their parent structure
 * but are scheduler specific.
 */
/*
 * The schedulable entity that can be given a context to run.  A process may
 * have several of these.
 */
struct kse {
	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
	int		ke_flags;	/* (j) KEF_* flags. */
	struct thread	*ke_thread;	/* (*) Active associated thread. */
	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
	char		ke_rqindex;	/* (j) Run queue index. */
	enum {
		KES_THREAD = 0x0,	/* slaved to thread state */
		KES_ONRUNQ
	} ke_state;			/* (j) thread sched specific status. */
	int		ke_slptime;
	int		ke_slice;
	struct runq	*ke_runq;
	u_char		ke_cpu;		/* CPU that we have affinity for. */
	/* The following variables are only used for pctcpu calculation */
	int		ke_ltick;	/* Last tick that we were running on */
	int		ke_ftick;	/* First tick that we were running on */
	int		ke_ticks;	/* Tick count */
};
#define	td_kse			td_sched
#define	td_slptime		td_kse->ke_slptime
#define ke_proc			ke_thread->td_proc
#define ke_ksegrp		ke_thread->td_ksegrp
#define	ke_assign		ke_procq.tqe_next
/* flags kept in ke_flags */
#define	KEF_ASSIGNED	0x0001		/* Thread is being migrated. */
#define	KEF_BOUND	0x0002		/* Thread can not migrate. */
#define	KEF_XFERABLE	0x0004		/* Thread was added as transferable. */
#define	KEF_HOLD	0x0008		/* Thread is temporarily bound. */
#define	KEF_REMOVED	0x0010		/* Thread was removed while ASSIGNED */
#define	KEF_INTERNAL	0x0020		/* Thread added due to migration. */
#define	KEF_DIDRUN	0x02000		/* Thread actually ran. */
#define	KEF_EXIT	0x04000		/* Thread is being killed. */

struct kg_sched {
	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
					   /* the system scheduler */
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
	int	skg_avail_opennings;	/* (j) Num unfilled slots in group. */
	int	skg_concurrency;	/* (j) Num threads requested in group. */
};
#define kg_last_assigned	kg_sched->skg_last_assigned
#define kg_avail_opennings	kg_sched->skg_avail_opennings
#define kg_concurrency		kg_sched->skg_concurrency
#define kg_runtime		kg_sched->skg_runtime
#define kg_slptime		kg_sched->skg_slptime

#define SLOT_RELEASE(kg)	(kg)->kg_avail_opennings++
#define	SLOT_USE(kg)		(kg)->kg_avail_opennings--

static struct kse kse0;
static struct kg_sched kg_sched0;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower (better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
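
/*
 * Worked example (illustrative only, assuming the stock priority.h values
 * where PRI_MIN_TIMESHARE is 160 and PRI_MAX_TIMESHARE is 223, giving
 * SCHED_PRI_RANGE = 64): an interactivity score of 50 yields
 * SCHED_PRI_INTERACT(50) = 50 * 64 / 100 = 32, so sched_priority() below
 * computes 160 + 32 + p_nice before clamping to the timeshare range.
 */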

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)
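
/*
 * For a sense of scale (assuming hz = 1000): SCHED_SLP_RUN_MAX is
 * (1000 * 5) << 10 = 5,120,000, i.e. five seconds of ticks carried in
 * fixed point with 10 bits of fraction, and SCHED_SLP_RUN_FORK is
 * (1000 / 2) << 10 = 512,000, or half a second of inheritable history.
 */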

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:  Determine the amount of slice granted to a scaled nice.
 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_INTERACTIVE		(slice_max)
#define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
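
/*
 * Worked example (assuming hz = 1000, so sched_setup() below sets
 * slice_min = 10 and slice_max = 142): SCHED_SLICE_RANGE is
 * 142 - 10 + 1 = 133 and SCHED_SLICE_NTHRESH is 19.  A kse at the least
 * nice value in its queue (offset 0) gets SCHED_SLICE_NICE(0) = 142 ticks;
 * one 5 points above the least nice gets 142 - (5 * 133) / 19 =
 * 142 - 35 = 107 ticks; offsets beyond SCHED_SLICE_NTHRESH receive no
 * slice at all (see sched_slice()).
 */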

/*
 * This macro determines whether or not the thread belongs on the current or
 * next run queue.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    ((ke->ke_thread->td_flags & TDF_BORROWING) || SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
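
/*
 * With hz = 1000 this is a 10,000 tick window; sched_pctcpu_update()
 * rescales ke_ticks into this window so %cpu reflects roughly the last
 * ten seconds of execution.
 */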

/*
 * kseq - per processor runqs and statistics.
 */
struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_load_timeshare;	/* Load for timeshare. */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int			ksq_transferable;
	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
	struct kseq_group	*ksq_group;	/* Our processor group. */
	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
#else
	int		ksq_sysload;		/* For loadavg, !ITHD load. */
#endif
};

#ifdef SMP
/*
 * kseq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not
 * NUMA.  In a NUMA environment we'd want an idle bitmap per group and a two
 * tiered load balancer.
 */
struct kseq_group {
	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
	int	ksg_load;		/* Total load of this group. */
	int	ksg_transferable;	/* Transferable load of this group. */
	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
};
#endif

/*
 * One kse queue per processor.
 */
#ifdef SMP
static cpumask_t kseq_idle;
static int ksg_maxid;
static struct kseq	kseq_cpu[MAXCPU];
static struct kseq_group kseq_groups[MAXCPU];
static int bal_tick;
static int gbal_tick;
static int balance_groups;

#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#define	KSEQ_ID(x)	((x) - kseq_cpu)
#define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
#else	/* !SMP */
static struct kseq	kseq_cpu;

#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void slot_fill(struct ksegrp *);
static struct kse *sched_choose(void);		/* XXX Should be thread * */
static void sched_slice(struct kse *);
static void sched_priority(struct ksegrp *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct ksegrp *);
static void sched_interact_update(struct ksegrp *);
static void sched_interact_fork(struct ksegrp *);
static void sched_pctcpu_update(struct kse *);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *);
static void kseq_setup(struct kseq *);
static void kseq_load_add(struct kseq *, struct kse *);
static void kseq_load_rem(struct kseq *, struct kse *);
static __inline void kseq_runq_add(struct kseq *, struct kse *, int);
static __inline void kseq_runq_rem(struct kseq *, struct kse *);
static void kseq_nice_add(struct kseq *, int);
static void kseq_nice_rem(struct kseq *, int);
void kseq_print(int cpu);
#ifdef SMP
static int kseq_transfer(struct kseq *, struct kse *, int);
static struct kse *runq_steal(struct runq *);
static void sched_balance(void);
static void sched_balance_groups(void);
static void sched_balance_group(struct kseq_group *);
static void sched_balance_pair(struct kseq *, struct kseq *);
static void kseq_move(struct kseq *, int);
static int kseq_idled(struct kseq *);
static void kseq_notify(struct kse *, int);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *, int);
#define	KSE_CAN_MIGRATE(ke)						\
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload:           %d\n", kseq->ksq_load);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
#ifdef SMP
	printf("\tload transferable: %d\n", kseq->ksq_transferable);
#endif
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < SCHED_PRI_NRESV; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static __inline void
kseq_runq_add(struct kseq *kseq, struct kse *ke, int flags)
{
#ifdef SMP
	if (KSE_CAN_MIGRATE(ke)) {
		kseq->ksq_transferable++;
		kseq->ksq_group->ksg_transferable++;
		ke->ke_flags |= KEF_XFERABLE;
	}
#endif
	runq_add(ke->ke_runq, ke, flags);
}

static __inline void
kseq_runq_rem(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (ke->ke_flags & KEF_XFERABLE) {
		kseq->ksq_transferable--;
		kseq->ksq_group->ksg_transferable--;
		ke->ke_flags &= ~KEF_XFERABLE;
	}
#endif
	runq_remove(ke->ke_runq, ke);
}

static void
kseq_load_add(struct kseq *kseq, struct kse *ke)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare++;
	kseq->ksq_load++;
	CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load++;
#else
		kseq->ksq_sysload++;
#endif
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_proc->p_nice);
}

static void
kseq_load_rem(struct kseq *kseq, struct kse *ke)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare--;
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load--;
#else
		kseq->ksq_sysload--;
#endif
	kseq->ksq_load--;
	CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_load_timeshare == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm favors
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 */
static void
sched_balance(void)
{
	struct kseq_group *high;
	struct kseq_group *low;
	struct kseq_group *ksg;
	int cnt;
	int i;

	bal_tick = ticks + (random() % (hz * 2));
	if (smp_started == 0)
		return;
	low = high = NULL;
	i = random() % (ksg_maxid + 1);
	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
		ksg = KSEQ_GROUP(i);
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || ksg->ksg_load > high->ksg_load)
		    && ksg->ksg_transferable)
			high = ksg;
		if (low == NULL || ksg->ksg_load < low->ksg_load)
			low = ksg;
		if (++i > ksg_maxid)
			i = 0;
	}
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->ksg_members),
		    LIST_FIRST(&low->ksg_members));
}

static void
sched_balance_groups(void)
{
	int i;

	gbal_tick = ticks + (random() % (hz * 2));
	mtx_assert(&sched_lock, MA_OWNED);
	if (smp_started)
		for (i = 0; i <= ksg_maxid; i++)
			sched_balance_group(KSEQ_GROUP(i));
}

static void
sched_balance_group(struct kseq_group *ksg)
{
	struct kseq *kseq;
	struct kseq *high;
	struct kseq *low;
	int load;

	if (ksg->ksg_transferable == 0)
		return;
	low = NULL;
	high = NULL;
	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
		load = kseq->ksq_load;
		if (high == NULL || load > high->ksq_load)
			high = kseq;
		if (low == NULL || load < low->ksq_load)
			low = kseq;
	}
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
}

static void
sched_balance_pair(struct kseq *high, struct kseq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int move;
	int diff;
	int i;

	/*
	 * If we're transferring within a group we have to use this specific
	 * kseq's transferable count, otherwise we can steal from other members
	 * of the group.
	 */
	if (high->ksq_group == low->ksq_group) {
		transferable = high->ksq_transferable;
		high_load = high->ksq_load;
		low_load = low->ksq_load;
	} else {
		transferable = high->ksq_group->ksg_transferable;
		high_load = high->ksq_group->ksg_load;
		low_load = low->ksq_group->ksg_load;
	}
	if (transferable == 0)
		return;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * kses we actually have to give up (transferable).
	 */
	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;
	move = min(move, transferable);
	for (i = 0; i < move; i++)
		kseq_move(high, KSEQ_ID(low));
	return;
}
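
/*
 * Example of the arithmetic above (illustrative values): with
 * high_load = 7 and low_load = 2, diff = 5 and move rounds up to 3, so
 * up to three kses are pushed from the loaded kseq toward the idle one,
 * subject to the transferable count.
 */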

static void
kseq_move(struct kseq *from, int cpu)
{
	struct kseq *kseq;
	struct kseq *to;
	struct kse *ke;

	kseq = from;
	to = KSEQ_CPU(cpu);
	ke = kseq_steal(kseq, 1);
	if (ke == NULL) {
		struct kseq_group *ksg;

		ksg = kseq->ksq_group;
		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
			if (kseq == from || kseq->ksq_transferable == 0)
				continue;
			ke = kseq_steal(kseq, 1);
			break;
		}
		if (ke == NULL)
			panic("kseq_move: No KSEs available with a "
			    "transferable count of %d\n",
			    ksg->ksg_transferable);
	}
	if (kseq == to)
		return;
	ke->ke_state = KES_THREAD;
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
	kseq_notify(ke, cpu);
}

static int
kseq_idled(struct kseq *kseq)
{
	struct kseq_group *ksg;
	struct kseq *steal;
	struct kse *ke;

	ksg = kseq->ksq_group;
	/*
	 * If we're in a cpu group, try and steal kses from another cpu in
	 * the group before idling.
	 */
	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
			if (steal == kseq || steal->ksq_transferable == 0)
				continue;
			ke = kseq_steal(steal, 0);
			if (ke == NULL)
				continue;
			ke->ke_state = KES_THREAD;
			kseq_runq_rem(steal, ke);
			kseq_load_rem(steal, ke);
			ke->ke_cpu = PCPU_GET(cpuid);
			ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
			sched_add(ke->ke_thread, SRQ_YIELDING);
			return (0);
		}
	}
	/*
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a KSE bounces
	 * back and forth between two idle cores on separate physical CPUs.
	 */
	ksg->ksg_idlemask |= PCPU_GET(cpumask);
	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
		return (1);
	atomic_set_int(&kseq_idle, ksg->ksg_mask);
	return (1);
}

static void
kseq_assign(struct kseq *kseq)
{
	struct kse *nke;
	struct kse *ke;

	do {
		*(volatile struct kse **)&ke = kseq->ksq_assigned;
	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
	for (; ke != NULL; ke = nke) {
		nke = ke->ke_assign;
		kseq->ksq_group->ksg_load--;
		kseq->ksq_load--;
		ke->ke_flags &= ~KEF_ASSIGNED;
		ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
		sched_add(ke->ke_thread, SRQ_YIELDING);
	}
}

static void
kseq_notify(struct kse *ke, int cpu)
{
	struct kseq *kseq;
	struct thread *td;
	struct pcpu *pcpu;
	int class;
	int prio;

	kseq = KSEQ_CPU(cpu);
	/* XXX */
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (kseq_idle & kseq->ksq_group->ksg_mask))
		atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
	kseq->ksq_group->ksg_load++;
	kseq->ksq_load++;
	ke->ke_cpu = cpu;
	ke->ke_flags |= KEF_ASSIGNED;
	prio = ke->ke_thread->td_priority;

	/*
	 * Place a KSE on another cpu's queue and force a resched.
	 */
	do {
		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
	/*
	 * Without sched_lock we could lose a race where we set NEEDRESCHED
	 * on a thread that is switched out before the IPI is delivered.  This
	 * would lead us to miss the resched.  This will be a problem once
	 * sched_lock is pushed down.
	 */
	pcpu = pcpu_find(cpu);
	td = pcpu->pc_curthread;
	if (ke->ke_thread->td_priority < td->td_priority ||
	    td == pcpu->pc_idlethread) {
		td->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(1 << cpu, IPI_AST);
	}
}

static struct kse *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct kse *ke;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ke, rqh, ke_procq) {
				if (KSE_CAN_MIGRATE(ke))
					return (ke);
			}
		}
	}
	return (NULL);
}

static struct kse *
kseq_steal(struct kseq *kseq, int stealidle)
{
	struct kse *ke;

	/*
	 * Steal from next first to try to get a non-interactive task that
	 * may not have run for a while.
	 */
	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
		return (ke);
	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
		return (ke);
	if (stealidle)
		return (runq_steal(&kseq->ksq_idle));
	return (NULL);
}

int
kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
{
	struct kseq_group *nksg;
	struct kseq_group *ksg;
	struct kseq *old;
	int cpu;
	int idx;

	if (smp_started == 0)
		return (0);
	cpu = 0;
	/*
	 * If our load exceeds a certain threshold we should attempt to
	 * reassign this thread.  The first candidate is the cpu that
	 * originally ran the thread.  If it is idle, assign it there,
	 * otherwise, pick an idle cpu.
	 *
	 * The threshold at which we start to reassign kses has a large impact
	 * on the overall performance of the system.  Tuned too high and
	 * some CPUs may idle.  Too low and there will be excess migration
	 * and context switches.
	 */
	old = KSEQ_CPU(ke->ke_cpu);
	nksg = old->ksq_group;
	ksg = kseq->ksq_group;
	if (kseq_idle) {
		if (kseq_idle & nksg->ksg_mask) {
			cpu = ffs(nksg->ksg_idlemask);
			if (cpu) {
				CTR2(KTR_SCHED,
				    "kseq_transfer: %p found old cpu %X "
				    "in idlemask.", ke, cpu);
				goto migrate;
			}
		}
		/*
		 * Multiple cpus could find this bit simultaneously
		 * but the race shouldn't be terrible.
		 */
		cpu = ffs(kseq_idle);
		if (cpu) {
			CTR2(KTR_SCHED, "kseq_transfer: %p found %X "
			    "in idlemask.", ke, cpu);
			goto migrate;
		}
	}
	idx = 0;
#if 0
	if (old->ksq_load < kseq->ksq_load) {
		cpu = ke->ke_cpu + 1;
		CTR2(KTR_SCHED, "kseq_transfer: %p old cpu %X "
		    "load less than ours.", ke, cpu);
		goto migrate;
	}
	/*
	 * No new CPU was found, look for one with less load.
	 */
	for (idx = 0; idx <= ksg_maxid; idx++) {
		nksg = KSEQ_GROUP(idx);
		if (nksg->ksg_load /*+ (nksg->ksg_cpus  * 2)*/ < ksg->ksg_load) {
			cpu = ffs(nksg->ksg_cpumask);
			CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X load less "
			    "than ours.", ke, cpu);
			goto migrate;
		}
	}
#endif
	/*
	 * If another cpu in this group has idled, assign a thread over
	 * to them after checking to see if there are idled groups.
	 */
	if (ksg->ksg_idlemask) {
		cpu = ffs(ksg->ksg_idlemask);
		if (cpu) {
			CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X idle in "
			    "group.", ke, cpu);
			goto migrate;
		}
	}
	return (0);
migrate:
	/*
	 * Now that we've found an idle CPU, migrate the thread.
	 */
	cpu--;
	ke->ke_runq = NULL;
	kseq_notify(ke, cpu);

	return (1);
}

#endif	/* SMP */

/*
 * Pick the highest priority task we have and return it.
 */

static struct kse *
kseq_choose(struct kseq *kseq)
{
	struct runq *swap;
	struct kse *ke;
	int nice;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		nice = ke->ke_proc->p_nice + (0 - kseq->ksq_nicemin);
		if (ke->ke_slice == 0 || (nice > SCHED_SLICE_NTHRESH &&
		    ke->ke_proc->p_nice != 0)) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke, 0);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);
	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];
	kseq->ksq_load = 0;
	kseq->ksq_load_timeshare = 0;
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	balance_groups = 0;
	/*
	 * Initialize the kseqs.
	 */
	for (i = 0; i < MAXCPU; i++) {
		struct kseq *ksq;

		ksq = &kseq_cpu[i];
		ksq->ksq_assigned = NULL;
		kseq_setup(&kseq_cpu[i]);
	}
	if (smp_topology == NULL) {
		struct kseq_group *ksg;
		struct kseq *ksq;
		int cpus;

		for (cpus = 0, i = 0; i < MAXCPU; i++) {
			if (CPU_ABSENT(i))
				continue;
			ksq = &kseq_cpu[cpus];
			ksg = &kseq_groups[cpus];
			/*
			 * Setup a kseq group with one member.
			 */
			ksq->ksq_transferable = 0;
			ksq->ksq_group = ksg;
			ksg->ksg_cpus = 1;
			ksg->ksg_idlemask = 0;
			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			LIST_INIT(&ksg->ksg_members);
			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
			cpus++;
		}
		ksg_maxid = cpus - 1;
	} else {
		struct kseq_group *ksg;
		struct cpu_group *cg;
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			cg = &smp_topology->ct_group[i];
			ksg = &kseq_groups[i];
			/*
			 * Initialize the group.
			 */
			ksg->ksg_idlemask = 0;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			ksg->ksg_cpus = cg->cg_count;
			ksg->ksg_cpumask = cg->cg_mask;
			LIST_INIT(&ksg->ksg_members);
			/*
			 * Find all of the group members and add them.
			 */
			for (j = 0; j < MAXCPU; j++) {
				if ((cg->cg_mask & (1 << j)) != 0) {
					if (ksg->ksg_mask == 0)
						ksg->ksg_mask = 1 << j;
					kseq_cpu[j].ksq_transferable = 0;
					kseq_cpu[j].ksq_group = ksg;
					LIST_INSERT_HEAD(&ksg->ksg_members,
					    &kseq_cpu[j], ksq_siblings);
				}
			}
			if (ksg->ksg_cpus > 1)
				balance_groups = 1;
		}
		ksg_maxid = smp_topology->ct_count - 1;
	}
	/*
	 * Stagger the group and global load balancer so they do not
	 * interfere with each other.
	 */
	bal_tick = ticks + hz;
	if (balance_groups)
		gbal_tick = ticks + (hz / 2);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_load_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_proc->p_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	if (ke->ke_thread->td_flags & TDF_BORROWING) {
		ke->ke_slice = SCHED_SLICE_MIN;
		return;
	}

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get a minimal slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the ksegs nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse distance from the least nice ksegrp.
	 *
	 * If the kse is outside of the window it will get no slice
	 * and will be reevaluated each time it is selected on the
	 * run queue.  The exception to this is nice 0 ksegs when
	 * a nice -20 is running.  They are always granted a minimum
	 * slice.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_load_timeshare == 0 ||
		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else if (kg->kg_proc->p_nice == 0)
			ke->ke_slice = SCHED_SLICE_MIN;
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_INTERACTIVE;

	return;
}
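
/*
 * A sketch of the cases above (assuming hz = 1000 defaults and
 * ksq_nicemin = 0): an interactive kseg always gets
 * SCHED_SLICE_INTERACTIVE = 142 ticks; a non-interactive kseg at nice 0
 * gets SCHED_SLICE_NICE(0) = 142, at nice 5 gets SCHED_SLICE_NICE(5) = 107,
 * and at nice 20 (outside the 20 point window) gets a slice of 0 and is
 * re-evaluated in kseq_choose().
 */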

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
static void
sched_interact_update(struct ksegrp *kg)
{
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		kg->kg_runtime /= 2;
		kg->kg_slptime /= 2;
		return;
	}
	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
}
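
/*
 * Worked example (assuming hz = 1000, so SCHED_SLP_RUN_MAX = 5,120,000):
 * with kg_runtime = 4,000,000 and kg_slptime = 2,000,000 the sum is
 * 6,000,000, below (5,120,000 / 5) * 6 = 6,144,000, so both values are
 * scaled by 4/5 to 3,200,000 and 1,600,000 and the sum drops back under
 * the maximum.  A sum above 6,144,000 would instead have been halved.
 */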

static void
sched_interact_fork(struct ksegrp *kg)
{
	int ratio;
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		kg->kg_runtime /= ratio;
		kg->kg_slptime /= ratio;
	}
}
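
/*
 * For instance (hz = 1000): a parent with 2,048,000 ticks of combined
 * history has sum / SCHED_SLP_RUN_FORK = 2,048,000 / 512,000 = 4, so the
 * child inherits runtime and slptime each divided by four, capping what a
 * long-running parent can pass on.
 */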

static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} else if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);
}
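
/*
 * Two illustrative scores: a kseg that slept 3,000,000 ticks and ran
 * 1,000,000 has div = max(1, 3,000,000 / 50) = 60,000 and scores
 * 1,000,000 / 60,000 = 16, comfortably under SCHED_INTERACT_THRESH (30).
 * Swapping the two values gives 50 + (50 - 16) = 84, a CPU hog that
 * lands on ksq_next rather than ksq_curr.
 */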

/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	ksegrp0.kg_sched = &kg_sched0;
	thread0.td_sched = &kse0;
	kse0.ke_thread = &thread0;
	kse0.ke_state = KES_THREAD;
	kg_sched0.skg_concurrency = 1;
	kg_sched0.skg_avail_opennings = 0; /* we are already running */
}

/*
 * This is only somewhat accurate since, given many processes of the same
 * priority, they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

static void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
			    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}
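
/*
 * The rescale above is a proportional stretch: ticks accumulated over the
 * elapsed interval are extended across the full SCHED_CPU_TICKS window.
 * For example (hz = 1000), 2,000 ticks accumulated over 5,000 ticks of
 * wall time become roughly 2,000 / 5,000 * 10,000 = 4,000, with the
 * << 10 / >> 10 pair preserving precision through the integer divide.
 */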

void
sched_thread_priority(struct thread *td, u_char prio)
{
	struct kse *ke;

	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
	    curthread->td_proc->p_comm);
	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority == prio)
		return;
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (prio < td->td_priority && ke->ke_runq != NULL &&
		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
			runq_add(ke->ke_runq, ke, 0);
		}
		/*
		 * Hold this kse on this cpu so that sched_prio() doesn't
		 * cause excessive migration.  We only want migration to
		 * happen as the result of a wakeup.
		 */
		ke->ke_flags |= KEF_HOLD;
		adjustrunqueue(td, prio);
		ke->ke_flags &= ~KEF_HOLD;
	} else
		td->td_priority = prio;
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_thread_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_ksegrp->kg_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_thread_priority(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't
	 * ever lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_thread_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct kseq *ksq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;
	ksq = KSEQ_SELF();

	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;

	/*
	 * If the KSE has been assigned it may be in the process of switching
	 * to the new cpu.  This is the case in sched_bind().
	 */
	if (td == PCPU_GET(idlethread)) {
		TD_SET_CAN_RUN(td);
	} else if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
		/* We are ending our run so make our slot available again */
		SLOT_RELEASE(td->td_ksegrp);
		kseq_load_rem(ksq, ke);
		if (TD_IS_RUNNING(td)) {
			/*
			 * Don't allow the thread to migrate
			 * from a preemption.
			 */
			ke->ke_flags |= KEF_HOLD;
			setrunqueue(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
			ke->ke_flags &= ~KEF_HOLD;
		} else if ((td->td_proc->p_flag & P_HADTHREADS) &&
		    (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp))
			/*
			 * We will not be on the run queue.
			 * So we must be sleeping or similar.
			 * Don't use the slot if we will need it
			 * for newtd.
			 */
			slot_fill(td->td_ksegrp);
	}
	if (newtd != NULL) {
		/*
		 * If we bring in a thread, account for it as if it had been
		 * added to the run queue and then chosen.
		 */
		newtd->td_kse->ke_flags |= KEF_DIDRUN;
		newtd->td_kse->ke_runq = ksq->ksq_curr;
		TD_SET_RUNNING(newtd);
		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
		/*
		 * XXX When we preempt, we've already consumed a slot because
		 * we got here through sched_add().  However, newtd can come
		 * from thread_switchout() which can't SLOT_USE() because
		 * the SLOT code is scheduler dependent.  We must use the
		 * slot here otherwise.
		 */
		if ((flags & SW_PREEMPT) == 0)
			SLOT_USE(newtd->td_ksegrp);
	} else
		newtd = choosethread();
	if (td != newtd) {
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		cpu_switch(td, newtd);
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct proc *p, int nice)
{
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		if (kg->kg_pri_class == PRI_TIMESHARE) {
			FOREACH_THREAD_IN_GROUP(kg, td) {
				ke = td->td_kse;
				if (ke->ke_runq == NULL)
					continue;
				kseq = KSEQ_CPU(ke->ke_cpu);
				kseq_nice_rem(kseq, p->p_nice);
				kseq_nice_add(kseq, nice);
			}
		}
	}
	p->p_nice = nice;
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		sched_priority(kg);
		FOREACH_THREAD_IN_GROUP(kg, td)
			td->td_flags |= TDF_NEEDRESCHED;
	}
}

void
sched_sleep(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = (ticks - td->td_slptime) << 10;
		if (hzticks >= SCHED_SLP_RUN_MAX) {
			kg->kg_slptime = SCHED_SLP_RUN_MAX;
			kg->kg_runtime = 1;
		} else {
			kg->kg_slptime += hzticks;
			sched_interact_update(kg);
		}
		sched_priority(kg);
		sched_slice(td->td_kse);
		td->td_slptime = 0;
	}
	setrunqueue(td, SRQ_BORING);
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct thread *td, struct thread *childtd)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(td, childtd->td_ksegrp);
	sched_fork_thread(td, childtd);
}

void
sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
	struct ksegrp *kg = td->td_ksegrp;
	mtx_assert(&sched_lock, MA_OWNED);

	child->kg_slptime = kg->kg_slptime;
	child->kg_runtime = kg->kg_runtime;
	child->kg_user_pri = kg->kg_user_pri;
	sched_interact_fork(child);
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
	struct kse *ke;
	struct kse *ke2;

	sched_newthread(child);
	ke = td->td_kse;
	ke2 = child->td_kse;
	ke2->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	ke2->ke_cpu = ke->ke_cpu;
	ke2->ke_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	ke2->ke_ticks = ke->ke_ticks;
	ke2->ke_ltick = ke->ke_ltick;
	ke2->ke_ftick = ke->ke_ftick;
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;
	struct thread *td;
	int nclass;
	int oclass;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	nclass = PRI_BASE(class);
	oclass = PRI_BASE(kg->kg_pri_class);
	FOREACH_THREAD_IN_GROUP(kg, td) {
		ke = td->td_kse;
		if ((ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD) || ke->ke_runq == NULL)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

#ifdef SMP
		/*
		 * On SMP if we're on the RUNQ we must adjust the transferable
		 * count because we could be changing to or from an interrupt
		 * class.
		 */
		if (ke->ke_state == KES_ONRUNQ) {
			if (KSE_CAN_MIGRATE(ke)) {
				kseq->ksq_transferable--;
				kseq->ksq_group->ksg_transferable--;
			}
			if (KSE_CAN_MIGRATE(ke)) {
				kseq->ksq_transferable++;
				kseq->ksq_group->ksg_transferable++;
			}
		}
#endif
		if (oclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare--;
			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
		}
		if (nclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare++;
			kseq_nice_add(kseq, kg->kg_proc->p_nice);
		}
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct thread *childtd)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd);
	sched_exit_thread(NULL, childtd);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
{
	/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
	kg->kg_runtime += td->td_ksegrp->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *childtd)
{
	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
	    childtd, childtd->td_proc->p_comm, childtd->td_priority);
	kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
}

void
sched_clock(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
	if (ticks >= bal_tick)
		sched_balance();
	if (ticks >= gbal_tick && balance_groups)
		sched_balance_groups();
	/*
	 * We could have been assigned a non real-time thread without an
	 * IPI.
	 */
	if (kseq->ksq_assigned)
		kseq_assign(kseq);	/* Potentially sets NEEDRESCHED */
#endif
	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	ke = td->td_kse;
	kg = ke->ke_ksegrp;

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;
	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	if (--ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_load_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_load_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	kseq = KSEQ_SELF();
#ifdef SMP
	if (kseq->ksq_assigned) {
		mtx_lock_spin(&sched_lock);
		kseq_assign(kseq);
		mtx_unlock_spin(&sched_lock);
	}
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (kseq->ksq_load > 0)
			goto out;
	} else
		if (kseq->ksq_load - 1 > 0)
			goto out;
	load = 0;
out:
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	kg = td->td_ksegrp;
	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		td->td_base_pri = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
restart:
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	ke = kseq_choose(kseq);
	if (ke) {
#ifdef SMP
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
			if (kseq_idled(kseq) == 0)
				goto restart;
#endif
		kseq_runq_rem(kseq, ke);
		ke->ke_state = KES_THREAD;
		return (ke);
	}
#ifdef SMP
	if (kseq_idled(kseq) == 0)
		goto restart;
#endif
	return (NULL);
}

void
sched_add(struct thread *td, int flags)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;
	int preemptive;
	int canmigrate;
	int class;

	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	kg = td->td_ksegrp;
	canmigrate = 1;
	preemptive = !(flags & SRQ_YIELDING);
	class = PRI_BASE(kg->kg_pri_class);
	kseq = KSEQ_SELF();
	if ((ke->ke_flags & KEF_INTERNAL) == 0)
		SLOT_USE(td->td_ksegrp);
	ke->ke_flags &= ~KEF_INTERNAL;
#ifdef SMP
	if (ke->ke_flags & KEF_ASSIGNED) {
		if (ke->ke_flags & KEF_REMOVED)
			ke->ke_flags &= ~KEF_REMOVED;
		return;
	}
	canmigrate = KSE_CAN_MIGRATE(ke);
#endif
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));
	switch (class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		if (canmigrate)
			ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		/*
		 * This is for priority prop.
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	/*
	 * Don't migrate running threads here.  Force the long term balancer
	 * to do it.
	 */
	if (ke->ke_flags & KEF_HOLD) {
		ke->ke_flags &= ~KEF_HOLD;
		canmigrate = 0;
	}
	/*
	 * If this thread is pinned or bound, notify the target cpu.
	 */
	if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid)) {
		ke->ke_runq = NULL;
		kseq_notify(ke, ke->ke_cpu);
		return;
	}
	/*
	 * If we had been idle, clear our bit in the group and potentially
	 * the global bitmap.  If not, see if we should transfer this thread.
	 */
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
		/*
		 * Check to see if our group is unidling, and if so, remove it
		 * from the global idle mask.
		 */
		if (kseq->ksq_group->ksg_idlemask ==
		    kseq->ksq_group->ksg_cpumask)
			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
		/*
		 * Now remove ourselves from the group specific idle mask.
		 */
		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
	} else if (canmigrate && kseq->ksq_load > 1 && class != PRI_ITHD)
		if (kseq_transfer(kseq, ke, class))
			return;
	ke->ke_cpu = PCPU_GET(cpuid);
#endif
	if (td->td_priority < curthread->td_priority &&
	    ke->ke_runq == kseq->ksq_curr)
		curthread->td_flags |= TDF_NEEDRESCHED;
	if (preemptive && maybe_preempt(td))
		return;
	ke->ke_state = KES_ONRUNQ;

	kseq_runq_add(kseq, ke, flags);
	kseq_load_add(kseq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kseq *kseq;
	struct kse *ke;

	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	SLOT_RELEASE(td->td_ksegrp);
	if (ke->ke_flags & KEF_ASSIGNED) {
		ke->ke_flags |= KEF_REMOVED;
		return;
	}
	KASSERT((ke->ke_state == KES_ONRUNQ),
	    ("sched_rem: KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	kseq = KSEQ_CPU(ke->ke_cpu);
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct kse *ke;

	pctcpu = 0;
	ke = td->td_kse;
	if (ke == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
		    ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/* How many rticks per second? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

void
sched_bind(struct thread *td, int cpu)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	ke->ke_flags |= KEF_BOUND;
#ifdef SMP
	if (PCPU_GET(cpuid) == cpu)
		return;
	/* sched_rem without the runq_remove */
	ke->ke_state = KES_THREAD;
	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
	kseq_notify(ke, cpu);
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_is_bound(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	return (td->td_kse->ke_flags & KEF_BOUND);
}

int
sched_load(void)
{
#ifdef SMP
	int total;
	int i;

	total = 0;
	for (i = 0; i <= ksg_maxid; i++)
		total += KSEQ_GROUP(i)->ksg_load;
	return (total);
#else
	return (KSEQ_SELF()->ksq_sysload);
#endif
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}
#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"