sched_ule.c revision 148603
/*-
 * Copyright (c) 2002-2005, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 148603 2005-07-31 15:11:21Z davidxu $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#define kse td_sched

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
    "Scheduler name");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

/*
 * The following data structures are allocated within their parent structure
 * but are scheduler specific.
 */
/*
 * The schedulable entity that can be given a context to run.  A process may
 * have several of these.
 */
struct kse {
	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
	int		ke_flags;	/* (j) KEF_* flags. */
	struct thread	*ke_thread;	/* (*) Active associated thread. */
	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
	char		ke_rqindex;	/* (j) Run queue index. */
	enum {
		KES_THREAD = 0x0,	/* slaved to thread state */
		KES_ONRUNQ
	} ke_state;			/* (j) thread sched specific status. */
	int		ke_slptime;
	int		ke_slice;
	struct runq	*ke_runq;
	u_char		ke_cpu;		/* CPU that we have affinity for. */
	/* The following variables are only used for pctcpu calculation */
	int		ke_ltick;	/* Last tick that we were running on */
	int		ke_ftick;	/* First tick that we were running on */
	int		ke_ticks;	/* Tick count */

};
#define	td_kse			td_sched
#define	td_slptime		td_kse->ke_slptime
#define ke_proc			ke_thread->td_proc
#define ke_ksegrp		ke_thread->td_ksegrp
#define	ke_assign		ke_procq.tqe_next
/* flags kept in ke_flags */
#define	KEF_ASSIGNED	0x0001		/* Thread is being migrated. */
#define	KEF_BOUND	0x0002		/* Thread can not migrate. */
#define	KEF_XFERABLE	0x0004		/* Thread was added as transferable. */
#define	KEF_HOLD	0x0008		/* Thread is temporarily bound. */
#define	KEF_REMOVED	0x0010		/* Thread was removed while ASSIGNED */
#define	KEF_INTERNAL	0x0020		/* Thread added due to migration. */
#define	KEF_DIDRUN	0x02000		/* Thread actually ran. */
#define	KEF_EXIT	0x04000		/* Thread is being killed. */

struct kg_sched {
	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
					   /* the system scheduler */
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
	int	skg_avail_opennings;	/* (j) Num unfilled slots in group.*/
	int	skg_concurrency;	/* (j) Num threads requested in group.*/
};
#define kg_last_assigned	kg_sched->skg_last_assigned
#define kg_avail_opennings	kg_sched->skg_avail_opennings
#define kg_concurrency		kg_sched->skg_concurrency
#define kg_runtime		kg_sched->skg_runtime
#define kg_slptime		kg_sched->skg_slptime

#define SLOT_RELEASE(kg)	(kg)->kg_avail_opennings++
#define	SLOT_USE(kg)		(kg)->kg_avail_opennings--

static struct kse kse0;
static struct kg_sched kg_sched0;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower (better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

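/*
 * Illustrative arithmetic (not compiled; assumes the stock priority.h
 * values of this era, PRI_MIN_TIMESHARE = 160 and PRI_MAX_TIMESHARE =
 * 223, giving SCHED_PRI_RANGE = 64): a ksegrp with an interactivity
 * score of 50 maps to SCHED_PRI_INTERACT(50) = 50 * 64 / 100 = 32,
 * the middle of the timeshare range, before the nice value is added
 * in sched_priority().
 */
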
/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

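/*
 * Example (illustrative only): with hz = 1000, SCHED_SLP_RUN_MAX is
 * (1000 * 5) << 10 = 5120000, i.e. five seconds of combined sleep + run
 * history kept in ticks scaled by 2^10.  The << 10 shift is the same
 * fixed-point scale used for kg_slptime/kg_runtime throughout this file.
 */
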
/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:  Determine the amount of slice granted to a scaled nice.
 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_INTERACTIVE		(slice_max)
#define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))

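/*
 * Worked example (illustrative; uses the compiled-in defaults slice_min
 * = 1 and slice_max = 10, which sched_setup() later retunes, and
 * SCHED_PRI_NHALF = 20, so SCHED_SLICE_NTHRESH = 19 and
 * SCHED_SLICE_RANGE = 10): a kse whose nice equals ksq_nicemin scales to
 * 0 and gets SCHED_SLICE_NICE(0) = 10 ticks; one 10 points nicer than
 * the minimum gets 10 - (10 * 10 / 19) = 5 ticks; past 19 it receives
 * no slice at all (see sched_slice()).
 */
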
/*
 * This macro determines whether or not the thread belongs on the current or
 * next run queue.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    ((ke->ke_thread->td_flags & TDF_BORROWING) || SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */
struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_load_timeshare;	/* Load for timeshare. */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int			ksq_transferable;
	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
	struct kseq_group	*ksq_group;	/* Our processor group. */
	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
#else
	int		ksq_sysload;		/* For loadavg, !ITHD load. */
#endif
};

#ifdef SMP
/*
 * kseq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
 * In a NUMA environment we'd want an idle bitmap per group and a two tiered
 * load balancer.
 */
struct kseq_group {
	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
	int	ksg_load;		/* Total load of this group. */
	int	ksg_transferable;	/* Transferable load of this group. */
	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
};
#endif

/*
 * One kse queue per processor.
 */
#ifdef SMP
static cpumask_t kseq_idle;
static int ksg_maxid;
static struct kseq	kseq_cpu[MAXCPU];
static struct kseq_group kseq_groups[MAXCPU];
static int bal_tick;
static int gbal_tick;
static int balance_groups;

#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#define	KSEQ_ID(x)	((x) - kseq_cpu)
#define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
#else	/* !SMP */
static struct kseq	kseq_cpu;

#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

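/*
 * Usage sketch (illustrative): on the CPU with cpuid 2, KSEQ_SELF() is
 * &kseq_cpu[2], and KSEQ_ID(&kseq_cpu[2]) recovers 2 by pointer
 * arithmetic on the array.  On UP kernels every macro collapses to the
 * single kseq_cpu instance.
 */
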
static void slot_fill(struct ksegrp *);
static struct kse *sched_choose(void);		/* XXX Should be thread * */
static void sched_slice(struct kse *);
static void sched_priority(struct ksegrp *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct ksegrp *);
static void sched_interact_update(struct ksegrp *);
static void sched_interact_fork(struct ksegrp *);
static void sched_pctcpu_update(struct kse *);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *);
static void kseq_setup(struct kseq *);
static void kseq_load_add(struct kseq *, struct kse *);
static void kseq_load_rem(struct kseq *, struct kse *);
static __inline void kseq_runq_add(struct kseq *, struct kse *, int);
static __inline void kseq_runq_rem(struct kseq *, struct kse *);
static void kseq_nice_add(struct kseq *, int);
static void kseq_nice_rem(struct kseq *, int);
void kseq_print(int cpu);
#ifdef SMP
static int kseq_transfer(struct kseq *, struct kse *, int);
static struct kse *runq_steal(struct runq *);
static void sched_balance(void);
static void sched_balance_groups(void);
static void sched_balance_group(struct kseq_group *);
static void sched_balance_pair(struct kseq *, struct kseq *);
static void kseq_move(struct kseq *, int);
static int kseq_idled(struct kseq *);
static void kseq_notify(struct kse *, int);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *, int);
#define	KSE_CAN_MIGRATE(ke)						\
    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload:           %d\n", kseq->ksq_load);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
#ifdef SMP
	printf("\tload transferable: %d\n", kseq->ksq_transferable);
#endif
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < SCHED_PRI_NRESV; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static __inline void
kseq_runq_add(struct kseq *kseq, struct kse *ke, int flags)
{
#ifdef SMP
	if (KSE_CAN_MIGRATE(ke)) {
		kseq->ksq_transferable++;
		kseq->ksq_group->ksg_transferable++;
		ke->ke_flags |= KEF_XFERABLE;
	}
#endif
	runq_add(ke->ke_runq, ke, flags);
}

static __inline void
kseq_runq_rem(struct kseq *kseq, struct kse *ke)
{
#ifdef SMP
	if (ke->ke_flags & KEF_XFERABLE) {
		kseq->ksq_transferable--;
		kseq->ksq_group->ksg_transferable--;
		ke->ke_flags &= ~KEF_XFERABLE;
	}
#endif
	runq_remove(ke->ke_runq, ke);
}

static void
kseq_load_add(struct kseq *kseq, struct kse *ke)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare++;
	kseq->ksq_load++;
	CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load++;
#else
		kseq->ksq_sysload++;
#endif
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_proc->p_nice);
}

static void
kseq_load_rem(struct kseq *kseq, struct kse *ke)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if (class == PRI_TIMESHARE)
		kseq->ksq_load_timeshare--;
	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		kseq->ksq_group->ksg_load--;
#else
		kseq->ksq_sysload--;
#endif
	kseq->ksq_load--;
	CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_load_timeshare == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

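/*
 * Example of the normalization above (illustrative): with PRIO_MIN = -20
 * and SCHED_PRI_NHALF = 20, nice -20 maps to ksq_nice[0] and nice +20 to
 * ksq_nice[40], so the SCHED_PRI_NRESV (41) buckets cover every nice
 * value without negative indexing.
 */
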
#ifdef SMP
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
static void
sched_balance(void)
{
	struct kseq_group *high;
	struct kseq_group *low;
	struct kseq_group *ksg;
	int cnt;
	int i;

	bal_tick = ticks + (random() % (hz * 2));
	if (smp_started == 0)
		return;
	low = high = NULL;
	i = random() % (ksg_maxid + 1);
	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
		ksg = KSEQ_GROUP(i);
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || ksg->ksg_load > high->ksg_load)
		    && ksg->ksg_transferable)
			high = ksg;
		if (low == NULL || ksg->ksg_load < low->ksg_load)
			low = ksg;
		if (++i > ksg_maxid)
			i = 0;
	}
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->ksg_members),
		    LIST_FIRST(&low->ksg_members));
}

static void
sched_balance_groups(void)
{
	int i;

	gbal_tick = ticks + (random() % (hz * 2));
	mtx_assert(&sched_lock, MA_OWNED);
	if (smp_started)
		for (i = 0; i <= ksg_maxid; i++)
			sched_balance_group(KSEQ_GROUP(i));
}

static void
sched_balance_group(struct kseq_group *ksg)
{
	struct kseq *kseq;
	struct kseq *high;
	struct kseq *low;
	int load;

	if (ksg->ksg_transferable == 0)
		return;
	low = NULL;
	high = NULL;
	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
		load = kseq->ksq_load;
		if (high == NULL || load > high->ksq_load)
			high = kseq;
		if (low == NULL || load < low->ksq_load)
			low = kseq;
	}
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
}

static void
sched_balance_pair(struct kseq *high, struct kseq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int move;
	int diff;
	int i;

	/*
	 * If we're transferring within a group we have to use this specific
	 * kseq's transferable count, otherwise we can steal from other members
	 * of the group.
	 */
	if (high->ksq_group == low->ksq_group) {
		transferable = high->ksq_transferable;
		high_load = high->ksq_load;
		low_load = low->ksq_load;
	} else {
		transferable = high->ksq_group->ksg_transferable;
		high_load = high->ksq_group->ksg_load;
		low_load = low->ksq_group->ksg_load;
	}
	if (transferable == 0)
		return;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * kses we actually have to give up (transferable).
	 */
	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;
	move = min(move, transferable);
	for (i = 0; i < move; i++)
		kseq_move(high, KSEQ_ID(low));
	return;
}

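/*
 * Worked example (illustrative): high_load = 7 and low_load = 2 give
 * diff = 5 and move = 3 (diff / 2 rounded up), clamped by the
 * transferable count before kseq_move() is called that many times.
 */
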
static void
kseq_move(struct kseq *from, int cpu)
{
	struct kseq *kseq;
	struct kseq *to;
	struct kse *ke;

	kseq = from;
	to = KSEQ_CPU(cpu);
	ke = kseq_steal(kseq, 1);
	if (ke == NULL) {
		struct kseq_group *ksg;

		ksg = kseq->ksq_group;
		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
			if (kseq == from || kseq->ksq_transferable == 0)
				continue;
			ke = kseq_steal(kseq, 1);
			break;
		}
		if (ke == NULL)
			panic("kseq_move: No KSEs available with a "
			    "transferable count of %d\n",
			    ksg->ksg_transferable);
	}
	if (kseq == to)
		return;
	ke->ke_state = KES_THREAD;
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
	kseq_notify(ke, cpu);
}

static int
kseq_idled(struct kseq *kseq)
{
	struct kseq_group *ksg;
	struct kseq *steal;
	struct kse *ke;

	ksg = kseq->ksq_group;
	/*
	 * If we're in a cpu group, try to steal kses from another cpu in
	 * the group before idling.
	 */
	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
			if (steal == kseq || steal->ksq_transferable == 0)
				continue;
			ke = kseq_steal(steal, 0);
			if (ke == NULL)
				continue;
			ke->ke_state = KES_THREAD;
			kseq_runq_rem(steal, ke);
			kseq_load_rem(steal, ke);
			ke->ke_cpu = PCPU_GET(cpuid);
			ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
			sched_add(ke->ke_thread, SRQ_YIELDING);
			return (0);
		}
	}
	/*
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a KSE bounces
	 * back and forth between two idle cores on separate physical CPUs.
	 */
	ksg->ksg_idlemask |= PCPU_GET(cpumask);
	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
		return (1);
	atomic_set_int(&kseq_idle, ksg->ksg_mask);
	return (1);
}

static void
kseq_assign(struct kseq *kseq)
{
	struct kse *nke;
	struct kse *ke;

	do {
		*(volatile struct kse **)&ke = kseq->ksq_assigned;
	} while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned,
		(uintptr_t)ke, (uintptr_t)NULL));
	for (; ke != NULL; ke = nke) {
		nke = ke->ke_assign;
		kseq->ksq_group->ksg_load--;
		kseq->ksq_load--;
		ke->ke_flags &= ~KEF_ASSIGNED;
		if (ke->ke_flags & KEF_REMOVED) {
			ke->ke_flags &= ~KEF_REMOVED;
			continue;
		}
		ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
		sched_add(ke->ke_thread, SRQ_YIELDING);
	}
}

static void
kseq_notify(struct kse *ke, int cpu)
{
	struct kseq *kseq;
	struct thread *td;
	struct pcpu *pcpu;
	int class;
	int prio;

	kseq = KSEQ_CPU(cpu);
	/* XXX */
	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (kseq_idle & kseq->ksq_group->ksg_mask))
		atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
	kseq->ksq_group->ksg_load++;
	kseq->ksq_load++;
	ke->ke_cpu = cpu;
	ke->ke_flags |= KEF_ASSIGNED;
	prio = ke->ke_thread->td_priority;

	/*
	 * Place a KSE on another cpu's queue and force a resched.
	 */
	do {
		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
	} while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned,
		(uintptr_t)ke->ke_assign, (uintptr_t)ke));
	/*
	 * Without sched_lock we could lose a race where we set NEEDRESCHED
	 * on a thread that is switched out before the IPI is delivered.  This
	 * would lead us to miss the resched.  This will be a problem once
	 * sched_lock is pushed down.
	 */
	pcpu = pcpu_find(cpu);
	td = pcpu->pc_curthread;
	if (ke->ke_thread->td_priority < td->td_priority ||
	    td == pcpu->pc_idlethread) {
		td->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(1 << cpu, IPI_AST);
	}
}

static struct kse *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct kse *ke;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ke, rqh, ke_procq) {
				if (KSE_CAN_MIGRATE(ke))
					return (ke);
			}
		}
	}
	return (NULL);
}

static struct kse *
kseq_steal(struct kseq *kseq, int stealidle)
{
	struct kse *ke;

	/*
	 * Steal from next first to try to get a non-interactive task that
	 * may not have run for a while.
	 */
	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
		return (ke);
	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
		return (ke);
	if (stealidle)
		return (runq_steal(&kseq->ksq_idle));
	return (NULL);
}

int
kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
{
	struct kseq_group *nksg;
	struct kseq_group *ksg;
	struct kseq *old;
	int cpu;
	int idx;

	if (smp_started == 0)
		return (0);
	cpu = 0;
	/*
	 * If our load exceeds a certain threshold we should attempt to
	 * reassign this thread.  The first candidate is the cpu that
	 * originally ran the thread.  If it is idle, assign it there,
	 * otherwise, pick an idle cpu.
	 *
	 * The threshold at which we start to reassign kses has a large impact
	 * on the overall performance of the system.  Tuned too high and
	 * some CPUs may idle.  Too low and there will be excess migration
	 * and context switches.
	 */
	old = KSEQ_CPU(ke->ke_cpu);
	nksg = old->ksq_group;
	ksg = kseq->ksq_group;
	if (kseq_idle) {
		if (kseq_idle & nksg->ksg_mask) {
			cpu = ffs(nksg->ksg_idlemask);
			if (cpu) {
				CTR2(KTR_SCHED,
				    "kseq_transfer: %p found old cpu %X "
				    "in idlemask.", ke, cpu);
				goto migrate;
			}
		}
		/*
		 * Multiple cpus could find this bit simultaneously
		 * but the race shouldn't be terrible.
		 */
		cpu = ffs(kseq_idle);
		if (cpu) {
			CTR2(KTR_SCHED, "kseq_transfer: %p found %X "
			    "in idlemask.", ke, cpu);
			goto migrate;
		}
	}
	idx = 0;
#if 0
	if (old->ksq_load < kseq->ksq_load) {
		cpu = ke->ke_cpu + 1;
		CTR2(KTR_SCHED, "kseq_transfer: %p old cpu %X "
		    "load less than ours.", ke, cpu);
		goto migrate;
	}
	/*
	 * No new CPU was found, look for one with less load.
	 */
	for (idx = 0; idx <= ksg_maxid; idx++) {
		nksg = KSEQ_GROUP(idx);
		if (nksg->ksg_load /*+ (nksg->ksg_cpus  * 2)*/ < ksg->ksg_load) {
			cpu = ffs(nksg->ksg_cpumask);
			CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X load less "
			    "than ours.", ke, cpu);
			goto migrate;
		}
	}
#endif
	/*
	 * If another cpu in this group has idled, assign a thread over
	 * to them after checking to see if there are idled groups.
	 */
	if (ksg->ksg_idlemask) {
		cpu = ffs(ksg->ksg_idlemask);
		if (cpu) {
			CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X idle in "
			    "group.", ke, cpu);
			goto migrate;
		}
	}
	return (0);
migrate:
	/*
	 * Now that we've found an idle CPU, migrate the thread.
	 */
	cpu--;
	ke->ke_runq = NULL;
	kseq_notify(ke, cpu);

	return (1);
}

#endif	/* SMP */

/*
 * Pick the highest priority task we have and return it.
 */

static struct kse *
kseq_choose(struct kseq *kseq)
{
	struct runq *swap;
	struct kse *ke;
	int nice;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		nice = ke->ke_proc->p_nice + (0 - kseq->ksq_nicemin);
		if (ke->ke_slice == 0 || (nice > SCHED_SLICE_NTHRESH &&
		    ke->ke_proc->p_nice != 0)) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke, 0);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

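/*
 * Note on the curr/next swap above (descriptive): expiring kses are
 * requeued on ksq_next (see sched_clock()), and when ksq_curr drains the
 * two runqs simply trade places.  Two CPU-bound kses with equal slices
 * therefore alternate in O(1) without any requeue scan.
 */
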
static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);
	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];
	kseq->ksq_load = 0;
	kseq->ksq_load_timeshare = 0;
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	balance_groups = 0;
	/*
	 * Initialize the kseqs.
	 */
	for (i = 0; i < MAXCPU; i++) {
		struct kseq *ksq;

		ksq = &kseq_cpu[i];
		ksq->ksq_assigned = NULL;
		kseq_setup(&kseq_cpu[i]);
	}
	if (smp_topology == NULL) {
		struct kseq_group *ksg;
		struct kseq *ksq;
		int cpus;

		for (cpus = 0, i = 0; i < MAXCPU; i++) {
			if (CPU_ABSENT(i))
				continue;
			ksq = &kseq_cpu[cpus];
			ksg = &kseq_groups[cpus];
			/*
			 * Setup a kseq group with one member.
			 */
			ksq->ksq_transferable = 0;
			ksq->ksq_group = ksg;
			ksg->ksg_cpus = 1;
			ksg->ksg_idlemask = 0;
			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			LIST_INIT(&ksg->ksg_members);
			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
			cpus++;
		}
		ksg_maxid = cpus - 1;
	} else {
		struct kseq_group *ksg;
		struct cpu_group *cg;
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			cg = &smp_topology->ct_group[i];
			ksg = &kseq_groups[i];
			/*
			 * Initialize the group.
			 */
			ksg->ksg_idlemask = 0;
			ksg->ksg_load = 0;
			ksg->ksg_transferable = 0;
			ksg->ksg_cpus = cg->cg_count;
			ksg->ksg_cpumask = cg->cg_mask;
			LIST_INIT(&ksg->ksg_members);
			/*
			 * Find all of the group members and add them.
			 */
			for (j = 0; j < MAXCPU; j++) {
				if ((cg->cg_mask & (1 << j)) != 0) {
					if (ksg->ksg_mask == 0)
						ksg->ksg_mask = 1 << j;
					kseq_cpu[j].ksq_transferable = 0;
					kseq_cpu[j].ksq_group = ksg;
					LIST_INSERT_HEAD(&ksg->ksg_members,
					    &kseq_cpu[j], ksq_siblings);
				}
			}
			if (ksg->ksg_cpus > 1)
				balance_groups = 1;
		}
		ksg_maxid = smp_topology->ct_count - 1;
	}
	/*
	 * Stagger the group and global load balancer so they do not
	 * interfere with each other.
	 */
	bal_tick = ticks + hz;
	if (balance_groups)
		gbal_tick = ticks + (hz / 2);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_load_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_proc->p_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	if (ke->ke_thread->td_flags & TDF_BORROWING) {
		ke->ke_slice = SCHED_SLICE_MIN;
		return;
	}

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the interactive slice (currently
	 * the maximum; see SCHED_SLICE_INTERACTIVE).  Their interactivity
	 * score is recomputed as they run, so one that abuses its
	 * advantage is demoted once its score crosses the threshold.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse's distance from the least nice ksegrp.
	 *
	 * If the kse is outside of the window it will get no slice
	 * and will be reevaluated each time it is selected on the
	 * run queue.  The exception to this is nice 0 ksegs when
	 * a nice -20 is running.  They are always granted a minimum
	 * slice.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_load_timeshare == 0 ||
		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else if (kg->kg_proc->p_nice == 0)
			ke->ke_slice = SCHED_SLICE_MIN;
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_INTERACTIVE;

	return;
}

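/*
 * Worked example (illustrative, using the compiled-in defaults slice_min
 * = 1 and slice_max = 10): if ksq_nicemin is -5, a non-interactive kseg
 * at nice -5 scales to 0 and gets the full 10 tick slice; nice +5 scales
 * to 10 and gets SCHED_SLICE_NICE(10) = 5 ticks; nice +15 scales to 20,
 * past SCHED_SLICE_NTHRESH (19), so it gets a 0 slice and is reevaluated
 * by kseq_choose() each time it comes up.
 */
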
/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
static void
sched_interact_update(struct ksegrp *kg)
{
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		kg->kg_runtime /= 2;
		kg->kg_slptime /= 2;
		return;
	}
	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
}

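/*
 * Example of the clamp above (illustrative, hz = 1000, so
 * SCHED_SLP_RUN_MAX is 5120000 after scaling): a kseg at kg_runtime +
 * kg_slptime = 5.5M is within 6/5 of the limit and is rescaled by 4/5 to
 * 4.4M; one at 8M would overshoot the 4/5 rescale, so both components
 * are halved instead, landing the sum back at 4M.
 */
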
static void
sched_interact_fork(struct ksegrp *kg)
{
	int ratio;
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		kg->kg_runtime /= ratio;
		kg->kg_slptime /= ratio;
	}
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} else if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);
}

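/*
 * Worked examples (illustrative): a kseg that slept three times as long
 * as it ran scores roughly 50 * runtime / slptime = 16, comfortably
 * interactive versus SCHED_INTERACT_THRESH (30).  The inverse ratio,
 * running three times as long as it sleeps, lands at roughly 100 - 16 =
 * 84, deep in the non-interactive half.
 */
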
/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	ksegrp0.kg_sched = &kg_sched0;
	thread0.td_sched = &kse0;
	kse0.ke_thread = &thread0;
	kse0.ke_state = KES_THREAD;
	kg_sched0.skg_concurrency = 1;
	kg_sched0.skg_avail_opennings = 0; /* we are already running */
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

static void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
			    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

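/*
 * Example of the fixed-point step above (illustrative): with hz = 1000,
 * SCHED_CPU_TICKS is 10000.  A kse that ran 400 of the last 8000 ticks
 * is rescaled as ((400 << 10) / 8000) * 10000 >> 10, roughly 500 ticks
 * per 10 second window; the << 10 keeps the intermediate divide from
 * truncating the quotient to 0.
 */
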
void
sched_thread_priority(struct thread *td, u_char prio)
{
	struct kse *ke;

	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
	    curthread->td_proc->p_comm);
	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority == prio)
		return;
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (prio < td->td_priority && ke->ke_runq != NULL &&
		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
			runq_add(ke->ke_runq, ke, 0);
		}
		/*
		 * Hold this kse on this cpu so that sched_prio() doesn't
		 * cause excessive migration.  We only want migration to
		 * happen as the result of a wakeup.
		 */
		ke->ke_flags |= KEF_HOLD;
		adjustrunqueue(td, prio);
		ke->ke_flags &= ~KEF_HOLD;
	} else
		td->td_priority = prio;
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_thread_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_ksegrp->kg_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_thread_priority(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't
	 * ever lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_thread_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct kseq *ksq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;
	ksq = KSEQ_SELF();

	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;

	/*
	 * If the KSE has been assigned it may be in the process of switching
	 * to the new cpu.  This is the case in sched_bind().
	 */
	if (td == PCPU_GET(idlethread)) {
		TD_SET_CAN_RUN(td);
	} else if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
		/* We are ending our run so make our slot available again */
		SLOT_RELEASE(td->td_ksegrp);
		kseq_load_rem(ksq, ke);
		if (TD_IS_RUNNING(td)) {
			/*
			 * Don't allow the thread to migrate
			 * from a preemption.
			 */
			ke->ke_flags |= KEF_HOLD;
			setrunqueue(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
			ke->ke_flags &= ~KEF_HOLD;
		} else if ((td->td_proc->p_flag & P_HADTHREADS) &&
		    (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp))
			/*
			 * We will not be on the run queue.
			 * So we must be sleeping or similar.
			 * Don't use the slot if we will need it
			 * for newtd.
			 */
			slot_fill(td->td_ksegrp);
	}
	if (newtd != NULL) {
		/*
		 * If we bring in a thread account for it as if it had been
		 * added to the run queue and then chosen.
		 */
		newtd->td_kse->ke_flags |= KEF_DIDRUN;
		newtd->td_kse->ke_runq = ksq->ksq_curr;
		TD_SET_RUNNING(newtd);
		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
		/*
		 * XXX When we preempt, we've already consumed a slot because
		 * we got here through sched_add().  However, newtd can come
		 * from thread_switchout() which can't SLOT_USE() because
		 * the SLOT code is scheduler dependent.  We must use the
		 * slot here otherwise.
		 */
		if ((flags & SW_PREEMPT) == 0)
			SLOT_USE(newtd->td_ksegrp);
	} else
		newtd = choosethread();
	if (td != newtd) {
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		cpu_switch(td, newtd);
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct proc *p, int nice)
{
	struct ksegrp *kg;
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		if (kg->kg_pri_class == PRI_TIMESHARE) {
			FOREACH_THREAD_IN_GROUP(kg, td) {
				ke = td->td_kse;
				if (ke->ke_runq == NULL)
					continue;
				kseq = KSEQ_CPU(ke->ke_cpu);
				kseq_nice_rem(kseq, p->p_nice);
				kseq_nice_add(kseq, nice);
			}
		}
	}
	p->p_nice = nice;
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		sched_priority(kg);
		FOREACH_THREAD_IN_GROUP(kg, td)
			td->td_flags |= TDF_NEEDRESCHED;
	}
}

void
sched_sleep(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = (ticks - td->td_slptime) << 10;
		if (hzticks >= SCHED_SLP_RUN_MAX) {
			kg->kg_slptime = SCHED_SLP_RUN_MAX;
			kg->kg_runtime = 1;
		} else {
			kg->kg_slptime += hzticks;
			sched_interact_update(kg);
		}
		sched_priority(kg);
		sched_slice(td->td_kse);
		td->td_slptime = 0;
	}
	setrunqueue(td, SRQ_BORING);
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct thread *td, struct thread *childtd)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(td, childtd->td_ksegrp);
	sched_fork_thread(td, childtd);
}

void
sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
	struct ksegrp *kg = td->td_ksegrp;
	mtx_assert(&sched_lock, MA_OWNED);

	child->kg_slptime = kg->kg_slptime;
	child->kg_runtime = kg->kg_runtime;
	child->kg_user_pri = kg->kg_user_pri;
	sched_interact_fork(child);
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
	struct kse *ke;
	struct kse *ke2;

	sched_newthread(child);
	ke = td->td_kse;
	ke2 = child->td_kse;
	ke2->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	ke2->ke_cpu = ke->ke_cpu;
	ke2->ke_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	ke2->ke_ticks = ke->ke_ticks;
	ke2->ke_ltick = ke->ke_ltick;
	ke2->ke_ftick = ke->ke_ftick;
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;
	struct thread *td;
	int nclass;
	int oclass;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	nclass = PRI_BASE(class);
	oclass = PRI_BASE(kg->kg_pri_class);
	FOREACH_THREAD_IN_GROUP(kg, td) {
		ke = td->td_kse;
		if ((ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD) || ke->ke_runq == NULL)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

#ifdef SMP
		/*
		 * On SMP if we're on the RUNQ we must adjust the transferable
		 * count because we could be changing to or from an interrupt
		 * class.
		 */
		if (ke->ke_state == KES_ONRUNQ) {
			if (KSE_CAN_MIGRATE(ke)) {
				kseq->ksq_transferable--;
				kseq->ksq_group->ksg_transferable--;
			}
			if (KSE_CAN_MIGRATE(ke)) {
				kseq->ksq_transferable++;
				kseq->ksq_group->ksg_transferable++;
			}
		}
#endif
		if (oclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare--;
			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
		}
		if (nclass == PRI_TIMESHARE) {
			kseq->ksq_load_timeshare++;
			kseq_nice_add(kseq, kg->kg_proc->p_nice);
		}
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct thread *childtd)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd);
	sched_exit_thread(NULL, childtd);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
{
	/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
	kg->kg_runtime += td->td_ksegrp->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *childtd)
{
	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
	    childtd, childtd->td_proc->p_comm, childtd->td_priority);
	kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
}

void
sched_clock(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
	if (ticks >= bal_tick)
		sched_balance();
	if (ticks >= gbal_tick && balance_groups)
		sched_balance_groups();
	/*
	 * We could have been assigned a non real-time thread without an
	 * IPI.
	 */
	if (kseq->ksq_assigned)
		kseq_assign(kseq);	/* Potentially sets NEEDRESCHED */
#endif
	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	ke = td->td_kse;
	kg = ke->ke_ksegrp;

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;
	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute
	 * our interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	if (--ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_load_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_load_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	kseq = KSEQ_SELF();
#ifdef SMP
	if (kseq->ksq_assigned) {
		mtx_lock_spin(&sched_lock);
		kseq_assign(kseq);
		mtx_unlock_spin(&sched_lock);
	}
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (kseq->ksq_load > 0)
			goto out;
	} else
		if (kseq->ksq_load - 1 > 0)
			goto out;
	load = 0;
out:
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	kg = td->td_ksegrp;
	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		td->td_base_pri = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
restart:
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	ke = kseq_choose(kseq);
	if (ke) {
#ifdef SMP
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
			if (kseq_idled(kseq) == 0)
				goto restart;
#endif
		kseq_runq_rem(kseq, ke);
		ke->ke_state = KES_THREAD;
		return (ke);
	}
#ifdef SMP
	if (kseq_idled(kseq) == 0)
		goto restart;
#endif
	return (NULL);
}

void
sched_add(struct thread *td, int flags)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;
	int preemptive;
	int canmigrate;
	int class;

	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	kg = td->td_ksegrp;
	canmigrate = 1;
	preemptive = !(flags & SRQ_YIELDING);
	class = PRI_BASE(kg->kg_pri_class);
	kseq = KSEQ_SELF();
	if ((ke->ke_flags & KEF_INTERNAL) == 0)
		SLOT_USE(td->td_ksegrp);
	ke->ke_flags &= ~KEF_INTERNAL;
#ifdef SMP
	if (ke->ke_flags & KEF_ASSIGNED) {
		if (ke->ke_flags & KEF_REMOVED)
			ke->ke_flags &= ~KEF_REMOVED;
		return;
	}
	canmigrate = KSE_CAN_MIGRATE(ke);
#endif
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));
	switch (class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		if (canmigrate)
			ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		/*
		 * This is for priority propagation.
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	/*
	 * Don't migrate running threads here.  Force the long term balancer
	 * to do it.
	 */
	if (ke->ke_flags & KEF_HOLD) {
		ke->ke_flags &= ~KEF_HOLD;
		canmigrate = 0;
	}
	/*
	 * If this thread is pinned or bound, notify the target cpu.
	 */
	if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid)) {
		ke->ke_runq = NULL;
		kseq_notify(ke, ke->ke_cpu);
		return;
	}
	/*
	 * If we had been idle, clear our bit in the group and potentially
	 * the global bitmap.  If not, see if we should transfer this thread.
	 */
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
		/*
		 * Check to see if our group is unidling, and if so, remove it
		 * from the global idle mask.
		 */
		if (kseq->ksq_group->ksg_idlemask ==
		    kseq->ksq_group->ksg_cpumask)
			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
		/*
		 * Now remove ourselves from the group specific idle mask.
		 */
		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
	} else if (canmigrate && kseq->ksq_load > 1 && class != PRI_ITHD)
		if (kseq_transfer(kseq, ke, class))
			return;
	ke->ke_cpu = PCPU_GET(cpuid);
#endif
	if (td->td_priority < curthread->td_priority &&
	    ke->ke_runq == kseq->ksq_curr)
		curthread->td_flags |= TDF_NEEDRESCHED;
	if (preemptive && maybe_preempt(td))
		return;
	ke->ke_state = KES_ONRUNQ;

	kseq_runq_add(kseq, ke, flags);
	kseq_load_add(kseq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kseq *kseq;
	struct kse *ke;

	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	SLOT_RELEASE(td->td_ksegrp);
	if (ke->ke_flags & KEF_ASSIGNED) {
		ke->ke_flags |= KEF_REMOVED;
		return;
	}
	KASSERT((ke->ke_state == KES_ONRUNQ),
	    ("sched_rem: KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	kseq = KSEQ_CPU(ke->ke_cpu);
	kseq_runq_rem(kseq, ke);
	kseq_load_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct kse *ke;

	pctcpu = 0;
	ke = td->td_kse;
	if (ke == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
		    ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/* How many rticks per second? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

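/*
 * Illustrative numbers for the calculation above: with realstathz = 128
 * and a kse that accumulated 640 ticks over the 10 second window, rtick
 * is 640 / 10 = 64 and pctcpu becomes (FSCALE * ((FSCALE * 64) / 128))
 * >> FSHIFT, i.e. FSCALE / 2, which ps(1) reports as 50% cpu.
 */
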
void
sched_bind(struct thread *td, int cpu)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	ke->ke_flags |= KEF_BOUND;
#ifdef SMP
	if (PCPU_GET(cpuid) == cpu)
		return;
	/* sched_rem without the runq_remove */
	ke->ke_state = KES_THREAD;
	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
	kseq_notify(ke, cpu);
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_kse->ke_flags &= ~KEF_BOUND;
}

int
sched_is_bound(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	return (td->td_kse->ke_flags & KEF_BOUND);
}

int
sched_load(void)
{
#ifdef SMP
	int total;
	int i;

	total = 0;
	for (i = 0; i <= ksg_maxid; i++)
		total += KSEQ_GROUP(i)->ksg_load;
	return (total);
#else
	return (KSEQ_SELF()->ksq_sysload);
#endif
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}
#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"