/*-
 * Copyright (c) 2002-2006, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 165627 2006-12-29 12:55:32Z jeff $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static void sched_initticks(void *dummy);
SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");

SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
    "Scheduler name");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1 << 10;

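/*
 * Note on units (illustrative): tickincr converts stathz ticks into hz
 * ticks expressed in 10-bit fixed point.  sched_initticks() computes
 * tickincr = (hz << 10) / realstathz; e.g. assuming hz = 1000 and
 * stathz = 128 that is (1000 << 10) / 128 = 8000, i.e. each statclock
 * tick charges about 7.8 hz-ticks of run time to the thread.
 */
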
/*
 * Thread scheduler specific section.
 */
struct td_sched {
	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
	int		ts_flags;	/* (j) TSF_* flags. */
	struct thread	*ts_thread;	/* (*) Active associated thread. */
	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
	u_char		ts_rqindex;	/* (j) Run queue index. */
	enum {
		TSS_THREAD = 0x0,	/* slaved to thread state */
		TSS_ONRUNQ
	} ts_state;			/* (j) thread sched specific status. */
	int		ts_slptime;
	int		ts_slice;
	struct runq	*ts_runq;
	u_char		ts_cpu;		/* CPU that we have affinity for. */
	/* The following variables are only used for pctcpu calculation */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */

	/* originally from kg_sched */
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
};
#define	ts_assign		ts_procq.tqe_next
/* flags kept in ts_flags */
#define	TSF_ASSIGNED	0x0001		/* Thread is being migrated. */
#define	TSF_BOUND	0x0002		/* Thread can not migrate. */
#define	TSF_XFERABLE	0x0004		/* Thread was added as transferable. */
#define	TSF_HOLD	0x0008		/* Thread is temporarily bound. */
#define	TSF_REMOVED	0x0010		/* Thread was removed while ASSIGNED */
#define	TSF_INTERNAL	0x0020		/* Thread added due to migration. */
#define	TSF_PREEMPTED	0x0040		/* Thread was preempted */
#define	TSF_DIDRUN	0x2000		/* Thread actually ran. */
#define	TSF_EXIT	0x4000		/* Thread is being killed. */

static struct td_sched td_sched0;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower (better) priorities to threads that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

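/*
 * Worked example (assuming the stock priority layout, where the
 * timeshare range PRI_MIN_TIMESHARE..PRI_MAX_TIMESHARE spans 64
 * priorities): a thread with an interactivity score of 50 maps to
 * SCHED_PRI_INTERACT(50) = 50 * 64 / SCHED_INTERACT_MAX(100) = 32
 * priority steps above SCHED_PRI_BASE, before nice is added.
 */
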
/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

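/*
 * The slp/run accumulators are kept in the same 10-bit fixed point as
 * tickincr, so SCHED_SLP_RUN_MAX is five seconds worth of hz ticks;
 * e.g. assuming hz = 1000 it is (1000 * 5) << 10 = 5,120,000 units.
 */
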
/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:  Determine the amount of slice granted to a scaled nice.
 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_INTERACTIVE		(slice_max)
#define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))

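/*
 * Worked example (assuming realstathz = 128, so sched_initticks() sets
 * slice_min = 1 and slice_max = 18): SCHED_SLICE_RANGE is 18 and
 * SCHED_SLICE_NTHRESH is 19, so a td_sched whose nice value sits 10
 * points above the least nice thread on its queue receives
 * SCHED_SLICE_NICE(10) = 18 - (10 * 18) / 19 = 9 ticks.
 */
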
/*
 * This macro determines whether or not the thread belongs on the current or
 * next run queue.
 */
#define	SCHED_INTERACTIVE(td)						\
    (sched_interact_score(td) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(td, ts)						\
    ((ts->ts_thread->td_flags & TDF_BORROWING) ||			\
     (ts->ts_flags & TSF_PREEMPTED) || SCHED_INTERACTIVE(td))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * tdq - per processor runqs and statistics.
 */
struct tdq {
	struct runq	tdq_idle;		/* Queue of IDLE threads. */
	struct runq	tdq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*tdq_next;		/* Next timeshare queue. */
	struct runq	*tdq_curr;		/* Current queue. */
	int		tdq_load_timeshare;	/* Load for timeshare. */
	int		tdq_load;		/* Aggregate load. */
	short		tdq_nice[SCHED_PRI_NRESV]; /* Threads in each nice bin. */
	short		tdq_nicemin;		/* Least nice. */
#ifdef SMP
	int		tdq_transferable;
	LIST_ENTRY(tdq)	tdq_siblings;		/* Next in tdq group. */
	struct tdq_group *tdq_group;		/* Our processor group. */
	volatile struct td_sched *tdq_assigned;	/* assigned by another CPU. */
#else
	int		tdq_sysload;		/* For loadavg, !ITHD load. */
#endif
};

#ifdef SMP
/*
 * tdq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not
 * NUMA.  In a NUMA environment we'd want an idle bitmap per group and a two
 * tiered load balancer.
 */
struct tdq_group {
	int	tdg_cpus;		/* Count of CPUs in this tdq group. */
	cpumask_t tdg_cpumask;		/* Mask of cpus in this group. */
	cpumask_t tdg_idlemask;		/* Idle cpus in this group. */
	cpumask_t tdg_mask;		/* Bit mask for first cpu. */
	int	tdg_load;		/* Total load of this group. */
	int	tdg_transferable;	/* Transferable load of this group. */
	LIST_HEAD(, tdq) tdg_members;	/* Linked list of all members. */
};
#endif

/*
 * One thread queue per processor.
 */
#ifdef SMP
static cpumask_t tdq_idle;
static int tdg_maxid;
static struct tdq	tdq_cpu[MAXCPU];
static struct tdq_group tdq_groups[MAXCPU];
static int bal_tick;
static int gbal_tick;
static int balance_groups;

#define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
#define	TDQ_CPU(x)	(&tdq_cpu[(x)])
#define	TDQ_ID(x)	((x) - tdq_cpu)
#define	TDQ_GROUP(x)	(&tdq_groups[(x)])
#else	/* !SMP */
static struct tdq	tdq_cpu;

#define	TDQ_SELF()	(&tdq_cpu)
#define	TDQ_CPU(x)	(&tdq_cpu)
#endif

static struct td_sched *sched_choose(void);	/* XXX Should be thread * */
static void sched_slice(struct td_sched *);
static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct td_sched *);

/* Operations on per processor queues */
static struct td_sched * tdq_choose(struct tdq *);
static void tdq_setup(struct tdq *);
static void tdq_load_add(struct tdq *, struct td_sched *);
static void tdq_load_rem(struct tdq *, struct td_sched *);
static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
static void tdq_nice_add(struct tdq *, int);
static void tdq_nice_rem(struct tdq *, int);
void tdq_print(int cpu);
#ifdef SMP
static int tdq_transfer(struct tdq *, struct td_sched *, int);
static struct td_sched *runq_steal(struct runq *);
static void sched_balance(void);
static void sched_balance_groups(void);
static void sched_balance_group(struct tdq_group *);
static void sched_balance_pair(struct tdq *, struct tdq *);
static void tdq_move(struct tdq *, int);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct td_sched *, int);
static void tdq_assign(struct tdq *);
static struct td_sched *tdq_steal(struct tdq *, int);
#define	THREAD_CAN_MIGRATE(ts)						\
    ((ts)->ts_thread->td_pinned == 0 && ((ts)->ts_flags & TSF_BOUND) == 0)
#endif

void
tdq_print(int cpu)
{
	struct tdq *tdq;
	int i;

	tdq = TDQ_CPU(cpu);

	printf("tdq:\n");
	printf("\tload:           %d\n", tdq->tdq_load);
	printf("\tload TIMESHARE: %d\n", tdq->tdq_load_timeshare);
#ifdef SMP
	printf("\tload transferable: %d\n", tdq->tdq_transferable);
#endif
	printf("\tnicemin:\t%d\n", tdq->tdq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < SCHED_PRI_NRESV; i++)
		if (tdq->tdq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, tdq->tdq_nice[i]);
}

static __inline void
tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
{
#ifdef SMP
	if (THREAD_CAN_MIGRATE(ts)) {
		tdq->tdq_transferable++;
		tdq->tdq_group->tdg_transferable++;
		ts->ts_flags |= TSF_XFERABLE;
	}
#endif
	if (ts->ts_flags & TSF_PREEMPTED)
		flags |= SRQ_PREEMPTED;
	runq_add(ts->ts_runq, ts, flags);
}

static __inline void
tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
{
#ifdef SMP
	if (ts->ts_flags & TSF_XFERABLE) {
		tdq->tdq_transferable--;
		tdq->tdq_group->tdg_transferable--;
		ts->ts_flags &= ~TSF_XFERABLE;
	}
#endif
	runq_remove(ts->ts_runq, ts);
}

static void
tdq_load_add(struct tdq *tdq, struct td_sched *ts)
{
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	if (class == PRI_TIMESHARE)
		tdq->tdq_load_timeshare++;
	tdq->tdq_load++;
	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
	if (class != PRI_ITHD && (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		tdq->tdq_group->tdg_load++;
#else
		tdq->tdq_sysload++;
#endif
	if (ts->ts_thread->td_pri_class == PRI_TIMESHARE)
		tdq_nice_add(tdq, ts->ts_thread->td_proc->p_nice);
}

static void
tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
{
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	if (class == PRI_TIMESHARE)
		tdq->tdq_load_timeshare--;
	if (class != PRI_ITHD && (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		tdq->tdq_group->tdg_load--;
#else
		tdq->tdq_sysload--;
#endif
	tdq->tdq_load--;
	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
	ts->ts_runq = NULL;
	if (ts->ts_thread->td_pri_class == PRI_TIMESHARE)
		tdq_nice_rem(tdq, ts->ts_thread->td_proc->p_nice);
}

static void
tdq_nice_add(struct tdq *tdq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	tdq->tdq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < tdq->tdq_nicemin || tdq->tdq_load_timeshare == 1)
		tdq->tdq_nicemin = nice;
}

static void
tdq_nice_rem(struct tdq *tdq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	tdq->tdq_nice[n]--;
	KASSERT(tdq->tdq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != tdq->tdq_nicemin ||
	    tdq->tdq_nice[n] != 0 ||
	    tdq->tdq_load_timeshare == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (tdq->tdq_nice[n]) {
			tdq->tdq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
static void
sched_balance(void)
{
	struct tdq_group *high;
	struct tdq_group *low;
	struct tdq_group *tdg;
	int cnt;
	int i;

	bal_tick = ticks + (random() % (hz * 2));
	if (smp_started == 0)
		return;
	low = high = NULL;
	i = random() % (tdg_maxid + 1);
	for (cnt = 0; cnt <= tdg_maxid; cnt++) {
		tdg = TDQ_GROUP(i);
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || tdg->tdg_load > high->tdg_load)
		    && tdg->tdg_transferable)
			high = tdg;
		if (low == NULL || tdg->tdg_load < low->tdg_load)
			low = tdg;
		if (++i > tdg_maxid)
			i = 0;
	}
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->tdg_members),
		    LIST_FIRST(&low->tdg_members));
}

static void
sched_balance_groups(void)
{
	int i;

	gbal_tick = ticks + (random() % (hz * 2));
	mtx_assert(&sched_lock, MA_OWNED);
	if (smp_started)
		for (i = 0; i <= tdg_maxid; i++)
			sched_balance_group(TDQ_GROUP(i));
}

static void
sched_balance_group(struct tdq_group *tdg)
{
	struct tdq *tdq;
	struct tdq *high;
	struct tdq *low;
	int load;

	if (tdg->tdg_transferable == 0)
		return;
	low = NULL;
	high = NULL;
	LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
		load = tdq->tdq_load;
		if (high == NULL || load > high->tdq_load)
			high = tdq;
		if (low == NULL || load < low->tdq_load)
			low = tdq;
	}
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
}

static void
sched_balance_pair(struct tdq *high, struct tdq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int move;
	int diff;
	int i;

	/*
	 * If we're transferring within a group we have to use this specific
	 * tdq's transferable count, otherwise we can steal from other members
	 * of the group.
	 */
	if (high->tdq_group == low->tdq_group) {
		transferable = high->tdq_transferable;
		high_load = high->tdq_load;
		low_load = low->tdq_load;
	} else {
		transferable = high->tdq_group->tdg_transferable;
		high_load = high->tdq_group->tdg_load;
		low_load = low->tdq_group->tdg_load;
	}
	if (transferable == 0)
		return;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * threads we actually have to give up (transferable).
	 */
	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;
	move = min(move, transferable);
	for (i = 0; i < move; i++)
		tdq_move(high, TDQ_ID(low));
	return;
}

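/*
 * Worked example for the computation above: with high_load = 7 and
 * low_load = 2 the imbalance is 5, so move rounds up to 3 threads,
 * leaving loads of 4 and 5 (subject to the transferable limit).
 */
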
static void
tdq_move(struct tdq *from, int cpu)
{
	struct tdq *tdq;
	struct tdq *to;
	struct td_sched *ts;

	tdq = from;
	to = TDQ_CPU(cpu);
	ts = tdq_steal(tdq, 1);
	if (ts == NULL) {
		struct tdq_group *tdg;

		tdg = tdq->tdq_group;
		LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
			if (tdq == from || tdq->tdq_transferable == 0)
				continue;
			ts = tdq_steal(tdq, 1);
			break;
		}
		if (ts == NULL)
			panic("tdq_move: No threads available with a "
			    "transferable count of %d\n",
			    tdg->tdg_transferable);
	}
	if (tdq == to)
		return;
	ts->ts_state = TSS_THREAD;
	tdq_runq_rem(tdq, ts);
	tdq_load_rem(tdq, ts);
	tdq_notify(ts, cpu);
}

static int
tdq_idled(struct tdq *tdq)
{
	struct tdq_group *tdg;
	struct tdq *steal;
	struct td_sched *ts;

	tdg = tdq->tdq_group;
	/*
	 * If we're in a cpu group, try and steal threads from another cpu in
	 * the group before idling.
	 */
	if (tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
		LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
			if (steal == tdq || steal->tdq_transferable == 0)
				continue;
			ts = tdq_steal(steal, 0);
			if (ts == NULL)
				continue;
			ts->ts_state = TSS_THREAD;
			tdq_runq_rem(steal, ts);
			tdq_load_rem(steal, ts);
			ts->ts_cpu = PCPU_GET(cpuid);
			ts->ts_flags |= TSF_INTERNAL | TSF_HOLD;
			sched_add(ts->ts_thread, SRQ_YIELDING);
			return (0);
		}
	}
	/*
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a thread bounces
	 * back and forth between two idle cores on separate physical CPUs.
	 */
	tdg->tdg_idlemask |= PCPU_GET(cpumask);
	if (tdg->tdg_idlemask != tdg->tdg_cpumask)
		return (1);
	atomic_set_int(&tdq_idle, tdg->tdg_mask);
	return (1);
}

static void
tdq_assign(struct tdq *tdq)
{
	struct td_sched *nts;
	struct td_sched *ts;

	do {
		*(volatile struct td_sched **)&ts = tdq->tdq_assigned;
	} while (!atomic_cmpset_ptr((volatile uintptr_t *)&tdq->tdq_assigned,
		(uintptr_t)ts, (uintptr_t)NULL));
	for (; ts != NULL; ts = nts) {
		nts = ts->ts_assign;
		tdq->tdq_group->tdg_load--;
		tdq->tdq_load--;
		ts->ts_flags &= ~TSF_ASSIGNED;
		if (ts->ts_flags & TSF_REMOVED) {
			ts->ts_flags &= ~TSF_REMOVED;
			continue;
		}
		ts->ts_flags |= TSF_INTERNAL | TSF_HOLD;
		sched_add(ts->ts_thread, SRQ_YIELDING);
	}
}

static void
tdq_notify(struct td_sched *ts, int cpu)
{
	struct tdq *tdq;
	struct thread *td;
	struct pcpu *pcpu;
	int class;
	int prio;

	tdq = TDQ_CPU(cpu);
	/* XXX */
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (tdq_idle & tdq->tdq_group->tdg_mask))
		atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
	tdq->tdq_group->tdg_load++;
	tdq->tdq_load++;
	ts->ts_cpu = cpu;
	ts->ts_flags |= TSF_ASSIGNED;
	prio = ts->ts_thread->td_priority;

	/*
	 * Place a thread on another cpu's queue and force a resched.
	 */
	do {
		*(volatile struct td_sched **)&ts->ts_assign = tdq->tdq_assigned;
	} while (!atomic_cmpset_ptr((volatile uintptr_t *)&tdq->tdq_assigned,
		(uintptr_t)ts->ts_assign, (uintptr_t)ts));
	/*
	 * Without sched_lock we could lose a race where we set NEEDRESCHED
	 * on a thread that is switched out before the IPI is delivered.  This
	 * would lead us to miss the resched.  This will be a problem once
	 * sched_lock is pushed down.
	 */
	pcpu = pcpu_find(cpu);
	td = pcpu->pc_curthread;
	if (ts->ts_thread->td_priority < td->td_priority ||
	    td == pcpu->pc_idlethread) {
		td->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(1 << cpu, IPI_AST);
	}
}

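/*
 * The two compare-and-set loops above implement a lock-free singly
 * linked LIFO: tdq_notify() pushes a td_sched onto the remote cpu's
 * tdq_assigned list, and tdq_assign() detaches the whole list at once
 * by swapping the head with NULL, so producers never block the
 * consumer and vice versa.
 */
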
static struct td_sched *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct td_sched *ts;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ts, rqh, ts_procq) {
				if (THREAD_CAN_MIGRATE(ts))
					return (ts);
			}
		}
	}
	return (NULL);
}

static struct td_sched *
tdq_steal(struct tdq *tdq, int stealidle)
{
	struct td_sched *ts;

	/*
	 * Steal from next first to try to get a non-interactive task that
	 * may not have run for a while.
	 */
	if ((ts = runq_steal(tdq->tdq_next)) != NULL)
		return (ts);
	if ((ts = runq_steal(tdq->tdq_curr)) != NULL)
		return (ts);
	if (stealidle)
		return (runq_steal(&tdq->tdq_idle));
	return (NULL);
}

int
tdq_transfer(struct tdq *tdq, struct td_sched *ts, int class)
{
	struct tdq_group *ntdg;
	struct tdq_group *tdg;
	struct tdq *old;
	int cpu;
	int idx;

	if (smp_started == 0)
		return (0);
	cpu = 0;
	/*
	 * If our load exceeds a certain threshold we should attempt to
	 * reassign this thread.  The first candidate is the cpu that
	 * originally ran the thread.  If it is idle, assign it there,
	 * otherwise, pick an idle cpu.
	 *
	 * The threshold at which we start to reassign has a large impact
	 * on the overall performance of the system.  Tuned too high and
	 * some CPUs may idle.  Too low and there will be excess migration
	 * and context switches.
	 */
	old = TDQ_CPU(ts->ts_cpu);
	ntdg = old->tdq_group;
	tdg = tdq->tdq_group;
	if (tdq_idle) {
		if (tdq_idle & ntdg->tdg_mask) {
			cpu = ffs(ntdg->tdg_idlemask);
			if (cpu) {
				CTR2(KTR_SCHED,
				    "tdq_transfer: %p found old cpu %X "
				    "in idlemask.", ts, cpu);
				goto migrate;
			}
		}
		/*
		 * Multiple cpus could find this bit simultaneously
		 * but the race shouldn't be terrible.
		 */
		cpu = ffs(tdq_idle);
		if (cpu) {
			CTR2(KTR_SCHED, "tdq_transfer: %p found %X "
			    "in idlemask.", ts, cpu);
			goto migrate;
		}
	}
	idx = 0;
#if 0
	if (old->tdq_load < tdq->tdq_load) {
		cpu = ts->ts_cpu + 1;
		CTR2(KTR_SCHED, "tdq_transfer: %p old cpu %X "
		    "load less than ours.", ts, cpu);
		goto migrate;
	}
	/*
	 * No new CPU was found, look for one with less load.
	 */
	for (idx = 0; idx <= tdg_maxid; idx++) {
		ntdg = TDQ_GROUP(idx);
		if (ntdg->tdg_load /*+ (ntdg->tdg_cpus  * 2)*/ < tdg->tdg_load) {
			cpu = ffs(ntdg->tdg_cpumask);
			CTR2(KTR_SCHED, "tdq_transfer: %p cpu %X load less "
			    "than ours.", ts, cpu);
			goto migrate;
		}
	}
#endif
	/*
	 * If another cpu in this group has idled, assign a thread over
	 * to them after checking to see if there are idled groups.
	 */
	if (tdg->tdg_idlemask) {
		cpu = ffs(tdg->tdg_idlemask);
		if (cpu) {
			CTR2(KTR_SCHED, "tdq_transfer: %p cpu %X idle in "
			    "group.", ts, cpu);
			goto migrate;
		}
	}
	return (0);
migrate:
	/*
	 * Now that we've found an idle CPU, migrate the thread.
	 */
	cpu--;
	ts->ts_runq = NULL;
	tdq_notify(ts, cpu);

	return (1);
}

#endif	/* SMP */

/*
 * Pick the highest priority task we have and return it.
 */
static struct td_sched *
tdq_choose(struct tdq *tdq)
{
	struct runq *swap;
	struct td_sched *ts;
	int nice;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ts = runq_choose(tdq->tdq_curr);
		if (ts == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = tdq->tdq_curr;
			tdq->tdq_curr = tdq->tdq_next;
			tdq->tdq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the td_sched is in a
		 * TIMESHARE td_sched group and its nice was too far out
		 * of the range that receives slices.
		 */
		nice = ts->ts_thread->td_proc->p_nice + (0 - tdq->tdq_nicemin);
#if 0
		if (ts->ts_slice == 0 || (nice > SCHED_SLICE_NTHRESH &&
		    ts->ts_thread->td_proc->p_nice != 0)) {
			runq_remove(ts->ts_runq, ts);
			sched_slice(ts);
			ts->ts_runq = tdq->tdq_next;
			runq_add(ts->ts_runq, ts, 0);
			continue;
		}
#endif
		return (ts);
	}

	return (runq_choose(&tdq->tdq_idle));
}

static void
tdq_setup(struct tdq *tdq)
{
	runq_init(&tdq->tdq_timeshare[0]);
	runq_init(&tdq->tdq_timeshare[1]);
	runq_init(&tdq->tdq_idle);
	tdq->tdq_curr = &tdq->tdq_timeshare[0];
	tdq->tdq_next = &tdq->tdq_timeshare[1];
	tdq->tdq_load = 0;
	tdq->tdq_load_timeshare = 0;
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int i;
#endif

	/*
	 * To avoid divide-by-zero, set realstathz to a dummy value here in
	 * case sched_clock() is called before sched_initticks().
	 */
	realstathz = hz;
	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	balance_groups = 0;
	/*
	 * Initialize the tdqs.
	 */
	for (i = 0; i < MAXCPU; i++) {
		struct tdq *tdq;

		tdq = &tdq_cpu[i];
		tdq->tdq_assigned = NULL;
		tdq_setup(&tdq_cpu[i]);
	}
	if (smp_topology == NULL) {
		struct tdq_group *tdg;
		struct tdq *tdq;
		int cpus;

		for (cpus = 0, i = 0; i < MAXCPU; i++) {
			if (CPU_ABSENT(i))
				continue;
			tdq = &tdq_cpu[i];
			tdg = &tdq_groups[cpus];
			/*
			 * Setup a tdq group with one member.
			 */
			tdq->tdq_transferable = 0;
			tdq->tdq_group = tdg;
			tdg->tdg_cpus = 1;
			tdg->tdg_idlemask = 0;
			tdg->tdg_cpumask = tdg->tdg_mask = 1 << i;
			tdg->tdg_load = 0;
			tdg->tdg_transferable = 0;
			LIST_INIT(&tdg->tdg_members);
			LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
			cpus++;
		}
		tdg_maxid = cpus - 1;
	} else {
		struct tdq_group *tdg;
		struct cpu_group *cg;
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			cg = &smp_topology->ct_group[i];
			tdg = &tdq_groups[i];
			/*
			 * Initialize the group.
			 */
			tdg->tdg_idlemask = 0;
			tdg->tdg_load = 0;
			tdg->tdg_transferable = 0;
			tdg->tdg_cpus = cg->cg_count;
			tdg->tdg_cpumask = cg->cg_mask;
			LIST_INIT(&tdg->tdg_members);
			/*
			 * Find all of the group members and add them.
			 */
			for (j = 0; j < MAXCPU; j++) {
				if ((cg->cg_mask & (1 << j)) != 0) {
					if (tdg->tdg_mask == 0)
						tdg->tdg_mask = 1 << j;
					tdq_cpu[j].tdq_transferable = 0;
					tdq_cpu[j].tdq_group = tdg;
					LIST_INSERT_HEAD(&tdg->tdg_members,
					    &tdq_cpu[j], tdq_siblings);
				}
			}
			if (tdg->tdg_cpus > 1)
				balance_groups = 1;
		}
		tdg_maxid = smp_topology->ct_count - 1;
	}
	/*
	 * Stagger the group and global load balancer so they do not
	 * interfere with each other.
	 */
	bal_tick = ticks + hz;
	if (balance_groups)
		gbal_tick = ticks + (hz / 2);
#else
	tdq_setup(TDQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	tdq_load_add(TDQ_SELF(), &td_sched0);
	mtx_unlock_spin(&sched_lock);
}

/* ARGSUSED */
static void
sched_initticks(void *dummy)
{
	mtx_lock_spin(&sched_lock);
	realstathz = stathz ? stathz : hz;
	slice_min = (realstathz/100);	/* 10ms */
	slice_max = (realstathz/7);	/* ~140ms */

	tickincr = (hz << 10) / realstathz;
	/*
	 * XXX This does not work for values of stathz that are much
	 * larger than hz.
	 */
	if (tickincr == 0)
		tickincr = 1;
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct thread *td)
{
	int pri;

	if (td->td_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(td));
	pri += SCHED_PRI_BASE;
	pri += td->td_proc->p_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	sched_user_prio(td, pri);

	return;
}

/*
 * Calculate a time slice based on the properties of the process
 * and the runq that we're on.  This is only for PRI_TIMESHARE threads.
 */
static void
sched_slice(struct td_sched *ts)
{
	struct tdq *tdq;
	struct thread *td;

	td = ts->ts_thread;
	tdq = TDQ_CPU(ts->ts_cpu);

	if (td->td_flags & TDF_BORROWING) {
		ts->ts_slice = SCHED_SLICE_MIN;
		return;
	}

	/*
	 * Rationale:
	 * Threads in interactive procs get a minimal slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * Threads in non-interactive procs are assigned a slice that is
	 * based on the procs nice value relative to the least nice procs
	 * on the run queue for this cpu.
	 *
	 * If the thread is less nice than all others it gets the maximum
	 * slice and other threads will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice td_sched on the run queue.  Slice size is determined by
	 * the td_sched distance from the least nice thread.
	 *
	 * If the td_sched is outside of the window it will get no slice
	 * and will be reevaluated each time it is selected on the
	 * run queue.  The exception to this is nice 0 procs when
	 * a nice -20 is running.  They are always granted a minimum
	 * slice.
	 */
	if (!SCHED_INTERACTIVE(td)) {
		int nice;

		nice = td->td_proc->p_nice + (0 - tdq->tdq_nicemin);
		if (tdq->tdq_load_timeshare == 0 ||
		    td->td_proc->p_nice < tdq->tdq_nicemin)
			ts->ts_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ts->ts_slice = SCHED_SLICE_NICE(nice);
		else if (td->td_proc->p_nice == 0)
			ts->ts_slice = SCHED_SLICE_MIN;
		else
			ts->ts_slice = SCHED_SLICE_MIN; /* 0 */
	} else
		ts->ts_slice = SCHED_SLICE_INTERACTIVE;

	return;
}

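/*
 * Worked example for sched_slice() (same realstathz = 128 assumption as
 * above): with tdq_nicemin = -3, a non-interactive thread at p_nice = 5
 * has a nice distance of 8, which is within the 20 point window, so it
 * is granted SCHED_SLICE_NICE(8) = 18 - (8 * 18) / 19 = 11 ticks.
 */
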
/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
static void
sched_interact_update(struct thread *td)
{
	int sum;

	sum = td->td_sched->skg_runtime + td->td_sched->skg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		td->td_sched->skg_runtime /= 2;
		td->td_sched->skg_slptime /= 2;
		return;
	}
	td->td_sched->skg_runtime = (td->td_sched->skg_runtime / 5) * 4;
	td->td_sched->skg_slptime = (td->td_sched->skg_slptime / 5) * 4;
}

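/*
 * Worked example (assuming hz = 1000, so SCHED_SLP_RUN_MAX is
 * 5,120,000): with skg_runtime = 3,000,000 and skg_slptime = 2,500,000
 * the sum is 5,500,000, below the 6/5 cutoff of 6,144,000, so both are
 * scaled by 4/5 to 2,400,000 and 2,000,000 and the sum drops back under
 * the maximum.
 */
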
static void
sched_interact_fork(struct thread *td)
{
	int ratio;
	int sum;

	sum = td->td_sched->skg_runtime + td->td_sched->skg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		td->td_sched->skg_runtime /= ratio;
		td->td_sched->skg_slptime /= ratio;
	}
}

static int
sched_interact_score(struct thread *td)
{
	int div;

	if (td->td_sched->skg_runtime > td->td_sched->skg_slptime) {
		div = max(1, td->td_sched->skg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (td->td_sched->skg_slptime / div)));
	} else if (td->td_sched->skg_slptime > td->td_sched->skg_runtime) {
		div = max(1, td->td_sched->skg_slptime / SCHED_INTERACT_HALF);
		return (td->td_sched->skg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);
}

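/*
 * Worked example: a thread that slept 3 s and ran 1 s (assuming
 * hz = 1000, so skg_slptime = 3,072,000 and skg_runtime = 1,024,000)
 * takes the second branch: div = 3,072,000 / 50 = 61,440 and the score
 * is 1,024,000 / 61,440 = 16, below SCHED_INTERACT_THRESH (30), so the
 * thread is considered interactive.
 */
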
/*
 * Very early in the boot some setup of scheduler-specific
 * parts of proc0 and of some scheduler resources needs to be done.
 * Called from:
 *  proc0_init()
 */
void
schedinit(void)
{
	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	td_sched0.ts_thread = &thread0;
	td_sched0.ts_state = TSS_THREAD;
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

static void
sched_pctcpu_update(struct td_sched *ts)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ts->ts_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ts->ts_ticks <<= 10;
		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
			    SCHED_CPU_TICKS;
		ts->ts_ticks >>= 10;
	} else
		ts->ts_ticks = 0;
	ts->ts_ltick = ticks;
	ts->ts_ftick = ts->ts_ltick - SCHED_CPU_TICKS;
}

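/*
 * Worked example: if ts_ftick is 15 s old and the averaging window
 * (SCHED_CPU_TICKS) is 10 s of hz ticks, the rescale above multiplies
 * ts_ticks by roughly 10/15; e.g. 6000 accumulated ticks become about
 * 4000, as if they had been earned over the 10 s window alone.
 */
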
void
sched_thread_priority(struct thread *td, u_char prio)
{
	struct td_sched *ts;

	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
	    curthread->td_proc->p_comm);
	ts = td->td_sched;
	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority == prio)
		return;
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (prio < td->td_priority && ts->ts_runq != NULL &&
		    (ts->ts_flags & TSF_ASSIGNED) == 0 &&
		    ts->ts_runq != TDQ_CPU(ts->ts_cpu)->tdq_curr) {
			runq_remove(ts->ts_runq, ts);
			ts->ts_runq = TDQ_CPU(ts->ts_cpu)->tdq_curr;
			runq_add(ts->ts_runq, ts, 0);
		}
		/*
		 * Hold this td_sched on this cpu so that sched_prio() doesn't
		 * cause excessive migration.  We only want migration to
		 * happen as the result of a wakeup.
		 */
		ts->ts_flags |= TSF_HOLD;
		adjustrunqueue(td, prio);
		ts->ts_flags &= ~TSF_HOLD;
	} else
		td->td_priority = prio;
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_thread_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_thread_priority(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't
	 * ever lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_thread_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	td->td_base_user_pri = prio;
	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
		return;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;

	if (TD_ON_UPILOCK(td) && oldprio != prio)
		umtx_pi_adjust(td, oldprio);
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	td->td_flags |= TDF_UBORROWING;

	oldprio = td->td_user_pri;
	td->td_user_pri = prio;

	if (TD_ON_UPILOCK(td) && oldprio != prio)
		umtx_pi_adjust(td, oldprio);
}

void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	base_pri = td->td_base_user_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_UBORROWING;
		sched_user_prio(td, base_pri);
	} else
		sched_lend_user_prio(td, prio);
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct tdq *tdq;
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);

	ts = td->td_sched;
	tdq = TDQ_SELF();

	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;

	/*
	 * If the thread has been assigned it may be in the process of switching
	 * to the new cpu.  This is the case in sched_bind().
	 */
	if (td == PCPU_GET(idlethread)) {
		TD_SET_CAN_RUN(td);
	} else if ((ts->ts_flags & TSF_ASSIGNED) == 0) {
		/* We are ending our run so make our slot available again */
		tdq_load_rem(tdq, ts);
		if (TD_IS_RUNNING(td)) {
			/*
			 * Don't allow the thread to migrate
			 * from a preemption.
			 */
			ts->ts_flags |= TSF_HOLD;
			setrunqueue(td, (flags & SW_PREEMPT) ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
			ts->ts_flags &= ~TSF_HOLD;
		}
	}
	if (newtd != NULL) {
		/*
		 * If we bring in a thread, account for it as if it had been
		 * added to the run queue and then chosen.
		 */
		newtd->td_sched->ts_flags |= TSF_DIDRUN;
		newtd->td_sched->ts_runq = tdq->tdq_curr;
		TD_SET_RUNNING(newtd);
		tdq_load_add(TDQ_SELF(), newtd->td_sched);
	} else
		newtd = choosethread();
	if (td != newtd) {
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif

		cpu_switch(td, newtd);
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}

	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct proc *p, int nice)
{
	struct td_sched *ts;
	struct thread *td;
	struct tdq *tdq;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running threads.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_pri_class == PRI_TIMESHARE) {
			ts = td->td_sched;
			if (ts->ts_runq == NULL)
				continue;
			tdq = TDQ_CPU(ts->ts_cpu);
			tdq_nice_rem(tdq, p->p_nice);
			tdq_nice_add(tdq, nice);
		}
	}
	p->p_nice = nice;
	FOREACH_THREAD_IN_PROC(p, td) {
		sched_priority(td);
		td->td_flags |= TDF_NEEDRESCHED;
	}
}

void
sched_sleep(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_sched->ts_slptime = ticks;
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the procs know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the procs.
	 */
	if (td->td_sched->ts_slptime) {
		int hzticks;

		hzticks = (ticks - td->td_sched->ts_slptime) << 10;
		if (hzticks >= SCHED_SLP_RUN_MAX) {
			td->td_sched->skg_slptime = SCHED_SLP_RUN_MAX;
			td->td_sched->skg_runtime = 1;
		} else {
			td->td_sched->skg_slptime += hzticks;
			sched_interact_update(td);
		}
		sched_priority(td);
		sched_slice(td->td_sched);
		td->td_sched->ts_slptime = 0;
	}
	setrunqueue(td, SRQ_BORING);
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct thread *td, struct thread *child)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_fork_thread(td, child);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
	struct td_sched *ts;
	struct td_sched *ts2;

	child->td_sched->skg_slptime = td->td_sched->skg_slptime;
	child->td_sched->skg_runtime = td->td_sched->skg_runtime;
	child->td_user_pri = td->td_user_pri;
	child->td_base_user_pri = td->td_base_user_pri;
	sched_interact_fork(child);
	td->td_sched->skg_runtime += tickincr;
	sched_interact_update(td);

	sched_newthread(child);

	ts = td->td_sched;
	ts2 = child->td_sched;
	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
	ts2->ts_cpu = ts->ts_cpu;
	ts2->ts_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	ts2->ts_ticks = ts->ts_ticks;
	ts2->ts_ltick = ts->ts_ltick;
	ts2->ts_ftick = ts->ts_ftick;
}

void
sched_class(struct thread *td, int class)
{
	struct tdq *tdq;
	struct td_sched *ts;
	int nclass;
	int oclass;

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_pri_class == class)
		return;

	nclass = PRI_BASE(class);
	oclass = PRI_BASE(td->td_pri_class);
	ts = td->td_sched;
	if (!((ts->ts_state != TSS_ONRUNQ &&
	    ts->ts_state != TSS_THREAD) || ts->ts_runq == NULL)) {
		tdq = TDQ_CPU(ts->ts_cpu);

#ifdef SMP
		/*
		 * On SMP if we're on the RUNQ we must adjust the transferable
		 * count because we could be changing to or from an interrupt
		 * class.
		 */
		if (ts->ts_state == TSS_ONRUNQ) {
			if (THREAD_CAN_MIGRATE(ts)) {
				tdq->tdq_transferable--;
				tdq->tdq_group->tdg_transferable--;
			}
			if (THREAD_CAN_MIGRATE(ts)) {
				tdq->tdq_transferable++;
				tdq->tdq_group->tdg_transferable++;
			}
		}
#endif
		if (oclass == PRI_TIMESHARE) {
			tdq->tdq_load_timeshare--;
			tdq_nice_rem(tdq, td->td_proc->p_nice);
		}
		if (nclass == PRI_TIMESHARE) {
			tdq->tdq_load_timeshare++;
			tdq_nice_add(tdq, td->td_proc->p_nice);
		}
	}

	td->td_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct thread *child)
{

	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
	    child, child->td_proc->p_comm, child->td_priority);

	sched_exit_thread(FIRST_THREAD_IN_PROC(p), child);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
	    child, child->td_proc->p_comm, child->td_priority);

	td->td_sched->skg_runtime += child->td_sched->skg_runtime;
	sched_interact_update(td);
	tdq_load_rem(TDQ_CPU(child->td_sched->ts_cpu), child->td_sched);
}

void
sched_userret(struct thread *td)
{
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	if (td->td_priority != td->td_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = td->td_user_pri;
		td->td_base_pri = td->td_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

void
sched_clock(struct thread *td)
{
	struct tdq *tdq;
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	tdq = TDQ_SELF();
#ifdef SMP
	if (ticks >= bal_tick)
		sched_balance();
	if (ticks >= gbal_tick && balance_groups)
		sched_balance_groups();
	/*
	 * We could have been assigned a non real-time thread without an
	 * IPI.
	 */
	if (tdq->tdq_assigned)
		tdq_assign(tdq);	/* Potentially sets NEEDRESCHED */
#endif
	ts = td->td_sched;

	/* Adjust ticks for pctcpu */
	ts->ts_ticks++;
	ts->ts_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ts->ts_ftick + SCHED_CPU_TICKS + hz < ts->ts_ltick)
		sched_pctcpu_update(ts);

	if (td->td_flags & TDF_IDLETD)
		return;
	/*
	 * We only do slicing code for TIMESHARE threads.
	 */
	if (td->td_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the thread so that we can compute our
	 * interactivity.
	 */
	td->td_sched->skg_runtime += tickincr;
	sched_interact_update(td);

	/*
	 * We used up one time slice.
	 */
	if (--ts->ts_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	tdq_load_rem(tdq, ts);
	sched_priority(td);
	sched_slice(ts);
	if (SCHED_CURR(td, ts))
		ts->ts_runq = tdq->tdq_curr;
	else
		ts->ts_runq = tdq->tdq_next;
	tdq_load_add(tdq, ts);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct tdq *tdq;
	int load;

	load = 1;

	tdq = TDQ_SELF();
#ifdef SMP
	if (tdq->tdq_assigned) {
		mtx_lock_spin(&sched_lock);
		tdq_assign(tdq);
		mtx_unlock_spin(&sched_lock);
	}
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (tdq->tdq_load > 0)
			goto out;
	} else
		if (tdq->tdq_load - 1 > 0)
			goto out;
	load = 0;
out:
	return (load);
}

struct td_sched *
sched_choose(void)
{
	struct tdq *tdq;
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	tdq = TDQ_SELF();
#ifdef SMP
restart:
	if (tdq->tdq_assigned)
		tdq_assign(tdq);
#endif
	ts = tdq_choose(tdq);
	if (ts) {
#ifdef SMP
		if (ts->ts_thread->td_pri_class == PRI_IDLE)
			if (tdq_idled(tdq) == 0)
				goto restart;
#endif
		tdq_runq_rem(tdq, ts);
		ts->ts_state = TSS_THREAD;
		ts->ts_flags &= ~TSF_PREEMPTED;
		return (ts);
	}
#ifdef SMP
	if (tdq_idled(tdq) == 0)
		goto restart;
#endif
	return (NULL);
}

void
sched_add(struct thread *td, int flags)
{
	struct tdq *tdq;
	struct td_sched *ts;
	int preemptive;
	int canmigrate;
	int class;

	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	ts = td->td_sched;
	canmigrate = 1;
	preemptive = !(flags & SRQ_YIELDING);
	class = PRI_BASE(td->td_pri_class);
	tdq = TDQ_SELF();
	ts->ts_flags &= ~TSF_INTERNAL;
#ifdef SMP
	if (ts->ts_flags & TSF_ASSIGNED) {
		if (ts->ts_flags & TSF_REMOVED)
			ts->ts_flags &= ~TSF_REMOVED;
		return;
	}
	canmigrate = THREAD_CAN_MIGRATE(ts);
	/*
	 * Don't migrate running threads here.  Force the long term balancer
	 * to do it.
	 */
	if (ts->ts_flags & TSF_HOLD) {
		ts->ts_flags &= ~TSF_HOLD;
		canmigrate = 0;
	}
#endif
	KASSERT(ts->ts_state != TSS_ONRUNQ,
	    ("sched_add: thread %p (%s) already in run queue", td,
	    td->td_proc->p_comm));
	KASSERT(td->td_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ts->ts_runq == NULL,
	    ("sched_add: thread %p is still assigned to a run queue", td));
	if (flags & SRQ_PREEMPTED)
		ts->ts_flags |= TSF_PREEMPTED;
	switch (class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		ts->ts_runq = tdq->tdq_curr;
		ts->ts_slice = SCHED_SLICE_MAX;
		if (canmigrate)
			ts->ts_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		if (SCHED_CURR(td, ts))
			ts->ts_runq = tdq->tdq_curr;
		else
			ts->ts_runq = tdq->tdq_next;
		break;
	case PRI_IDLE:
		/*
		 * This is for priority prop.
		 */
		if (ts->ts_thread->td_priority < PRI_MIN_IDLE)
			ts->ts_runq = tdq->tdq_curr;
		else
			ts->ts_runq = &tdq->tdq_idle;
		ts->ts_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	/*
	 * If this thread is pinned or bound, notify the target cpu.
	 */
	if (!canmigrate && ts->ts_cpu != PCPU_GET(cpuid)) {
		ts->ts_runq = NULL;
		tdq_notify(ts, ts->ts_cpu);
		return;
	}
	/*
	 * If we had been idle, clear our bit in the group and potentially
	 * the global bitmap.  If not, see if we should transfer this thread.
	 */
	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
	    (tdq->tdq_group->tdg_idlemask & PCPU_GET(cpumask)) != 0) {
		/*
		 * Check to see if our group is unidling, and if so, remove it
		 * from the global idle mask.
		 */
		if (tdq->tdq_group->tdg_idlemask ==
		    tdq->tdq_group->tdg_cpumask)
			atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
		/*
		 * Now remove ourselves from the group specific idle mask.
		 */
		tdq->tdq_group->tdg_idlemask &= ~PCPU_GET(cpumask);
	} else if (canmigrate && tdq->tdq_load > 1 && class != PRI_ITHD)
		if (tdq_transfer(tdq, ts, class))
			return;
	ts->ts_cpu = PCPU_GET(cpuid);
#endif
	if (td->td_priority < curthread->td_priority &&
	    ts->ts_runq == tdq->tdq_curr)
		curthread->td_flags |= TDF_NEEDRESCHED;
	if (preemptive && maybe_preempt(td))
		return;
	ts->ts_state = TSS_ONRUNQ;

	tdq_runq_add(tdq, ts, flags);
	tdq_load_add(tdq, ts);
}

void
sched_rem(struct thread *td)
{
	struct tdq *tdq;
	struct td_sched *ts;

	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	ts = td->td_sched;
	ts->ts_flags &= ~TSF_PREEMPTED;
	if (ts->ts_flags & TSF_ASSIGNED) {
		ts->ts_flags |= TSF_REMOVED;
		return;
	}
	KASSERT((ts->ts_state == TSS_ONRUNQ),
	    ("sched_rem: thread not on run queue"));

	ts->ts_state = TSS_THREAD;
	tdq = TDQ_CPU(ts->ts_cpu);
	tdq_runq_rem(tdq, ts);
	tdq_load_rem(tdq, ts);
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct td_sched *ts;

	pctcpu = 0;
	ts = td->td_sched;
	if (ts == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ts->ts_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ts->ts_ftick + SCHED_CPU_TICKS < ts->ts_ltick ||
		    ts->ts_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ts);
		/* How many rtick per second ? */
		rtick = min(ts->ts_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	td->td_proc->p_swtime = ts->ts_ltick - ts->ts_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

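/*
 * Worked example for the pctcpu conversion above (assuming
 * realstathz = 128): a thread that accumulated 640 statclock ticks over
 * the 10 s window has rtick = 640 / SCHED_CPU_TIME = 64 ticks per
 * second, and (FSCALE * 64) / 128 is FSCALE / 2, i.e. 50% cpu.
 */
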
void
sched_bind(struct thread *td, int cpu)
{
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	ts = td->td_sched;
	ts->ts_flags |= TSF_BOUND;
#ifdef SMP
	if (PCPU_GET(cpuid) == cpu)
		return;
	/* sched_rem without the runq_remove */
	ts->ts_state = TSS_THREAD;
	tdq_load_rem(TDQ_CPU(ts->ts_cpu), ts);
	tdq_notify(ts, cpu);
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	td->td_sched->ts_flags &= ~TSF_BOUND;
}

int
sched_is_bound(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	return (td->td_sched->ts_flags & TSF_BOUND);
}

void
sched_relinquish(struct thread *td)
{
	mtx_lock_spin(&sched_lock);
	if (td->td_pri_class == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL, NULL);
	mtx_unlock_spin(&sched_lock);
}

int
sched_load(void)
{
#ifdef SMP
	int total;
	int i;

	total = 0;
	for (i = 0; i <= tdg_maxid; i++)
		total += TDQ_GROUP(i)->tdg_load;
	return (total);
#else
	return (TDQ_SELF()->tdq_sysload);
#endif
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}

void
sched_tick(void)
{
}
#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"