1/*-
2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 165819 2007-01-05 23:45:38Z jeff $");
29
30#include "opt_hwpmc_hooks.h"
31#include "opt_sched.h"
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/kdb.h>
36#include <sys/kernel.h>
37#include <sys/ktr.h>
38#include <sys/lock.h>
39#include <sys/mutex.h>
40#include <sys/proc.h>
41#include <sys/resource.h>
42#include <sys/resourcevar.h>
43#include <sys/sched.h>
44#include <sys/smp.h>
45#include <sys/sx.h>
46#include <sys/sysctl.h>
47#include <sys/sysproto.h>
48#include <sys/turnstile.h>
49#include <sys/umtx.h>
50#include <sys/vmmeter.h>
51#ifdef KTRACE
52#include <sys/uio.h>
53#include <sys/ktrace.h>
54#endif
55
56#ifdef HWPMC_HOOKS
57#include <sys/pmckern.h>
58#endif
59
60#include <machine/cpu.h>
61#include <machine/smp.h>
62
63/*
64 * Thread scheduler specific section.
65 */
66struct td_sched {
67	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
68	int		ts_flags;	/* (j) TSF_* flags. */
69	struct thread	*ts_thread;	/* (*) Active associated thread. */
70	fixpt_t		ts_pctcpu;	/* (j) %cpu during p_swtime. */
71	u_char		ts_rqindex;	/* (j) Run queue index. */
72	enum {
73		TSS_THREAD,
74		TSS_ONRUNQ
75	} ts_state;			/* (j) thread sched specific status. */
76	int		ts_slptime;
77	int		ts_slice;
78	struct runq	*ts_runq;
79	u_char		ts_cpu;		/* CPU that we have affinity for. */
80	/* The following variables are only used for pctcpu calculation */
81	int		ts_ltick;	/* Last tick that we were running on */
82	int		ts_ftick;	/* First tick that we were running on */
83	int		ts_ticks;	/* Tick count */
84
85	/* originally from kg_sched */
86	int	skg_slptime;		/* Number of ticks we vol. slept */
87	int	skg_runtime;		/* Number of ticks we were running */
88};
89#define	ts_assign		ts_procq.tqe_next
90/* flags kept in ts_flags */
91#define	TSF_ASSIGNED	0x0001		/* Thread is being migrated. */
92#define	TSF_BOUND	0x0002		/* Thread can not migrate. */
93#define	TSF_XFERABLE	0x0004		/* Thread was added as transferable. */
94#define	TSF_HOLD	0x0008		/* Thread is temporarily bound. */
95#define	TSF_REMOVED	0x0010		/* Thread was removed while ASSIGNED */
96#define	TSF_INTERNAL	0x0020		/* Thread added due to migration. */
97#define	TSF_DIDRUN	0x2000		/* Thread actually ran. */
98#define	TSF_EXIT	0x4000		/* Thread is being killed. */
99
100static struct td_sched td_sched0;
101
102/*
103 * Cpu percentage computation macros and defines.
104 *
105 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
106 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
107 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
108 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
109 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
110 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
111 */
112#define	SCHED_TICK_SECS		10
113#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
114#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
115#define	SCHED_TICK_SHIFT	10
116#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
117#define	SCHED_TICK_TOTAL(ts)	((ts)->ts_ltick - (ts)->ts_ftick)
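/*
 * Worked example of the macros above (assuming hz = 1000, which is an
 * illustration rather than a requirement): SCHED_TICK_TARG is 10000 hz
 * ticks (ten seconds) and SCHED_TICK_MAX is 11000.  ts_ticks is kept
 * scaled up by SCHED_TICK_SHIFT, so a thread that ran for roughly 2500 of
 * the last 10000 hz ticks carries ts_ticks of about 2500 << 10;
 * SCHED_TICK_HZ() recovers the 2500 and SCHED_TICK_TOTAL() reports the
 * length of the window the ticks were collected over.
 */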
118
119/*
120 * These macros determine priorities for non-interactive threads.  They are
121 * assigned a priority based on their recent cpu utilization as expressed
122 * by the ratio of ticks to the tick total.  NHALF priorities at the start
123 * and end of the MIN to MAX timeshare range are only reachable with negative
124 * or positive nice respectively.
125 *
126 * PRI_RANGE:	Priority range for utilization dependent priorities.
127 * PRI_NRESV:	Number of nice values.
128 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
129 * PRI_NICE:	Determines the part of the priority inherited from nice.
130 */
131#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
132#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
133#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
134#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
135#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
136#define	SCHED_PRI_TICKS(ts)						\
137    (SCHED_TICK_HZ((ts)) /						\
138    (max(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
139#define	SCHED_PRI_NICE(nice)	(nice)
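/*
 * Worked example (assuming PRI_MIN_TIMESHARE = 160, PRI_MAX_TIMESHARE = 223,
 * PRIO_MIN = -20 and PRIO_MAX = 20; the authoritative values live in
 * sys/priority.h and sys/resource.h): SCHED_PRI_NRESV is 40, SCHED_PRI_NHALF
 * is 20, so the utilization driven band is [180, 203] and SCHED_PRI_RANGE is
 * 24.  A thread with SCHED_TICK_HZ() of 2500 out of a SCHED_TICK_TOTAL() of
 * 10000 gets SCHED_PRI_TICKS() of 2500 / (10000 / 24) = 6, and the nice
 * value then shifts the result by up to 20 in either direction.
 */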
140
141/*
142 * These determine the interactivity of a process.  Interactivity differs from
143 * cpu utilization in that it expresses the voluntary time slept vs. time run
144 * while cpu utilization includes all time not running.  This more accurately
145 * models the intent of the thread.
146 *
147 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
148 *		before throttling back.
149 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
150 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
151 * INTERACT_THRESH:	Threshold for placement on the current runq.
152 */
153#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
154#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
155#define	SCHED_INTERACT_MAX	(100)
156#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
157#define	SCHED_INTERACT_THRESH	(30)
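/*
 * For example, with hz = 1000 (an assumed value for illustration) the
 * sleep + run history is capped at five seconds worth of hz ticks stored
 * scaled by SCHED_TICK_SHIFT, a forked child inherits at most half a
 * second of it, and any thread scoring below 30 is treated as interactive.
 */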
158
159/*
160 * tickincr:		Converts a stathz tick into a hz domain scaled by
161 *			the shift factor.  Without the shift the error rate
162 *			due to rounding would be unacceptably high.
163 * realstathz:		stathz is sometimes 0; fall back to hz when it is.
164 * sched_slice:		Runtime of each thread before rescheduling.
165 */
166static int sched_interact = SCHED_INTERACT_THRESH;
167static int realstathz;
168static int tickincr;
169static int sched_slice;
170static int sched_rebalance;
171
172/*
173 * tdq - per processor runqs and statistics.
174 */
175struct tdq {
176	struct runq	tdq_idle;		/* Queue of IDLE threads. */
177	struct runq	tdq_timeshare;		/* timeshare run queue. */
178	struct runq	tdq_realtime;		/* real-time run queue. */
179	int		tdq_idx;		/* Current insert index. */
180	int		tdq_ridx;		/* Current removal index. */
181	int		tdq_load_timeshare;	/* Load for timeshare. */
182	int		tdq_load;		/* Aggregate load. */
183#ifdef SMP
184	int		tdq_transferable;
185	LIST_ENTRY(tdq)	tdq_siblings;		/* Next in tdq group. */
186	struct tdq_group *tdq_group;		/* Our processor group. */
187	volatile struct td_sched *tdq_assigned;	/* assigned by another CPU. */
188#else
189	int		tdq_sysload;		/* For loadavg, !ITHD load. */
190#endif
191};
192
193#ifdef SMP
194/*
195 * tdq groups are groups of processors which can cheaply share threads.  When
196 * one processor in the group goes idle it will check the runqs of the other
197 * processors in its group prior to halting and waiting for an interrupt.
198 * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
199 * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
200 * load balancer.
201 */
202struct tdq_group {
203	int	tdg_cpus;		/* Count of CPUs in this tdq group. */
204	cpumask_t tdg_cpumask;		/* Mask of cpus in this group. */
205	cpumask_t tdg_idlemask;		/* Idle cpus in this group. */
206	cpumask_t tdg_mask;		/* Bit mask for first cpu. */
207	int	tdg_load;		/* Total load of this group. */
208	int	tdg_transferable;	/* Transferable load of this group. */
209	LIST_HEAD(, tdq) tdg_members;	/* Linked list of all members. */
210};
211#endif
212
213/*
214 * One thread queue per processor.
215 */
216#ifdef SMP
217static cpumask_t tdq_idle;
218static int tdg_maxid;
219static struct tdq	tdq_cpu[MAXCPU];
220static struct tdq_group tdq_groups[MAXCPU];
221static int bal_tick;
222static int gbal_tick;
223static int balance_groups;
224
225#define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
226#define	TDQ_CPU(x)	(&tdq_cpu[(x)])
227#define	TDQ_ID(x)	((x) - tdq_cpu)
228#define	TDQ_GROUP(x)	(&tdq_groups[(x)])
229#else	/* !SMP */
230static struct tdq	tdq_cpu;
231
232#define	TDQ_SELF()	(&tdq_cpu)
233#define	TDQ_CPU(x)	(&tdq_cpu)
234#endif
235
236static struct td_sched *sched_choose(void);	/* XXX Should be thread * */
237static void sched_priority(struct thread *);
238static void sched_thread_priority(struct thread *, u_char);
239static int sched_interact_score(struct thread *);
240static void sched_interact_update(struct thread *);
241static void sched_interact_fork(struct thread *);
242static void sched_pctcpu_update(struct td_sched *);
243
244/* Operations on per processor queues */
245static struct td_sched * tdq_choose(struct tdq *);
246static void tdq_setup(struct tdq *);
247static void tdq_load_add(struct tdq *, struct td_sched *);
248static void tdq_load_rem(struct tdq *, struct td_sched *);
249static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
250static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
251void tdq_print(int cpu);
252static void runq_print(struct runq *rq);
253#ifdef SMP
254static int tdq_transfer(struct tdq *, struct td_sched *, int);
255static struct td_sched *runq_steal(struct runq *);
256static void sched_balance(void);
257static void sched_balance_groups(void);
258static void sched_balance_group(struct tdq_group *);
259static void sched_balance_pair(struct tdq *, struct tdq *);
260static void sched_smp_tick(void);
261static void tdq_move(struct tdq *, int);
262static int tdq_idled(struct tdq *);
263static void tdq_notify(struct td_sched *, int);
264static void tdq_assign(struct tdq *);
265static struct td_sched *tdq_steal(struct tdq *, int);
266#define	THREAD_CAN_MIGRATE(td)						\
267    ((td)->td_pinned == 0 && (td)->td_pri_class != PRI_ITHD)
268#endif
269
270static void sched_setup(void *dummy);
271SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
272
273static void sched_initticks(void *dummy);
274SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)
275
276static void
277runq_print(struct runq *rq)
278{
279	struct rqhead *rqh;
280	struct td_sched *ts;
281	int pri;
282	int j;
283	int i;
284
285	for (i = 0; i < RQB_LEN; i++) {
286		printf("\t\trunq bits %d 0x%zx\n",
287		    i, rq->rq_status.rqb_bits[i]);
288		for (j = 0; j < RQB_BPW; j++)
289			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
290				pri = j + (i << RQB_L2BPW);
291				rqh = &rq->rq_queues[pri];
292				TAILQ_FOREACH(ts, rqh, ts_procq) {
293					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
294					    ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
295				}
296			}
297	}
298}
299
300void
301tdq_print(int cpu)
302{
303	struct tdq *tdq;
304
305	tdq = TDQ_CPU(cpu);
306
307	printf("tdq:\n");
308	printf("\tload:           %d\n", tdq->tdq_load);
309	printf("\tload TIMESHARE: %d\n", tdq->tdq_load_timeshare);
310	printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
311	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
312	printf("\trealtime runq:\n");
313	runq_print(&tdq->tdq_realtime);
314	printf("\ttimeshare runq:\n");
315	runq_print(&tdq->tdq_timeshare);
316	printf("\tidle runq:\n");
317	runq_print(&tdq->tdq_idle);
318#ifdef SMP
319	printf("\tload transferable: %d\n", tdq->tdq_transferable);
320#endif
321}
322
323static __inline void
324tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
325{
326#ifdef SMP
327	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
328		tdq->tdq_transferable++;
329		tdq->tdq_group->tdg_transferable++;
330		ts->ts_flags |= TSF_XFERABLE;
331	}
332#endif
333	if (ts->ts_runq == &tdq->tdq_timeshare) {
334		int pri;
335
336		pri = ts->ts_thread->td_priority;
337		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
338			("Invalid priority %d on timeshare runq", pri));
339		/*
340		 * This queue contains only priorities between MIN and MAX
341		 * timeshare.  Use the whole queue to represent these values.
342		 */
343#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
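		/*
		 * For example, with RQ_NQS = 64 queues covering a 64 priority
		 * timeshare range (illustrative values), TS_RQ_PPQ is 1 and
		 * every priority maps to its own bucket; the tdq_idx offset
		 * applied below then rotates that mapping once per tick so the
		 * timeshare queue behaves like a circular calendar of runqueues.
		 */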
344		if ((flags & SRQ_BORROWING) == 0) {
345			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
346			pri = (pri + tdq->tdq_idx) % RQ_NQS;
347			/*
348			 * This effectively shortens the queue by one so we
349			 * can have a one slot difference between idx and
350			 * ridx while we wait for threads to drain.
351			 */
352			if (tdq->tdq_ridx != tdq->tdq_idx &&
353			    pri == tdq->tdq_ridx)
354				pri = (pri - 1) % RQ_NQS;
355		} else
356			pri = tdq->tdq_ridx;
357		runq_add_pri(ts->ts_runq, ts, pri, flags);
358	} else
359		runq_add(ts->ts_runq, ts, flags);
360}
361
362static __inline void
363tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
364{
365#ifdef SMP
366	if (ts->ts_flags & TSF_XFERABLE) {
367		tdq->tdq_transferable--;
368		tdq->tdq_group->tdg_transferable--;
369		ts->ts_flags &= ~TSF_XFERABLE;
370	}
371#endif
372	if (ts->ts_runq == &tdq->tdq_timeshare) {
373		if (tdq->tdq_idx != tdq->tdq_ridx)
374			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
375		else
376			runq_remove_idx(ts->ts_runq, ts, NULL);
377		/*
378		 * For timeshare threads we update the priority here so
379		 * the priority reflects the time we've been sleeping.
380		 */
381		ts->ts_ltick = ticks;
382		sched_pctcpu_update(ts);
383		sched_priority(ts->ts_thread);
384	} else
385		runq_remove(ts->ts_runq, ts);
386}
387
388static void
389tdq_load_add(struct tdq *tdq, struct td_sched *ts)
390{
391	int class;
392	mtx_assert(&sched_lock, MA_OWNED);
393	class = PRI_BASE(ts->ts_thread->td_pri_class);
394	if (class == PRI_TIMESHARE)
395		tdq->tdq_load_timeshare++;
396	tdq->tdq_load++;
397	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
398	if (class != PRI_ITHD && (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
399#ifdef SMP
400		tdq->tdq_group->tdg_load++;
401#else
402		tdq->tdq_sysload++;
403#endif
404}
405
406static void
407tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
408{
409	int class;
410	mtx_assert(&sched_lock, MA_OWNED);
411	class = PRI_BASE(ts->ts_thread->td_pri_class);
412	if (class == PRI_TIMESHARE)
413		tdq->tdq_load_timeshare--;
414	if (class != PRI_ITHD  && (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
415#ifdef SMP
416		tdq->tdq_group->tdg_load--;
417#else
418		tdq->tdq_sysload--;
419#endif
420	tdq->tdq_load--;
421	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
422	ts->ts_runq = NULL;
423}
424
425#ifdef SMP
426static void
427sched_smp_tick(void)
428{
429	struct tdq *tdq;
430
431	tdq = TDQ_SELF();
432	if (sched_rebalance) {
433		if (ticks >= bal_tick)
434			sched_balance();
435		if (ticks >= gbal_tick && balance_groups)
436			sched_balance_groups();
437	}
438	/*
439	 * We could have been assigned a non real-time thread without an
440	 * IPI.
441	 */
442	if (tdq->tdq_assigned)
443		tdq_assign(tdq);	/* Potentially sets NEEDRESCHED */
444}
445
446/*
447 * sched_balance is a simple CPU load balancing algorithm.  It operates by
448 * finding the least loaded and most loaded cpu and equalizing their load
449 * by migrating some processes.
450 *
451 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
452 * installations will only have 2 cpus.  Secondly, load balancing too much at
453 * once can have an unpleasant effect on the system.  The scheduler rarely has
454 * enough information to make perfect decisions.  So this algorithm chooses
455 * simplicity and more gradual effects on load in larger systems.
456 *
457 * It could be improved by considering the priorities and slices assigned to
458 * each task prior to balancing them.  There are many pathological cases with
459 * any approach and so the semi random algorithm below may work as well as any.
460 *
461 */
462static void
463sched_balance(void)
464{
465	struct tdq_group *high;
466	struct tdq_group *low;
467	struct tdq_group *tdg;
468	int cnt;
469	int i;
470
471	bal_tick = ticks + (random() % (hz * 2));
472	if (smp_started == 0)
473		return;
474	low = high = NULL;
475	i = random() % (tdg_maxid + 1);
476	for (cnt = 0; cnt <= tdg_maxid; cnt++) {
477		tdg = TDQ_GROUP(i);
478		/*
479		 * Find the CPU with the highest load that has some
480		 * threads to transfer.
481		 */
482		if ((high == NULL || tdg->tdg_load > high->tdg_load)
483		    && tdg->tdg_transferable)
484			high = tdg;
485		if (low == NULL || tdg->tdg_load < low->tdg_load)
486			low = tdg;
487		if (++i > tdg_maxid)
488			i = 0;
489	}
490	if (low != NULL && high != NULL && high != low)
491		sched_balance_pair(LIST_FIRST(&high->tdg_members),
492		    LIST_FIRST(&low->tdg_members));
493}
494
495static void
496sched_balance_groups(void)
497{
498	int i;
499
500	gbal_tick = ticks + (random() % (hz * 2));
501	mtx_assert(&sched_lock, MA_OWNED);
502	if (smp_started)
503		for (i = 0; i <= tdg_maxid; i++)
504			sched_balance_group(TDQ_GROUP(i));
505}
506
507static void
508sched_balance_group(struct tdq_group *tdg)
509{
510	struct tdq *tdq;
511	struct tdq *high;
512	struct tdq *low;
513	int load;
514
515	if (tdg->tdg_transferable == 0)
516		return;
517	low = NULL;
518	high = NULL;
519	LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
520		load = tdq->tdq_load;
521		if (high == NULL || load > high->tdq_load)
522			high = tdq;
523		if (low == NULL || load < low->tdq_load)
524			low = tdq;
525	}
526	if (high != NULL && low != NULL && high != low)
527		sched_balance_pair(high, low);
528}
529
530static void
531sched_balance_pair(struct tdq *high, struct tdq *low)
532{
533	int transferable;
534	int high_load;
535	int low_load;
536	int move;
537	int diff;
538	int i;
539
540	/*
541	 * If we're transferring within a group we have to use this specific
542	 * tdq's transferable count, otherwise we can steal from other members
543	 * of the group.
544	 */
545	if (high->tdq_group == low->tdq_group) {
546		transferable = high->tdq_transferable;
547		high_load = high->tdq_load;
548		low_load = low->tdq_load;
549	} else {
550		transferable = high->tdq_group->tdg_transferable;
551		high_load = high->tdq_group->tdg_load;
552		low_load = low->tdq_group->tdg_load;
553	}
554	if (transferable == 0)
555		return;
556	/*
557	 * Determine what the imbalance is and then adjust that to how many
558	 * threads we actually have to give up (transferable).
559	 */
560	diff = high_load - low_load;
561	move = diff / 2;
562	if (diff & 0x1)
563		move++;
564	move = min(move, transferable);
565	for (i = 0; i < move; i++)
566		tdq_move(high, TDQ_ID(low));
567	return;
568}
569
570static void
571tdq_move(struct tdq *from, int cpu)
572{
573	struct tdq *tdq;
574	struct tdq *to;
575	struct td_sched *ts;
576
577	tdq = from;
578	to = TDQ_CPU(cpu);
579	ts = tdq_steal(tdq, 1);
580	if (ts == NULL) {
581		struct tdq_group *tdg;
582
583		tdg = tdq->tdq_group;
584		LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
585			if (tdq == from || tdq->tdq_transferable == 0)
586				continue;
587			ts = tdq_steal(tdq, 1);
588			break;
589		}
590		if (ts == NULL)
591			panic("tdq_move: No threads available with a "
592			    "transferable count of %d\n",
593			    tdg->tdg_transferable);
594	}
595	if (tdq == to)
596		return;
597	ts->ts_state = TSS_THREAD;
598	tdq_runq_rem(tdq, ts);
599	tdq_load_rem(tdq, ts);
600	tdq_notify(ts, cpu);
601}
602
603static int
604tdq_idled(struct tdq *tdq)
605{
606	struct tdq_group *tdg;
607	struct tdq *steal;
608	struct td_sched *ts;
609
610	tdg = tdq->tdq_group;
611	/*
612	 * If we're in a cpu group, try to steal threads from another cpu in
613	 * the group before idling.
614	 */
615	if (tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
616		LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
617			if (steal == tdq || steal->tdq_transferable == 0)
618				continue;
619			ts = tdq_steal(steal, 0);
620			if (ts == NULL)
621				continue;
622			ts->ts_state = TSS_THREAD;
623			tdq_runq_rem(steal, ts);
624			tdq_load_rem(steal, ts);
625			ts->ts_cpu = PCPU_GET(cpuid);
626			ts->ts_flags |= TSF_INTERNAL | TSF_HOLD;
627			sched_add(ts->ts_thread, SRQ_YIELDING);
628			return (0);
629		}
630	}
631	/*
632	 * We only set the idled bit when all of the cpus in the group are
633	 * idle.  Otherwise we could get into a situation where a thread bounces
634	 * back and forth between two idle cores on separate physical CPUs.
635	 */
636	tdg->tdg_idlemask |= PCPU_GET(cpumask);
637	if (tdg->tdg_idlemask != tdg->tdg_cpumask)
638		return (1);
639	atomic_set_int(&tdq_idle, tdg->tdg_mask);
640	return (1);
641}
642
643static void
644tdq_assign(struct tdq *tdq)
645{
646	struct td_sched *nts;
647	struct td_sched *ts;
648
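	/*
	 * Atomically detach the whole list of threads that other cpus have
	 * queued on tdq_assigned, then fold each one back into our run
	 * queues below.
	 */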
649	do {
650		*(volatile struct td_sched **)&ts = tdq->tdq_assigned;
651	} while(!atomic_cmpset_ptr((volatile uintptr_t *)&tdq->tdq_assigned,
652		(uintptr_t)ts, (uintptr_t)NULL));
653	for (; ts != NULL; ts = nts) {
654		nts = ts->ts_assign;
655		tdq->tdq_group->tdg_load--;
656		tdq->tdq_load--;
657		ts->ts_flags &= ~TSF_ASSIGNED;
658		if (ts->ts_flags & TSF_REMOVED) {
659			ts->ts_flags &= ~TSF_REMOVED;
660			continue;
661		}
662		ts->ts_flags |= TSF_INTERNAL | TSF_HOLD;
663		sched_add(ts->ts_thread, SRQ_YIELDING);
664	}
665}
666
667static void
668tdq_notify(struct td_sched *ts, int cpu)
669{
670	struct tdq *tdq;
671	struct thread *td;
672	struct pcpu *pcpu;
673	int class;
674	int prio;
675
676	tdq = TDQ_CPU(cpu);
677	class = PRI_BASE(ts->ts_thread->td_pri_class);
678	if ((class != PRI_IDLE && class != PRI_ITHD)
679	    && (tdq_idle & tdq->tdq_group->tdg_mask))
680		atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
681	tdq->tdq_group->tdg_load++;
682	tdq->tdq_load++;
683	ts->ts_cpu = cpu;
684	ts->ts_flags |= TSF_ASSIGNED;
685	prio = ts->ts_thread->td_priority;
686
687	/*
688	 * Place a thread on another cpu's queue and force a resched.
689	 */
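	/*
	 * This is a lock-free push onto the head of the remote cpu's
	 * tdq_assigned list; the remote cpu drains it via tdq_assign() from
	 * sched_smp_tick(), sched_runnable() or sched_choose().
	 */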
690	do {
691		*(volatile struct td_sched **)&ts->ts_assign = tdq->tdq_assigned;
692	} while(!atomic_cmpset_ptr((volatile uintptr_t *)&tdq->tdq_assigned,
693		(uintptr_t)ts->ts_assign, (uintptr_t)ts));
694	/* Only ipi for realtime/ithd priorities */
695	if (ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE)
696		return;
697	/*
698	 * Without sched_lock we could lose a race where we set NEEDRESCHED
699	 * on a thread that is switched out before the IPI is delivered.  This
700	 * would lead us to miss the resched.  This will be a problem once
701	 * sched_lock is pushed down.
702	 */
703	pcpu = pcpu_find(cpu);
704	td = pcpu->pc_curthread;
705	if (ts->ts_thread->td_priority < td->td_priority) {
706		td->td_flags |= TDF_NEEDRESCHED;
707		ipi_selected(1 << cpu, IPI_AST);
708	}
709}
710
711static struct td_sched *
712runq_steal(struct runq *rq)
713{
714	struct rqhead *rqh;
715	struct rqbits *rqb;
716	struct td_sched *ts;
717	int word;
718	int bit;
719
720	mtx_assert(&sched_lock, MA_OWNED);
721	rqb = &rq->rq_status;
722	for (word = 0; word < RQB_LEN; word++) {
723		if (rqb->rqb_bits[word] == 0)
724			continue;
725		for (bit = 0; bit < RQB_BPW; bit++) {
726			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
727				continue;
728			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
729			TAILQ_FOREACH(ts, rqh, ts_procq) {
730				if (THREAD_CAN_MIGRATE(ts->ts_thread))
731					return (ts);
732			}
733		}
734	}
735	return (NULL);
736}
737
738static struct td_sched *
739tdq_steal(struct tdq *tdq, int stealidle)
740{
741	struct td_sched *ts;
742
743	/*
744	 * Steal from the realtime queue first, then timeshare, so the highest
745	 * priority threads migrate first.
746	 * XXX Need to effect steal order for timeshare threads.
747	 */
748	if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
749		return (ts);
750	if ((ts = runq_steal(&tdq->tdq_timeshare)) != NULL)
751		return (ts);
752	if (stealidle)
753		return (runq_steal(&tdq->tdq_idle));
754	return (NULL);
755}
756
757int
758tdq_transfer(struct tdq *tdq, struct td_sched *ts, int class)
759{
760	struct tdq_group *ntdg;
761	struct tdq_group *tdg;
762	struct tdq *old;
763	int cpu;
764	int idx;
765
766	if (smp_started == 0)
767		return (0);
768	cpu = 0;
769	/*
770	 * If our load exceeds a certain threshold we should attempt to
771	 * reassign this thread.  The first candidate is the cpu that
772	 * originally ran the thread.  If it is idle, assign it there,
773	 * otherwise, pick an idle cpu.
774	 *
775	 * The threshold at which we start to reassign has a large impact
776	 * on the overall performance of the system.  Tuned too high and
777	 * some CPUs may idle.  Too low and there will be excess migration
778	 * and context switches.
779	 */
780	old = TDQ_CPU(ts->ts_cpu);
781	ntdg = old->tdq_group;
782	tdg = tdq->tdq_group;
783	if (tdq_idle) {
784		if (tdq_idle & ntdg->tdg_mask) {
785			cpu = ffs(ntdg->tdg_idlemask);
786			if (cpu) {
787				CTR2(KTR_SCHED,
788				    "tdq_transfer: %p found old cpu %X "
789				    "in idlemask.", ts, cpu);
790				goto migrate;
791			}
792		}
793		/*
794		 * Multiple cpus could find this bit simultaneously
795		 * but the race shouldn't be terrible.
796		 */
797		cpu = ffs(tdq_idle);
798		if (cpu) {
799			CTR2(KTR_SCHED, "tdq_transfer: %p found %X "
800			    "in idlemask.", ts, cpu);
801			goto migrate;
802		}
803	}
804	idx = 0;
805#if 0
806	if (old->tdq_load < tdq->tdq_load) {
807		cpu = ts->ts_cpu + 1;
808		CTR2(KTR_SCHED, "tdq_transfer: %p old cpu %X "
809		    "load less than ours.", ts, cpu);
810		goto migrate;
811	}
812	/*
813	 * No new CPU was found, look for one with less load.
814	 */
815	for (idx = 0; idx <= tdg_maxid; idx++) {
816		ntdg = TDQ_GROUP(idx);
817		if (ntdg->tdg_load /*+ (ntdg->tdg_cpus  * 2)*/ < tdg->tdg_load) {
818			cpu = ffs(ntdg->tdg_cpumask);
819			CTR2(KTR_SCHED, "tdq_transfer: %p cpu %X load less "
820			    "than ours.", ts, cpu);
821			goto migrate;
822		}
823	}
824#endif
825	/*
826	 * If another cpu in this group has idled, assign a thread over
827	 * to them after checking to see if there are idled groups.
828	 */
829	if (tdg->tdg_idlemask) {
830		cpu = ffs(tdg->tdg_idlemask);
831		if (cpu) {
832			CTR2(KTR_SCHED, "tdq_transfer: %p cpu %X idle in "
833			    "group.", ts, cpu);
834			goto migrate;
835		}
836	}
837	return (0);
838migrate:
839	/*
840	 * Now that we've found an idle CPU, migrate the thread.
841	 */
842	cpu--;
843	ts->ts_runq = NULL;
844	tdq_notify(ts, cpu);
845
846	return (1);
847}
848
849#endif	/* SMP */
850
851/*
852 * Pick the highest priority task we have and return it.
853 */
854
855static struct td_sched *
856tdq_choose(struct tdq *tdq)
857{
858	struct td_sched *ts;
859
860	mtx_assert(&sched_lock, MA_OWNED);
861
862	ts = runq_choose(&tdq->tdq_realtime);
863	if (ts != NULL) {
864		KASSERT(ts->ts_thread->td_priority <= PRI_MAX_REALTIME,
865		    ("tdq_choose: Invalid priority on realtime queue %d",
866		    ts->ts_thread->td_priority));
867		return (ts);
868	}
869	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
870	if (ts != NULL) {
871		KASSERT(ts->ts_thread->td_priority <= PRI_MAX_TIMESHARE &&
872		    ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
873		    ("tdq_choose: Invalid priority on timeshare queue %d",
874		    ts->ts_thread->td_priority));
875		return (ts);
876	}
877
878	ts = runq_choose(&tdq->tdq_idle);
879	if (ts != NULL) {
880		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
881		    ("tdq_choose: Invalid priority on idle queue %d",
882		    ts->ts_thread->td_priority));
883		return (ts);
884	}
885
886	return (NULL);
887}
888
889static void
890tdq_setup(struct tdq *tdq)
891{
892	runq_init(&tdq->tdq_realtime);
893	runq_init(&tdq->tdq_timeshare);
894	runq_init(&tdq->tdq_idle);
895	tdq->tdq_load = 0;
896	tdq->tdq_load_timeshare = 0;
897}
898
899static void
900sched_setup(void *dummy)
901{
902#ifdef SMP
903	int i;
904#endif
905
906	/*
907	 * To avoid divide-by-zero, we set realstathz to a dummy value
908	 * in case sched_clock() is called before sched_initticks().
909	 */
910	realstathz = hz;
911	sched_slice = (realstathz/7);	/* 140ms */
912	tickincr = 1 << SCHED_TICK_SHIFT;
913
914#ifdef SMP
915	balance_groups = 0;
916	/*
917	 * Initialize the tdqs.
918	 */
919	for (i = 0; i < MAXCPU; i++) {
920		struct tdq *tdq;
921
922		tdq = &tdq_cpu[i];
923		tdq->tdq_assigned = NULL;
924		tdq_setup(&tdq_cpu[i]);
925	}
926	if (smp_topology == NULL) {
927		struct tdq_group *tdg;
928		struct tdq *tdq;
929		int cpus;
930
931		for (cpus = 0, i = 0; i < MAXCPU; i++) {
932			if (CPU_ABSENT(i))
933				continue;
934			tdq = &tdq_cpu[i];
935			tdg = &tdq_groups[cpus];
936			/*
937			 * Setup a tdq group with one member.
938			 */
939			tdq->tdq_transferable = 0;
940			tdq->tdq_group = tdg;
941			tdg->tdg_cpus = 1;
942			tdg->tdg_idlemask = 0;
943			tdg->tdg_cpumask = tdg->tdg_mask = 1 << i;
944			tdg->tdg_load = 0;
945			tdg->tdg_transferable = 0;
946			LIST_INIT(&tdg->tdg_members);
947			LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
948			cpus++;
949		}
950		tdg_maxid = cpus - 1;
951	} else {
952		struct tdq_group *tdg;
953		struct cpu_group *cg;
954		int j;
955
956		for (i = 0; i < smp_topology->ct_count; i++) {
957			cg = &smp_topology->ct_group[i];
958			tdg = &tdq_groups[i];
959			/*
960			 * Initialize the group.
961			 */
962			tdg->tdg_idlemask = 0;
963			tdg->tdg_load = 0;
964			tdg->tdg_transferable = 0;
965			tdg->tdg_cpus = cg->cg_count;
966			tdg->tdg_cpumask = cg->cg_mask;
967			LIST_INIT(&tdg->tdg_members);
968			/*
969			 * Find all of the group members and add them.
970			 */
971			for (j = 0; j < MAXCPU; j++) {
972				if ((cg->cg_mask & (1 << j)) != 0) {
973					if (tdg->tdg_mask == 0)
974						tdg->tdg_mask = 1 << j;
975					tdq_cpu[j].tdq_transferable = 0;
976					tdq_cpu[j].tdq_group = tdg;
977					LIST_INSERT_HEAD(&tdg->tdg_members,
978					    &tdq_cpu[j], tdq_siblings);
979				}
980			}
981			if (tdg->tdg_cpus > 1)
982				balance_groups = 1;
983		}
984		tdg_maxid = smp_topology->ct_count - 1;
985	}
986	/*
987	 * Stagger the group and global load balancer so they do not
988	 * interfere with each other.
989	 */
990	bal_tick = ticks + hz;
991	if (balance_groups)
992		gbal_tick = ticks + (hz / 2);
993#else
994	tdq_setup(TDQ_SELF());
995#endif
996	mtx_lock_spin(&sched_lock);
997	tdq_load_add(TDQ_SELF(), &td_sched0);
998	mtx_unlock_spin(&sched_lock);
999}
1000
1001/* ARGSUSED */
1002static void
1003sched_initticks(void *dummy)
1004{
1005	mtx_lock_spin(&sched_lock);
1006	realstathz = stathz ? stathz : hz;
1007	sched_slice = (realstathz/7);	/* ~140ms */
1008
1009	/*
1010	 * tickincr is shifted out by 10 to avoid rounding errors due to
1011	 * hz not being evenly divisible by stathz on all platforms.
1012	 */
1013	tickincr = (hz << SCHED_TICK_SHIFT) / realstathz;
1014	/*
1015	 * This does not work for values of stathz that are larger than
1016	 * hz << SCHED_TICK_SHIFT.  In practice this does not happen.
1017	 */
1018	if (tickincr == 0)
1019		tickincr = 1;
1020	mtx_unlock_spin(&sched_lock);
1021}
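/*
 * A concrete instance of the fixed point math above, assuming hz = 1000
 * and stathz = 128: tickincr = (1000 << 10) / 128 = 8000, so every stathz
 * tick charges the running thread with 8000 / 1024, or roughly 7.8, hz
 * ticks worth of cpu time in the shifted domain.
 */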
1022
1023
1024/*
1025 * Scale the scheduling priority according to the "interactivity" of this
1026 * process.
1027 */
1028static void
1029sched_priority(struct thread *td)
1030{
1031	int score;
1032	int pri;
1033
1034	if (td->td_pri_class != PRI_TIMESHARE)
1035		return;
1036	/*
1037	 * If the score is interactive we place the thread in the realtime
1038	 * queue with a priority that is less than kernel and interrupt
1039	 * priorities.  These threads are not subject to nice restrictions.
1040	 *
1041	 * Scores greater than this are placed on the normal timeshare queue
1042	 * where the priority is partially decided by the most recent cpu
1043	 * utilization and the rest is decided by nice value.
1044	 */
1045	score = sched_interact_score(td);
1046	if (score < sched_interact) {
1047		pri = PRI_MIN_REALTIME;
1048		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
1049		    * score;
1050		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
1051		    ("sched_priority: invalid interactive priority %d", pri));
1052	} else {
1053		pri = SCHED_PRI_MIN;
1054		if (td->td_sched->ts_ticks)
1055			pri += SCHED_PRI_TICKS(td->td_sched);
1056		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
1057		if (!(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE)) {
1058			static int once = 1;
1059			if (once) {
1060				printf("sched_priority: invalid priority %d: ",
1061				    pri);
1062				printf("nice %d, ticks %d ftick %d ltick %d tick pri %d\n",
1063				    td->td_proc->p_nice,
1064				    td->td_sched->ts_ticks,
1065				    td->td_sched->ts_ftick,
1066				    td->td_sched->ts_ltick,
1067				    SCHED_PRI_TICKS(td->td_sched));
1068				once = 0;
1069			}
1070			pri = min(max(pri, PRI_MIN_TIMESHARE),
1071			    PRI_MAX_TIMESHARE);
1072		}
1073	}
1074	sched_user_prio(td, pri);
1075
1076	return;
1077}
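/*
 * Rough illustration of the mapping above: with the default threshold of
 * 30, an interactivity score of 0 maps to PRI_MIN_REALTIME itself, a score
 * of 15 lands about halfway through the [PRI_MIN_REALTIME, PRI_MAX_REALTIME]
 * band, and a score of 30 or more falls through to the SCHED_PRI_MIN based
 * timeshare calculation plus the nice adjustment.
 */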
1078
1079/*
1080 * This routine enforces a maximum limit on the amount of scheduling history
1081 * kept.  It is called after either the slptime or runtime is adjusted.
1082 */
1083static void
1084sched_interact_update(struct thread *td)
1085{
1086	struct td_sched *ts;
1087	int sum;
1088
1089	ts = td->td_sched;
1090	sum = ts->skg_runtime + ts->skg_slptime;
1091	if (sum < SCHED_SLP_RUN_MAX)
1092		return;
1093	/*
1094	 * This only happens from two places:
1095	 * 1) We have added an unusual amount of run time from fork_exit.
1096	 * 2) We have added an unusual amount of sleep time from sched_sleep().
1097	 */
1098	if (sum > SCHED_SLP_RUN_MAX * 2) {
1099		if (ts->skg_runtime > ts->skg_slptime) {
1100			ts->skg_runtime = SCHED_SLP_RUN_MAX;
1101			ts->skg_slptime = 1;
1102		} else {
1103			ts->skg_slptime = SCHED_SLP_RUN_MAX;
1104			ts->skg_runtime = 1;
1105		}
1106		return;
1107	}
1108	/*
1109	 * If we have exceeded by more than 1/5th then the algorithm below
1110	 * will not bring us back into range.  Dividing by two here forces
1111	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1112	 */
1113	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1114		ts->skg_runtime /= 2;
1115		ts->skg_slptime /= 2;
1116		return;
1117	}
1118	ts->skg_runtime = (ts->skg_runtime / 5) * 4;
1119	ts->skg_slptime = (ts->skg_slptime / 5) * 4;
1120}
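/*
 * Example of the clamping above, assuming hz = 1000 so that
 * SCHED_SLP_RUN_MAX covers five seconds of history: a thread whose
 * runtime + slptime sum has grown past six seconds is cut in half, while
 * one that has only just crossed the five second limit is scaled back by
 * 4/5; either way the recent sleep/run ratio is roughly preserved.
 */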
1121
1122static void
1123sched_interact_fork(struct thread *td)
1124{
1125	int ratio;
1126	int sum;
1127
1128	sum = td->td_sched->skg_runtime + td->td_sched->skg_slptime;
1129	if (sum > SCHED_SLP_RUN_FORK) {
1130		ratio = sum / SCHED_SLP_RUN_FORK;
1131		td->td_sched->skg_runtime /= ratio;
1132		td->td_sched->skg_slptime /= ratio;
1133	}
1134}
1135
1136static int
1137sched_interact_score(struct thread *td)
1138{
1139	int div;
1140
1141	if (td->td_sched->skg_runtime > td->td_sched->skg_slptime) {
1142		div = max(1, td->td_sched->skg_runtime / SCHED_INTERACT_HALF);
1143		return (SCHED_INTERACT_HALF +
1144		    (SCHED_INTERACT_HALF - (td->td_sched->skg_slptime / div)));
1145	} if (td->td_sched->skg_slptime > td->td_sched->skg_runtime) {
1146		div = max(1, td->td_sched->skg_slptime / SCHED_INTERACT_HALF);
1147		return (td->td_sched->skg_runtime / div);
1148	}
1149
1150	/*
1151	 * This can happen if slptime and runtime are 0.
1152	 */
1153	return (0);
1154
1155}
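/*
 * Two illustrative scores (the inputs are made up, the arithmetic is the
 * function above): skg_slptime = 3000 and skg_runtime = 1000 give
 * div = max(1, 3000 / 50) = 60 and a score of 1000 / 60 = 16, below the
 * default threshold of 30, so the thread is considered interactive.
 * Swapping the two values gives 50 + (50 - 16) = 84, firmly batch.
 */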
1156
1157/*
1158 * Called from proc0_init() to bootstrap the scheduler.
1159 */
1160void
1161schedinit(void)
1162{
1163
1164	/*
1165	 * Set up the scheduler specific parts of proc0.
1166	 */
1167	proc0.p_sched = NULL; /* XXX */
1168	thread0.td_sched = &td_sched0;
1169	td_sched0.ts_ltick = ticks;
1170	td_sched0.ts_ftick = ticks;
1171	td_sched0.ts_thread = &thread0;
1172	td_sched0.ts_state = TSS_THREAD;
1173}
1174
1175/*
1176 * This is only somewhat accurate since, given many processes of the same
1177 * priority, they will switch when their slices run out, which will be
1178 * at most sched_slice stathz ticks.
1179 */
1180int
1181sched_rr_interval(void)
1182{
1183
1184	/* Convert sched_slice to hz */
1185	return (hz/(realstathz/sched_slice));
1186}
1187
1188static void
1189sched_pctcpu_update(struct td_sched *ts)
1190{
1191
1192	if (ts->ts_ticks == 0)
1193		return;
1194	if (ticks - (hz / 10) < ts->ts_ltick &&
1195	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
1196		return;
1197	/*
1198	 * Adjust counters and watermark for pctcpu calc.
1199	 */
1200	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
1201		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
1202			    SCHED_TICK_TARG;
1203	else
1204		ts->ts_ticks = 0;
1205	ts->ts_ltick = ticks;
1206	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
1207}
1208
1209static void
1210sched_thread_priority(struct thread *td, u_char prio)
1211{
1212	struct td_sched *ts;
1213
1214	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
1215	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
1216	    curthread->td_proc->p_comm);
1217	ts = td->td_sched;
1218	mtx_assert(&sched_lock, MA_OWNED);
1219	if (td->td_priority == prio)
1220		return;
1221
1222	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1223		/*
1224		 * If the priority has been elevated due to priority
1225		 * propagation, we may have to move ourselves to a new
1226		 * queue.  This could be optimized to not re-add in some
1227		 * cases.
1228		 *
1229		 * Hold this td_sched on this cpu so that sched_prio() doesn't
1230		 * cause excessive migration.  We only want migration to
1231		 * happen as the result of a wakeup.
1232		 */
1233		ts->ts_flags |= TSF_HOLD;
1234		sched_rem(td);
1235		td->td_priority = prio;
1236		sched_add(td, SRQ_BORROWING);
1237		ts->ts_flags &= ~TSF_HOLD;
1238	} else
1239		td->td_priority = prio;
1240}
1241
1242/*
1243 * Update a thread's priority when it is lent another thread's
1244 * priority.
1245 */
1246void
1247sched_lend_prio(struct thread *td, u_char prio)
1248{
1249
1250	td->td_flags |= TDF_BORROWING;
1251	sched_thread_priority(td, prio);
1252}
1253
1254/*
1255 * Restore a thread's priority when priority propagation is
1256 * over.  The prio argument is the minimum priority the thread
1257 * needs to have to satisfy other possible priority lending
1258 * requests.  If the thread's regular priority is less
1259 * important than prio, the thread will keep a priority boost
1260 * of prio.
1261 */
1262void
1263sched_unlend_prio(struct thread *td, u_char prio)
1264{
1265	u_char base_pri;
1266
1267	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1268	    td->td_base_pri <= PRI_MAX_TIMESHARE)
1269		base_pri = td->td_user_pri;
1270	else
1271		base_pri = td->td_base_pri;
1272	if (prio >= base_pri) {
1273		td->td_flags &= ~TDF_BORROWING;
1274		sched_thread_priority(td, base_pri);
1275	} else
1276		sched_lend_prio(td, prio);
1277}
1278
1279void
1280sched_prio(struct thread *td, u_char prio)
1281{
1282	u_char oldprio;
1283
1284	/* First, update the base priority. */
1285	td->td_base_pri = prio;
1286
1287	/*
1288	 * If the thread is borrowing another thread's priority, don't
1289	 * ever lower the priority.
1290	 */
1291	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1292		return;
1293
1294	/* Change the real priority. */
1295	oldprio = td->td_priority;
1296	sched_thread_priority(td, prio);
1297
1298	/*
1299	 * If the thread is on a turnstile, then let the turnstile update
1300	 * its state.
1301	 */
1302	if (TD_ON_LOCK(td) && oldprio != prio)
1303		turnstile_adjust(td, oldprio);
1304}
1305
1306void
1307sched_user_prio(struct thread *td, u_char prio)
1308{
1309	u_char oldprio;
1310
1311	td->td_base_user_pri = prio;
1312	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
1313                return;
1314	oldprio = td->td_user_pri;
1315	td->td_user_pri = prio;
1316
1317	if (TD_ON_UPILOCK(td) && oldprio != prio)
1318		umtx_pi_adjust(td, oldprio);
1319}
1320
1321void
1322sched_lend_user_prio(struct thread *td, u_char prio)
1323{
1324	u_char oldprio;
1325
1326	td->td_flags |= TDF_UBORROWING;
1327
1328	oldprio = td->td_user_pri;
1329	td->td_user_pri = prio;
1330
1331	if (TD_ON_UPILOCK(td) && oldprio != prio)
1332		umtx_pi_adjust(td, oldprio);
1333}
1334
1335void
1336sched_unlend_user_prio(struct thread *td, u_char prio)
1337{
1338	u_char base_pri;
1339
1340	base_pri = td->td_base_user_pri;
1341	if (prio >= base_pri) {
1342		td->td_flags &= ~TDF_UBORROWING;
1343		sched_user_prio(td, base_pri);
1344	} else
1345		sched_lend_user_prio(td, prio);
1346}
1347
1348void
1349sched_switch(struct thread *td, struct thread *newtd, int flags)
1350{
1351	struct tdq *tdq;
1352	struct td_sched *ts;
1353
1354	mtx_assert(&sched_lock, MA_OWNED);
1355
1356	tdq = TDQ_SELF();
1357	ts = td->td_sched;
1358	td->td_lastcpu = td->td_oncpu;
1359	td->td_oncpu = NOCPU;
1360	td->td_flags &= ~TDF_NEEDRESCHED;
1361	td->td_owepreempt = 0;
1362	/*
1363	 * If the thread has been assigned it may be in the process of switching
1364	 * to the new cpu.  This is the case in sched_bind().
1365	 */
1366	if (td == PCPU_GET(idlethread)) {
1367		TD_SET_CAN_RUN(td);
1368	} else if ((ts->ts_flags & TSF_ASSIGNED) == 0) {
1369		/* We are ending our run so make our slot available again */
1370		tdq_load_rem(tdq, ts);
1371		if (TD_IS_RUNNING(td)) {
1372			/*
1373			 * Don't allow the thread to migrate
1374			 * from a preemption.
1375			 */
1376			ts->ts_flags |= TSF_HOLD;
1377			setrunqueue(td, (flags & SW_PREEMPT) ?
1378			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1379			    SRQ_OURSELF|SRQ_YIELDING);
1380			ts->ts_flags &= ~TSF_HOLD;
1381		}
1382	}
1383	if (newtd != NULL) {
1384		/*
1385		 * If we bring in a thread, account for it as if it had been
1386		 * added to the run queue and then chosen.
1387		 */
1388		newtd->td_sched->ts_flags |= TSF_DIDRUN;
1389		TD_SET_RUNNING(newtd);
1390		tdq_load_add(TDQ_SELF(), newtd->td_sched);
1391	} else
1392		newtd = choosethread();
1393	if (td != newtd) {
1394#ifdef	HWPMC_HOOKS
1395		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1396			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1397#endif
1398
1399		cpu_switch(td, newtd);
1400#ifdef	HWPMC_HOOKS
1401		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1402			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1403#endif
1404	}
1405	sched_lock.mtx_lock = (uintptr_t)td;
1406	td->td_oncpu = PCPU_GET(cpuid);
1407}
1408
1409void
1410sched_nice(struct proc *p, int nice)
1411{
1412	struct thread *td;
1413
1414	PROC_LOCK_ASSERT(p, MA_OWNED);
1415	mtx_assert(&sched_lock, MA_OWNED);
1416
1417	p->p_nice = nice;
1418	FOREACH_THREAD_IN_PROC(p, td) {
1419		sched_priority(td);
1420		sched_prio(td, td->td_base_user_pri);
1421	}
1422}
1423
1424void
1425sched_sleep(struct thread *td)
1426{
1427
1428	mtx_assert(&sched_lock, MA_OWNED);
1429
1430	td->td_sched->ts_slptime = ticks;
1431}
1432
1433void
1434sched_wakeup(struct thread *td)
1435{
1436	int slptime;
1437
1438	mtx_assert(&sched_lock, MA_OWNED);
1439
1440	/*
1441	 * If we slept for more than a tick update our interactivity and
1442	 * priority.
1443	 */
1444	slptime = td->td_sched->ts_slptime;
1445	td->td_sched->ts_slptime = 0;
1446	if (slptime && slptime != ticks) {
1447		int hzticks;
1448
1449		hzticks = (ticks - slptime) << SCHED_TICK_SHIFT;
1450		td->td_sched->skg_slptime += hzticks;
1451		sched_interact_update(td);
1452		sched_pctcpu_update(td->td_sched);
1453		sched_priority(td);
1454	}
1455	setrunqueue(td, SRQ_BORING);
1456}
1457
1458/*
1459 * Penalize the parent for creating a new child and initialize the child's
1460 * priority.
1461 */
1462void
1463sched_fork(struct thread *td, struct thread *child)
1464{
1465	mtx_assert(&sched_lock, MA_OWNED);
1466	sched_fork_thread(td, child);
1467	/*
1468	 * Penalize the parent and child for forking.
1469	 */
1470	sched_interact_fork(child);
1471	sched_priority(child);
1472	td->td_sched->skg_runtime += tickincr;
1473	sched_interact_update(td);
1474	sched_priority(td);
1475}
1476
1477void
1478sched_fork_thread(struct thread *td, struct thread *child)
1479{
1480	struct td_sched *ts;
1481	struct td_sched *ts2;
1482
1483	/*
1484	 * Initialize child.
1485	 */
1486	sched_newthread(child);
1487	ts = td->td_sched;
1488	ts2 = child->td_sched;
1489	ts2->ts_cpu = ts->ts_cpu;
1490	ts2->ts_runq = NULL;
1491	/*
1492	 * Grab our parent's cpu estimation information and priority.
1493	 */
1494	ts2->ts_ticks = ts->ts_ticks;
1495	ts2->ts_ltick = ts->ts_ltick;
1496	ts2->ts_ftick = ts->ts_ftick;
1497	child->td_user_pri = td->td_user_pri;
1498	child->td_base_user_pri = td->td_base_user_pri;
1499	/*
1500	 * And update interactivity score.
1501	 */
1502	ts2->skg_slptime = ts->skg_slptime;
1503	ts2->skg_runtime = ts->skg_runtime;
1504	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
1505}
1506
1507void
1508sched_class(struct thread *td, int class)
1509{
1510	struct tdq *tdq;
1511	struct td_sched *ts;
1512	int nclass;
1513	int oclass;
1514
1515	mtx_assert(&sched_lock, MA_OWNED);
1516	if (td->td_pri_class == class)
1517		return;
1518
1519	nclass = PRI_BASE(class);
1520	oclass = PRI_BASE(td->td_pri_class);
1521	ts = td->td_sched;
1522	if (ts->ts_state == TSS_ONRUNQ || td->td_state == TDS_RUNNING) {
1523		tdq = TDQ_CPU(ts->ts_cpu);
1524#ifdef SMP
1525		/*
1526		 * On SMP if we're on the RUNQ we must adjust the transferable
1527		 * count because we could be changing to or from an interrupt
1528		 * class.
1529		 */
1530		if (ts->ts_state == TSS_ONRUNQ) {
1531			if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
1532				tdq->tdq_transferable--;
1533				tdq->tdq_group->tdg_transferable--;
1534			}
1535			if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
1536				tdq->tdq_transferable++;
1537				tdq->tdq_group->tdg_transferable++;
1538			}
1539		}
1540#endif
1541		if (oclass == PRI_TIMESHARE)
1542			tdq->tdq_load_timeshare--;
1543		if (nclass == PRI_TIMESHARE)
1544			tdq->tdq_load_timeshare++;
1545	}
1546
1547	td->td_pri_class = class;
1548}
1549
1550/*
1551 * Return some of the child's priority and interactivity to the parent.
1552 */
1553void
1554sched_exit(struct proc *p, struct thread *child)
1555{
1556	struct thread *td;
1557
1558	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
1559	    child, child->td_proc->p_comm, child->td_priority);
1560
1561	td = FIRST_THREAD_IN_PROC(p);
1562	sched_exit_thread(td, child);
1563}
1564
1565void
1566sched_exit_thread(struct thread *td, struct thread *child)
1567{
1568
1569	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
1570	    child, child->td_proc->p_comm, child->td_priority);
1571
1572	tdq_load_rem(TDQ_CPU(child->td_sched->ts_cpu), child->td_sched);
1573#ifdef KSE
1574	/*
1575	 * KSE forks and exits so often that this penalty causes short-lived
1576	 * threads to always be non-interactive.  This causes mozilla to
1577	 * crawl under load.
1578	 */
1579	if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
1580		return;
1581#endif
1582	/*
1583	 * Give the child's runtime to the parent without returning the
1584	 * sleep time as a penalty to the parent.  This causes shells that
1585	 * launch expensive things to mark their children as expensive.
1586	 */
1587	td->td_sched->skg_runtime += child->td_sched->skg_runtime;
1588	sched_interact_update(td);
1589	sched_priority(td);
1590}
1591
1592void
1593sched_userret(struct thread *td)
1594{
1595	/*
1596	 * XXX we cheat slightly on the locking here to avoid locking in
1597	 * the usual case.  Setting td_priority here is essentially an
1598	 * incomplete workaround for not setting it properly elsewhere.
1599	 * Now that some interrupt handlers are threads, not setting it
1600	 * properly elsewhere can clobber it in the window between setting
1601	 * it here and returning to user mode, so don't waste time setting
1602	 * it perfectly here.
1603	 */
1604	KASSERT((td->td_flags & TDF_BORROWING) == 0,
1605	    ("thread with borrowed priority returning to userland"));
1606	if (td->td_priority != td->td_user_pri) {
1607		mtx_lock_spin(&sched_lock);
1608		td->td_priority = td->td_user_pri;
1609		td->td_base_pri = td->td_user_pri;
1610		mtx_unlock_spin(&sched_lock);
1611        }
1612}
1613
1614void
1615sched_clock(struct thread *td)
1616{
1617	struct tdq *tdq;
1618	struct td_sched *ts;
1619
1620	mtx_assert(&sched_lock, MA_OWNED);
1621#ifdef SMP
1622	sched_smp_tick();
1623#endif
1624	tdq = TDQ_SELF();
1625	/*
1626	 * Advance the insert index once for each tick to ensure that all
1627	 * threads get a chance to run.
1628	 */
1629	if (tdq->tdq_idx == tdq->tdq_ridx) {
1630		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
1631		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
1632			tdq->tdq_ridx = tdq->tdq_idx;
1633	}
1634	/* Adjust ticks for pctcpu */
1635	ts = td->td_sched;
1636	ts->ts_ticks += tickincr;
1637	ts->ts_ltick = ticks;
1638	/*
1639	 * Update if we've exceeded our desired tick threshold by over one
1640	 * second.
1641	 */
1642	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
1643		sched_pctcpu_update(ts);
1644	/*
1645	 * We only do slicing code for TIMESHARE threads.
1646	 */
1647	if (td->td_pri_class != PRI_TIMESHARE)
1648		return;
1649	/*
1650	 * We used a tick; charge it to the thread so that we can compute our
1651	 * interactivity.
1652	 */
1653	td->td_sched->skg_runtime += tickincr;
1654	sched_interact_update(td);
1655	/*
1656	 * We used up one time slice.
1657	 */
1658	if (--ts->ts_slice > 0)
1659		return;
1660	/*
1661	 * We're out of time, recompute priorities and requeue.
1662	 */
1663	sched_priority(td);
1664	tdq_load_rem(tdq, ts);
1665	ts->ts_slice = sched_slice;
1666	tdq_load_add(tdq, ts);
1667	td->td_flags |= TDF_NEEDRESCHED;
1668}
1669
1670int
1671sched_runnable(void)
1672{
1673	struct tdq *tdq;
1674	int load;
1675
1676	load = 1;
1677
1678	tdq = TDQ_SELF();
1679#ifdef SMP
1680	if (tdq->tdq_assigned) {
1681		mtx_lock_spin(&sched_lock);
1682		tdq_assign(tdq);
1683		mtx_unlock_spin(&sched_lock);
1684	}
1685#endif
1686	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1687		if (tdq->tdq_load > 0)
1688			goto out;
1689	} else
1690		if (tdq->tdq_load - 1 > 0)
1691			goto out;
1692	load = 0;
1693out:
1694	return (load);
1695}
1696
1697struct td_sched *
1698sched_choose(void)
1699{
1700	struct tdq *tdq;
1701	struct td_sched *ts;
1702
1703	mtx_assert(&sched_lock, MA_OWNED);
1704	tdq = TDQ_SELF();
1705#ifdef SMP
1706restart:
1707	if (tdq->tdq_assigned)
1708		tdq_assign(tdq);
1709#endif
1710	ts = tdq_choose(tdq);
1711	if (ts) {
1712#ifdef SMP
1713		if (ts->ts_thread->td_priority > PRI_MIN_IDLE)
1714			if (tdq_idled(tdq) == 0)
1715				goto restart;
1716#endif
1717		tdq_runq_rem(tdq, ts);
1718		ts->ts_state = TSS_THREAD;
1719		return (ts);
1720	}
1721#ifdef SMP
1722	if (tdq_idled(tdq) == 0)
1723		goto restart;
1724#endif
1725	return (NULL);
1726}
1727
1728void
1729sched_add(struct thread *td, int flags)
1730{
1731	struct tdq *tdq;
1732	struct td_sched *ts;
1733	int preemptive;
1734	int canmigrate;
1735	int class;
1736
1737	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1738	    td, td->td_proc->p_comm, td->td_priority, curthread,
1739	    curthread->td_proc->p_comm);
1740	mtx_assert(&sched_lock, MA_OWNED);
1741	tdq = TDQ_SELF();
1742	ts = td->td_sched;
1743	ts->ts_flags &= ~TSF_INTERNAL;
1744	class = PRI_BASE(td->td_pri_class);
1745	preemptive = !(flags & SRQ_YIELDING);
1746	canmigrate = 1;
1747#ifdef SMP
1748	if (ts->ts_flags & TSF_ASSIGNED) {
1749		if (ts->ts_flags & TSF_REMOVED)
1750			ts->ts_flags &= ~TSF_REMOVED;
1751		return;
1752	}
1753	canmigrate = THREAD_CAN_MIGRATE(td);
1754	/*
1755	 * Don't migrate running threads here.  Force the long term balancer
1756	 * to do it.
1757	 */
1758	if (ts->ts_flags & TSF_HOLD) {
1759		ts->ts_flags &= ~TSF_HOLD;
1760		canmigrate = 0;
1761	}
1762#endif
1763	KASSERT(ts->ts_state != TSS_ONRUNQ,
1764	    ("sched_add: thread %p (%s) already in run queue", td,
1765	    td->td_proc->p_comm));
1766	KASSERT(td->td_proc->p_sflag & PS_INMEM,
1767	    ("sched_add: process swapped out"));
1768	KASSERT(ts->ts_runq == NULL,
1769	    ("sched_add: thread %p is still assigned to a run queue", td));
1770	/*
1771	 * Set the slice and pick the run queue.
1772	 */
1773	if (ts->ts_slice == 0)
1774		ts->ts_slice = sched_slice;
1775	if (class == PRI_TIMESHARE)
1776		sched_priority(td);
1777	if (td->td_priority <= PRI_MAX_REALTIME) {
1778		ts->ts_runq = &tdq->tdq_realtime;
1779		/*
1780		 * If the thread is not artificially pinned and it's in
1781		 * the realtime queue we directly dispatch it on this cpu
1782		 * for minimum latency.  Interrupt handlers may also have
1783		 * to complete on the cpu that dispatched them.
1784		 */
1785		if (td->td_pinned == 0 && class == PRI_ITHD)
1786			ts->ts_cpu = PCPU_GET(cpuid);
1787	} else if (td->td_priority <= PRI_MAX_TIMESHARE)
1788		ts->ts_runq = &tdq->tdq_timeshare;
1789	else
1790		ts->ts_runq = &tdq->tdq_idle;
1791
1792#ifdef SMP
1793	/*
1794	 * If this thread is pinned or bound, notify the target cpu.
1795	 */
1796	if (!canmigrate && ts->ts_cpu != PCPU_GET(cpuid) ) {
1797		ts->ts_runq = NULL;
1798		tdq_notify(ts, ts->ts_cpu);
1799		return;
1800	}
1801	/*
1802	 * If we had been idle, clear our bit in the group and potentially
1803	 * the global bitmap.  If not, see if we should transfer this thread.
1804	 */
1805	if ((class != PRI_IDLE && class != PRI_ITHD) &&
1806	    (tdq->tdq_group->tdg_idlemask & PCPU_GET(cpumask)) != 0) {
1807		/*
1808		 * Check to see if our group is unidling, and if so, remove it
1809		 * from the global idle mask.
1810		 */
1811		if (tdq->tdq_group->tdg_idlemask ==
1812		    tdq->tdq_group->tdg_cpumask)
1813			atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
1814		/*
1815		 * Now remove ourselves from the group specific idle mask.
1816		 */
1817		tdq->tdq_group->tdg_idlemask &= ~PCPU_GET(cpumask);
1818	} else if (canmigrate && tdq->tdq_load > 1)
1819		if (tdq_transfer(tdq, ts, class))
1820			return;
1821	ts->ts_cpu = PCPU_GET(cpuid);
1822#endif
1823	if (td->td_priority < curthread->td_priority)
1824		curthread->td_flags |= TDF_NEEDRESCHED;
1825	if (preemptive && maybe_preempt(td))
1826		return;
1827	ts->ts_state = TSS_ONRUNQ;
1828
1829	tdq_runq_add(tdq, ts, flags);
1830	tdq_load_add(tdq, ts);
1831}
1832
1833void
1834sched_rem(struct thread *td)
1835{
1836	struct tdq *tdq;
1837	struct td_sched *ts;
1838
1839	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1840	    td, td->td_proc->p_comm, td->td_priority, curthread,
1841	    curthread->td_proc->p_comm);
1842	mtx_assert(&sched_lock, MA_OWNED);
1843	ts = td->td_sched;
1844	if (ts->ts_flags & TSF_ASSIGNED) {
1845		ts->ts_flags |= TSF_REMOVED;
1846		return;
1847	}
1848	KASSERT((ts->ts_state == TSS_ONRUNQ),
1849	    ("sched_rem: thread not on run queue"));
1850
1851	ts->ts_state = TSS_THREAD;
1852	tdq = TDQ_CPU(ts->ts_cpu);
1853	tdq_runq_rem(tdq, ts);
1854	tdq_load_rem(tdq, ts);
1855}
1856
1857fixpt_t
1858sched_pctcpu(struct thread *td)
1859{
1860	fixpt_t pctcpu;
1861	struct td_sched *ts;
1862
1863	pctcpu = 0;
1864	ts = td->td_sched;
1865	if (ts == NULL)
1866		return (0);
1867
1868	mtx_lock_spin(&sched_lock);
1869	if (ts->ts_ticks) {
1870		int rtick;
1871
1872		sched_pctcpu_update(ts);
1873		/* How many rtick per second ? */
1874		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
1875		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
1876	}
1877	td->td_proc->p_swtime = ts->ts_ltick - ts->ts_ftick;
1878	mtx_unlock_spin(&sched_lock);
1879
1880	return (pctcpu);
1881}
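/*
 * Example of the %cpu math above, assuming hz = 1000: a thread that ran
 * for roughly 2500 of the last 10000 hz ticks has SCHED_TICK_HZ() of about
 * 2500, so rtick = min(2500 / 10, 1000) = 250 and pctcpu comes out to
 * about a quarter of FSCALE, i.e. roughly 25% cpu as reported by ps(1).
 */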
1882
1883void
1884sched_bind(struct thread *td, int cpu)
1885{
1886	struct td_sched *ts;
1887
1888	mtx_assert(&sched_lock, MA_OWNED);
1889	ts = td->td_sched;
1890	KASSERT((ts->ts_flags & TSF_BOUND) == 0,
1891	    ("sched_bind: thread %p already bound.", td));
1892	ts->ts_flags |= TSF_BOUND;
1893#ifdef SMP
1894	if (PCPU_GET(cpuid) == cpu)
1895		return;
1896	/* sched_rem without the runq_remove */
1897	ts->ts_state = TSS_THREAD;
1898	tdq_load_rem(TDQ_CPU(ts->ts_cpu), ts);
1899	tdq_notify(ts, cpu);
1900	/* When we return from mi_switch we'll be on the correct cpu. */
1901	mi_switch(SW_VOL, NULL);
1902	sched_pin();
1903#endif
1904}
1905
1906void
1907sched_unbind(struct thread *td)
1908{
1909	struct td_sched *ts;
1910
1911	mtx_assert(&sched_lock, MA_OWNED);
1912	ts = td->td_sched;
1913	KASSERT(ts->ts_flags & TSF_BOUND,
1914	    ("sched_unbind: thread %p not bound.", td));
1915	mtx_assert(&sched_lock, MA_OWNED);
1916	ts->ts_flags &= ~TSF_BOUND;
1917#ifdef SMP
1918	sched_unpin();
1919#endif
1920}
1921
1922int
1923sched_is_bound(struct thread *td)
1924{
1925	mtx_assert(&sched_lock, MA_OWNED);
1926	return (td->td_sched->ts_flags & TSF_BOUND);
1927}
1928
1929void
1930sched_relinquish(struct thread *td)
1931{
1932	mtx_lock_spin(&sched_lock);
1933	if (td->td_pri_class == PRI_TIMESHARE)
1934		sched_prio(td, PRI_MAX_TIMESHARE);
1935	mi_switch(SW_VOL, NULL);
1936	mtx_unlock_spin(&sched_lock);
1937}
1938
1939int
1940sched_load(void)
1941{
1942#ifdef SMP
1943	int total;
1944	int i;
1945
1946	total = 0;
1947	for (i = 0; i <= tdg_maxid; i++)
1948		total += TDQ_GROUP(i)->tdg_load;
1949	return (total);
1950#else
1951	return (TDQ_SELF()->tdq_sysload);
1952#endif
1953}
1954
1955int
1956sched_sizeof_proc(void)
1957{
1958	return (sizeof(struct proc));
1959}
1960
1961int
1962sched_sizeof_thread(void)
1963{
1964	return (sizeof(struct thread) + sizeof(struct td_sched));
1965}
1966
1967void
1968sched_tick(void)
1969{
1970}
1971
1972static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
1973SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
1974    "Scheduler name");
1975SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, "");
1976SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, "");
1977SYSCTL_INT(_kern_sched, OID_AUTO, tickincr, CTLFLAG_RD, &tickincr, 0, "");
1978SYSCTL_INT(_kern_sched, OID_AUTO, realstathz, CTLFLAG_RD, &realstathz, 0, "");
1979SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RD, &sched_rebalance, 0, "");
1980
1981/* ps compat */
1982static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
1983SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
1984
1985
1986#define KERN_SWITCH_INCLUDE 1
1987#include "kern/kern_switch.c"
1988