sched_ule.c revision 175348
1/*-
2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27/*
28 * This file implements the ULE scheduler.  ULE supports independent CPU
29 * run queues and fine grain locking.  It has superior interactive
30 * performance under load even on uni-processor systems.
31 *
32 * etymology:
33 *   ULE is the last three letters in schedule.  It owes its name to a
34 * generic user created for a scheduling system by Paul Mikesell at
35 * Isilon Systems and a general lack of creativity on the part of the author.
36 */
37
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 175348 2008-01-15 09:03:09Z jeff $");
40
41#include "opt_hwpmc_hooks.h"
42#include "opt_sched.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/kdb.h>
47#include <sys/kernel.h>
48#include <sys/ktr.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/proc.h>
52#include <sys/resource.h>
53#include <sys/resourcevar.h>
54#include <sys/sched.h>
55#include <sys/smp.h>
56#include <sys/sx.h>
57#include <sys/sysctl.h>
58#include <sys/sysproto.h>
59#include <sys/turnstile.h>
60#include <sys/umtx.h>
61#include <sys/vmmeter.h>
62#ifdef KTRACE
63#include <sys/uio.h>
64#include <sys/ktrace.h>
65#endif
66
67#ifdef HWPMC_HOOKS
68#include <sys/pmckern.h>
69#endif
70
71#include <machine/cpu.h>
72#include <machine/smp.h>
73
74#if !defined(__i386__) && !defined(__amd64__) && !defined(__powerpc__) && !defined(__arm__)
75#error "This architecture is not currently compatible with ULE"
76#endif
77
78#define	KTR_ULE	0
79
80/*
81 * Thread scheduler specific section.  All fields are protected
82 * by the thread lock.
83 */
84struct td_sched {
85	TAILQ_ENTRY(td_sched) ts_procq;	/* Run queue. */
86	struct thread	*ts_thread;	/* Active associated thread. */
87	struct runq	*ts_runq;	/* Run-queue we're queued on. */
88	short		ts_flags;	/* TSF_* flags. */
89	u_char		ts_rqindex;	/* Run queue index. */
90	u_char		ts_cpu;		/* CPU that we have affinity for. */
91	int		ts_slice;	/* Ticks of slice remaining. */
92	u_int		ts_slptime;	/* Number of ticks we vol. slept */
93	u_int		ts_runtime;	/* Number of ticks we were running */
94	/* The following variables are only used for pctcpu calculation */
95	int		ts_ltick;	/* Last tick that we were running on */
96	int		ts_ftick;	/* First tick that we were running on */
97	int		ts_ticks;	/* Tick count */
98#ifdef SMP
99	int		ts_rltick;	/* Real last tick, for affinity. */
100#endif
101};
102/* flags kept in ts_flags */
103#define	TSF_BOUND	0x0001		/* Thread can not migrate. */
104#define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
105
106static struct td_sched td_sched0;
107
108/*
109 * Cpu percentage computation macros and defines.
110 *
111 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
112 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
113 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
114 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
115 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
116 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
117 */
118#define	SCHED_TICK_SECS		10
119#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
120#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
121#define	SCHED_TICK_SHIFT	10
122#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
123#define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
124
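/*
 * Worked example (illustrative, assuming hz = 1000): the pctcpu window
 * spans SCHED_TICK_TARG = 10000 hz ticks.  A thread whose ts_ftick and
 * ts_ltick are 10000 ticks apart has a SCHED_TICK_TOTAL of 10000; if it
 * accumulated the shifted equivalent of 5000 hz ticks on cpu,
 * SCHED_TICK_HZ yields 5000, i.e. roughly 50% cpu over the window.
 */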
125/*
126 * These macros determine priorities for non-interactive threads.  They are
127 * assigned a priority based on their recent cpu utilization as expressed
128 * by the ratio of ticks to the tick total.  NHALF priorities at the start
129 * and end of the MIN to MAX timeshare range are only reachable with negative
130 * or positive nice respectively.
131 *
132 * PRI_RANGE:	Priority range for utilization dependent priorities.
133 * PRI_NRESV:	Number of nice values.
134 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
135 * PRI_NICE:	Determines the part of the priority inherited from nice.
136 */
137#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
138#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
139#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
140#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
141#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN)
142#define	SCHED_PRI_TICKS(ts)						\
143    (SCHED_TICK_HZ((ts)) /						\
144    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
145#define	SCHED_PRI_NICE(nice)	(nice)
146
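/*
 * Worked example (illustrative): SCHED_PRI_NRESV covers 40 nice values,
 * so SCHED_PRI_NHALF is 20 and the utilization-driven range is the
 * timeshare range minus 20 priorities at each end.  Continuing the
 * example above, a thread with SCHED_TICK_HZ of 5000 out of a
 * SCHED_TICK_TOTAL of 10000 gets a SCHED_PRI_TICKS of roughly
 * SCHED_PRI_RANGE / 2, landing near the middle of the utilization
 * dependent range before SCHED_PRI_NICE adds the nice value.
 */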
147/*
148 * These determine the interactivity of a process.  Interactivity differs from
149 * cpu utilization in that it expresses the voluntary time slept vs time ran
150 * while cpu utilization includes all time not running.  This more accurately
151 * models the intent of the thread.
152 *
153 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
154 *		before throttling back.
155 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
156 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
157 * INTERACT_THRESH:	Threshold for placement on the current runq.
158 */
159#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
160#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
161#define	SCHED_INTERACT_MAX	(100)
162#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
163#define	SCHED_INTERACT_THRESH	(30)
164
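/*
 * Illustrative values (assuming hz = 1000): SCHED_SLP_RUN_MAX caps the
 * sleep + run history at the shifted equivalent of five seconds, and a
 * forked child inherits at most half a second of it.  A thread is treated
 * as interactive when its score falls below SCHED_INTERACT_THRESH (30 out
 * of a possible 100); see sched_interact_score() below.
 */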
165/*
166 * tickincr:		Converts a stathz tick into a hz domain scaled by
167 *			the shift factor.  Without the shift the error rate
168 *			due to rounding would be unacceptably high.
169 * realstathz:		stathz is sometimes 0, in which case hz is used instead.
170 * sched_slice:		Runtime of each thread before rescheduling.
171 * preempt_thresh:	Priority threshold for preemption and remote IPIs.
172 */
173static int sched_interact = SCHED_INTERACT_THRESH;
174static int realstathz;
175static int tickincr;
176static int sched_slice;
177#ifdef PREEMPTION
178#ifdef FULL_PREEMPTION
179static int preempt_thresh = PRI_MAX_IDLE;
180#else
181static int preempt_thresh = PRI_MIN_KERN;
182#endif
183#else
184static int preempt_thresh = 0;
185#endif
186
187/*
188 * tdq - per processor runqs and statistics.  All fields are protected by the
189 * tdq_lock.  The load and lowpri may be accessed without the lock held to
190 * avoid excess locking in sched_pickcpu().
191 */
192struct tdq {
193	struct mtx	*tdq_lock;		/* Pointer to group lock. */
194	struct runq	tdq_realtime;		/* real-time run queue. */
195	struct runq	tdq_timeshare;		/* timeshare run queue. */
196	struct runq	tdq_idle;		/* Queue of IDLE threads. */
197	int		tdq_load;		/* Aggregate load. */
198	u_char		tdq_idx;		/* Current insert index. */
199	u_char		tdq_ridx;		/* Current removal index. */
200#ifdef SMP
201	u_char		tdq_lowpri;		/* Lowest priority thread. */
202	int		tdq_transferable;	/* Transferable thread count. */
203	LIST_ENTRY(tdq)	tdq_siblings;		/* Next in tdq group. */
204	struct tdq_group *tdq_group;		/* Our processor group. */
205#else
206	int		tdq_sysload;		/* For loadavg, !ITHD load. */
207#endif
208} __aligned(64);
209
210
211#ifdef SMP
212/*
213 * tdq groups are groups of processors which can cheaply share threads.  When
214 * one processor in the group goes idle it will check the runqs of the other
215 * processors in its group prior to halting and waiting for an interrupt.
216 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
217 * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
218 * load balancer.
219 */
220struct tdq_group {
221	struct mtx	tdg_lock;	/* Protects all fields below. */
222	int		tdg_cpus;	/* Count of CPUs in this tdq group. */
223	cpumask_t 	tdg_cpumask;	/* Mask of cpus in this group. */
224	cpumask_t 	tdg_idlemask;	/* Idle cpus in this group. */
225	cpumask_t 	tdg_mask;	/* Bit mask for first cpu. */
226	int		tdg_load;	/* Total load of this group. */
227	int	tdg_transferable;	/* Transferable load of this group. */
228	LIST_HEAD(, tdq) tdg_members;	/* Linked list of all members. */
229	char		tdg_name[16];	/* lock name. */
230} __aligned(64);
231
232#define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 300))
233#define	SCHED_AFFINITY(ts)	((ts)->ts_rltick > ticks - affinity)
234
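/*
 * Illustrative example (assuming hz = 1000): SCHED_AFFINITY_DEFAULT works
 * out to max(1, 1000 / 300) = 3 ticks, so SCHED_AFFINITY() considers a
 * thread to still have cache affinity for its last cpu if it ran there
 * within roughly the last 3 ms.
 */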
235/*
236 * Run-time tunables.
237 */
238static int rebalance = 1;
239static int balance_interval = 128;	/* Default set in sched_initticks(). */
240static int pick_pri = 1;
241static int affinity;
242static int tryself = 1;
243static int steal_htt = 1;
244static int steal_idle = 1;
245static int steal_thresh = 2;
246static int topology = 0;
247
248/*
249 * One thread queue per processor.
250 */
251static volatile cpumask_t tdq_idle;
252static int tdg_maxid;
253static struct tdq	tdq_cpu[MAXCPU];
254static struct tdq_group tdq_groups[MAXCPU];
255static struct tdq	*balance_tdq;
256static int balance_group_ticks;
257static int balance_ticks;
258
259#define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
260#define	TDQ_CPU(x)	(&tdq_cpu[(x)])
261#define	TDQ_ID(x)	((int)((x) - tdq_cpu))
262#define	TDQ_GROUP(x)	(&tdq_groups[(x)])
263#define	TDG_ID(x)	((int)((x) - tdq_groups))
264#else	/* !SMP */
265static struct tdq	tdq_cpu;
266static struct mtx	tdq_lock;
267
268#define	TDQ_ID(x)	(0)
269#define	TDQ_SELF()	(&tdq_cpu)
270#define	TDQ_CPU(x)	(&tdq_cpu)
271#endif
272
273#define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
274#define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
275#define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
276#define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
277#define	TDQ_LOCKPTR(t)		((t)->tdq_lock)
278
279static void sched_priority(struct thread *);
280static void sched_thread_priority(struct thread *, u_char);
281static int sched_interact_score(struct thread *);
282static void sched_interact_update(struct thread *);
283static void sched_interact_fork(struct thread *);
284static void sched_pctcpu_update(struct td_sched *);
285
286/* Operations on per processor queues */
287static struct td_sched * tdq_choose(struct tdq *);
288static void tdq_setup(struct tdq *);
289static void tdq_load_add(struct tdq *, struct td_sched *);
290static void tdq_load_rem(struct tdq *, struct td_sched *);
291static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
292static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
293void tdq_print(int cpu);
294static void runq_print(struct runq *rq);
295static void tdq_add(struct tdq *, struct thread *, int);
296#ifdef SMP
297static void tdq_move(struct tdq *, struct tdq *);
298static int tdq_idled(struct tdq *);
299static void tdq_notify(struct td_sched *);
300static struct td_sched *tdq_steal(struct tdq *);
301static struct td_sched *runq_steal(struct runq *);
302static int sched_pickcpu(struct td_sched *, int);
303static void sched_balance(void);
304static void sched_balance_groups(void);
305static void sched_balance_group(struct tdq_group *);
306static void sched_balance_pair(struct tdq *, struct tdq *);
307static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
308static inline struct mtx *thread_block_switch(struct thread *);
309static inline void thread_unblock_switch(struct thread *, struct mtx *);
310static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
311
312#define	THREAD_CAN_MIGRATE(td)	 ((td)->td_pinned == 0)
313#endif
314
315static void sched_setup(void *dummy);
316SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
317
318static void sched_initticks(void *dummy);
319SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)
320
321/*
322 * Print the threads waiting on a run-queue.
323 */
324static void
325runq_print(struct runq *rq)
326{
327	struct rqhead *rqh;
328	struct td_sched *ts;
329	int pri;
330	int j;
331	int i;
332
333	for (i = 0; i < RQB_LEN; i++) {
334		printf("\t\trunq bits %d 0x%zx\n",
335		    i, rq->rq_status.rqb_bits[i]);
336		for (j = 0; j < RQB_BPW; j++)
337			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
338				pri = j + (i << RQB_L2BPW);
339				rqh = &rq->rq_queues[pri];
340				TAILQ_FOREACH(ts, rqh, ts_procq) {
341					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
342					    ts->ts_thread, ts->ts_thread->td_name, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
343				}
344			}
345	}
346}
347
348/*
349 * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
350 */
351void
352tdq_print(int cpu)
353{
354	struct tdq *tdq;
355
356	tdq = TDQ_CPU(cpu);
357
358	printf("tdq %d:\n", TDQ_ID(tdq));
359	printf("\tlockptr         %p\n", TDQ_LOCKPTR(tdq));
360	printf("\tload:           %d\n", tdq->tdq_load);
361	printf("\ttimeshare idx:  %d\n", tdq->tdq_idx);
362	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
363	printf("\trealtime runq:\n");
364	runq_print(&tdq->tdq_realtime);
365	printf("\ttimeshare runq:\n");
366	runq_print(&tdq->tdq_timeshare);
367	printf("\tidle runq:\n");
368	runq_print(&tdq->tdq_idle);
369#ifdef SMP
370	printf("\tload transferable: %d\n", tdq->tdq_transferable);
371	printf("\tlowest priority:   %d\n", tdq->tdq_lowpri);
372	printf("\tgroup:             %d\n", TDG_ID(tdq->tdq_group));
373	printf("\tLock name:         %s\n", tdq->tdq_group->tdg_name);
374#endif
375}
376
377#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
378/*
379 * Add a thread to the actual run-queue.  Keeps transferable counts up to
380 * date with what is actually on the run-queue.  Selects the correct
381 * queue position for timeshare threads.
382 */
383static __inline void
384tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
385{
386	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
387	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
388#ifdef SMP
389	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
390		tdq->tdq_transferable++;
391		tdq->tdq_group->tdg_transferable++;
392		ts->ts_flags |= TSF_XFERABLE;
393	}
394#endif
395	if (ts->ts_runq == &tdq->tdq_timeshare) {
396		u_char pri;
397
398		pri = ts->ts_thread->td_priority;
399		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
400			("Invalid priority %d on timeshare runq", pri));
401		/*
402		 * This queue contains only priorities between MIN and MAX
403		 * timeshare.  Use the whole queue to represent these values.
404		 */
405		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
406			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
407			pri = (pri + tdq->tdq_idx) % RQ_NQS;
408			/*
409			 * This effectively shortens the queue by one so we
410			 * can have a one slot difference between idx and
411			 * ridx while we wait for threads to drain.
412			 */
413			if (tdq->tdq_ridx != tdq->tdq_idx &&
414			    pri == tdq->tdq_ridx)
415				pri = (unsigned char)(pri - 1) % RQ_NQS;
416		} else
417			pri = tdq->tdq_ridx;
418		runq_add_pri(ts->ts_runq, ts, pri, flags);
419	} else
420		runq_add(ts->ts_runq, ts, flags);
421}
422
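/*
 * Worked example of the rotating timeshare queue (illustrative, assuming
 * the timeshare priority range and RQ_NQS are both 64, so TS_RQ_PPQ is 1):
 * a thread at priority PRI_MIN_TIMESHARE + 10 with tdq_idx == 20 maps to
 * queue (10 + 20) % 64 == 30.  If tdq_ridx were also 30 (and different
 * from tdq_idx), the thread would be placed in queue 29 instead so the
 * removal index is never overtaken while the old queues drain.
 */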
423/*
424 * Remove a thread from a run-queue.  This typically happens when a thread
425 * is selected to run.  Running threads are not on the queue and the
426 * transferable count does not reflect them.
427 */
428static __inline void
429tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
430{
431	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
432	KASSERT(ts->ts_runq != NULL,
433	    ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
434#ifdef SMP
435	if (ts->ts_flags & TSF_XFERABLE) {
436		tdq->tdq_transferable--;
437		tdq->tdq_group->tdg_transferable--;
438		ts->ts_flags &= ~TSF_XFERABLE;
439	}
440#endif
441	if (ts->ts_runq == &tdq->tdq_timeshare) {
442		if (tdq->tdq_idx != tdq->tdq_ridx)
443			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
444		else
445			runq_remove_idx(ts->ts_runq, ts, NULL);
446		/*
447		 * For timeshare threads we update the priority here so
448		 * the priority reflects the time we've been sleeping.
449		 */
450		ts->ts_ltick = ticks;
451		sched_pctcpu_update(ts);
452		sched_priority(ts->ts_thread);
453	} else
454		runq_remove(ts->ts_runq, ts);
455}
456
457/*
458 * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
459 * for this thread to the referenced thread queue.
460 */
461static void
462tdq_load_add(struct tdq *tdq, struct td_sched *ts)
463{
464	int class;
465
466	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
467	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
468	class = PRI_BASE(ts->ts_thread->td_pri_class);
469	tdq->tdq_load++;
470	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
471	if (class != PRI_ITHD &&
472	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
473#ifdef SMP
474		tdq->tdq_group->tdg_load++;
475#else
476		tdq->tdq_sysload++;
477#endif
478}
479
480/*
481 * Remove the load from a thread that is transitioning to a sleep state or
482 * exiting.
483 */
484static void
485tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
486{
487	int class;
488
489	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
490	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
491	class = PRI_BASE(ts->ts_thread->td_pri_class);
492	if (class != PRI_ITHD &&
493	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
494#ifdef SMP
495		tdq->tdq_group->tdg_load--;
496#else
497		tdq->tdq_sysload--;
498#endif
499	KASSERT(tdq->tdq_load != 0,
500	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
501	tdq->tdq_load--;
502	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
503	ts->ts_runq = NULL;
504}
505
506#ifdef SMP
507/*
508 * sched_balance is a simple CPU load balancing algorithm.  It operates by
509 * finding the least loaded and most loaded cpu and equalizing their load
510 * by migrating some processes.
511 *
512 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
513 * installations will only have 2 cpus.  Secondly, load balancing too much at
514 * once can have an unpleasant effect on the system.  The scheduler rarely has
515 * enough information to make perfect decisions.  So this algorithm chooses
516 * simplicity and more gradual effects on load in larger systems.
517 *
518 */
519static void
520sched_balance()
521{
522	struct tdq_group *high;
523	struct tdq_group *low;
524	struct tdq_group *tdg;
525	struct tdq *tdq;
526	int cnt;
527	int i;
528
529	/*
530	 * Select a random time between .5 * balance_interval and
531	 * 1.5 * balance_interval.
532	 */
533	balance_ticks = max(balance_interval / 2, 1);
534	balance_ticks += random() % balance_interval;
535	if (smp_started == 0 || rebalance == 0)
536		return;
537	tdq = TDQ_SELF();
538	TDQ_UNLOCK(tdq);
539	low = high = NULL;
540	i = random() % (tdg_maxid + 1);
541	for (cnt = 0; cnt <= tdg_maxid; cnt++) {
542		tdg = TDQ_GROUP(i);
543		/*
544		 * Find the CPU with the highest load that has some
545		 * threads to transfer.
546		 */
547		if ((high == NULL || tdg->tdg_load > high->tdg_load)
548		    && tdg->tdg_transferable)
549			high = tdg;
550		if (low == NULL || tdg->tdg_load < low->tdg_load)
551			low = tdg;
552		if (++i > tdg_maxid)
553			i = 0;
554	}
555	if (low != NULL && high != NULL && high != low)
556		sched_balance_pair(LIST_FIRST(&high->tdg_members),
557		    LIST_FIRST(&low->tdg_members));
558	TDQ_LOCK(tdq);
559}
560
561/*
562 * Balance load between CPUs in a group.  Will only migrate within the group.
563 */
564static void
565sched_balance_groups()
566{
567	struct tdq *tdq;
568	int i;
569
570	/*
571	 * Select a random time between .5 * balance_interval and
572	 * 1.5 * balance_interval.
573	 */
574	balance_group_ticks = max(balance_interval / 2, 1);
575	balance_group_ticks += random() % balance_interval;
576	if (smp_started == 0 || rebalance == 0)
577		return;
578	tdq = TDQ_SELF();
579	TDQ_UNLOCK(tdq);
580	for (i = 0; i <= tdg_maxid; i++)
581		sched_balance_group(TDQ_GROUP(i));
582	TDQ_LOCK(tdq);
583}
584
585/*
586 * Finds the greatest imbalance between two tdqs in a group.
587 */
588static void
589sched_balance_group(struct tdq_group *tdg)
590{
591	struct tdq *tdq;
592	struct tdq *high;
593	struct tdq *low;
594	int load;
595
596	if (tdg->tdg_transferable == 0)
597		return;
598	low = NULL;
599	high = NULL;
600	LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
601		load = tdq->tdq_load;
602		if (high == NULL || load > high->tdq_load)
603			high = tdq;
604		if (low == NULL || load < low->tdq_load)
605			low = tdq;
606	}
607	if (high != NULL && low != NULL && high != low)
608		sched_balance_pair(high, low);
609}
610
611/*
612 * Lock two thread queues using their address to maintain lock order.
613 */
614static void
615tdq_lock_pair(struct tdq *one, struct tdq *two)
616{
617	if (one < two) {
618		TDQ_LOCK(one);
619		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
620	} else {
621		TDQ_LOCK(two);
622		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
623	}
624}
625
626/*
627 * Unlock two thread queues.  Order is not important here.
628 */
629static void
630tdq_unlock_pair(struct tdq *one, struct tdq *two)
631{
632	TDQ_UNLOCK(one);
633	TDQ_UNLOCK(two);
634}
635
636/*
637 * Transfer load between two imbalanced thread queues.
638 */
639static void
640sched_balance_pair(struct tdq *high, struct tdq *low)
641{
642	int transferable;
643	int high_load;
644	int low_load;
645	int move;
646	int diff;
647	int i;
648
649	tdq_lock_pair(high, low);
650	/*
651	 * If we're transferring within a group we have to use this specific
652	 * tdq's transferable count, otherwise we can steal from other members
653	 * of the group.
654	 */
655	if (high->tdq_group == low->tdq_group) {
656		transferable = high->tdq_transferable;
657		high_load = high->tdq_load;
658		low_load = low->tdq_load;
659	} else {
660		transferable = high->tdq_group->tdg_transferable;
661		high_load = high->tdq_group->tdg_load;
662		low_load = low->tdq_group->tdg_load;
663	}
664	/*
665	 * Determine what the imbalance is and then adjust that to how many
666	 * threads we actually have to give up (transferable).
667	 */
668	if (transferable != 0) {
669		diff = high_load - low_load;
670		move = diff / 2;
671		if (diff & 0x1)
672			move++;
673		move = min(move, transferable);
674		for (i = 0; i < move; i++)
675			tdq_move(high, low);
676		/*
677		 * IPI the target cpu to force it to reschedule with the new
678		 * workload.
679		 */
680		ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
681	}
682	tdq_unlock_pair(high, low);
683	return;
684}
685
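/*
 * Worked example (illustrative): with high_load == 7 and low_load == 2,
 * diff is 5 and move rounds up to 3, capped by the transferable count.
 * After three tdq_move() calls the loads are 4 and 5, which is as close
 * to balanced as an odd difference allows, and the low cpu is IPIed so
 * it notices the new work.
 */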
686/*
687 * Move a thread from one thread queue to another.
688 */
689static void
690tdq_move(struct tdq *from, struct tdq *to)
691{
692	struct td_sched *ts;
693	struct thread *td;
694	struct tdq *tdq;
695	int cpu;
696
697	TDQ_LOCK_ASSERT(from, MA_OWNED);
698	TDQ_LOCK_ASSERT(to, MA_OWNED);
699
700	tdq = from;
701	cpu = TDQ_ID(to);
702	ts = tdq_steal(tdq);
703	if (ts == NULL) {
704		struct tdq_group *tdg;
705
706		tdg = tdq->tdq_group;
707		LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
708			if (tdq == from || tdq->tdq_transferable == 0)
709				continue;
710			ts = tdq_steal(tdq);
711			break;
712		}
713		if (ts == NULL)
714			return;
715	}
716	if (tdq == to)
717		return;
718	td = ts->ts_thread;
719	/*
720	 * Although the run queue is locked the thread may be blocked.  Lock
721	 * it to clear this and acquire the run-queue lock.
722	 */
723	thread_lock(td);
724	/* Drop recursive lock on from acquired via thread_lock(). */
725	TDQ_UNLOCK(from);
726	sched_rem(td);
727	ts->ts_cpu = cpu;
728	td->td_lock = TDQ_LOCKPTR(to);
729	tdq_add(to, td, SRQ_YIELDING);
730}
731
732/*
733 * This tdq has idled.  Try to steal a thread from another cpu and switch
734 * to it.
735 */
736static int
737tdq_idled(struct tdq *tdq)
738{
739	struct tdq_group *tdg;
740	struct tdq *steal;
741	int highload;
742	int highcpu;
743	int cpu;
744
745	if (smp_started == 0 || steal_idle == 0)
746		return (1);
747	/* We don't want to be preempted while we're iterating over tdqs */
748	spinlock_enter();
749	tdg = tdq->tdq_group;
750	/*
751	 * If we're in a cpu group, try and steal threads from another cpu in
752	 * the group before idling.  In a HTT group all cpus share the same
753	 * run-queue lock, however, we still need a recursive lock to
754	 * call tdq_move().
755	 */
756	if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
757		TDQ_LOCK(tdq);
758		LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
759			if (steal == tdq || steal->tdq_transferable == 0)
760				continue;
761			TDQ_LOCK(steal);
762			goto steal;
763		}
764		TDQ_UNLOCK(tdq);
765	}
766	/*
767	 * Find the most loaded CPU with a transferable thread and attempt
768	 * to steal it.  We make a lockless pass and then verify that the
769	 * thread is still available after locking.
770	 */
771	for (;;) {
772		highcpu = 0;
773		highload = 0;
774		for (cpu = 0; cpu <= mp_maxid; cpu++) {
775			if (CPU_ABSENT(cpu))
776				continue;
777			steal = TDQ_CPU(cpu);
778			if (steal->tdq_transferable == 0)
779				continue;
780			if (steal->tdq_load < highload)
781				continue;
782			highload = steal->tdq_load;
783			highcpu = cpu;
784		}
785		if (highload < steal_thresh)
786			break;
787		steal = TDQ_CPU(highcpu);
788		if (steal == tdq)
789			break;
790		tdq_lock_pair(tdq, steal);
791		if (steal->tdq_load >= steal_thresh && steal->tdq_transferable)
792			goto steal;
793		tdq_unlock_pair(tdq, steal);
794	}
795	spinlock_exit();
796	return (1);
797steal:
798	spinlock_exit();
799	tdq_move(steal, tdq);
800	TDQ_UNLOCK(steal);
801	mi_switch(SW_VOL, NULL);
802	thread_unlock(curthread);
803
804	return (0);
805}
806
807/*
808 * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
809 */
810static void
811tdq_notify(struct td_sched *ts)
812{
813	struct thread *ctd;
814	struct pcpu *pcpu;
815	int cpri;
816	int pri;
817	int cpu;
818
819	cpu = ts->ts_cpu;
820	pri = ts->ts_thread->td_priority;
821	pcpu = pcpu_find(cpu);
822	ctd = pcpu->pc_curthread;
823	cpri = ctd->td_priority;
824
825	/*
826	 * If our priority is not better than the current priority there is
827	 * nothing to do.
828	 */
829	if (pri > cpri)
830		return;
831	/*
832	 * Always IPI idle.
833	 */
834	if (cpri > PRI_MIN_IDLE)
835		goto sendipi;
836	/*
837	 * If we're realtime or better and there is timeshare or worse running
838	 * send an IPI.
839	 */
840	if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
841		goto sendipi;
842	/*
843	 * Otherwise only IPI if we exceed the threshold.
844	 */
845	if (pri > preempt_thresh)
846		return;
847sendipi:
848	ctd->td_flags |= TDF_NEEDRESCHED;
849	ipi_selected(1 << cpu, IPI_PREEMPT);
850}
851
852/*
853 * Steals load from a timeshare queue.  Honors the rotating queue head
854 * index.
855 */
856static struct td_sched *
857runq_steal_from(struct runq *rq, u_char start)
858{
859	struct td_sched *ts;
860	struct rqbits *rqb;
861	struct rqhead *rqh;
862	int first;
863	int bit;
864	int pri;
865	int i;
866
867	rqb = &rq->rq_status;
868	bit = start & (RQB_BPW -1);
869	pri = 0;
870	first = 0;
871again:
872	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
873		if (rqb->rqb_bits[i] == 0)
874			continue;
875		if (bit != 0) {
876			for (pri = bit; pri < RQB_BPW; pri++)
877				if (rqb->rqb_bits[i] & (1ul << pri))
878					break;
879			if (pri >= RQB_BPW)
880				continue;
881		} else
882			pri = RQB_FFS(rqb->rqb_bits[i]);
883		pri += (i << RQB_L2BPW);
884		rqh = &rq->rq_queues[pri];
885		TAILQ_FOREACH(ts, rqh, ts_procq) {
886			if (first && THREAD_CAN_MIGRATE(ts->ts_thread))
887				return (ts);
888			first = 1;
889		}
890	}
891	if (start != 0) {
892		start = 0;
893		goto again;
894	}
895
896	return (NULL);
897}
898
899/*
900 * Steals load from a standard linear queue.
901 */
902static struct td_sched *
903runq_steal(struct runq *rq)
904{
905	struct rqhead *rqh;
906	struct rqbits *rqb;
907	struct td_sched *ts;
908	int word;
909	int bit;
910
911	rqb = &rq->rq_status;
912	for (word = 0; word < RQB_LEN; word++) {
913		if (rqb->rqb_bits[word] == 0)
914			continue;
915		for (bit = 0; bit < RQB_BPW; bit++) {
916			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
917				continue;
918			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
919			TAILQ_FOREACH(ts, rqh, ts_procq)
920				if (THREAD_CAN_MIGRATE(ts->ts_thread))
921					return (ts);
922		}
923	}
924	return (NULL);
925}
926
927/*
928 * Attempt to steal a thread in priority order from a thread queue.
929 */
930static struct td_sched *
931tdq_steal(struct tdq *tdq)
932{
933	struct td_sched *ts;
934
935	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
936	if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
937		return (ts);
938	if ((ts = runq_steal_from(&tdq->tdq_timeshare, tdq->tdq_ridx)) != NULL)
939		return (ts);
940	return (runq_steal(&tdq->tdq_idle));
941}
942
943/*
944 * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
945 * current lock and returns with the assigned queue locked.
946 */
947static inline struct tdq *
948sched_setcpu(struct td_sched *ts, int cpu, int flags)
949{
950	struct thread *td;
951	struct tdq *tdq;
952
953	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
954
955	tdq = TDQ_CPU(cpu);
956	td = ts->ts_thread;
957	ts->ts_cpu = cpu;
958
959	/* If the lock matches just return the queue. */
960	if (td->td_lock == TDQ_LOCKPTR(tdq))
961		return (tdq);
962#ifdef notyet
963	/*
964	 * If the thread isn't running its lockptr is a
965	 * turnstile or a sleepqueue.  We can just lock_set without
966	 * blocking.
967	 */
968	if (TD_CAN_RUN(td)) {
969		TDQ_LOCK(tdq);
970		thread_lock_set(td, TDQ_LOCKPTR(tdq));
971		return (tdq);
972	}
973#endif
974	/*
975	 * The hard case is migration: we need to block the thread first to
976	 * prevent order reversals with other CPUs' locks.
977	 */
978	thread_lock_block(td);
979	TDQ_LOCK(tdq);
980	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
981	return (tdq);
982}
983
984/*
985 * Find the thread queue running the lowest priority thread.
986 */
987static int
988tdq_lowestpri(void)
989{
990	struct tdq *tdq;
991	int lowpri;
992	int lowcpu;
993	int lowload;
994	int load;
995	int cpu;
996	int pri;
997
998	lowload = 0;
999	lowpri = lowcpu = 0;
1000	for (cpu = 0; cpu <= mp_maxid; cpu++) {
1001		if (CPU_ABSENT(cpu))
1002			continue;
1003		tdq = TDQ_CPU(cpu);
1004		pri = tdq->tdq_lowpri;
1005		load = TDQ_CPU(cpu)->tdq_load;
1006		CTR4(KTR_ULE,
1007		    "cpu %d pri %d lowcpu %d lowpri %d",
1008		    cpu, pri, lowcpu, lowpri);
1009		if (pri < lowpri)
1010			continue;
1011		if (lowpri && lowpri == pri && load > lowload)
1012			continue;
1013		lowpri = pri;
1014		lowcpu = cpu;
1015		lowload = load;
1016	}
1017
1018	return (lowcpu);
1019}
1020
1021/*
1022 * Find the thread queue with the least load.
1023 */
1024static int
1025tdq_lowestload(void)
1026{
1027	struct tdq *tdq;
1028	int lowload;
1029	int lowpri;
1030	int lowcpu;
1031	int load;
1032	int cpu;
1033	int pri;
1034
1035	lowcpu = 0;
1036	lowload = TDQ_CPU(0)->tdq_load;
1037	lowpri = TDQ_CPU(0)->tdq_lowpri;
1038	for (cpu = 1; cpu <= mp_maxid; cpu++) {
1039		if (CPU_ABSENT(cpu))
1040			continue;
1041		tdq = TDQ_CPU(cpu);
1042		load = tdq->tdq_load;
1043		pri = tdq->tdq_lowpri;
1044		CTR4(KTR_ULE, "cpu %d load %d lowcpu %d lowload %d",
1045		    cpu, load, lowcpu, lowload);
1046		if (load > lowload)
1047			continue;
1048		if (load == lowload && pri < lowpri)
1049			continue;
1050		lowcpu = cpu;
1051		lowload = load;
1052		lowpri = pri;
1053	}
1054
1055	return (lowcpu);
1056}
1057
1058/*
1059 * Pick the destination cpu for sched_add().  Respects affinity and makes
1060 * a determination based on load or priority of available processors.
1061 */
1062static int
1063sched_pickcpu(struct td_sched *ts, int flags)
1064{
1065	struct tdq *tdq;
1066	int self;
1067	int pri;
1068	int cpu;
1069
1070	cpu = self = PCPU_GET(cpuid);
1071	if (smp_started == 0)
1072		return (self);
1073	/*
1074	 * Don't migrate a running thread from sched_switch().
1075	 */
1076	if (flags & SRQ_OURSELF) {
1077		CTR1(KTR_ULE, "YIELDING %d",
1078		    curthread->td_priority);
1079		return (self);
1080	}
1081	pri = ts->ts_thread->td_priority;
1082	cpu = ts->ts_cpu;
1083	/*
1084	 * Regardless of affinity, if the last cpu is idle send it there.
1085	 */
1086	tdq = TDQ_CPU(cpu);
1087	if (tdq->tdq_lowpri > PRI_MIN_IDLE) {
1088		CTR5(KTR_ULE,
1089		    "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d",
1090		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
1091		    tdq->tdq_lowpri);
1092		return (ts->ts_cpu);
1093	}
1094	/*
1095	 * If we have affinity, try to place it on the cpu we last ran on.
1096	 */
1097	if (SCHED_AFFINITY(ts) && tdq->tdq_lowpri > pri) {
1098		CTR5(KTR_ULE,
1099		    "affinity for %d, ltick %d ticks %d pri %d curthread %d",
1100		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
1101		    tdq->tdq_lowpri);
1102		return (ts->ts_cpu);
1103	}
1104	/*
1105	 * Look for an idle group.
1106	 */
1107	CTR1(KTR_ULE, "tdq_idle %X", tdq_idle);
1108	cpu = ffs(tdq_idle);
1109	if (cpu)
1110		return (--cpu);
1111	/*
1112	 * If there are no idle cores see if we can run the thread locally.
1113	 * This may improve locality among sleepers and wakers when there
1114	 * is shared data.
1115	 */
1116	if (tryself && pri < TDQ_CPU(self)->tdq_lowpri) {
1117		CTR1(KTR_ULE, "tryself %d",
1118		    curthread->td_priority);
1119		return (self);
1120	}
1121	/*
1122 	 * Now search for the cpu running the lowest priority thread with
1123	 * the least load.
1124	 */
1125	if (pick_pri)
1126		cpu = tdq_lowestpri();
1127	else
1128		cpu = tdq_lowestload();
1129	return (cpu);
1130}
1131
1132#endif	/* SMP */
1133
1134/*
1135 * Pick the highest priority task we have and return it.
1136 */
1137static struct td_sched *
1138tdq_choose(struct tdq *tdq)
1139{
1140	struct td_sched *ts;
1141
1142	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1143	ts = runq_choose(&tdq->tdq_realtime);
1144	if (ts != NULL)
1145		return (ts);
1146	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
1147	if (ts != NULL) {
1148		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
1149		    ("tdq_choose: Invalid priority on timeshare queue %d",
1150		    ts->ts_thread->td_priority));
1151		return (ts);
1152	}
1153
1154	ts = runq_choose(&tdq->tdq_idle);
1155	if (ts != NULL) {
1156		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
1157		    ("tdq_choose: Invalid priority on idle queue %d",
1158		    ts->ts_thread->td_priority));
1159		return (ts);
1160	}
1161
1162	return (NULL);
1163}
1164
1165/*
1166 * Initialize a thread queue.
1167 */
1168static void
1169tdq_setup(struct tdq *tdq)
1170{
1171
1172	if (bootverbose)
1173		printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
1174	runq_init(&tdq->tdq_realtime);
1175	runq_init(&tdq->tdq_timeshare);
1176	runq_init(&tdq->tdq_idle);
1177	tdq->tdq_load = 0;
1178}
1179
1180#ifdef SMP
1181static void
1182tdg_setup(struct tdq_group *tdg)
1183{
1184	if (bootverbose)
1185		printf("ULE: setup cpu group %d\n", TDG_ID(tdg));
1186	snprintf(tdg->tdg_name, sizeof(tdg->tdg_name),
1187	    "sched lock %d", (int)TDG_ID(tdg));
1188	mtx_init(&tdg->tdg_lock, tdg->tdg_name, "sched lock",
1189	    MTX_SPIN | MTX_RECURSE);
1190	LIST_INIT(&tdg->tdg_members);
1191	tdg->tdg_load = 0;
1192	tdg->tdg_transferable = 0;
1193	tdg->tdg_cpus = 0;
1194	tdg->tdg_mask = 0;
1195	tdg->tdg_cpumask = 0;
1196	tdg->tdg_idlemask = 0;
1197}
1198
1199static void
1200tdg_add(struct tdq_group *tdg, struct tdq *tdq)
1201{
1202	if (tdg->tdg_mask == 0)
1203		tdg->tdg_mask |= 1 << TDQ_ID(tdq);
1204	tdg->tdg_cpumask |= 1 << TDQ_ID(tdq);
1205	tdg->tdg_cpus++;
1206	tdq->tdq_group = tdg;
1207	tdq->tdq_lock = &tdg->tdg_lock;
1208	LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
1209	if (bootverbose)
1210		printf("ULE: adding cpu %d to group %d: cpus %d mask 0x%X\n",
1211		    TDQ_ID(tdq), TDG_ID(tdg), tdg->tdg_cpus, tdg->tdg_cpumask);
1212}
1213
1214static void
1215sched_setup_topology(void)
1216{
1217	struct tdq_group *tdg;
1218	struct cpu_group *cg;
1219	int balance_groups;
1220	struct tdq *tdq;
1221	int i;
1222	int j;
1223
1224	topology = 1;
1225	balance_groups = 0;
1226	for (i = 0; i < smp_topology->ct_count; i++) {
1227		cg = &smp_topology->ct_group[i];
1228		tdg = &tdq_groups[i];
1229		/*
1230		 * Initialize the group.
1231		 */
1232		tdg_setup(tdg);
1233		/*
1234		 * Find all of the group members and add them.
1235		 */
1236		for (j = 0; j < MAXCPU; j++) {
1237			if ((cg->cg_mask & (1 << j)) != 0) {
1238				tdq = TDQ_CPU(j);
1239				tdq_setup(tdq);
1240				tdg_add(tdg, tdq);
1241			}
1242		}
1243		if (tdg->tdg_cpus > 1)
1244			balance_groups = 1;
1245	}
1246	tdg_maxid = smp_topology->ct_count - 1;
1247	if (balance_groups)
1248		sched_balance_groups();
1249}
1250
1251static void
1252sched_setup_smp(void)
1253{
1254	struct tdq_group *tdg;
1255	struct tdq *tdq;
1256	int cpus;
1257	int i;
1258
1259	for (cpus = 0, i = 0; i < MAXCPU; i++) {
1260		if (CPU_ABSENT(i))
1261			continue;
1262		tdq = &tdq_cpu[i];
1263		tdg = &tdq_groups[i];
1264		/*
1265		 * Setup a tdq group with one member.
1266		 */
1267		tdg_setup(tdg);
1268		tdq_setup(tdq);
1269		tdg_add(tdg, tdq);
1270		cpus++;
1271	}
1272	tdg_maxid = cpus - 1;
1273}
1274
1275/*
1276 * Fake a topology with one group containing all CPUs.
1277 */
1278static void
1279sched_fake_topo(void)
1280{
1281#ifdef SCHED_FAKE_TOPOLOGY
1282	static struct cpu_top top;
1283	static struct cpu_group group;
1284
1285	top.ct_count = 1;
1286	top.ct_group = &group;
1287	group.cg_mask = all_cpus;
1288	group.cg_count = mp_ncpus;
1289	group.cg_children = 0;
1290	smp_topology = &top;
1291#endif
1292}
1293#endif
1294
1295/*
1296 * Setup the thread queues and initialize the topology based on MD
1297 * information.
1298 */
1299static void
1300sched_setup(void *dummy)
1301{
1302	struct tdq *tdq;
1303
1304	tdq = TDQ_SELF();
1305#ifdef SMP
1306	sched_fake_topo();
1307	/*
1308	 * Setup tdqs based on a topology configuration or vanilla SMP based
1309	 * on mp_maxid.
1310	 */
1311	if (smp_topology == NULL)
1312		sched_setup_smp();
1313	else
1314		sched_setup_topology();
1315	balance_tdq = tdq;
1316	sched_balance();
1317#else
1318	tdq_setup(tdq);
1319	mtx_init(&tdq_lock, "sched lock", "sched lock", MTX_SPIN | MTX_RECURSE);
1320	tdq->tdq_lock = &tdq_lock;
1321#endif
1322	/*
1323	 * To avoid divide-by-zero, we set realstathz to a dummy value
1324	 * in case sched_clock() is called before sched_initticks().
1325	 */
1326	realstathz = hz;
1327	sched_slice = (realstathz/10);	/* ~100ms */
1328	tickincr = 1 << SCHED_TICK_SHIFT;
1329
1330	/* Add thread0's load since it's running. */
1331	TDQ_LOCK(tdq);
1332	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1333	tdq_load_add(tdq, &td_sched0);
1334	TDQ_UNLOCK(tdq);
1335}
1336
1337/*
1338 * This routine determines the tickincr after stathz and hz are setup.
1339 */
1340/* ARGSUSED */
1341static void
1342sched_initticks(void *dummy)
1343{
1344	int incr;
1345
1346	realstathz = stathz ? stathz : hz;
1347	sched_slice = (realstathz/10);	/* ~100ms */
1348
1349	/*
1350	 * tickincr is shifted out by 10 to avoid rounding errors due to
1351	 * hz not being evenly divisible by stathz on all platforms.
1352	 */
1353	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1354	/*
1355	 * This does not work for values of stathz that are more than
1356	 * hz << SCHED_TICK_SHIFT.  In practice this does not happen.
1357	 */
1358	if (incr == 0)
1359		incr = 1;
1360	tickincr = incr;
1361#ifdef SMP
1362	/*
1363	 * Set the default balance interval now that we know
1364	 * what realstathz is.
1365	 */
1366	balance_interval = realstathz;
1367	/*
1368	 * Set steal thresh to log2(mp_ncpus) but no greater than 4.  This
1369	 * prevents excess thrashing on large machines and excess idle on
1370	 * smaller machines.
1371	 */
1372	steal_thresh = min(ffs(mp_ncpus) - 1, 4);
1373	affinity = SCHED_AFFINITY_DEFAULT;
1374#endif
1375}
1376
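/*
 * Worked example (illustrative, assuming hz = 1000 and stathz = 128):
 * tickincr = (1000 << 10) / 128 = 8000, i.e. each stathz tick charges
 * 8000 / 1024 ~= 7.8 hz ticks of run time in the shifted domain, and
 * sched_slice = 128 / 10 = 12 stathz ticks, roughly 94 ms.
 */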
1377
1378/*
1379 * This is the core of the interactivity algorithm.  Determines a score based
1380 * on past behavior.  It is the ratio of sleep time to run time scaled to
1381 * a [0, 100] integer.  This is the voluntary sleep time of a process, which
1382 * differs from the cpu usage because it does not account for time spent
1383 * waiting on a run-queue.  Would be prettier if we had floating point.
1384 */
1385static int
1386sched_interact_score(struct thread *td)
1387{
1388	struct td_sched *ts;
1389	int div;
1390
1391	ts = td->td_sched;
1392	/*
1393	 * The score is only needed if this is likely to be an interactive
1394	 * task.  Don't go through the expense of computing it if there's
1395	 * no chance.
1396	 */
1397	if (sched_interact <= SCHED_INTERACT_HALF &&
1398		ts->ts_runtime >= ts->ts_slptime)
1399			return (SCHED_INTERACT_HALF);
1400
1401	if (ts->ts_runtime > ts->ts_slptime) {
1402		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1403		return (SCHED_INTERACT_HALF +
1404		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1405	}
1406	if (ts->ts_slptime > ts->ts_runtime) {
1407		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1408		return (ts->ts_runtime / div);
1409	}
1410	/* runtime == slptime */
1411	if (ts->ts_runtime)
1412		return (SCHED_INTERACT_HALF);
1413
1414	/*
1415	 * This can happen if slptime and runtime are 0.
1416	 */
1417	return (0);
1418
1419}
1420
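/*
 * Worked example (illustrative): a thread with ts_slptime = 4000 and
 * ts_runtime = 1000 (any consistent units) takes the slptime > runtime
 * branch: div = max(1, 4000 / 50) = 80, so the score is 1000 / 80 = 12,
 * comfortably interactive under the default threshold of 30.  Reversing
 * the numbers (runtime 4000, slptime 1000) gives 50 + (50 - 1000 / 80) =
 * 88, firmly in batch territory.
 */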
1421/*
1422 * Scale the scheduling priority according to the "interactivity" of this
1423 * process.
1424 */
1425static void
1426sched_priority(struct thread *td)
1427{
1428	int score;
1429	int pri;
1430
1431	if (td->td_pri_class != PRI_TIMESHARE)
1432		return;
1433	/*
1434	 * If the score is interactive we place the thread in the realtime
1435	 * queue with a priority that is less than kernel and interrupt
1436	 * priorities.  These threads are not subject to nice restrictions.
1437	 *
1438	 * Scores greater than this are placed on the normal timeshare queue
1439	 * where the priority is partially decided by the most recent cpu
1440	 * utilization and the rest is decided by nice value.
1441	 *
1442	 * The nice value of the process has a linear effect on the calculated
1443	 * score.  Negative nice values make it easier for a thread to be
1444	 * considered interactive.
1445	 */
1446	score = imax(0, sched_interact_score(td) + td->td_proc->p_nice);
1447	if (score < sched_interact) {
1448		pri = PRI_MIN_REALTIME;
1449		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
1450		    * score;
1451		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
1452		    ("sched_priority: invalid interactive priority %d score %d",
1453		    pri, score));
1454	} else {
1455		pri = SCHED_PRI_MIN;
1456		if (td->td_sched->ts_ticks)
1457			pri += SCHED_PRI_TICKS(td->td_sched);
1458		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
1459		KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
1460		    ("sched_priority: invalid priority %d: nice %d, "
1461		    "ticks %d ftick %d ltick %d tick pri %d",
1462		    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
1463		    td->td_sched->ts_ftick, td->td_sched->ts_ltick,
1464		    SCHED_PRI_TICKS(td->td_sched)));
1465	}
1466	sched_user_prio(td, pri);
1467
1468	return;
1469}
1470
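/*
 * Worked example (illustrative): continuing the score of 12 from above
 * with nice 0, the thread stays under sched_interact (30) and is assigned
 * a priority roughly 12/30ths of the way from PRI_MIN_REALTIME toward
 * PRI_MAX_REALTIME, subject to the integer division above.  A batch
 * thread instead starts at SCHED_PRI_MIN and is pushed to numerically
 * larger (weaker) priorities as SCHED_PRI_TICKS and its nice value grow.
 */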
1471/*
1472 * This routine enforces a maximum limit on the amount of scheduling history
1473 * kept.  It is called after either the slptime or runtime is adjusted.  This
1474 * function is ugly due to integer math.
1475 */
1476static void
1477sched_interact_update(struct thread *td)
1478{
1479	struct td_sched *ts;
1480	u_int sum;
1481
1482	ts = td->td_sched;
1483	sum = ts->ts_runtime + ts->ts_slptime;
1484	if (sum < SCHED_SLP_RUN_MAX)
1485		return;
1486	/*
1487	 * This only happens from two places:
1488	 * 1) We have added an unusual amount of run time from fork_exit.
1489	 * 2) We have added an unusual amount of sleep time from sched_sleep().
1490	 */
1491	if (sum > SCHED_SLP_RUN_MAX * 2) {
1492		if (ts->ts_runtime > ts->ts_slptime) {
1493			ts->ts_runtime = SCHED_SLP_RUN_MAX;
1494			ts->ts_slptime = 1;
1495		} else {
1496			ts->ts_slptime = SCHED_SLP_RUN_MAX;
1497			ts->ts_runtime = 1;
1498		}
1499		return;
1500	}
1501	/*
1502	 * If we have exceeded by more than 1/5th then the algorithm below
1503	 * will not bring us back into range.  Dividing by two here forces
1504	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1505	 */
1506	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1507		ts->ts_runtime /= 2;
1508		ts->ts_slptime /= 2;
1509		return;
1510	}
1511	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1512	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1513}
1514
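/*
 * Worked example (illustrative, assuming hz = 1000): SCHED_SLP_RUN_MAX is
 * 5,120,000 in the shifted domain.  A thread with ts_runtime 3,000,000
 * and ts_slptime 2,500,000 has a sum of 5,500,000, over the limit but
 * under the 6/5 limit of 6,144,000, so both are scaled by 4/5 to
 * 2,400,000 and 2,000,000, preserving the ratio while shedding history.
 */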
1515/*
1516 * Scale back the interactivity history when a child thread is created.  The
1517 * history is inherited from the parent but the thread may behave totally
1518 * differently.  For example, a shell spawning a compiler process.  We want
1519 * to learn that the compiler is behaving badly very quickly.
1520 */
1521static void
1522sched_interact_fork(struct thread *td)
1523{
1524	int ratio;
1525	int sum;
1526
1527	sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
1528	if (sum > SCHED_SLP_RUN_FORK) {
1529		ratio = sum / SCHED_SLP_RUN_FORK;
1530		td->td_sched->ts_runtime /= ratio;
1531		td->td_sched->ts_slptime /= ratio;
1532	}
1533}
1534
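/*
 * Worked example (illustrative, assuming hz = 1000): SCHED_SLP_RUN_FORK
 * is 512,000 in the shifted domain.  A child inheriting runtime 600,000
 * and slptime 1,000,000 has a sum of 1,600,000, so ratio = 3 and both
 * values are divided by 3, leaving 200,000 and 333,333 for the child to
 * prove itself against.
 */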
1535/*
1536 * Called from proc0_init() to setup the scheduler fields.
1537 */
1538void
1539schedinit(void)
1540{
1541
1542	/*
1543	 * Set up the scheduler specific parts of proc0.
1544	 */
1545	proc0.p_sched = NULL; /* XXX */
1546	thread0.td_sched = &td_sched0;
1547	td_sched0.ts_ltick = ticks;
1548	td_sched0.ts_ftick = ticks;
1549	td_sched0.ts_thread = &thread0;
1550}
1551
1552/*
1553 * This is only somewhat accurate since given many processes of the same
1554 * priority they will switch when their slices run out, which will be
1555 * at most sched_slice stathz ticks.
1556 */
1557int
1558sched_rr_interval(void)
1559{
1560
1561	/* Convert sched_slice to hz */
1562	return (hz/(realstathz/sched_slice));
1563}
1564
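/*
 * Worked example (illustrative, assuming hz = 1000 and stathz = 128):
 * sched_slice is 12 stathz ticks, so this returns 1000 / (128 / 12) =
 * 1000 / 10 = 100 hz ticks, i.e. a round-robin interval of about 100 ms.
 */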
1565/*
1566 * Update the percent cpu tracking information when it is requested or
1567 * the total history exceeds the maximum.  We keep a sliding history of
1568 * tick counts that slowly decays.  This is less precise than the 4BSD
1569 * mechanism since it happens with less regular and frequent events.
1570 */
1571static void
1572sched_pctcpu_update(struct td_sched *ts)
1573{
1574
1575	if (ts->ts_ticks == 0)
1576		return;
1577	if (ticks - (hz / 10) < ts->ts_ltick &&
1578	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
1579		return;
1580	/*
1581	 * Adjust counters and watermark for pctcpu calc.
1582	 */
1583	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
1584		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
1585			    SCHED_TICK_TARG;
1586	else
1587		ts->ts_ticks = 0;
1588	ts->ts_ltick = ticks;
1589	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
1590}
1591
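/*
 * Worked example (illustrative, assuming hz = 1000): if a thread's window
 * has stretched to 15000 ticks (ts_ftick == ticks - 15000) but ts_ltick is
 * recent, ts_ticks is rescaled by 10000 / 15000, i.e. to about 2/3 of its
 * value, and the window is snapped back to the last SCHED_TICK_TARG ticks.
 */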
1592/*
1593 * Adjust the priority of a thread.  Move it to the appropriate run-queue
1594 * if necessary.  This is the back-end for several priority related
1595 * functions.
1596 */
1597static void
1598sched_thread_priority(struct thread *td, u_char prio)
1599{
1600	struct td_sched *ts;
1601
1602	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
1603	    td, td->td_name, td->td_priority, prio, curthread,
1604	    curthread->td_name);
1605	ts = td->td_sched;
1606	THREAD_LOCK_ASSERT(td, MA_OWNED);
1607	if (td->td_priority == prio)
1608		return;
1609
1610	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1611		/*
1612		 * If the priority has been elevated due to priority
1613		 * propagation, we may have to move ourselves to a new
1614		 * queue.  This could be optimized to not re-add in some
1615		 * cases.
1616		 */
1617		sched_rem(td);
1618		td->td_priority = prio;
1619		sched_add(td, SRQ_BORROWING);
1620	} else {
1621#ifdef SMP
1622		struct tdq *tdq;
1623
1624		tdq = TDQ_CPU(ts->ts_cpu);
1625		if (prio < tdq->tdq_lowpri)
1626			tdq->tdq_lowpri = prio;
1627#endif
1628		td->td_priority = prio;
1629	}
1630}
1631
1632/*
1633 * Update a thread's priority when it is lent another thread's
1634 * priority.
1635 */
1636void
1637sched_lend_prio(struct thread *td, u_char prio)
1638{
1639
1640	td->td_flags |= TDF_BORROWING;
1641	sched_thread_priority(td, prio);
1642}
1643
1644/*
1645 * Restore a thread's priority when priority propagation is
1646 * over.  The prio argument is the minimum priority the thread
1647 * needs to have to satisfy other possible priority lending
1648 * requests.  If the thread's regular priority is less
1649 * important than prio, the thread will keep a priority boost
1650 * of prio.
1651 */
1652void
1653sched_unlend_prio(struct thread *td, u_char prio)
1654{
1655	u_char base_pri;
1656
1657	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1658	    td->td_base_pri <= PRI_MAX_TIMESHARE)
1659		base_pri = td->td_user_pri;
1660	else
1661		base_pri = td->td_base_pri;
1662	if (prio >= base_pri) {
1663		td->td_flags &= ~TDF_BORROWING;
1664		sched_thread_priority(td, base_pri);
1665	} else
1666		sched_lend_prio(td, prio);
1667}
1668
1669/*
1670 * Standard entry for setting the priority to an absolute value.
1671 */
1672void
1673sched_prio(struct thread *td, u_char prio)
1674{
1675	u_char oldprio;
1676
1677	/* First, update the base priority. */
1678	td->td_base_pri = prio;
1679
1680	/*
1681	 * If the thread is borrowing another thread's priority, don't
1682	 * ever lower the priority.
1683	 */
1684	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1685		return;
1686
1687	/* Change the real priority. */
1688	oldprio = td->td_priority;
1689	sched_thread_priority(td, prio);
1690
1691	/*
1692	 * If the thread is on a turnstile, then let the turnstile update
1693	 * its state.
1694	 */
1695	if (TD_ON_LOCK(td) && oldprio != prio)
1696		turnstile_adjust(td, oldprio);
1697}
1698
1699/*
1700 * Set the base user priority; does not affect the current running priority.
1701 */
1702void
1703sched_user_prio(struct thread *td, u_char prio)
1704{
1705	u_char oldprio;
1706
1707	td->td_base_user_pri = prio;
1708	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
1709                return;
1710	oldprio = td->td_user_pri;
1711	td->td_user_pri = prio;
1712}
1713
1714void
1715sched_lend_user_prio(struct thread *td, u_char prio)
1716{
1717	u_char oldprio;
1718
1719	THREAD_LOCK_ASSERT(td, MA_OWNED);
1720	td->td_flags |= TDF_UBORROWING;
1721	oldprio = td->td_user_pri;
1722	td->td_user_pri = prio;
1723}
1724
1725void
1726sched_unlend_user_prio(struct thread *td, u_char prio)
1727{
1728	u_char base_pri;
1729
1730	THREAD_LOCK_ASSERT(td, MA_OWNED);
1731	base_pri = td->td_base_user_pri;
1732	if (prio >= base_pri) {
1733		td->td_flags &= ~TDF_UBORROWING;
1734		sched_user_prio(td, base_pri);
1735	} else {
1736		sched_lend_user_prio(td, prio);
1737	}
1738}
1739
1740/*
1741 * Add the thread passed as 'newtd' to the run queue before selecting
1742 * the next thread to run.  This is only used for KSE.
1743 */
1744static void
1745sched_switchin(struct tdq *tdq, struct thread *td)
1746{
1747#ifdef SMP
1748	spinlock_enter();
1749	TDQ_UNLOCK(tdq);
1750	thread_lock(td);
1751	spinlock_exit();
1752	sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
1753#else
1754	td->td_lock = TDQ_LOCKPTR(tdq);
1755#endif
1756	tdq_add(tdq, td, SRQ_YIELDING);
1757	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1758}
1759
1760/*
1761 * Block a thread for switching.  Similar to thread_block() but does not
1762 * bump the spin count.
1763 */
1764static inline struct mtx *
1765thread_block_switch(struct thread *td)
1766{
1767	struct mtx *lock;
1768
1769	THREAD_LOCK_ASSERT(td, MA_OWNED);
1770	lock = td->td_lock;
1771	td->td_lock = &blocked_lock;
1772	mtx_unlock_spin(lock);
1773
1774	return (lock);
1775}
1776
1777/*
1778 * Handle migration from sched_switch().  This happens only for
1779 * cpu binding.
1780 */
1781static struct mtx *
1782sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1783{
1784	struct tdq *tdn;
1785
1786	tdn = TDQ_CPU(td->td_sched->ts_cpu);
1787#ifdef SMP
1788	/*
1789	 * Do the lock dance required to avoid LOR.  We grab an extra
1790	 * spinlock nesting to prevent preemption while we're
1791	 * not holding either run-queue lock.
1792	 */
1793	spinlock_enter();
1794	thread_block_switch(td);	/* This releases the lock on tdq. */
1795	TDQ_LOCK(tdn);
1796	tdq_add(tdn, td, flags);
1797	tdq_notify(td->td_sched);
1798	/*
1799	 * After we unlock tdn the new cpu still can't switch into this
1800	 * thread until we've unblocked it in cpu_switch().  The lock
1801	 * pointers may match in the case of HTT cores.  Don't unlock here
1802	 * or we can deadlock when the other CPU runs the IPI handler.
1803	 */
1804	if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
1805		TDQ_UNLOCK(tdn);
1806		TDQ_LOCK(tdq);
1807	}
1808	spinlock_exit();
1809#endif
1810	return (TDQ_LOCKPTR(tdn));
1811}
1812
1813/*
1814 * Release a thread that was blocked with thread_block_switch().
1815 */
1816static inline void
1817thread_unblock_switch(struct thread *td, struct mtx *mtx)
1818{
1819	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
1820	    (uintptr_t)mtx);
1821}
1822
1823/*
1824 * Switch threads.  This function has to handle threads coming in while
1825 * blocked for some reason, running, or idle.  It also must deal with
1826 * migrating a thread from one queue to another as running threads may
1827 * be assigned elsewhere via binding.
1828 */
1829void
1830sched_switch(struct thread *td, struct thread *newtd, int flags)
1831{
1832	struct tdq *tdq;
1833	struct td_sched *ts;
1834	struct mtx *mtx;
1835	int srqflag;
1836	int cpuid;
1837
1838	THREAD_LOCK_ASSERT(td, MA_OWNED);
1839
1840	cpuid = PCPU_GET(cpuid);
1841	tdq = TDQ_CPU(cpuid);
1842	ts = td->td_sched;
1843	mtx = td->td_lock;
1844#ifdef SMP
1845	ts->ts_rltick = ticks;
1846	if (newtd && newtd->td_priority < tdq->tdq_lowpri)
1847		tdq->tdq_lowpri = newtd->td_priority;
1848#endif
1849	td->td_lastcpu = td->td_oncpu;
1850	td->td_oncpu = NOCPU;
1851	td->td_flags &= ~TDF_NEEDRESCHED;
1852	td->td_owepreempt = 0;
1853	/*
1854	 * The lock pointer in an idle thread should never change.  Reset it
1855	 * to CAN_RUN as well.
1856	 */
1857	if (TD_IS_IDLETHREAD(td)) {
1858		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1859		TD_SET_CAN_RUN(td);
1860	} else if (TD_IS_RUNNING(td)) {
1861		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1862		tdq_load_rem(tdq, ts);
1863		srqflag = (flags & SW_PREEMPT) ?
1864		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1865		    SRQ_OURSELF|SRQ_YIELDING;
1866		if (ts->ts_cpu == cpuid)
1867			tdq_add(tdq, td, srqflag);
1868		else
1869			mtx = sched_switch_migrate(tdq, td, srqflag);
1870	} else {
1871		/* This thread must be going to sleep. */
1872		TDQ_LOCK(tdq);
1873		mtx = thread_block_switch(td);
1874		tdq_load_rem(tdq, ts);
1875	}
1876	/*
1877	 * We enter here with the thread blocked and assigned to the
1878	 * appropriate cpu run-queue or sleep-queue and with the current
1879	 * thread-queue locked.
1880	 */
1881	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
1882	/*
1883	 * If KSE assigned a new thread just add it here and let choosethread
1884	 * select the best one.
1885	 */
1886	if (newtd != NULL)
1887		sched_switchin(tdq, newtd);
1888	newtd = choosethread();
1889	/*
1890	 * Call the MD code to switch contexts if necessary.
1891	 */
1892	if (td != newtd) {
1893#ifdef	HWPMC_HOOKS
1894		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1895			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1896#endif
1897		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
1898		TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
1899		cpu_switch(td, newtd, mtx);
1900		/*
1901		 * We may return from cpu_switch on a different cpu.  However,
1902		 * we always return with td_lock pointing to the current cpu's
1903		 * run queue lock.
1904		 */
1905		cpuid = PCPU_GET(cpuid);
1906		tdq = TDQ_CPU(cpuid);
1907		lock_profile_obtain_lock_success(
1908		    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
1909#ifdef	HWPMC_HOOKS
1910		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1911			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1912#endif
1913	} else
1914		thread_unblock_switch(td, mtx);
1915	/*
1916	 * Assert that all went well and return.
1917	 */
1918#ifdef SMP
1919	/* We should always get here with the lowest priority td possible */
1920	tdq->tdq_lowpri = td->td_priority;
1921#endif
1922	TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
1923	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1924	td->td_oncpu = cpuid;
1925}
1926
1927/*
1928 * Adjust thread priorities as a result of a nice request.
1929 */
1930void
1931sched_nice(struct proc *p, int nice)
1932{
1933	struct thread *td;
1934
1935	PROC_LOCK_ASSERT(p, MA_OWNED);
1936	PROC_SLOCK_ASSERT(p, MA_OWNED);
1937
1938	p->p_nice = nice;
1939	FOREACH_THREAD_IN_PROC(p, td) {
1940		thread_lock(td);
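		/* Recompute the user priority for the new nice value and apply it. */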
1941		sched_priority(td);
1942		sched_prio(td, td->td_base_user_pri);
1943		thread_unlock(td);
1944	}
1945}
1946
1947/*
1948 * Record the sleep time for the interactivity scorer.
1949 */
1950void
1951sched_sleep(struct thread *td)
1952{
1953
1954	THREAD_LOCK_ASSERT(td, MA_OWNED);
1955
1956	td->td_slptick = ticks;
1957}
1958
1959/*
1960 * Schedule a thread to resume execution and record how long it voluntarily
1961 * slept.  We also update the pctcpu, interactivity, and priority.
1962 */
1963void
1964sched_wakeup(struct thread *td)
1965{
1966	struct td_sched *ts;
1967	int slptick;
1968
1969	THREAD_LOCK_ASSERT(td, MA_OWNED);
1970	ts = td->td_sched;
1971	/*
1972	 * If we slept for more than a tick, update our interactivity and
1973	 * priority.
1974	 */
1975	slptick = td->td_slptick;
1976	td->td_slptick = 0;
1977	if (slptick && slptick != ticks) {
1978		u_int hzticks;
1979
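		/* Scale the ticks we slept into the scheduler's fixed-point units. */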
1980		hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
1981		ts->ts_slptime += hzticks;
1982		sched_interact_update(td);
1983		sched_pctcpu_update(ts);
1984		sched_priority(td);
1985	}
1986	/* Reset the slice value after we sleep. */
1987	ts->ts_slice = sched_slice;
1988	sched_add(td, SRQ_BORING);
1989}
1990
1991/*
1992 * Penalize the parent for creating a new child and initialize the child's
1993 * priority.
1994 */
1995void
1996sched_fork(struct thread *td, struct thread *child)
1997{
1998	THREAD_LOCK_ASSERT(td, MA_OWNED);
1999	sched_fork_thread(td, child);
2000	/*
2001	 * Penalize the parent and child for forking.
2002	 */
2003	sched_interact_fork(child);
2004	sched_priority(child);
2005	td->td_sched->ts_runtime += tickincr;
2006	sched_interact_update(td);
2007	sched_priority(td);
2008}
2009
2010/*
2011 * Fork a new thread, which may be within the same process.
2012 */
2013void
2014sched_fork_thread(struct thread *td, struct thread *child)
2015{
2016	struct td_sched *ts;
2017	struct td_sched *ts2;
2018
2019	/*
2020	 * Initialize child.
2021	 */
2022	THREAD_LOCK_ASSERT(td, MA_OWNED);
2023	sched_newthread(child);
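	/* The child starts out protected by this cpu's run-queue lock. */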
2024	child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
2025	ts = td->td_sched;
2026	ts2 = child->td_sched;
2027	ts2->ts_cpu = ts->ts_cpu;
2028	ts2->ts_runq = NULL;
2029	/*
2030	 * Grab our parent's cpu estimation information and priority.
2031	 */
2032	ts2->ts_ticks = ts->ts_ticks;
2033	ts2->ts_ltick = ts->ts_ltick;
2034	ts2->ts_ftick = ts->ts_ftick;
2035	child->td_user_pri = td->td_user_pri;
2036	child->td_base_user_pri = td->td_base_user_pri;
2037	/*
2038	 * And update interactivity score.
2039	 */
2040	ts2->ts_slptime = ts->ts_slptime;
2041	ts2->ts_runtime = ts->ts_runtime;
2042	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
2043}
2044
2045/*
2046 * Adjust the priority class of a thread.
2047 */
2048void
2049sched_class(struct thread *td, int class)
2050{
2051
2052	THREAD_LOCK_ASSERT(td, MA_OWNED);
2053	if (td->td_pri_class == class)
2054		return;
2055
2056#ifdef SMP
2057	/*
2058	 * On SMP if we're on the RUNQ we must adjust the transferable
2059	 * count because we could be changing to or from an interrupt
2060	 * class.
2061	 */
2062	if (TD_ON_RUNQ(td)) {
2063		struct tdq *tdq;
2064
2065		tdq = TDQ_CPU(td->td_sched->ts_cpu);
2066		if (THREAD_CAN_MIGRATE(td)) {
2067			tdq->tdq_transferable--;
2068			tdq->tdq_group->tdg_transferable--;
2069		}
2070		td->td_pri_class = class;
2071		if (THREAD_CAN_MIGRATE(td)) {
2072			tdq->tdq_transferable++;
2073			tdq->tdq_group->tdg_transferable++;
2074		}
2075	}
2076#endif
2077	td->td_pri_class = class;
2078}
2079
2080/*
2081 * Return some of the child's priority and interactivity to the parent.
2082 */
2083void
2084sched_exit(struct proc *p, struct thread *child)
2085{
2086	struct thread *td;
2087
2088	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
2089	    child, child->td_name, child->td_priority);
2090
2091	PROC_SLOCK_ASSERT(p, MA_OWNED);
2092	td = FIRST_THREAD_IN_PROC(p);
2093	sched_exit_thread(td, child);
2094}
2095
2096/*
2097 * Penalize another thread for the time spent on this one.  This helps to
2098 * worsen the priority and interactivity of processes which schedule batch
2099 * jobs such as make.  This has little effect on the make process itself but
2100 * causes new processes spawned by it to receive worse scores immediately.
2101 */
2102void
2103sched_exit_thread(struct thread *td, struct thread *child)
2104{
2105
2106	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
2107	    child, child->td_name, child->td_priority);
2108
2109#ifdef KSE
2110	/*
2111	 * KSE forks and exits so often that this penalty causes short-lived
2112	 * threads to always be non-interactive.  This causes mozilla to
2113	 * crawl under load.
2114	 */
2115	if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
2116		return;
2117#endif
2118	/*
2119	 * Give the child's runtime to the parent without returning the
2120	 * sleep time as a penalty to the parent.  This causes shells that
2121	 * launch expensive things to mark their children as expensive.
2122	 */
2123	thread_lock(td);
2124	td->td_sched->ts_runtime += child->td_sched->ts_runtime;
2125	sched_interact_update(td);
2126	sched_priority(td);
2127	thread_unlock(td);
2128}
2129
2130/*
2131 * Fix priorities on return to user-space.  Priorities may be elevated due
2132 * to static priorities in msleep() or similar.
2133 */
2134void
2135sched_userret(struct thread *td)
2136{
2137	/*
2138	 * XXX we cheat slightly on the locking here to avoid locking in
2139	 * the usual case.  Setting td_priority here is essentially an
2140	 * incomplete workaround for not setting it properly elsewhere.
2141	 * Now that some interrupt handlers are threads, not setting it
2142	 * properly elsewhere can clobber it in the window between setting
2143	 * it here and returning to user mode, so don't waste time setting
2144	 * it perfectly here.
2145	 */
2146	KASSERT((td->td_flags & TDF_BORROWING) == 0,
2147	    ("thread with borrowed priority returning to userland"));
2148	if (td->td_priority != td->td_user_pri) {
2149		thread_lock(td);
2150		td->td_priority = td->td_user_pri;
2151		td->td_base_pri = td->td_user_pri;
2152		thread_unlock(td);
2153	}
2154}
2155
2156/*
2157 * Handle a stathz tick.  This is really only relevant for timeshare
2158 * threads.
2159 */
2160void
2161sched_clock(struct thread *td)
2162{
2163	struct tdq *tdq;
2164	struct td_sched *ts;
2165
2166	THREAD_LOCK_ASSERT(td, MA_OWNED);
2167	tdq = TDQ_SELF();
2168#ifdef SMP
2169	/*
2170	 * We run the long-term load balancer infrequently on the first cpu.
2171	 */
2172	if (balance_tdq == tdq) {
2173		if (balance_ticks && --balance_ticks == 0)
2174			sched_balance();
2175		if (balance_group_ticks && --balance_group_ticks == 0)
2176			sched_balance_groups();
2177	}
2178#endif
2179	/*
2180	 * Advance the insert index once for each tick to ensure that all
2181	 * threads get a chance to run.
2182	 */
2183	if (tdq->tdq_idx == tdq->tdq_ridx) {
2184		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
2185		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
2186			tdq->tdq_ridx = tdq->tdq_idx;
2187	}
2188	ts = td->td_sched;
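	/* FIFO threads are exempt from slice and interactivity accounting. */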
2189	if (td->td_pri_class & PRI_FIFO_BIT)
2190		return;
2191	if (td->td_pri_class == PRI_TIMESHARE) {
2192		/*
2193		 * We used a tick; charge it to the thread so
2194		 * that we can compute our interactivity.
2195		 */
2196		td->td_sched->ts_runtime += tickincr;
2197		sched_interact_update(td);
2198	}
2199	/*
2200	 * We used up one time slice.
2201	 */
2202	if (--ts->ts_slice > 0)
2203		return;
2204	/*
2205	 * We're out of time, recompute priorities and requeue.
2206	 */
2207	sched_priority(td);
2208	td->td_flags |= TDF_NEEDRESCHED;
2209}
2210
2211/*
2212 * Called once per hz tick.  Used for cpu utilization information.  This
2213 * is easier than trying to scale based on stathz.
2214 */
2215void
2216sched_tick(void)
2217{
2218	struct td_sched *ts;
2219
2220	ts = curthread->td_sched;
2221	/* Adjust ticks for pctcpu */
2222	ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2223	ts->ts_ltick = ticks;
2224	/*
2225	 * Update if we've exceeded our desired tick threshold by over one
2226	 * second.
2227	 */
2228	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2229		sched_pctcpu_update(ts);
2230}
2231
2232/*
2233 * Return whether the current CPU has runnable tasks.  Used for in-kernel
2234 * cooperative idle threads.
2235 */
2236int
2237sched_runnable(void)
2238{
2239	struct tdq *tdq;
2240	int load;
2241
2242	load = 1;
2243
2244	tdq = TDQ_SELF();
2245	if ((curthread->td_flags & TDF_IDLETD) != 0) {
2246		if (tdq->tdq_load > 0)
2247			goto out;
2248	} else
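		/* Discount ourselves; we already contribute one to the load. */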
2249		if (tdq->tdq_load - 1 > 0)
2250			goto out;
2251	load = 0;
2252out:
2253	return (load);
2254}
2255
2256/*
2257 * Choose the highest priority thread to run.  The thread is removed from
2258 * the run-queue while running, but the load remains.  For SMP we set
2259 * the tdq in the global idle bitmask if it idles here.
2260 */
2261struct thread *
2262sched_choose(void)
2263{
2264#ifdef SMP
2265	struct tdq_group *tdg;
2266#endif
2267	struct td_sched *ts;
2268	struct tdq *tdq;
2269
2270	tdq = TDQ_SELF();
2271	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2272	ts = tdq_choose(tdq);
2273	if (ts) {
2274		tdq_runq_rem(tdq, ts);
2275		return (ts->ts_thread);
2276	}
2277#ifdef SMP
2278	/*
2279	 * We only set the idled bit when all of the cpus in the group are
2280	 * idle.  Otherwise we could get into a situation where a thread bounces
2281	 * back and forth between two idle cores on separate physical CPUs.
2282	 */
2283	tdg = tdq->tdq_group;
2284	tdg->tdg_idlemask |= PCPU_GET(cpumask);
2285	if (tdg->tdg_idlemask == tdg->tdg_cpumask)
2286		atomic_set_int(&tdq_idle, tdg->tdg_mask);
2287	tdq->tdq_lowpri = PRI_MAX_IDLE;
2288#endif
2289	return (PCPU_GET(idlethread));
2290}
2291
2292/*
2293 * Set owepreempt if necessary.  Preemption never happens directly in ULE;
2294 * we always request it once we exit a critical section.
2295 */
2296static inline void
2297sched_setpreempt(struct thread *td)
2298{
2299	struct thread *ctd;
2300	int cpri;
2301	int pri;
2302
2303	ctd = curthread;
2304	pri = td->td_priority;
2305	cpri = ctd->td_priority;
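	/* Lower numeric priority values represent higher priority. */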
2306	if (td->td_priority < ctd->td_priority)
2307		curthread->td_flags |= TDF_NEEDRESCHED;
2308	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2309		return;
2310	/*
2311	 * Always preempt IDLE threads.  Otherwise, preempt only if the
2312	 * preempting thread is an ithread.
2313	 */
2314	if (pri > preempt_thresh && cpri < PRI_MIN_IDLE)
2315		return;
2316	ctd->td_owepreempt = 1;
2317	return;
2318}
2319
2320/*
2321 * Add a thread to a thread queue.  Initializes priority, slice, runq, and
2322 * adds it to the appropriate queue.  This is the internal function called
2323 * when the tdq is predetermined.
2324 */
2325void
2326tdq_add(struct tdq *tdq, struct thread *td, int flags)
2327{
2328	struct td_sched *ts;
2329	int class;
2330#ifdef SMP
2331	int cpumask;
2332#endif
2333
2334	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2335	KASSERT((td->td_inhibitors == 0),
2336	    ("sched_add: trying to run inhibited thread"));
2337	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
2338	    ("sched_add: bad thread state"));
2339	KASSERT(td->td_flags & TDF_INMEM,
2340	    ("sched_add: thread swapped out"));
2341
2342	ts = td->td_sched;
2343	class = PRI_BASE(td->td_pri_class);
2344	TD_SET_RUNQ(td);
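	/* Refresh the slice if the previous one was fully consumed. */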
2345	if (ts->ts_slice == 0)
2346		ts->ts_slice = sched_slice;
2347	/*
2348	 * Pick the run queue based on priority.
2349	 */
2350	if (td->td_priority <= PRI_MAX_REALTIME)
2351		ts->ts_runq = &tdq->tdq_realtime;
2352	else if (td->td_priority <= PRI_MAX_TIMESHARE)
2353		ts->ts_runq = &tdq->tdq_timeshare;
2354	else
2355		ts->ts_runq = &tdq->tdq_idle;
2356#ifdef SMP
2357	cpumask = 1 << ts->ts_cpu;
2358	/*
2359	 * If we had been idle, clear our bit in the group and potentially
2360	 * the global bitmap.
2361	 */
2362	if ((class != PRI_IDLE && class != PRI_ITHD) &&
2363	    (tdq->tdq_group->tdg_idlemask & cpumask) != 0) {
2364		/*
2365		 * Check to see if our group is unidling, and if so, remove it
2366		 * from the global idle mask.
2367		 */
2368		if (tdq->tdq_group->tdg_idlemask ==
2369		    tdq->tdq_group->tdg_cpumask)
2370			atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
2371		/*
2372		 * Now remove ourselves from the group specific idle mask.
2373		 */
2374		tdq->tdq_group->tdg_idlemask &= ~cpumask;
2375	}
2376	if (td->td_priority < tdq->tdq_lowpri)
2377		tdq->tdq_lowpri = td->td_priority;
2378#endif
2379	tdq_runq_add(tdq, ts, flags);
2380	tdq_load_add(tdq, ts);
2381}
2382
2383/*
2384 * Select the target thread queue and add a thread to it.  Request
2385 * preemption or IPI a remote processor if required.
2386 */
2387void
2388sched_add(struct thread *td, int flags)
2389{
2390	struct td_sched *ts;
2391	struct tdq *tdq;
2392#ifdef SMP
2393	int cpuid;
2394	int cpu;
2395#endif
2396	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
2397	    td, td->td_name, td->td_priority, curthread,
2398	    curthread->td_name);
2399	THREAD_LOCK_ASSERT(td, MA_OWNED);
2400	ts = td->td_sched;
2401	/*
2402	 * Recalculate the priority before we select the target cpu or
2403	 * run-queue.
2404	 */
2405	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2406		sched_priority(td);
2407#ifdef SMP
2408	cpuid = PCPU_GET(cpuid);
2409	/*
2410	 * Pick the destination cpu and, if it isn't ours, transfer the
2411	 * thread to the target cpu.
2412	 */
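	/*
	 * Interrupt threads queued from interrupt context are kept on the
	 * current cpu, presumably so they run near the source interrupt.
	 */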
2413	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_MIGRATE(td) &&
2414	    curthread->td_intr_nesting_level)
2415		ts->ts_cpu = cpuid;
2416	if (!THREAD_CAN_MIGRATE(td))
2417		cpu = ts->ts_cpu;
2418	else
2419		cpu = sched_pickcpu(ts, flags);
2420	tdq = sched_setcpu(ts, cpu, flags);
2421	tdq_add(tdq, td, flags);
2422	if (cpu != cpuid) {
2423		tdq_notify(ts);
2424		return;
2425	}
2426#else
2427	tdq = TDQ_SELF();
2428	TDQ_LOCK(tdq);
2429	/*
2430	 * Now that the thread is moving to the run-queue, set the lock
2431	 * to the scheduler's lock.
2432	 */
2433	thread_lock_set(td, TDQ_LOCKPTR(tdq));
2434	tdq_add(tdq, td, flags);
2435#endif
2436	if (!(flags & SRQ_YIELDING))
2437		sched_setpreempt(td);
2438}
2439
2440/*
2441 * Remove a thread from a run-queue without running it.  This is used
2442 * when we're stealing a thread from a remote queue.  Otherwise all threads
2443 * exit by calling sched_exit_thread() and sched_throw() themselves.
2444 */
2445void
2446sched_rem(struct thread *td)
2447{
2448	struct tdq *tdq;
2449	struct td_sched *ts;
2450
2451	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
2452	    td, td->td_name, td->td_priority, curthread,
2453	    curthread->td_name);
2454	ts = td->td_sched;
2455	tdq = TDQ_CPU(ts->ts_cpu);
2456	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2457	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2458	KASSERT(TD_ON_RUNQ(td),
2459	    ("sched_rem: thread not on run queue"));
2460	tdq_runq_rem(tdq, ts);
2461	tdq_load_rem(tdq, ts);
2462	TD_SET_CAN_RUN(td);
2463}
2464
2465/*
2466 * Fetch cpu utilization information.  Updates on demand.
2467 */
2468fixpt_t
2469sched_pctcpu(struct thread *td)
2470{
2471	fixpt_t pctcpu;
2472	struct td_sched *ts;
2473
2474	pctcpu = 0;
2475	ts = td->td_sched;
2476	if (ts == NULL)
2477		return (0);
2478
2479	thread_lock(td);
2480	if (ts->ts_ticks) {
2481		int rtick;
2482
2483		sched_pctcpu_update(ts);
2484		/* How many rticks per second? */
2485		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
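		/* Scale rtick/hz into an FSCALE fixed-point fraction for ps(1). */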
2486		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
2487	}
2488	thread_unlock(td);
2489
2490	return (pctcpu);
2491}
2492
2493/*
2494 * Bind a thread to a target cpu.
2495 */
2496void
2497sched_bind(struct thread *td, int cpu)
2498{
2499	struct td_sched *ts;
2500
2501	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
2502	ts = td->td_sched;
2503	if (ts->ts_flags & TSF_BOUND)
2504		sched_unbind(td);
2505	ts->ts_flags |= TSF_BOUND;
2506#ifdef SMP
2507	sched_pin();
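	/* We stay pinned even on the target cpu; sched_unbind() unpins. */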
2508	if (PCPU_GET(cpuid) == cpu)
2509		return;
2510	ts->ts_cpu = cpu;
2511	/* When we return from mi_switch we'll be on the correct cpu. */
2512	mi_switch(SW_VOL, NULL);
2513#endif
2514}
2515
2516/*
2517 * Release a bound thread.
2518 */
2519void
2520sched_unbind(struct thread *td)
2521{
2522	struct td_sched *ts;
2523
2524	THREAD_LOCK_ASSERT(td, MA_OWNED);
2525	ts = td->td_sched;
2526	if ((ts->ts_flags & TSF_BOUND) == 0)
2527		return;
2528	ts->ts_flags &= ~TSF_BOUND;
2529#ifdef SMP
2530	sched_unpin();
2531#endif
2532}
2533
2534int
2535sched_is_bound(struct thread *td)
2536{
2537	THREAD_LOCK_ASSERT(td, MA_OWNED);
2538	return (td->td_sched->ts_flags & TSF_BOUND);
2539}
2540
2541/*
2542 * Basic yield call.
2543 */
2544void
2545sched_relinquish(struct thread *td)
2546{
2547	thread_lock(td);
2548	SCHED_STAT_INC(switch_relinquish);
2549	mi_switch(SW_VOL, NULL);
2550	thread_unlock(td);
2551}
2552
2553/*
2554 * Return the total system load.
2555 */
2556int
2557sched_load(void)
2558{
2559#ifdef SMP
2560	int total;
2561	int i;
2562
2563	total = 0;
2564	for (i = 0; i <= tdg_maxid; i++)
2565		total += TDQ_GROUP(i)->tdg_load;
2566	return (total);
2567#else
2568	return (TDQ_SELF()->tdq_sysload);
2569#endif
2570}
2571
2572int
2573sched_sizeof_proc(void)
2574{
2575	return (sizeof(struct proc));
2576}
2577
2578int
2579sched_sizeof_thread(void)
2580{
2581	return (sizeof(struct thread) + sizeof(struct td_sched));
2582}
2583
2584/*
2585 * The actual idle process.
2586 */
2587void
2588sched_idletd(void *dummy)
2589{
2590	struct thread *td;
2591	struct tdq *tdq;
2592
2593	td = curthread;
2594	tdq = TDQ_SELF();
2595	mtx_assert(&Giant, MA_NOTOWNED);
2596	/* ULE relies on preemption for idle interruption. */
2597	for (;;) {
2598#ifdef SMP
2599		if (tdq_idled(tdq))
2600			cpu_idle();
2601#else
2602		cpu_idle();
2603#endif
2604	}
2605}
2606
2607/*
2608 * A CPU is entering for the first time or a thread is exiting.
2609 */
2610void
2611sched_throw(struct thread *td)
2612{
2613	struct thread *newtd;
2614	struct tdq *tdq;
2615
2616	tdq = TDQ_SELF();
2617	if (td == NULL) {
2618		/* Correct spinlock nesting and acquire the correct lock. */
2619		TDQ_LOCK(tdq);
2620		spinlock_exit();
2621	} else {
2622		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2623		tdq_load_rem(tdq, td->td_sched);
2624		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
2625	}
2626	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
2627	newtd = choosethread();
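	/* As in sched_switch(), hand the run-queue lock to the new thread. */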
2628	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
2629	PCPU_SET(switchtime, cpu_ticks());
2630	PCPU_SET(switchticks, ticks);
2631	cpu_throw(td, newtd);		/* doesn't return */
2632}
2633
2634/*
2635 * This is called from fork_exit().  Just acquire the correct locks and
2636 * let fork do the rest of the work.
2637 */
2638void
2639sched_fork_exit(struct thread *td)
2640{
2641	struct td_sched *ts;
2642	struct tdq *tdq;
2643	int cpuid;
2644
2645	/*
2646	 * Finish setting up thread glue so that it begins execution in a
2647	 * non-nested critical section with the scheduler lock held.
2648	 */
2649	cpuid = PCPU_GET(cpuid);
2650	tdq = TDQ_CPU(cpuid);
2651	ts = td->td_sched;
2652	if (TD_IS_IDLETHREAD(td))
2653		td->td_lock = TDQ_LOCKPTR(tdq);
2654	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2655	td->td_oncpu = cpuid;
2656	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2657	lock_profile_obtain_lock_success(
2658	    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
2659}
2660
2661static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
2662    "Scheduler");
2663SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
2664    "Scheduler name");
2665SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
2666    "Slice size for timeshare threads");
2667SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
2668     "Interactivity score threshold");
2669SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
2670     0, "Min priority for preemption, lower priorities have greater precedence");
2671#ifdef SMP
2672SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0,
2673    "Pick the target cpu based on priority rather than load.");
2674SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
2675    "Number of hz ticks to keep thread affinity for");
2676SYSCTL_INT(_kern_sched, OID_AUTO, tryself, CTLFLAG_RW, &tryself, 0, "");
2677SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
2678    "Enables the long-term load balancer");
2679SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
2680    &balance_interval, 0,
2681    "Average frequency in stathz ticks to run the long-term balancer");
2682SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
2683    "Steals work from another hyper-threaded core on idle");
2684SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
2685    "Attempts to steal work from other cores before idling");
2686SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
2687    "Minimum load on remote cpu before we'll steal");
2688SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0,
2689    "True when a topology has been specified by the MD code.");
2690#endif
2691
2692/* ps compat.  All cpu percentages from ULE are weighted. */
2693static int ccpu = 0;
2694SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
2695
2696
2697#define KERN_SWITCH_INCLUDE 1
2698#include "kern/kern_switch.c"
2699