/*-
 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file implements the ULE scheduler.  ULE supports independent CPU
 * run queues and fine-grained locking.  It has superior interactive
 * performance under load even on uni-processor systems.
 *
 * etymology:
 *   ULE is the last three letters in schedule.  It owes its name to a
 * generic user created for a scheduling system by Paul Mikesell at
 * Isilon Systems and a general lack of creativity on the part of the author.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 177005 2008-03-10 01:32:01Z jeff $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#include <sys/cpuset.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#if !defined(__i386__) && !defined(__amd64__) && !defined(__powerpc__) && !defined(__arm__)
#error "This architecture is not currently compatible with ULE"
#endif

#define	KTR_ULE	0

/*
 * Thread scheduler specific section.  All fields are protected
 * by the thread lock.
 */
struct td_sched {
	TAILQ_ENTRY(td_sched) ts_procq;	/* Run queue. */
	struct thread	*ts_thread;	/* Active associated thread. */
	struct runq	*ts_runq;	/* Run-queue we're queued on. */
	short		ts_flags;	/* TSF_* flags. */
	u_char		ts_rqindex;	/* Run queue index. */
	u_char		ts_cpu;		/* CPU that we have affinity for. */
	int		ts_slice;	/* Ticks of slice remaining. */
	u_int		ts_slptime;	/* Number of ticks we vol. slept */
	u_int		ts_runtime;	/* Number of ticks we were running */
	/* The following variables are only used for pctcpu calculation */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
	int		ts_rltick;	/* Real last tick, for affinity. */
};
/* flags kept in ts_flags */
#define	TSF_BOUND	0x0001		/* Thread can not migrate. */
#define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */

static struct td_sched td_sched0;

#define	THREAD_CAN_MIGRATE(td)	((td)->td_pinned == 0)
#define	THREAD_CAN_SCHED(td, cpu)	\
    CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
 */
#define	SCHED_TICK_SECS		10
#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
#define	SCHED_TICK_SHIFT	10
#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))

/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:	Priority range for utilization dependent priorities.
 * PRI_NRESV:	Number of nice values.
 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:	Determines the part of the priority inherited from nice.
 */
#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN)
#define	SCHED_PRI_TICKS(ts)						\
    (SCHED_TICK_HZ((ts)) /						\
    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define	SCHED_PRI_NICE(nice)	(nice)
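
/*
 * Illustrative arithmetic (assuming hz = 1000, so SCHED_TICK_TARG = 10000):
 * a thread that was running for half of the last ten seconds has
 * SCHED_TICK_HZ() ~= 5000 and SCHED_TICK_TOTAL() ~= 10000, so
 * SCHED_PRI_TICKS() resolves to roughly half of SCHED_PRI_RANGE and the
 * thread lands near the middle of the nice-adjusted timeshare range.
 */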

/*
 * These determine the interactivity of a process.  Interactivity differs from
 * cpu utilization in that it expresses the voluntary time slept vs time ran
 * while cpu utilization includes all time not running.  This more accurately
 * models the intent of the thread.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * tickincr:		Converts a stathz tick into a hz domain scaled by
 *			the shift factor.  Without the shift the error rate
 *			due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0; we run off of hz in that case.
 * sched_slice:		Runtime of each thread before rescheduling.
 * preempt_thresh:	Priority threshold for preemption and remote IPIs.
 */
static int sched_interact = SCHED_INTERACT_THRESH;
static int realstathz;
static int tickincr;
static int sched_slice;
#ifdef PREEMPTION
#ifdef FULL_PREEMPTION
static int preempt_thresh = PRI_MAX_IDLE;
#else
static int preempt_thresh = PRI_MIN_KERN;
#endif
#else
static int preempt_thresh = 0;
#endif

/*
 * tdq - per processor runqs and statistics.  All fields are protected by the
 * tdq_lock.  The load and lowpri may be accessed without it to avoid excess
 * locking in sched_pickcpu().
 */
struct tdq {
	struct cpu_group *tdq_cg;		/* Pointer to cpu topology. */
	struct mtx	tdq_lock;		/* run queue lock. */
	struct runq	tdq_realtime;		/* real-time run queue. */
	struct runq	tdq_timeshare;		/* timeshare run queue. */
	struct runq	tdq_idle;		/* Queue of IDLE threads. */
	int		tdq_load;		/* Aggregate load. */
	int		tdq_sysload;		/* For loadavg, !ITHD load. */
	u_char		tdq_idx;		/* Current insert index. */
	u_char		tdq_ridx;		/* Current removal index. */
	u_char		tdq_lowpri;		/* Lowest priority thread. */
	u_char		tdq_ipipending;		/* IPI pending. */
	int		tdq_transferable;	/* Transferable thread count. */
	char		tdq_name[sizeof("sched lock") + 6];
} __aligned(64);
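
/*
 * The __aligned(64) above is meant to keep each tdq on its own cache
 * line(s) so that remote cpus polling tdq_load and tdq_lowpri do not
 * false-share with unrelated data; 64 is an assumed common line size,
 * not a value queried from the hardware.
 */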

#ifdef SMP
struct cpu_group *cpu_top;

#define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 1000))
#define	SCHED_AFFINITY(ts, t)	((ts)->ts_rltick > ticks - ((t) * affinity))
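
/*
 * Illustrative use: SCHED_AFFINITY(ts, CG_SHARE_L2) is true only when the
 * thread last ran within the past CG_SHARE_L2 * affinity ticks, i.e. its
 * working set is presumed still warm in a shared cache; past that window
 * the scheduler treats migration as free.
 */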

/*
 * Run-time tunables.
 */
static int rebalance = 1;
static int balance_interval = 128;	/* Default set in sched_initticks(). */
static int affinity;
static int steal_htt = 1;
static int steal_idle = 1;
static int steal_thresh = 2;

/*
 * One thread queue per processor.
 */
static struct tdq	tdq_cpu[MAXCPU];
static struct tdq	*balance_tdq;
static int balance_ticks;

#define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
#define	TDQ_CPU(x)	(&tdq_cpu[(x)])
#define	TDQ_ID(x)	((int)((x) - tdq_cpu))
#else	/* !SMP */
static struct tdq	tdq_cpu;

#define	TDQ_ID(x)	(0)
#define	TDQ_SELF()	(&tdq_cpu)
#define	TDQ_CPU(x)	(&tdq_cpu)
#endif

#define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
#define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
#define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
#define	TDQ_LOCKPTR(t)		(&(t)->tdq_lock)

static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct td_sched *);

/* Operations on per processor queues */
static struct td_sched * tdq_choose(struct tdq *);
static void tdq_setup(struct tdq *);
static void tdq_load_add(struct tdq *, struct td_sched *);
static void tdq_load_rem(struct tdq *, struct td_sched *);
static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
static inline int sched_shouldpreempt(int, int, int);
void tdq_print(int cpu);
static void runq_print(struct runq *rq);
static void tdq_add(struct tdq *, struct thread *, int);
#ifdef SMP
static int tdq_move(struct tdq *, struct tdq *);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct tdq *, struct td_sched *);
static struct td_sched *tdq_steal(struct tdq *, int);
static struct td_sched *runq_steal(struct runq *, int);
static int sched_pickcpu(struct td_sched *, int);
static void sched_balance(void);
static int sched_balance_pair(struct tdq *, struct tdq *);
static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
static inline struct mtx *thread_block_switch(struct thread *);
static inline void thread_unblock_switch(struct thread *, struct mtx *);
static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
#endif

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static void sched_initticks(void *dummy);
SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)

/*
 * Print the threads waiting on a run-queue.
 */
static void
runq_print(struct runq *rq)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;
	int j;
	int i;

	for (i = 0; i < RQB_LEN; i++) {
		printf("\t\trunq bits %d 0x%zx\n",
		    i, rq->rq_status.rqb_bits[i]);
		for (j = 0; j < RQB_BPW; j++)
			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
				pri = j + (i << RQB_L2BPW);
				rqh = &rq->rq_queues[pri];
				TAILQ_FOREACH(ts, rqh, ts_procq) {
					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
					    ts->ts_thread, ts->ts_thread->td_name, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
				}
			}
	}
}

/*
 * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
 */
void
tdq_print(int cpu)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);

	printf("tdq %d:\n", TDQ_ID(tdq));
	printf("\tlock            %p\n", TDQ_LOCKPTR(tdq));
	printf("\tLock name:      %s\n", tdq->tdq_name);
	printf("\tload:           %d\n", tdq->tdq_load);
	printf("\ttimeshare idx:  %d\n", tdq->tdq_idx);
	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
	printf("\trealtime runq:\n");
	runq_print(&tdq->tdq_realtime);
	printf("\ttimeshare runq:\n");
	runq_print(&tdq->tdq_timeshare);
	printf("\tidle runq:\n");
	runq_print(&tdq->tdq_idle);
	printf("\tload transferable: %d\n", tdq->tdq_transferable);
	printf("\tlowest priority:   %d\n", tdq->tdq_lowpri);
}

static inline int
sched_shouldpreempt(int pri, int cpri, int remote)
{
	/*
	 * If the new priority is not better than the current priority there is
	 * nothing to do.
	 */
	if (pri >= cpri)
		return (0);
	/*
	 * Always preempt idle.
	 */
	if (cpri >= PRI_MIN_IDLE)
		return (1);
	/*
	 * If preemption is disabled don't preempt others.
	 */
	if (preempt_thresh == 0)
		return (0);
	/*
	 * Preempt if we exceed the threshold.
	 */
	if (pri <= preempt_thresh)
		return (1);
	/*
	 * If we're realtime or better and there is timeshare or worse running
	 * preempt only remote processors.
	 */
	if (remote && pri <= PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
		return (1);
	return (0);
}
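
/*
 * Illustrative cases for the rules above, assuming the default
 * preempt_thresh of PRI_MIN_KERN: an interrupt thread waking on a cpu
 * running a timeshare thread preempts it at once (pri <= preempt_thresh),
 * while one timeshare thread arriving at a slightly better priority than
 * another does not, and instead waits for the next tick-driven reschedule.
 */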

#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
/*
 * Add a thread to the actual run-queue.  Keeps transferable counts up to
 * date with what is actually on the run-queue.  Selects the correct
 * queue position for timeshare threads.
 */
static __inline void
tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
{
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
		tdq->tdq_transferable++;
		ts->ts_flags |= TSF_XFERABLE;
	}
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		u_char pri;

		pri = ts->ts_thread->td_priority;
		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
			("Invalid priority %d on timeshare runq", pri));
		/*
		 * This queue contains only priorities between MIN and MAX
		 * timeshare.  Use the whole queue to represent these values.
		 */
		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
			pri = (pri + tdq->tdq_idx) % RQ_NQS;
			/*
			 * This effectively shortens the queue by one so we
			 * can have a one slot difference between idx and
			 * ridx while we wait for threads to drain.
			 */
			if (tdq->tdq_ridx != tdq->tdq_idx &&
			    pri == tdq->tdq_ridx)
				pri = (unsigned char)(pri - 1) % RQ_NQS;
		} else
			pri = tdq->tdq_ridx;
		runq_add_pri(ts->ts_runq, ts, pri, flags);
	} else
		runq_add(ts->ts_runq, ts, flags);
}
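
/*
 * The circular-queue arithmetic above places better (numerically lower)
 * timeshare priorities closer to the removal index tdq_ridx, so they are
 * reached sooner, while the worst nice-adjusted priorities are inserted
 * almost a full rotation away; nice therefore affects latency as well as
 * cpu share.
 */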

/*
 * Remove a thread from a run-queue.  This typically happens when a thread
 * is selected to run.  Running threads are not on the queue and the
 * transferable count does not reflect them.
 */
static __inline void
tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
{
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	KASSERT(ts->ts_runq != NULL,
	    ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
	if (ts->ts_flags & TSF_XFERABLE) {
		tdq->tdq_transferable--;
		ts->ts_flags &= ~TSF_XFERABLE;
	}
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		if (tdq->tdq_idx != tdq->tdq_ridx)
			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
		else
			runq_remove_idx(ts->ts_runq, ts, NULL);
		/*
		 * For timeshare threads we update the priority here so
		 * the priority reflects the time we've been sleeping.
		 */
		ts->ts_ltick = ticks;
		sched_pctcpu_update(ts);
		sched_priority(ts->ts_thread);
	} else
		runq_remove(ts->ts_runq, ts);
}

/*
 * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
 * for this thread to the referenced thread queue.
 */
static void
tdq_load_add(struct tdq *tdq, struct td_sched *ts)
{
	int class;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	tdq->tdq_load++;
	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
	if (class != PRI_ITHD &&
	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
		tdq->tdq_sysload++;
}

/*
 * Remove the load from a thread that is transitioning to a sleep state or
 * exiting.
 */
static void
tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
{
	int class;

	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	if (class != PRI_ITHD &&
	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
		tdq->tdq_sysload--;
	KASSERT(tdq->tdq_load != 0,
	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
	tdq->tdq_load--;
	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
	ts->ts_runq = NULL;
}

/*
 * Set lowpri to its exact value by searching the run-queue and
 * evaluating curthread.  curthread may be passed as an optimization.
 */
static void
tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
{
	struct td_sched *ts;
	struct thread *td;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if (ctd == NULL)
		ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
	ts = tdq_choose(tdq);
	if (ts)
		td = ts->ts_thread;
	if (ts == NULL || td->td_priority > ctd->td_priority)
		tdq->tdq_lowpri = ctd->td_priority;
	else
		tdq->tdq_lowpri = td->td_priority;
}

#ifdef SMP
struct cpu_search {
	cpumask_t cs_mask;	/* Mask of valid cpus. */
	u_int	cs_load;
	u_int	cs_cpu;
	int	cs_limit;	/* Min priority for low min load for high. */
};

#define	CPU_SEARCH_LOWEST	0x1
#define	CPU_SEARCH_HIGHEST	0x2
#define	CPU_SEARCH_BOTH		(CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST)

#define	CPUMASK_FOREACH(cpu, mask)				\
	for ((cpu) = 0; (cpu) < sizeof((mask)) * 8; (cpu)++)	\
		if ((mask) & 1 << (cpu))

__inline int cpu_search(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high, const int match);
int cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low);
int cpu_search_highest(struct cpu_group *cg, struct cpu_search *high);
int cpu_search_both(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high);

/*
 * This routine compares according to the match argument and should be
 * reduced in actual instantiations via constant propagation and dead code
 * elimination.
 */
static __inline int
cpu_compare(int cpu, struct cpu_search *low, struct cpu_search *high,
    const int match)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);
	if (match & CPU_SEARCH_LOWEST)
		if (low->cs_mask & (1 << cpu) &&
		    tdq->tdq_load < low->cs_load &&
		    tdq->tdq_lowpri > low->cs_limit) {
			low->cs_cpu = cpu;
			low->cs_load = tdq->tdq_load;
		}
	if (match & CPU_SEARCH_HIGHEST)
		if (high->cs_mask & (1 << cpu) &&
		    tdq->tdq_load >= high->cs_limit &&
		    tdq->tdq_load > high->cs_load &&
		    tdq->tdq_transferable) {
			high->cs_cpu = cpu;
			high->cs_load = tdq->tdq_load;
		}
	return (tdq->tdq_load);
}

/*
 * Search the tree of cpu_groups for the lowest or highest loaded cpu
 * according to the match argument.  This routine actually compares the
 * load on all paths through the tree and finds the least loaded cpu on
 * the least loaded path, which may differ from the least loaded cpu in
 * the system.  This balances work among caches and busses.
 *
 * This inline is instantiated in three forms below using constants for the
 * match argument.  It is reduced to the minimum set for each case.  It is
 * also recursive to the depth of the tree.
 */
static inline int
cpu_search(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high, const int match)
{
	int total;

	total = 0;
	if (cg->cg_children) {
		struct cpu_search lgroup;
		struct cpu_search hgroup;
		struct cpu_group *child;
		u_int lload;
		int hload;
		int load;
		int i;

		lload = -1;
		hload = -1;
		for (i = 0; i < cg->cg_children; i++) {
			child = &cg->cg_child[i];
			if (match & CPU_SEARCH_LOWEST) {
				lgroup = *low;
				lgroup.cs_load = -1;
			}
			if (match & CPU_SEARCH_HIGHEST) {
				hgroup = *high;
				hgroup.cs_load = 0;
			}
			switch (match) {
			case CPU_SEARCH_LOWEST:
				load = cpu_search_lowest(child, &lgroup);
				break;
			case CPU_SEARCH_HIGHEST:
				load = cpu_search_highest(child, &hgroup);
				break;
			case CPU_SEARCH_BOTH:
				load = cpu_search_both(child, &lgroup, &hgroup);
				break;
			}
			total += load;
			if (match & CPU_SEARCH_LOWEST)
				if (load < lload || low->cs_cpu == -1) {
					*low = lgroup;
					lload = load;
				}
			if (match & CPU_SEARCH_HIGHEST)
				if (load > hload || high->cs_cpu == -1) {
					hload = load;
					*high = hgroup;
				}
		}
	} else {
		int cpu;

		CPUMASK_FOREACH(cpu, cg->cg_mask)
			total += cpu_compare(cpu, low, high, match);
	}
	return (total);
}

/*
 * cpu_search instantiations must pass constants to maintain the inline
 * optimization.
 */
int
cpu_search_lowest(struct cpu_group *cg, struct cpu_search *low)
{
	return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST);
}

int
cpu_search_highest(struct cpu_group *cg, struct cpu_search *high)
{
	return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST);
}

int
cpu_search_both(struct cpu_group *cg, struct cpu_search *low,
    struct cpu_search *high)
{
	return cpu_search(cg, low, high, CPU_SEARCH_BOTH);
}

/*
 * Find the cpu with the least load via the least loaded path that has a
 * lowpri greater than pri.  A pri of -1 indicates any priority is
 * acceptable.
 */
static inline int
sched_lowest(struct cpu_group *cg, cpumask_t mask, int pri)
{
	struct cpu_search low;

	low.cs_cpu = -1;
	low.cs_load = -1;
	low.cs_mask = mask;
	low.cs_limit = pri;
	cpu_search_lowest(cg, &low);
	return low.cs_cpu;
}

/*
 * Find the cpu with the highest load via the highest loaded path.
 */
static inline int
sched_highest(struct cpu_group *cg, cpumask_t mask, int minload)
{
	struct cpu_search high;

	high.cs_cpu = -1;
	high.cs_load = 0;
	high.cs_mask = mask;
	high.cs_limit = minload;
	cpu_search_highest(cg, &high);
	return high.cs_cpu;
}

/*
 * Simultaneously find the highest and lowest loaded cpu reachable via
 * cg.
 */
static inline void
sched_both(struct cpu_group *cg, cpumask_t mask, int *lowcpu, int *highcpu)
{
	struct cpu_search high;
	struct cpu_search low;

	low.cs_cpu = -1;
	low.cs_limit = -1;
	low.cs_load = -1;
	low.cs_mask = mask;
	high.cs_load = 0;
	high.cs_cpu = -1;
	high.cs_limit = -1;
	high.cs_mask = mask;
	cpu_search_both(cg, &low, &high);
	*lowcpu = low.cs_cpu;
	*highcpu = high.cs_cpu;
	return;
}

static void
sched_balance_group(struct cpu_group *cg)
{
	cpumask_t mask;
	int high;
	int low;
	int i;

	mask = -1;
	for (;;) {
		sched_both(cg, mask, &low, &high);
		if (low == high || low == -1 || high == -1)
			break;
		if (sched_balance_pair(TDQ_CPU(high), TDQ_CPU(low)))
			break;
		/*
		 * If we failed to move any threads determine which cpu
		 * to kick out of the set and try again.
		 */
		if (TDQ_CPU(high)->tdq_transferable == 0)
			mask &= ~(1 << high);
		else
			mask &= ~(1 << low);
	}

	for (i = 0; i < cg->cg_children; i++)
		sched_balance_group(&cg->cg_child[i]);
}

747
748static void
749sched_balance()
750{
751	struct tdq *tdq;
752
753	/*
754	 * Select a random time between .5 * balance_interval and
755	 * 1.5 * balance_interval.
756	 */
757	balance_ticks = max(balance_interval / 2, 1);
758	balance_ticks += random() % balance_interval;
759	if (smp_started == 0 || rebalance == 0)
760		return;
761	tdq = TDQ_SELF();
762	TDQ_UNLOCK(tdq);
763	sched_balance_group(cpu_top);
764	TDQ_LOCK(tdq);
765}
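
/*
 * Example: with balance_interval = 128, balance_ticks lands in [64, 191]
 * (64 + random() % 128).  The jitter keeps the balancer from running in
 * resonance with periodic workloads that could defeat it.
 */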

/*
 * Lock two thread queues using their address to maintain lock order.
 */
static void
tdq_lock_pair(struct tdq *one, struct tdq *two)
{
	if (one < two) {
		TDQ_LOCK(one);
		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
	} else {
		TDQ_LOCK(two);
		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
	}
}

/*
 * Unlock two thread queues.  Order is not important here.
 */
static void
tdq_unlock_pair(struct tdq *one, struct tdq *two)
{
	TDQ_UNLOCK(one);
	TDQ_UNLOCK(two);
}

/*
 * Transfer load between two imbalanced thread queues.
 */
static int
sched_balance_pair(struct tdq *high, struct tdq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int moved;
	int move;
	int diff;
	int i;

	tdq_lock_pair(high, low);
	transferable = high->tdq_transferable;
	high_load = high->tdq_load;
	low_load = low->tdq_load;
	moved = 0;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * threads we actually have to give up (transferable).
	 */
	if (transferable != 0) {
		diff = high_load - low_load;
		move = diff / 2;
		if (diff & 0x1)
			move++;
		move = min(move, transferable);
		for (i = 0; i < move; i++)
			moved += tdq_move(high, low);
		/*
		 * IPI the target cpu to force it to reschedule with the new
		 * workload.
		 */
		ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
	}
	tdq_unlock_pair(high, low);
	return (moved);
}
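
/*
 * Example: with 6 runnable threads on the high queue and 1 on the low
 * queue, diff is 5 and move rounds up to 3, leaving a 3/4 split; with 5
 * and 1 the split is an even 3/3.  move is then clipped to the
 * transferable count, since bound and pinned threads cannot be taken.
 */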

/*
 * Move a thread from one thread queue to another.
 */
static int
tdq_move(struct tdq *from, struct tdq *to)
{
	struct td_sched *ts;
	struct thread *td;
	struct tdq *tdq;
	int cpu;

	TDQ_LOCK_ASSERT(from, MA_OWNED);
	TDQ_LOCK_ASSERT(to, MA_OWNED);

	tdq = from;
	cpu = TDQ_ID(to);
	ts = tdq_steal(tdq, cpu);
	if (ts == NULL)
		return (0);
	td = ts->ts_thread;
	/*
	 * Although the run queue is locked the thread may be blocked.  Lock
	 * it to clear this and acquire the run-queue lock.
	 */
	thread_lock(td);
	/* Drop recursive lock on from acquired via thread_lock(). */
	TDQ_UNLOCK(from);
	sched_rem(td);
	ts->ts_cpu = cpu;
	td->td_lock = TDQ_LOCKPTR(to);
	tdq_add(to, td, SRQ_YIELDING);
	return (1);
}

/*
 * This tdq has idled.  Try to steal a thread from another cpu and switch
 * to it.
 */
static int
tdq_idled(struct tdq *tdq)
{
	struct cpu_group *cg;
	struct tdq *steal;
	cpumask_t mask;
	int thresh;
	int cpu;

	if (smp_started == 0 || steal_idle == 0)
		return (1);
	mask = -1;
	mask &= ~PCPU_GET(cpumask);
	/* We don't want to be preempted while we're iterating. */
	spinlock_enter();
	for (cg = tdq->tdq_cg; cg != NULL; ) {
		if ((cg->cg_flags & (CG_FLAG_HTT | CG_FLAG_THREAD)) == 0)
			thresh = steal_thresh;
		else
			thresh = 1;
		cpu = sched_highest(cg, mask, thresh);
		if (cpu == -1) {
			cg = cg->cg_parent;
			continue;
		}
		steal = TDQ_CPU(cpu);
		mask &= ~(1 << cpu);
		tdq_lock_pair(tdq, steal);
		if (steal->tdq_load < thresh || steal->tdq_transferable == 0) {
			tdq_unlock_pair(tdq, steal);
			continue;
		}
		/*
		 * If a thread was added while interrupts were disabled don't
		 * steal one here.  If we fail to acquire one due to affinity
		 * restrictions loop again with this cpu removed from the
		 * set.
		 */
		if (tdq->tdq_load == 0 && tdq_move(steal, tdq) == 0) {
			tdq_unlock_pair(tdq, steal);
			continue;
		}
		spinlock_exit();
		TDQ_UNLOCK(steal);
		mi_switch(SW_VOL, NULL);
		thread_unlock(curthread);

		return (0);
	}
	spinlock_exit();
	return (1);
}

/*
 * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
 */
static void
tdq_notify(struct tdq *tdq, struct td_sched *ts)
{
	int cpri;
	int pri;
	int cpu;

	if (tdq->tdq_ipipending)
		return;
	cpu = ts->ts_cpu;
	pri = ts->ts_thread->td_priority;
	cpri = pcpu_find(cpu)->pc_curthread->td_priority;
	if (!sched_shouldpreempt(pri, cpri, 1))
		return;
	tdq->tdq_ipipending = 1;
	ipi_selected(1 << cpu, IPI_PREEMPT);
}

/*
 * Steals load from a timeshare queue.  Honors the rotating queue head
 * index.
 */
static struct td_sched *
runq_steal_from(struct runq *rq, int cpu, u_char start)
{
	struct td_sched *ts;
	struct rqbits *rqb;
	struct rqhead *rqh;
	int first;
	int bit;
	int pri;
	int i;

	rqb = &rq->rq_status;
	bit = start & (RQB_BPW -1);
	pri = 0;
	first = 0;
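	/*
	 * Note: the scan below deliberately passes over the first thread it
	 * encounters ('first' is only set on the initial visit), which
	 * appears intended to leave the victim cpu's next choice in place
	 * and steal a later candidate instead.
	 */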
again:
	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
		if (rqb->rqb_bits[i] == 0)
			continue;
		if (bit != 0) {
			for (pri = bit; pri < RQB_BPW; pri++)
				if (rqb->rqb_bits[i] & (1ul << pri))
					break;
			if (pri >= RQB_BPW)
				continue;
		} else
			pri = RQB_FFS(rqb->rqb_bits[i]);
		pri += (i << RQB_L2BPW);
		rqh = &rq->rq_queues[pri];
		TAILQ_FOREACH(ts, rqh, ts_procq) {
			if (first && THREAD_CAN_MIGRATE(ts->ts_thread) &&
			    THREAD_CAN_SCHED(ts->ts_thread, cpu))
				return (ts);
			first = 1;
		}
	}
	if (start != 0) {
		start = 0;
		goto again;
	}

	return (NULL);
}

/*
 * Steals load from a standard linear queue.
 */
static struct td_sched *
runq_steal(struct runq *rq, int cpu)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct td_sched *ts;
	int word;
	int bit;

	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ts, rqh, ts_procq)
				if (THREAD_CAN_MIGRATE(ts->ts_thread) &&
				    THREAD_CAN_SCHED(ts->ts_thread, cpu))
					return (ts);
		}
	}
	return (NULL);
}

/*
 * Attempt to steal a thread in priority order from a thread queue.
 */
static struct td_sched *
tdq_steal(struct tdq *tdq, int cpu)
{
	struct td_sched *ts;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	if ((ts = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
		return (ts);
	if ((ts = runq_steal_from(&tdq->tdq_timeshare, cpu, tdq->tdq_ridx))
	    != NULL)
		return (ts);
	return (runq_steal(&tdq->tdq_idle, cpu));
}

/*
 * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
 * current lock and returns with the assigned queue locked.
 */
static inline struct tdq *
sched_setcpu(struct td_sched *ts, int cpu, int flags)
{
	struct thread *td;
	struct tdq *tdq;

	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);

	tdq = TDQ_CPU(cpu);
	td = ts->ts_thread;
	ts->ts_cpu = cpu;

	/* If the lock matches just return the queue. */
	if (td->td_lock == TDQ_LOCKPTR(tdq))
		return (tdq);
#ifdef notyet
	/*
	 * If the thread isn't running its lockptr is a
	 * turnstile or a sleepqueue.  We can just lock_set without
	 * blocking.
	 */
	if (TD_CAN_RUN(td)) {
		TDQ_LOCK(tdq);
		thread_lock_set(td, TDQ_LOCKPTR(tdq));
		return (tdq);
	}
#endif
	/*
	 * The hard case, migration, we need to block the thread first to
	 * prevent order reversals with other cpus locks.
	 */
	thread_lock_block(td);
	TDQ_LOCK(tdq);
	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
	return (tdq);
}

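/*
 * Select a cpu for a newly runnable thread.  In rough order of preference:
 * keep the previous cpu when the thread may not migrate, keep the current
 * cpu for interrupt threads, keep the previous cpu while it is idle or
 * its cache affinity still holds, then take the least loaded cpu within
 * the deepest topology level that retains affinity, and finally the least
 * loaded cpu overall, compared once more against the current cpu.
 */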
static int
sched_pickcpu(struct td_sched *ts, int flags)
{
	struct cpu_group *cg;
	struct thread *td;
	struct tdq *tdq;
	cpumask_t mask;
	int self;
	int pri;
	int cpu;

	self = PCPU_GET(cpuid);
	td = ts->ts_thread;
	if (smp_started == 0)
		return (self);
	/*
	 * Don't migrate a running thread from sched_switch().
	 */
	if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
		return (ts->ts_cpu);
	/*
	 * Prefer to run interrupt threads on the processors that generate
	 * the interrupt.
	 */
	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) &&
	    curthread->td_intr_nesting_level)
		ts->ts_cpu = self;
	/*
	 * If the thread can run on the last cpu and the affinity has not
	 * expired or it is idle run it there.
	 */
	pri = td->td_priority;
	tdq = TDQ_CPU(ts->ts_cpu);
	if (THREAD_CAN_SCHED(td, ts->ts_cpu)) {
		if (tdq->tdq_lowpri > PRI_MIN_IDLE)
			return (ts->ts_cpu);
		if (SCHED_AFFINITY(ts, CG_SHARE_L2) && tdq->tdq_lowpri > pri)
			return (ts->ts_cpu);
	}
	/*
	 * Search for the highest level in the tree that still has affinity.
	 */
	cg = NULL;
	for (cg = tdq->tdq_cg; cg != NULL; cg = cg->cg_parent)
		if (SCHED_AFFINITY(ts, cg->cg_level))
			break;
	cpu = -1;
	mask = td->td_cpuset->cs_mask.__bits[0];
	if (cg)
		cpu = sched_lowest(cg, mask, pri);
	if (cpu == -1)
		cpu = sched_lowest(cpu_top, mask, -1);
	/*
	 * Compare the lowest loaded cpu to current cpu.
	 */
	if (THREAD_CAN_SCHED(td, self) && TDQ_CPU(self)->tdq_lowpri > pri &&
	    TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE)
		cpu = self;
	KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu."));
	return (cpu);
}
#endif

/*
 * Pick the highest priority task we have and return it.
 */
static struct td_sched *
tdq_choose(struct tdq *tdq)
{
	struct td_sched *ts;

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	ts = runq_choose(&tdq->tdq_realtime);
	if (ts != NULL)
		return (ts);
	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
	if (ts != NULL) {
		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
		    ("tdq_choose: Invalid priority on timeshare queue %d",
		    ts->ts_thread->td_priority));
		return (ts);
	}

	ts = runq_choose(&tdq->tdq_idle);
	if (ts != NULL) {
		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
		    ("tdq_choose: Invalid priority on idle queue %d",
		    ts->ts_thread->td_priority));
		return (ts);
	}

	return (NULL);
}

/*
 * Initialize a thread queue.
 */
static void
tdq_setup(struct tdq *tdq)
{

	if (bootverbose)
		printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
	runq_init(&tdq->tdq_realtime);
	runq_init(&tdq->tdq_timeshare);
	runq_init(&tdq->tdq_idle);
	snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
	    "sched lock %d", (int)TDQ_ID(tdq));
	mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock",
	    MTX_SPIN | MTX_RECURSE);
}

#ifdef SMP
static void
sched_setup_smp(void)
{
	struct tdq *tdq;
	int i;

	cpu_top = smp_topo();
	for (i = 0; i < MAXCPU; i++) {
		if (CPU_ABSENT(i))
			continue;
		tdq = TDQ_CPU(i);
		tdq_setup(tdq);
		tdq->tdq_cg = smp_topo_find(cpu_top, i);
		if (tdq->tdq_cg == NULL)
			panic("Can't find cpu group for %d\n", i);
	}
	balance_tdq = TDQ_SELF();
	sched_balance();
}
#endif

/*
 * Setup the thread queues and initialize the topology based on MD
 * information.
 */
static void
sched_setup(void *dummy)
{
	struct tdq *tdq;

	tdq = TDQ_SELF();
#ifdef SMP
	sched_setup_smp();
#else
	tdq_setup(tdq);
#endif
	/*
	 * To avoid divide-by-zero, we set realstathz to a dummy value
	 * in case sched_clock() is called before sched_initticks().
	 */
	realstathz = hz;
	sched_slice = (realstathz/10);	/* ~100ms */
	tickincr = 1 << SCHED_TICK_SHIFT;

	/* Add thread0's load since it's running. */
	TDQ_LOCK(tdq);
	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
	tdq_load_add(tdq, &td_sched0);
	tdq->tdq_lowpri = thread0.td_priority;
	TDQ_UNLOCK(tdq);
}

/*
 * This routine determines the tickincr after stathz and hz are setup.
 */
/* ARGSUSED */
static void
sched_initticks(void *dummy)
{
	int incr;

	realstathz = stathz ? stathz : hz;
	sched_slice = (realstathz/10);	/* ~100ms */

	/*
	 * tickincr is shifted out by 10 to avoid rounding errors due to
	 * hz not being evenly divisible by stathz on all platforms.
	 */
	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
	/*
	 * This does not work for values of stathz that are greater than
	 * hz << SCHED_TICK_SHIFT.  In practice this does not happen.
	 */
	if (incr == 0)
		incr = 1;
	tickincr = incr;
#ifdef SMP
	/*
	 * Set the default balance interval now that we know
	 * what realstathz is.
	 */
	balance_interval = realstathz;
	/*
	 * Set steal thresh to roughly log2(mp_ncpus) but no greater than 3.
	 * This prevents excess thrashing on large machines and excess idle
	 * on smaller machines.
	 */
	steal_thresh = min(ffs(mp_ncpus) - 1, 3);
	affinity = SCHED_AFFINITY_DEFAULT;
#endif
}
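
/*
 * Worked example of the scaling above: with hz = 1000 and stathz = 127,
 * tickincr = (1000 << 10) / 127 = 8063, i.e. each stathz tick accounts
 * for about 7.87 hz ticks in the shifted fixed-point domain
 * (8063 / 1024 ~= 1000 / 127).
 */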

/*
 * This is the core of the interactivity algorithm.  Determines a score based
 * on past behavior.  It is the ratio of sleep time to run time scaled to
 * a [0, 100] integer.  This is the voluntary sleep time of a process, which
 * differs from the cpu usage because it does not account for time spent
 * waiting on a run-queue.  Would be prettier if we had floating point.
 */
static int
sched_interact_score(struct thread *td)
{
	struct td_sched *ts;
	int div;

	ts = td->td_sched;
	/*
	 * The score is only needed if this is likely to be an interactive
	 * task.  Don't go through the expense of computing it if there's
	 * no chance.
	 */
	if (sched_interact <= SCHED_INTERACT_HALF &&
		ts->ts_runtime >= ts->ts_slptime)
			return (SCHED_INTERACT_HALF);

	if (ts->ts_runtime > ts->ts_slptime) {
		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
	}
	if (ts->ts_slptime > ts->ts_runtime) {
		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
		return (ts->ts_runtime / div);
	}
	/* runtime == slptime */
	if (ts->ts_runtime)
		return (SCHED_INTERACT_HALF);

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);

}
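
/*
 * Worked example: a thread sleeping three times as long as it runs scores
 * roughly 50 / 3 = 16, comfortably under the default threshold of 30, so
 * it is treated as interactive; one running three times as long as it
 * sleeps scores roughly 100 - 17 = 83 and is not.
 */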

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct thread *td)
{
	int score;
	int pri;

	if (td->td_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * If the score is interactive we place the thread in the realtime
	 * queue with a priority that is less than kernel and interrupt
	 * priorities.  These threads are not subject to nice restrictions.
	 *
	 * Scores greater than this are placed on the normal timeshare queue
	 * where the priority is partially decided by the most recent cpu
	 * utilization and the rest is decided by nice value.
	 *
	 * The nice value of the process has a linear effect on the calculated
	 * score.  Negative nice values make it easier for a thread to be
	 * considered interactive.
	 */
	score = imax(0, sched_interact_score(td) - td->td_proc->p_nice);
	if (score < sched_interact) {
		pri = PRI_MIN_REALTIME;
		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
		    * score;
		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
		    ("sched_priority: invalid interactive priority %d score %d",
		    pri, score));
	} else {
		pri = SCHED_PRI_MIN;
		if (td->td_sched->ts_ticks)
			pri += SCHED_PRI_TICKS(td->td_sched);
		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
		KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
		    ("sched_priority: invalid priority %d: nice %d, "
		    "ticks %d ftick %d ltick %d tick pri %d",
		    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
		    td->td_sched->ts_ftick, td->td_sched->ts_ltick,
		    SCHED_PRI_TICKS(td->td_sched)));
	}
	sched_user_prio(td, pri);

	return;
}

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.  This
 * function is ugly due to integer math.
 */
static void
sched_interact_update(struct thread *td)
{
	struct td_sched *ts;
	u_int sum;

	ts = td->td_sched;
	sum = ts->ts_runtime + ts->ts_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * This only happens from two places:
	 * 1) We have added an unusual amount of run time from fork_exit.
	 * 2) We have added an unusual amount of sleep time from sched_sleep().
	 */
	if (sum > SCHED_SLP_RUN_MAX * 2) {
		if (ts->ts_runtime > ts->ts_slptime) {
			ts->ts_runtime = SCHED_SLP_RUN_MAX;
			ts->ts_slptime = 1;
		} else {
			ts->ts_slptime = SCHED_SLP_RUN_MAX;
			ts->ts_runtime = 1;
		}
		return;
	}
	/*
	 * If we have exceeded the limit by more than 1/5th then the 4/5
	 * scaling below will not bring us back under SCHED_SLP_RUN_MAX.
	 * Divide by two to get there quickly.
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		ts->ts_runtime /= 2;
		ts->ts_slptime /= 2;
		return;
	}
	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
}
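
/*
 * Example (hz = 1000, so SCHED_SLP_RUN_MAX is five seconds of shifted
 * ticks): a thread arriving here with six seconds of combined history is
 * scaled by 4/5 to ~4.8 seconds; one with eleven seconds (over double the
 * limit) has its larger component clamped to the maximum and the other
 * set to 1, discarding the old ratio entirely.
 */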

/*
 * Scale back the interactivity history when a child thread is created.  The
 * history is inherited from the parent but the thread may behave totally
 * differently.  For example, a shell spawning a compiler process.  We want
 * to learn that the compiler is behaving badly very quickly.
 */
static void
sched_interact_fork(struct thread *td)
{
	int ratio;
	int sum;

	sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		td->td_sched->ts_runtime /= ratio;
		td->td_sched->ts_slptime /= ratio;
	}
}
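
/*
 * Example: SCHED_SLP_RUN_FORK is half a second of shifted ticks, so a
 * child inheriting two seconds of combined history has both components
 * divided by ratio = 4 and must re-earn most of its interactivity score
 * through its own behavior.
 */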

/*
 * Called from proc0_init() to setup the scheduler fields.
 */
void
schedinit(void)
{

	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	td_sched0.ts_ltick = ticks;
	td_sched0.ts_ftick = ticks;
	td_sched0.ts_thread = &thread0;
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most sched_slice stathz ticks.
 */
int
sched_rr_interval(void)
{

	/* Convert sched_slice to hz */
	return (hz/(realstathz/sched_slice));
}

/*
 * Update the percent cpu tracking information when it is requested or
 * the total history exceeds the maximum.  We keep a sliding history of
 * tick counts that slowly decays.  This is less precise than the 4BSD
 * mechanism since it happens with less regular and frequent events.
 */
static void
sched_pctcpu_update(struct td_sched *ts)
{

	if (ts->ts_ticks == 0)
		return;
	if (ticks - (hz / 10) < ts->ts_ltick &&
	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
		return;
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
			    SCHED_TICK_TARG;
	else
		ts->ts_ticks = 0;
	ts->ts_ltick = ticks;
	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
}
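
/*
 * Decay example (hz = 1000): a thread with a full ten-second history that
 * last ran eight seconds ago is scaled by SCHED_TICK_TARG / (ticks -
 * ts_ftick) = 10000 / 18000, so only about 55% of the old samples survive
 * before the window is slid forward.
 */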

/*
 * Adjust the priority of a thread.  Move it to the appropriate run-queue
 * if necessary.  This is the back-end for several priority related
 * functions.
 */
static void
sched_thread_priority(struct thread *td, u_char prio)
{
	struct td_sched *ts;

	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_name, td->td_priority, prio, curthread,
	    curthread->td_name);
	ts = td->td_sched;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_priority == prio)
		return;

	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  This could be optimized to not re-add in some
		 * cases.
		 */
		sched_rem(td);
		td->td_priority = prio;
		sched_add(td, SRQ_BORROWING);
	} else if (TD_IS_RUNNING(td)) {
		struct tdq *tdq;
		int oldpri;

		tdq = TDQ_CPU(ts->ts_cpu);
		oldpri = td->td_priority;
		td->td_priority = prio;
		if (prio < tdq->tdq_lowpri)
			tdq->tdq_lowpri = prio;
		else if (tdq->tdq_lowpri == oldpri)
			tdq_setlowpri(tdq, td);
	} else
		td->td_priority = prio;
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_thread_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_thread_priority(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}

/*
 * Standard entry for setting the priority to an absolute value.
 */
void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't
	 * ever lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_thread_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

/*
 * Set the base user priority; does not affect the current running priority.
 */
void
sched_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	td->td_base_user_pri = prio;
	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
		return;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	td->td_flags |= TDF_UBORROWING;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;
}

void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	base_pri = td->td_base_user_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_UBORROWING;
		sched_user_prio(td, base_pri);
	} else {
		sched_lend_user_prio(td, prio);
	}
}

/*
 * Add the thread passed as 'newtd' to the run queue before selecting
 * the next thread to run.  This is only used for KSE.
 */
static void
sched_switchin(struct tdq *tdq, struct thread *td)
{
#ifdef SMP
	spinlock_enter();
	TDQ_UNLOCK(tdq);
	thread_lock(td);
	spinlock_exit();
	sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
#else
	td->td_lock = TDQ_LOCKPTR(tdq);
#endif
	tdq_add(tdq, td, SRQ_YIELDING);
	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
}

/*
 * Block a thread for switching.  Similar to thread_block() but does not
 * bump the spin count.
 */
static inline struct mtx *
thread_block_switch(struct thread *td)
{
	struct mtx *lock;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	lock = td->td_lock;
	td->td_lock = &blocked_lock;
	mtx_unlock_spin(lock);

	return (lock);
}

/*
 * Handle migration from sched_switch().  This happens only for
 * cpu binding.
 */
static struct mtx *
sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
{
	struct tdq *tdn;

	tdn = TDQ_CPU(td->td_sched->ts_cpu);
#ifdef SMP
	/*
	 * Do the lock dance required to avoid LOR.  We grab an extra
	 * spinlock nesting to prevent preemption while we're
	 * not holding either run-queue lock.
	 */
	spinlock_enter();
	thread_block_switch(td);	/* This releases the lock on tdq. */
	TDQ_LOCK(tdn);
	tdq_add(tdn, td, flags);
	tdq_notify(tdn, td->td_sched);
	/*
	 * After we unlock tdn the new cpu still can't switch into this
	 * thread until we've unblocked it in cpu_switch().  The lock
	 * pointers may match in the case of HTT cores.  Don't unlock here
	 * or we can deadlock when the other CPU runs the IPI handler.
	 */
	if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
		TDQ_UNLOCK(tdn);
		TDQ_LOCK(tdq);
	}
	spinlock_exit();
#endif
	return (TDQ_LOCKPTR(tdn));
}

/*
 * Release a thread that was blocked with thread_block_switch().
 */
static inline void
thread_unblock_switch(struct thread *td, struct mtx *mtx)
{
	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
	    (uintptr_t)mtx);
}

/*
 * Switch threads.  This function has to handle threads coming in while
 * blocked for some reason, running, or idle.  It also must deal with
 * migrating a thread from one queue to another as running threads may
 * be assigned elsewhere via binding.
 */
void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct tdq *tdq;
	struct td_sched *ts;
	struct mtx *mtx;
	int srqflag;
	int cpuid;

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	cpuid = PCPU_GET(cpuid);
	tdq = TDQ_CPU(cpuid);
	ts = td->td_sched;
	mtx = td->td_lock;
	ts->ts_rltick = ticks;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	/*
	 * The lock pointer in an idle thread should never change.  Reset it
	 * to CAN_RUN as well.
	 */
	if (TD_IS_IDLETHREAD(td)) {
		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
		TD_SET_CAN_RUN(td);
	} else if (TD_IS_RUNNING(td)) {
		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
		tdq_load_rem(tdq, ts);
		srqflag = (flags & SW_PREEMPT) ?
		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
		    SRQ_OURSELF|SRQ_YIELDING;
		if (ts->ts_cpu == cpuid)
			tdq_add(tdq, td, srqflag);
		else
			mtx = sched_switch_migrate(tdq, td, srqflag);
	} else {
		/* This thread must be going to sleep. */
		TDQ_LOCK(tdq);
		mtx = thread_block_switch(td);
		tdq_load_rem(tdq, ts);
	}
	/*
	 * We enter here with the thread blocked and assigned to the
	 * appropriate cpu run-queue or sleep-queue and with the current
	 * thread-queue locked.
	 */
	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
	/*
	 * If KSE assigned a new thread just add it here and let choosethread
	 * select the best one.
	 */
	if (newtd != NULL)
		sched_switchin(tdq, newtd);
	newtd = choosethread();
	/*
	 * Call the MD code to switch contexts if necessary.
	 */
	if (td != newtd) {
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
		TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
		cpu_switch(td, newtd, mtx);
		/*
		 * We may return from cpu_switch on a different cpu.  However,
		 * we always return with td_lock pointing to the current cpu's
		 * run queue lock.
		 */
		cpuid = PCPU_GET(cpuid);
		tdq = TDQ_CPU(cpuid);
		lock_profile_obtain_lock_success(
		    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	} else
		thread_unblock_switch(td, mtx);
	/*
	 * We should always get here with the lowest priority td possible.
	 */
	tdq->tdq_lowpri = td->td_priority;
	/*
	 * Assert that all went well and return.
	 */
	TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
	td->td_oncpu = cpuid;
}

/*
 * Adjust thread priorities as a result of a nice request.
 */
void
sched_nice(struct proc *p, int nice)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);

	p->p_nice = nice;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		sched_priority(td);
		sched_prio(td, td->td_base_user_pri);
		thread_unlock(td);
	}
}

/*
 * Record the sleep time for the interactivity scorer.
 */
void
sched_sleep(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	td->td_slptick = ticks;
}

/*
 * Schedule a thread to resume execution and record how long it voluntarily
 * slept.  We also update the pctcpu, interactivity, and priority.
 */
void
sched_wakeup(struct thread *td)
{
	struct td_sched *ts;
	int slptick;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	/*
	 * If we slept for more than a tick update our interactivity and
	 * priority.
	 */
	slptick = td->td_slptick;
	td->td_slptick = 0;
	if (slptick && slptick != ticks) {
		u_int hzticks;

		hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
		ts->ts_slptime += hzticks;
		sched_interact_update(td);
		sched_pctcpu_update(ts);
		sched_priority(td);
	}
	/* Reset the slice value after we sleep. */
	ts->ts_slice = sched_slice;
	sched_add(td, SRQ_BORING);
}
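
/*
 * Example: a thread that slept half a second (500 ticks at hz = 1000) has
 * 500 << SCHED_TICK_SHIFT added to ts_slptime, which lowers its
 * interactivity score and may move it from the timeshare queues into the
 * realtime range the next time sched_priority() runs.
 */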
1897
1898/*
1899 * Penalize the parent for creating a new child and initialize the child's
1900 * priority.
1901 */
1902void
1903sched_fork(struct thread *td, struct thread *child)
1904{
1905	THREAD_LOCK_ASSERT(td, MA_OWNED);
1906	sched_fork_thread(td, child);
1907	/*
1908	 * Penalize the parent and child for forking.
1909	 */
1910	sched_interact_fork(child);
1911	sched_priority(child);
1912	td->td_sched->ts_runtime += tickincr;
1913	sched_interact_update(td);
1914	sched_priority(td);
1915}
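
/*
 * Note (explanatory): the fork penalty has two halves.  The child's
 * inherited sleep/run history is scaled down by sched_interact_fork()
 * when it is large, so the child must re-earn an interactive score,
 * and the parent is charged one tickincr of ts_runtime above so a
 * fork-heavy parent drifts toward the batch end of the scale.
 */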
1916
1917/*
1918 * Fork a new thread; it may be within the same process.
1919 */
1920void
1921sched_fork_thread(struct thread *td, struct thread *child)
1922{
1923	struct td_sched *ts;
1924	struct td_sched *ts2;
1925
1926	/*
1927	 * Initialize child.
1928	 */
1929	THREAD_LOCK_ASSERT(td, MA_OWNED);
1930	sched_newthread(child);
1931	child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
1932	child->td_cpuset = cpuset_ref(td->td_cpuset);
1933	ts = td->td_sched;
1934	ts2 = child->td_sched;
1935	ts2->ts_cpu = ts->ts_cpu;
1936	ts2->ts_runq = NULL;
1937	/*
1938	 * Grab our parent's cpu estimation information and priority.
1939	 */
1940	ts2->ts_ticks = ts->ts_ticks;
1941	ts2->ts_ltick = ts->ts_ltick;
1942	ts2->ts_ftick = ts->ts_ftick;
1943	child->td_user_pri = td->td_user_pri;
1944	child->td_base_user_pri = td->td_base_user_pri;
1945	/*
1946	 * And update interactivity score.
1947	 */
1948	ts2->ts_slptime = ts->ts_slptime;
1949	ts2->ts_runtime = ts->ts_runtime;
1950	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
1951}
1952
1953/*
1954 * Adjust the priority class of a thread.
1955 */
1956void
1957sched_class(struct thread *td, int class)
1958{
1959
1960	THREAD_LOCK_ASSERT(td, MA_OWNED);
1961	if (td->td_pri_class == class)
1962		return;
1963	/*
1964	 * On SMP, if we're on the RUNQ we must adjust the transferable
1965	 * count because we could be changing to or from an interrupt
1966	 * class.
1967	 */
1968	if (TD_ON_RUNQ(td)) {
1969		struct tdq *tdq;
1970
1971		tdq = TDQ_CPU(td->td_sched->ts_cpu);
1972		if (THREAD_CAN_MIGRATE(td))
1973			tdq->tdq_transferable--;
1974		td->td_pri_class = class;
1975		if (THREAD_CAN_MIGRATE(td))
1976			tdq->tdq_transferable++;
1977	}
1978	td->td_pri_class = class;
1979}
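
/*
 * Note on the accounting above (explanatory): tdq_transferable counts
 * only threads that THREAD_CAN_MIGRATE() approves, and that answer may
 * differ across a class change, so the thread is removed from the count
 * under its old class and re-added under the new one rather than being
 * left with a stale entry.
 */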
1980
1981/*
1982 * Return some of the child's priority and interactivity to the parent.
1983 */
1984void
1985sched_exit(struct proc *p, struct thread *child)
1986{
1987	struct thread *td;
1988
1989	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
1990	    child, child->td_name, child->td_priority);
1991
1992	PROC_SLOCK_ASSERT(p, MA_OWNED);
1993	td = FIRST_THREAD_IN_PROC(p);
1994	sched_exit_thread(td, child);
1995}
1996
1997/*
1998 * Penalize another thread for the time spent on this one.  This helps to
1999 * worsen the priority and interactivity of processes which schedule batch
2000 * jobs such as make.  This has little effect on the make process itself but
2001 * causes new processes spawned by it to receive worse scores immediately.
2002 */
2003void
2004sched_exit_thread(struct thread *td, struct thread *child)
2005{
2006
2007	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
2008	    child, child->td_name, child->td_priority);
2009
2010#ifdef KSE
2011	/*
2012	 * KSE forks and exits so often that this penalty causes short-lived
2013	 * threads to always be non-interactive.  This causes mozilla to
2014	 * crawl under load.
2015	 */
2016	if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
2017		return;
2018#endif
2019	/*
2020	 * Give the child's runtime to the parent without returning the
2021	 * sleep time as a penalty to the parent.  This causes shells that
2022	 * launch expensive things to mark their children as expensive.
2023	 */
2024	thread_lock(td);
2025	td->td_sched->ts_runtime += child->td_sched->ts_runtime;
2026	sched_interact_update(td);
2027	sched_priority(td);
2028	thread_unlock(td);
2029}
2030
2031void
2032sched_preempt(struct thread *td)
2033{
2034	struct tdq *tdq;
2035
2036	thread_lock(td);
2037	tdq = TDQ_SELF();
2038	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2039	tdq->tdq_ipipending = 0;
2040	if (td->td_priority > tdq->tdq_lowpri) {
2041		if (td->td_critnest > 1)
2042			td->td_owepreempt = 1;
2043		else
2044			mi_switch(SW_INVOL | SW_PREEMPT, NULL);
2045	}
2046	thread_unlock(td);
2047}
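
/*
 * Note (explanatory): if the interrupted thread sits in a nested
 * critical section we cannot switch here, so td_owepreempt is left set
 * and the switch is performed when the outermost critical_exit() runs;
 * only at critnest <= 1 is mi_switch() called directly above.
 */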
2048
2049/*
2050 * Fix priorities on return to user-space.  Priorities may be elevated due
2051 * to static priorities in msleep() or similar.
2052 */
2053void
2054sched_userret(struct thread *td)
2055{
2056	/*
2057	 * XXX we cheat slightly on the locking here to avoid locking in
2058	 * the usual case.  Setting td_priority here is essentially an
2059	 * incomplete workaround for not setting it properly elsewhere.
2060	 * Now that some interrupt handlers are threads, not setting it
2061	 * properly elsewhere can clobber it in the window between setting
2062	 * it here and returning to user mode, so don't waste time setting
2063	 * it perfectly here.
2064	 */
2065	KASSERT((td->td_flags & TDF_BORROWING) == 0,
2066	    ("thread with borrowed priority returning to userland"));
2067	if (td->td_priority != td->td_user_pri) {
2068		thread_lock(td);
2069		td->td_priority = td->td_user_pri;
2070		td->td_base_pri = td->td_user_pri;
2071		tdq_setlowpri(TDQ_SELF(), td);
2072		thread_unlock(td);
2073	}
2074}
2075
2076/*
2077 * Handle a stathz tick.  This is really only relevant for timeshare
2078 * threads.
2079 */
2080void
2081sched_clock(struct thread *td)
2082{
2083	struct tdq *tdq;
2084	struct td_sched *ts;
2085
2086	THREAD_LOCK_ASSERT(td, MA_OWNED);
2087	tdq = TDQ_SELF();
2088#ifdef SMP
2089	/*
2090	 * We run the long term load balancer infrequently on the first cpu.
2091	 */
2092	if (balance_tdq == tdq) {
2093		if (balance_ticks && --balance_ticks == 0)
2094			sched_balance();
2095	}
2096#endif
2097	/*
2098	 * Advance the insert index once for each tick to ensure that all
2099	 * threads get a chance to run.
2100	 */
2101	if (tdq->tdq_idx == tdq->tdq_ridx) {
2102		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
2103		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
2104			tdq->tdq_ridx = tdq->tdq_idx;
2105	}
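	/*
	 * Illustrative sketch of the rotation above: the timeshare runq is
	 * a circular calendar of RQ_NQS slots.  tdq_ridx is where threads
	 * are removed and tdq_idx is where new arrivals are inserted;
	 * advancing tdq_idx one slot per tick puts a newly queued thread
	 * behind everything already waiting, so every thread gets a turn
	 * before the queue wraps around to it again.
	 */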
2106	ts = td->td_sched;
2107	if (td->td_pri_class & PRI_FIFO_BIT)
2108		return;
2109	if (td->td_pri_class == PRI_TIMESHARE) {
2110		/*
2111		 * We used a tick; charge it to the thread so
2112		 * that we can compute our interactivity.
2113		 */
2114		td->td_sched->ts_runtime += tickincr;
2115		sched_interact_update(td);
2116	}
2117	/*
2118	 * We used up one time slice.
2119	 */
2120	if (--ts->ts_slice > 0)
2121		return;
2122	/*
2123	 * We're out of time, recompute priorities and requeue.
2124	 */
2125	sched_priority(td);
2126	td->td_flags |= TDF_NEEDRESCHED;
2127}
2128
2129/*
2130 * Called once per hz tick.  Used for cpu utilization information.  This
2131 * is easier than trying to scale based on stathz.
2132 */
2133void
2134sched_tick(void)
2135{
2136	struct td_sched *ts;
2137
2138	ts = curthread->td_sched;
2139	/* Adjust ticks for pctcpu */
2140	ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2141	ts->ts_ltick = ticks;
2142	/*
2143	 * Update if we've exceeded our desired tick threshold by over one
2144	 * second.
2145	 */
2146	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2147		sched_pctcpu_update(ts);
2148}
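
/*
 * Note (explanatory, constants assumed from earlier in this file):
 * ts_ftick and ts_ltick bracket the sampling window used for %cpu.  The
 * SCHED_TICK_* constants keep that window near a ten-second target, so
 * once ts_ltick runs well past it sched_pctcpu_update() slides the
 * window forward and stale ticks stop dominating the average.
 */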
2149
2150/*
2151 * Return whether the current CPU has runnable tasks.  Used for in-kernel
2152 * cooperative idle threads.
2153 */
2154int
2155sched_runnable(void)
2156{
2157	struct tdq *tdq;
2158	int load;
2159
2160	load = 1;
2161
2162	tdq = TDQ_SELF();
2163	if ((curthread->td_flags & TDF_IDLETD) != 0) {
2164		if (tdq->tdq_load > 0)
2165			goto out;
2166	} else
2167		if (tdq->tdq_load - 1 > 0)
2168			goto out;
2169	load = 0;
2170out:
2171	return (load);
2172}
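
/*
 * Note on the asymmetry above (explanatory): a running thread remains
 * counted in tdq_load, but the idle thread never contributes to it.
 * From the idle thread any load at all means runnable work, while a
 * normal thread must see load beyond its own entry (tdq_load - 1 > 0)
 * before another runnable task exists.
 */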
2173
2174/*
2175 * Choose the highest priority thread to run.  The thread is removed from
2176 * the run-queue while running; however, the load remains.  For SMP we set
2177 * the tdq in the global idle bitmask if it idles here.
2178 */
2179struct thread *
2180sched_choose(void)
2181{
2182	struct td_sched *ts;
2183	struct tdq *tdq;
2184
2185	tdq = TDQ_SELF();
2186	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2187	ts = tdq_choose(tdq);
2188	if (ts) {
2189		tdq_runq_rem(tdq, ts);
2190		return (ts->ts_thread);
2191	}
2192	return (PCPU_GET(idlethread));
2193}
2194
2195/*
2196 * Set owepreempt if necessary.  Preemption never happens directly in ULE;
2197 * we always request it once we exit a critical section.
2198 */
2199static inline void
2200sched_setpreempt(struct thread *td)
2201{
2202	struct thread *ctd;
2203	int cpri;
2204	int pri;
2205
2206	THREAD_LOCK_ASSERT(curthread, MA_OWNED);
2207
2208	ctd = curthread;
2209	pri = td->td_priority;
2210	cpri = ctd->td_priority;
2211	if (pri < cpri)
2212		ctd->td_flags |= TDF_NEEDRESCHED;
2213	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2214		return;
2215	if (!sched_shouldpreempt(pri, cpri, 0))
2216		return;
2217	ctd->td_owepreempt = 1;
2218}
2219
2220/*
2221 * Add a thread to a thread queue.  Initialize the priority, slice, and runq,
2222 * and add the thread to the appropriate queue.  This is the internal function called
2223 * when the tdq is predetermined.
2224 */
2225void
2226tdq_add(struct tdq *tdq, struct thread *td, int flags)
2227{
2228	struct td_sched *ts;
2229	int class;
2230
2231	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2232	KASSERT((td->td_inhibitors == 0),
2233	    ("sched_add: trying to run inhibited thread"));
2234	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
2235	    ("sched_add: bad thread state"));
2236	KASSERT(td->td_flags & TDF_INMEM,
2237	    ("sched_add: thread swapped out"));
2238
2239	ts = td->td_sched;
2240	class = PRI_BASE(td->td_pri_class);
2241	TD_SET_RUNQ(td);
2242	if (ts->ts_slice == 0)
2243		ts->ts_slice = sched_slice;
2244	/*
2245	 * Pick the run queue based on priority.
2246	 */
2247	if (td->td_priority <= PRI_MAX_REALTIME)
2248		ts->ts_runq = &tdq->tdq_realtime;
2249	else if (td->td_priority <= PRI_MAX_TIMESHARE)
2250		ts->ts_runq = &tdq->tdq_timeshare;
2251	else
2252		ts->ts_runq = &tdq->tdq_idle;
2253	if (td->td_priority < tdq->tdq_lowpri)
2254		tdq->tdq_lowpri = td->td_priority;
2255	tdq_runq_add(tdq, ts, flags);
2256	tdq_load_add(tdq, ts);
2257}
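
/*
 * Example of the selection above (illustrative): priorities at or below
 * PRI_MAX_REALTIME land on tdq_realtime, the timeshare range lands on
 * the circular tdq_timeshare calendar, and everything slower falls
 * through to tdq_idle.  tdq_lowpri is lowered at the same time so that
 * remote cpus comparing against it see the best priority runnable here.
 */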
2258
2259/*
2260 * Select the target thread queue and add a thread to it.  Request
2261 * preemption or IPI a remote processor if required.
2262 */
2263void
2264sched_add(struct thread *td, int flags)
2265{
2266	struct td_sched *ts;
2267	struct tdq *tdq;
2268#ifdef SMP
2269	int cpuid;
2270	int cpu;
2271#endif
2272	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
2273	    td, td->td_name, td->td_priority, curthread,
2274	    curthread->td_name);
2275	THREAD_LOCK_ASSERT(td, MA_OWNED);
2276	ts = td->td_sched;
2277	/*
2278	 * Recalculate the priority before we select the target cpu or
2279	 * run-queue.
2280	 */
2281	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2282		sched_priority(td);
2283#ifdef SMP
2284	cpuid = PCPU_GET(cpuid);
2285	/*
2286	 * Pick the destination cpu and, if it isn't ours, transfer to the
2287	 * target cpu.
2288	 */
2289	cpu = sched_pickcpu(ts, flags);
2290	tdq = sched_setcpu(ts, cpu, flags);
2291	tdq_add(tdq, td, flags);
2292	if (cpu != cpuid) {
2293		tdq_notify(tdq, ts);
2294		return;
2295	}
2296#else
2297	tdq = TDQ_SELF();
2298	TDQ_LOCK(tdq);
2299	/*
2300	 * Now that the thread is moving to the run-queue, set the lock
2301	 * to the scheduler's lock.
2302	 */
2303	thread_lock_set(td, TDQ_LOCKPTR(tdq));
2304	tdq_add(tdq, td, flags);
2305#endif
2306	if (!(flags & SRQ_YIELDING))
2307		sched_setpreempt(td);
2308}
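
/*
 * Usage note (illustrative): sched_wakeup() above reaches this function
 * as sched_add(td, SRQ_BORING).  On SMP, sched_setcpu() retargets the
 * thread's lock to the chosen cpu's tdq lock before tdq_add() runs, so
 * the THREAD_LOCK_ASSERT() at entry and the TDQ_LOCK_ASSERT() inside
 * tdq_add() end up naming the same spin mutex.
 */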
2309
2310/*
2311 * Remove a thread from a run-queue without running it.  This is used
2312 * when we're stealing a thread from a remote queue.  Otherwise all threads
2313 * exit by calling sched_exit_thread() and sched_throw() themselves.
2314 */
2315void
2316sched_rem(struct thread *td)
2317{
2318	struct tdq *tdq;
2319	struct td_sched *ts;
2320
2321	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
2322	    td, td->td_name, td->td_priority, curthread,
2323	    curthread->td_name);
2324	ts = td->td_sched;
2325	tdq = TDQ_CPU(ts->ts_cpu);
2326	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2327	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2328	KASSERT(TD_ON_RUNQ(td),
2329	    ("sched_rem: thread not on run queue"));
2330	tdq_runq_rem(tdq, ts);
2331	tdq_load_rem(tdq, ts);
2332	TD_SET_CAN_RUN(td);
2333	if (td->td_priority == tdq->tdq_lowpri)
2334		tdq_setlowpri(tdq, NULL);
2335}
2336
2337/*
2338 * Fetch cpu utilization information.  Updates on demand.
2339 */
2340fixpt_t
2341sched_pctcpu(struct thread *td)
2342{
2343	fixpt_t pctcpu;
2344	struct td_sched *ts;
2345
2346	pctcpu = 0;
2347	ts = td->td_sched;
2348	if (ts == NULL)
2349		return (0);
2350
2351	thread_lock(td);
2352	if (ts->ts_ticks) {
2353		int rtick;
2354
2355		sched_pctcpu_update(ts);
2356		/* How many rticks per second? */
2357		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
2358		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
2359	}
2360	thread_unlock(td);
2361
2362	return (pctcpu);
2363}
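
/*
 * Worked example of the fixed-point math above (illustrative, assuming
 * the customary FSHIFT of 11 so that FSCALE is 2048): with hz = 1000
 * and rtick = 500,
 *
 *	pctcpu = (2048 * ((2048 * 500) / 1000)) >> 11 = 1024
 *
 * which is 1024/2048, i.e. 50% in fixpt_t units; consumers such as
 * ps(1) scale this for display.
 */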
2364
2365/*
2366 * Enforce affinity settings for a thread.  Called after adjustments to
2367 * cpumask.
2368 */
2369void
2370sched_affinity(struct thread *td)
2371{
2372#ifdef SMP
2373	struct td_sched *ts;
2374	int cpu;
2375
2376	THREAD_LOCK_ASSERT(td, MA_OWNED);
2377	ts = td->td_sched;
2378	if (THREAD_CAN_SCHED(td, ts->ts_cpu))
2379		return;
2380	if (!TD_IS_RUNNING(td))
2381		return;
2382	td->td_flags |= TDF_NEEDRESCHED;
2383	if (!THREAD_CAN_MIGRATE(td))
2384		return;
2385	/*
2386	 * Assign the new cpu and force a switch before returning to
2387	 * userspace.  If the target thread is not running locally, send
2388	 * an ipi to force the issue.
2389	 */
2390	cpu = ts->ts_cpu;
2391	ts->ts_cpu = sched_pickcpu(ts, 0);
2392	if (cpu != PCPU_GET(cpuid))
2393		ipi_selected(1 << cpu, IPI_PREEMPT);
2394#endif
2395}
2396
2397/*
2398 * Bind a thread to a target cpu.
2399 */
2400void
2401sched_bind(struct thread *td, int cpu)
2402{
2403	struct td_sched *ts;
2404
2405	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
2406	ts = td->td_sched;
2407	if (ts->ts_flags & TSF_BOUND)
2408		sched_unbind(td);
2409	ts->ts_flags |= TSF_BOUND;
2410	sched_pin();
2411	if (PCPU_GET(cpuid) == cpu)
2412		return;
2413	ts->ts_cpu = cpu;
2414	/* When we return from mi_switch we'll be on the correct cpu. */
2415	mi_switch(SW_VOL, NULL);
2416}
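
/*
 * Illustrative usage (a sketch of the expected calling pattern, not
 * code from this file): callers pin themselves around per-cpu work,
 * holding the thread lock across each call:
 *
 *	thread_lock(curthread);
 *	sched_bind(curthread, cpu);
 *	thread_unlock(curthread);
 *	... per-cpu work now runs on 'cpu' ...
 *	thread_lock(curthread);
 *	sched_unbind(curthread);
 *	thread_unlock(curthread);
 */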
2417
2418/*
2419 * Release a bound thread.
2420 */
2421void
2422sched_unbind(struct thread *td)
2423{
2424	struct td_sched *ts;
2425
2426	THREAD_LOCK_ASSERT(td, MA_OWNED);
2427	ts = td->td_sched;
2428	if ((ts->ts_flags & TSF_BOUND) == 0)
2429		return;
2430	ts->ts_flags &= ~TSF_BOUND;
2431	sched_unpin();
2432}
2433
2434int
2435sched_is_bound(struct thread *td)
2436{
2437	THREAD_LOCK_ASSERT(td, MA_OWNED);
2438	return (td->td_sched->ts_flags & TSF_BOUND);
2439}
2440
2441/*
2442 * Basic yield call.
2443 */
2444void
2445sched_relinquish(struct thread *td)
2446{
2447	thread_lock(td);
2448	SCHED_STAT_INC(switch_relinquish);
2449	mi_switch(SW_VOL, NULL);
2450	thread_unlock(td);
2451}
2452
2453/*
2454 * Return the total system load.
2455 */
2456int
2457sched_load(void)
2458{
2459#ifdef SMP
2460	int total;
2461	int i;
2462
2463	total = 0;
2464	for (i = 0; i <= mp_maxid; i++)
2465		total += TDQ_CPU(i)->tdq_sysload;
2466	return (total);
2467#else
2468	return (TDQ_SELF()->tdq_sysload);
2469#endif
2470}
2471
2472int
2473sched_sizeof_proc(void)
2474{
2475	return (sizeof(struct proc));
2476}
2477
2478int
2479sched_sizeof_thread(void)
2480{
2481	return (sizeof(struct thread) + sizeof(struct td_sched));
2482}
2483
2484/*
2485 * The actual idle process.
2486 */
2487void
2488sched_idletd(void *dummy)
2489{
2490	struct thread *td;
2491	struct tdq *tdq;
2492
2493	td = curthread;
2494	tdq = TDQ_SELF();
2495	mtx_assert(&Giant, MA_NOTOWNED);
2496	/* ULE relies on preemption for idle interruption. */
2497	for (;;) {
2498#ifdef SMP
2499		if (tdq_idled(tdq))
2500			cpu_idle();
2501#else
2502		cpu_idle();
2503#endif
2504	}
2505}
2506
2507/*
2508 * A CPU is entering for the first time or a thread is exiting.
2509 */
2510void
2511sched_throw(struct thread *td)
2512{
2513	struct thread *newtd;
2514	struct tdq *tdq;
2515
2516	tdq = TDQ_SELF();
2517	if (td == NULL) {
2518		/* Correct spinlock nesting and acquire the correct lock. */
2519		TDQ_LOCK(tdq);
2520		spinlock_exit();
2521	} else {
2522		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2523		tdq_load_rem(tdq, td->td_sched);
2524		lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
2525	}
2526	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
2527	newtd = choosethread();
2528	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
2529	PCPU_SET(switchtime, cpu_ticks());
2530	PCPU_SET(switchticks, ticks);
2531	cpu_throw(td, newtd);		/* doesn't return */
2532}
2533
2534/*
2535 * This is called from fork_exit().  Just acquire the correct locks and
2536 * let fork do the rest of the work.
2537 */
2538void
2539sched_fork_exit(struct thread *td)
2540{
2541	struct td_sched *ts;
2542	struct tdq *tdq;
2543	int cpuid;
2544
2545	/*
2546	 * Finish setting up thread glue so that it begins execution in a
2547	 * non-nested critical section with the scheduler lock held.
2548	 */
2549	cpuid = PCPU_GET(cpuid);
2550	tdq = TDQ_CPU(cpuid);
2551	ts = td->td_sched;
2552	if (TD_IS_IDLETHREAD(td))
2553		td->td_lock = TDQ_LOCKPTR(tdq);
2554	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2555	td->td_oncpu = cpuid;
2556	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2557	lock_profile_obtain_lock_success(
2558	    &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
2559	tdq->tdq_lowpri = td->td_priority;
2560}
2561
2562static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
2563    "Scheduler");
2564SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
2565    "Scheduler name");
2566SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
2567    "Slice size for timeshare threads");
2568SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
2569     "Interactivity score threshold");
2570SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
2571     0, "Min priority for preemption; lower priorities have greater precedence");
2572#ifdef SMP
2573SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
2574    "Number of hz ticks to keep thread affinity for");
2575SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
2576    "Enables the long-term load balancer");
2577SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
2578    &balance_interval, 0,
2579    "Average frequency in stathz ticks to run the long-term balancer");
2580SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
2581    "Steals work from another hyper-threaded core on idle");
2582SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
2583    "Attempts to steal work from other cores before idling");
2584SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
2585    "Minimum load on remote cpu before we'll steal");
2586#endif
2587
2588/* ps compat.  All cpu percentages from ULE are weighted. */
2589static int ccpu = 0;
2590SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
2591
2592
2593#define KERN_SWITCH_INCLUDE 1
2594#include "kern/kern_switch.c"
2595