1/*-
2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27/*
28 * This file implements the ULE scheduler.  ULE supports independent CPU
29 * run queues and fine-grained locking.  It has superior interactive
30 * performance under load even on uniprocessor systems.
31 *
32 * etymology:
33 *   ULE is the last three letters in schedule.  It owes its name to a
34 * generic user created for a scheduling system by Paul Mikesell at
35 * Isilon Systems and a general lack of creativity on the part of the author.
36 */
37
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 171899 2007-08-20 06:34:20Z jeff $");
40
41#include "opt_hwpmc_hooks.h"
42#include "opt_sched.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/kdb.h>
47#include <sys/kernel.h>
48#include <sys/ktr.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/proc.h>
52#include <sys/resource.h>
53#include <sys/resourcevar.h>
54#include <sys/sched.h>
55#include <sys/smp.h>
56#include <sys/sx.h>
57#include <sys/sysctl.h>
58#include <sys/sysproto.h>
59#include <sys/turnstile.h>
60#include <sys/umtx.h>
61#include <sys/vmmeter.h>
62#ifdef KTRACE
63#include <sys/uio.h>
64#include <sys/ktrace.h>
65#endif
66
67#ifdef HWPMC_HOOKS
68#include <sys/pmckern.h>
69#endif
70
71#include <machine/cpu.h>
72#include <machine/smp.h>
73
74#ifndef PREEMPTION
75#error	"SCHED_ULE requires options PREEMPTION"
76#endif
77
78#define	KTR_ULE	0
79
80/*
81 * Thread scheduler specific section.  All fields are protected
82 * by the thread lock.
83 */
84struct td_sched {
85	TAILQ_ENTRY(td_sched) ts_procq;	/* Run queue. */
86	struct thread	*ts_thread;	/* Active associated thread. */
87	struct runq	*ts_runq;	/* Run-queue we're queued on. */
88	short		ts_flags;	/* TSF_* flags. */
89	u_char		ts_rqindex;	/* Run queue index. */
90	u_char		ts_cpu;		/* CPU that we have affinity for. */
91	int		ts_slptick;	/* Tick when we went to sleep. */
92	int		ts_slice;	/* Ticks of slice remaining. */
93	u_int		ts_slptime;	/* Number of ticks we voluntarily slept */
94	u_int		ts_runtime;	/* Number of ticks we were running */
95	/* The following variables are only used for pctcpu calculation */
96	int		ts_ltick;	/* Last tick that we were running on */
97	int		ts_ftick;	/* First tick that we were running on */
98	int		ts_ticks;	/* Tick count */
99#ifdef SMP
100	int		ts_rltick;	/* Real last tick, for affinity. */
101#endif
102};
103/* flags kept in ts_flags */
104#define	TSF_BOUND	0x0001		/* Thread cannot migrate. */
105#define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */
106
107static struct td_sched td_sched0;
108
109/*
110 * Cpu percentage computation macros and defines.
111 *
112 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
113 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
114 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
115 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
116 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
117 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
118 */
119#define	SCHED_TICK_SECS		10
120#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
121#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
122#define	SCHED_TICK_SHIFT	10
123#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
124#define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
125
126/*
127 * These macros determine priorities for non-interactive threads.  They are
128 * assigned a priority based on their recent cpu utilization as expressed
129 * by the ratio of ticks to the tick total.  NHALF priorities at the start
130 * and end of the MIN to MAX timeshare range are only reachable with negative
131 * or positive nice respectively.
132 *
133 * PRI_RANGE:	Priority range for utilization dependent priorities.
134 * PRI_NRESV:	Number of nice values.
135 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
136 * PRI_NICE:	Determines the part of the priority inherited from nice.
137 */
138#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
139#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
140#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
141#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
142#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN)
143#define	SCHED_PRI_TICKS(ts)						\
144    (SCHED_TICK_HZ((ts)) /						\
145    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
146#define	SCHED_PRI_NICE(nice)	(nice)
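
/*
 * Worked example (illustrative, not part of the original source): assuming
 * hz = 1000 and the FreeBSD 7-era priority ranges (PRI_MIN_TIMESHARE 160,
 * PRI_MAX_TIMESHARE 223, PRIO_MIN -20, PRIO_MAX 20), SCHED_PRI_NHALF is 20,
 * SCHED_PRI_MIN 180, SCHED_PRI_MAX 203 and SCHED_PRI_RANGE 23.  A thread
 * that ran for the whole 10 second window has SCHED_TICK_HZ(ts) ~=
 * SCHED_TICK_TOTAL(ts) = 10000, so SCHED_PRI_TICKS(ts) =
 * 10000 / (roundup(10000, 23) / 23) = 10000 / 435 = 22, nearly the full
 * utilization penalty; with nice 0 it ends up at 180 + 22 = 202.
 */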
147
148/*
149 * These determine the interactivity of a process.  Interactivity differs from
150 * cpu utilization in that it compares voluntary sleep time against run time,
151 * whereas cpu utilization also counts time spent waiting on a run-queue.
152 * This more accurately models the intent of the thread.
153 *
154 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
155 *		before throttling back.
156 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
157 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
158 * INTERACT_THRESH:	Threshold for placement on the current runq.
159 */
160#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
161#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
162#define	SCHED_INTERACT_MAX	(100)
163#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
164#define	SCHED_INTERACT_THRESH	(30)
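
/*
 * Scale check (illustrative, assuming hz = 1000): SCHED_SLP_RUN_MAX is
 * (1000 * 5) << 10 = 5120000, five seconds of combined sleep + run history
 * kept in the hz domain scaled up by SCHED_TICK_SHIFT, while
 * SCHED_SLP_RUN_FORK caps the history a child inherits at half a second.
 */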
165
166/*
167 * tickincr:		Converts a stathz tick into a hz domain scaled by
168 *			the shift factor.  Without the shift the error rate
169 *			due to rounding would be unacceptably high.
170 * realstathz:		stathz is sometimes 0; we run off of hz in that case.
171 * sched_slice:		Runtime of each thread before rescheduling.
172 * preempt_thresh:	Priority threshold for preemption and remote IPIs.
173 */
174static int sched_interact = SCHED_INTERACT_THRESH;
175static int realstathz;
176static int tickincr;
177static int sched_slice;
178static int preempt_thresh = PRI_MIN_KERN;
179
180/*
181 * tdq - per processor runqs and statistics.  All fields are protected by the
182 * tdq_lock.  The load and lowpri may be accessed without the lock to avoid
183 * excess locking in sched_pickcpu().
184 */
185struct tdq {
186	struct mtx	*tdq_lock;		/* Pointer to group lock. */
187	struct runq	tdq_realtime;		/* real-time run queue. */
188	struct runq	tdq_timeshare;		/* timeshare run queue. */
189	struct runq	tdq_idle;		/* Queue of IDLE threads. */
190	int		tdq_load;		/* Aggregate load. */
191	u_char		tdq_idx;		/* Current insert index. */
192	u_char		tdq_ridx;		/* Current removal index. */
193#ifdef SMP
194	u_char		tdq_lowpri;		/* Lowest priority thread. */
195	int		tdq_transferable;	/* Transferable thread count. */
196	LIST_ENTRY(tdq)	tdq_siblings;		/* Next in tdq group. */
197	struct tdq_group *tdq_group;		/* Our processor group. */
198#else
199	int		tdq_sysload;		/* For loadavg, !ITHD load. */
200#endif
201} __aligned(64);
202
203
204#ifdef SMP
205/*
206 * tdq groups are groups of processors which can cheaply share threads.  When
207 * one processor in the group goes idle it will check the runqs of the other
208 * processors in its group prior to halting and waiting for an interrupt.
209 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
210 * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
211 * load balancer.
212 */
213struct tdq_group {
214	struct mtx	tdg_lock;	/* Protects all fields below. */
215	int		tdg_cpus;	/* Count of CPUs in this tdq group. */
216	cpumask_t 	tdg_cpumask;	/* Mask of cpus in this group. */
217	cpumask_t 	tdg_idlemask;	/* Idle cpus in this group. */
218	cpumask_t 	tdg_mask;	/* Bit mask for first cpu. */
219	int		tdg_load;	/* Total load of this group. */
220	int	tdg_transferable;	/* Transferable load of this group. */
221	LIST_HEAD(, tdq) tdg_members;	/* Linked list of all members. */
222	char		tdg_name[16];	/* lock name. */
223} __aligned(64);
224
225#define	SCHED_AFFINITY_DEFAULT	(max(1, hz / 300))
226#define	SCHED_AFFINITY(ts)	((ts)->ts_rltick > ticks - affinity)
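
/*
 * Illustrative: with hz = 1000 the default affinity window is
 * max(1, 1000 / 300) = 3 ticks, so SCHED_AFFINITY(ts) holds only if the
 * thread last ran within the past ~3ms and sched_pickcpu() will then
 * prefer its previous cpu while its cache is presumed warm.
 */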
227
228/*
229 * Run-time tunables.
230 */
231static int rebalance = 1;
232static int balance_secs = 1;
233static int pick_pri = 1;
234static int affinity;
235static int tryself = 1;
236static int steal_htt = 0;
237static int steal_idle = 1;
238static int steal_thresh = 2;
239static int topology = 0;
240
241/*
242 * One thread queue per processor.
243 */
244static volatile cpumask_t tdq_idle;
245static int tdg_maxid;
246static struct tdq	tdq_cpu[MAXCPU];
247static struct tdq_group tdq_groups[MAXCPU];
248static struct callout balco;
249static struct callout gbalco;
250
251#define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
252#define	TDQ_CPU(x)	(&tdq_cpu[(x)])
253#define	TDQ_ID(x)	((int)((x) - tdq_cpu))
254#define	TDQ_GROUP(x)	(&tdq_groups[(x)])
255#define	TDG_ID(x)	((int)((x) - tdq_groups))
256#else	/* !SMP */
257static struct tdq	tdq_cpu;
258static struct mtx	tdq_lock;
259
260#define	TDQ_ID(x)	(0)
261#define	TDQ_SELF()	(&tdq_cpu)
262#define	TDQ_CPU(x)	(&tdq_cpu)
263#endif
264
265#define	TDQ_LOCK_ASSERT(t, type)	mtx_assert(TDQ_LOCKPTR((t)), (type))
266#define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
267#define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
268#define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
269#define	TDQ_LOCKPTR(t)		((t)->tdq_lock)
270
271static void sched_priority(struct thread *);
272static void sched_thread_priority(struct thread *, u_char);
273static int sched_interact_score(struct thread *);
274static void sched_interact_update(struct thread *);
275static void sched_interact_fork(struct thread *);
276static void sched_pctcpu_update(struct td_sched *);
277
278/* Operations on per processor queues */
279static struct td_sched * tdq_choose(struct tdq *);
280static void tdq_setup(struct tdq *);
281static void tdq_load_add(struct tdq *, struct td_sched *);
282static void tdq_load_rem(struct tdq *, struct td_sched *);
283static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
284static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
285void tdq_print(int cpu);
286static void runq_print(struct runq *rq);
287static void tdq_add(struct tdq *, struct thread *, int);
288#ifdef SMP
289static void tdq_move(struct tdq *, struct tdq *);
290static int tdq_idled(struct tdq *);
291static void tdq_notify(struct td_sched *);
292static struct td_sched *tdq_steal(struct tdq *, int);
293static struct td_sched *runq_steal(struct runq *);
294static int sched_pickcpu(struct td_sched *, int);
295static void sched_balance(void *);
296static void sched_balance_groups(void *);
297static void sched_balance_group(struct tdq_group *);
298static void sched_balance_pair(struct tdq *, struct tdq *);
299static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
300static inline struct mtx *thread_block_switch(struct thread *);
301static inline void thread_unblock_switch(struct thread *, struct mtx *);
302static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
303
304#define	THREAD_CAN_MIGRATE(td)	 ((td)->td_pinned == 0)
305#endif
306
307static void sched_setup(void *dummy);
308SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
309
310static void sched_initticks(void *dummy);
311SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)
312
313/*
314 * Print the threads waiting on a run-queue.
315 */
316static void
317runq_print(struct runq *rq)
318{
319	struct rqhead *rqh;
320	struct td_sched *ts;
321	int pri;
322	int j;
323	int i;
324
325	for (i = 0; i < RQB_LEN; i++) {
326		printf("\t\trunq bits %d 0x%zx\n",
327		    i, rq->rq_status.rqb_bits[i]);
328		for (j = 0; j < RQB_BPW; j++)
329			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
330				pri = j + (i << RQB_L2BPW);
331				rqh = &rq->rq_queues[pri];
332				TAILQ_FOREACH(ts, rqh, ts_procq) {
333					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
334					    ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
335				}
336			}
337	}
338}
339
340/*
341 * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
342 */
343void
344tdq_print(int cpu)
345{
346	struct tdq *tdq;
347
348	tdq = TDQ_CPU(cpu);
349
350	printf("tdq %d:\n", TDQ_ID(tdq));
351	printf("\tlockptr         %p\n", TDQ_LOCKPTR(tdq));
352	printf("\tload:           %d\n", tdq->tdq_load);
353	printf("\ttimeshare idx:  %d\n", tdq->tdq_idx);
354	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
355	printf("\trealtime runq:\n");
356	runq_print(&tdq->tdq_realtime);
357	printf("\ttimeshare runq:\n");
358	runq_print(&tdq->tdq_timeshare);
359	printf("\tidle runq:\n");
360	runq_print(&tdq->tdq_idle);
361#ifdef SMP
362	printf("\tload transferable: %d\n", tdq->tdq_transferable);
363	printf("\tlowest priority:   %d\n", tdq->tdq_lowpri);
364	printf("\tgroup:             %d\n", TDG_ID(tdq->tdq_group));
365	printf("\tLock name:         %s\n", tdq->tdq_group->tdg_name);
366#endif
367}
368
369#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
370/*
371 * Add a thread to the actual run-queue.  Keeps transferable counts up to
372 * date with what is actually on the run-queue.  Selects the correct
373 * queue position for timeshare threads.
374 */
375static __inline void
376tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
377{
378	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
379	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
380#ifdef SMP
381	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
382		tdq->tdq_transferable++;
383		tdq->tdq_group->tdg_transferable++;
384		ts->ts_flags |= TSF_XFERABLE;
385	}
386#endif
387	if (ts->ts_runq == &tdq->tdq_timeshare) {
388		u_char pri;
389
390		pri = ts->ts_thread->td_priority;
391		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
392			("Invalid priority %d on timeshare runq", pri));
393		/*
394		 * This queue contains only priorities between MIN and MAX
395		 * timeshare.  Use the whole queue to represent these values.
396		 */
397		if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
398			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
399			pri = (pri + tdq->tdq_idx) % RQ_NQS;
400			/*
401			 * This effectively shortens the queue by one so we
402			 * can have a one slot difference between idx and
403			 * ridx while we wait for threads to drain.
404			 */
405			if (tdq->tdq_ridx != tdq->tdq_idx &&
406			    pri == tdq->tdq_ridx)
407				pri = (unsigned char)(pri - 1) % RQ_NQS;
408		} else
409			pri = tdq->tdq_ridx;
410		runq_add_pri(ts->ts_runq, ts, pri, flags);
411	} else
412		runq_add(ts->ts_runq, ts, flags);
413}
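
/*
 * Worked example for the timeshare case above (illustrative; assumes the
 * stock RQ_NQS of 64 queues and timeshare range 160-223, so TS_RQ_PPQ = 1):
 * a priority 170 thread with tdq_idx = 20 is inserted at queue
 * (170 - 160 + 20) % 64 = 30.  As tdq_idx advances each tick the same
 * priority lands progressively further from the removal index, which is
 * what turns the timeshare runq into a circular calendar queue.
 */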
414
415/*
416 * Remove a thread from a run-queue.  This typically happens when a thread
417 * is selected to run.  Running threads are not on the queue and the
418 * transferable count does not reflect them.
419 */
420static __inline void
421tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
422{
423	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
424	KASSERT(ts->ts_runq != NULL,
425	    ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
426#ifdef SMP
427	if (ts->ts_flags & TSF_XFERABLE) {
428		tdq->tdq_transferable--;
429		tdq->tdq_group->tdg_transferable--;
430		ts->ts_flags &= ~TSF_XFERABLE;
431	}
432#endif
433	if (ts->ts_runq == &tdq->tdq_timeshare) {
434		if (tdq->tdq_idx != tdq->tdq_ridx)
435			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
436		else
437			runq_remove_idx(ts->ts_runq, ts, NULL);
438		/*
439		 * For timeshare threads we update the priority here so
440		 * the priority reflects the time we've been sleeping.
441		 */
442		ts->ts_ltick = ticks;
443		sched_pctcpu_update(ts);
444		sched_priority(ts->ts_thread);
445	} else
446		runq_remove(ts->ts_runq, ts);
447}
448
449/*
450 * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
451 * for this thread to the referenced thread queue.
452 */
453static void
454tdq_load_add(struct tdq *tdq, struct td_sched *ts)
455{
456	int class;
457
458	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
459	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
460	class = PRI_BASE(ts->ts_thread->td_pri_class);
461	tdq->tdq_load++;
462	CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
463	if (class != PRI_ITHD &&
464	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
465#ifdef SMP
466		tdq->tdq_group->tdg_load++;
467#else
468		tdq->tdq_sysload++;
469#endif
470}
471
472/*
473 * Remove the load from a thread that is transitioning to a sleep state or
474 * exiting.
475 */
476static void
477tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
478{
479	int class;
480
481	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
482	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
483	class = PRI_BASE(ts->ts_thread->td_pri_class);
484	if (class != PRI_ITHD &&
485	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
486#ifdef SMP
487		tdq->tdq_group->tdg_load--;
488#else
489		tdq->tdq_sysload--;
490#endif
491	KASSERT(tdq->tdq_load != 0,
492	    ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
493	tdq->tdq_load--;
494	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
495	ts->ts_runq = NULL;
496}
497
498#ifdef SMP
499/*
500 * sched_balance is a simple CPU load balancing algorithm.  It operates by
501 * finding the least loaded and most loaded cpu and equalizing their load
502 * by migrating some processes.
503 *
504 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
505 * installations will only have 2 cpus.  Secondly, load balancing too much at
506 * once can have an unpleasant effect on the system.  The scheduler rarely has
507 * enough information to make perfect decisions.  So this algorithm chooses
508 * simplicity and more gradual effects on load in larger systems.
509 *
510 */
511static void
512sched_balance(void *arg)
513{
514	struct tdq_group *high;
515	struct tdq_group *low;
516	struct tdq_group *tdg;
517	int cnt;
518	int i;
519
520	callout_reset(&balco, max(hz / 2, random() % (hz * balance_secs)),
521	    sched_balance, NULL);
522	if (smp_started == 0 || rebalance == 0)
523		return;
524	low = high = NULL;
525	i = random() % (tdg_maxid + 1);
526	for (cnt = 0; cnt <= tdg_maxid; cnt++) {
527		tdg = TDQ_GROUP(i);
528		/*
529		 * Find the CPU with the highest load that has some
530		 * threads to transfer.
531		 */
532		if ((high == NULL || tdg->tdg_load > high->tdg_load)
533		    && tdg->tdg_transferable)
534			high = tdg;
535		if (low == NULL || tdg->tdg_load < low->tdg_load)
536			low = tdg;
537		if (++i > tdg_maxid)
538			i = 0;
539	}
540	if (low != NULL && high != NULL && high != low)
541		sched_balance_pair(LIST_FIRST(&high->tdg_members),
542		    LIST_FIRST(&low->tdg_members));
543}
544
545/*
546 * Balance load between CPUs in a group.  Will only migrate within the group.
547 */
548static void
549sched_balance_groups(void *arg)
550{
551	int i;
552
553	callout_reset(&gbalco, max(hz / 2, random() % (hz * balance_secs)),
554	    sched_balance_groups, NULL);
555	if (smp_started == 0 || rebalance == 0)
556		return;
557	for (i = 0; i <= tdg_maxid; i++)
558		sched_balance_group(TDQ_GROUP(i));
559}
560
561/*
562 * Finds the greatest imbalance between two tdqs in a group.
563 */
564static void
565sched_balance_group(struct tdq_group *tdg)
566{
567	struct tdq *tdq;
568	struct tdq *high;
569	struct tdq *low;
570	int load;
571
572	if (tdg->tdg_transferable == 0)
573		return;
574	low = NULL;
575	high = NULL;
576	LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
577		load = tdq->tdq_load;
578		if (high == NULL || load > high->tdq_load)
579			high = tdq;
580		if (low == NULL || load < low->tdq_load)
581			low = tdq;
582	}
583	if (high != NULL && low != NULL && high != low)
584		sched_balance_pair(high, low);
585}
586
587/*
588 * Lock two thread queues using their address to maintain lock order.
589 */
590static void
591tdq_lock_pair(struct tdq *one, struct tdq *two)
592{
593	if (one < two) {
594		TDQ_LOCK(one);
595		TDQ_LOCK_FLAGS(two, MTX_DUPOK);
596	} else {
597		TDQ_LOCK(two);
598		TDQ_LOCK_FLAGS(one, MTX_DUPOK);
599	}
600}
601
602/*
603 * Transfer load between two imbalanced thread queues.
604 */
605static void
606sched_balance_pair(struct tdq *high, struct tdq *low)
607{
608	int transferable;
609	int high_load;
610	int low_load;
611	int move;
612	int diff;
613	int i;
614
615	tdq_lock_pair(high, low);
616	/*
617	 * If we're transferring within a group we have to use this specific
618	 * tdq's transferable count, otherwise we can steal from other members
619	 * of the group.
620	 */
621	if (high->tdq_group == low->tdq_group) {
622		transferable = high->tdq_transferable;
623		high_load = high->tdq_load;
624		low_load = low->tdq_load;
625	} else {
626		transferable = high->tdq_group->tdg_transferable;
627		high_load = high->tdq_group->tdg_load;
628		low_load = low->tdq_group->tdg_load;
629	}
630	/*
631	 * Determine what the imbalance is and then adjust that to how many
632	 * threads we actually have to give up (transferable).
633	 */
634	if (transferable != 0) {
635		diff = high_load - low_load;
636		move = diff / 2;
637		if (diff & 0x1)
638			move++;
639		move = min(move, transferable);
640		for (i = 0; i < move; i++)
641			tdq_move(high, low);
642	}
643	TDQ_UNLOCK(high);
644	TDQ_UNLOCK(low);
645	return;
646}
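
/*
 * Illustrative arithmetic for the transfer above: with high_load = 7 and
 * low_load = 2, diff = 5 and move = 5 / 2 + 1 = 3 (the odd remainder
 * rounds up), clamped to 'transferable'; three tdq_move() calls leave the
 * pair at loads of 4 and 5, as balanced as integer loads allow.
 */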
647
648/*
649 * Move a thread from one thread queue to another.
650 */
651static void
652tdq_move(struct tdq *from, struct tdq *to)
653{
654	struct td_sched *ts;
655	struct thread *td;
656	struct tdq *tdq;
657	int cpu;
658
659	tdq = from;
660	cpu = TDQ_ID(to);
661	ts = tdq_steal(tdq, 1);
662	if (ts == NULL) {
663		struct tdq_group *tdg;
664
665		tdg = tdq->tdq_group;
666		LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
667			if (tdq == from || tdq->tdq_transferable == 0)
668				continue;
669			ts = tdq_steal(tdq, 1);
670			break;
671		}
672		if (ts == NULL)
673			return;
674	}
675	if (tdq == to)
676		return;
677	td = ts->ts_thread;
678	/*
679	 * Although the run queue is locked the thread may be blocked.  Lock
680	 * it to clear this.
681	 */
682	thread_lock(td);
683	/* Drop recursive lock on from. */
684	TDQ_UNLOCK(from);
685	sched_rem(td);
686	ts->ts_cpu = cpu;
687	td->td_lock = TDQ_LOCKPTR(to);
688	tdq_add(to, td, SRQ_YIELDING);
689	tdq_notify(ts);
690}
691
692/*
693 * This tdq has idled.  Try to steal a thread from another cpu and switch
694 * to it.
695 */
696static int
697tdq_idled(struct tdq *tdq)
698{
699	struct tdq_group *tdg;
700	struct tdq *steal;
701	struct td_sched *ts;
702	struct thread *td;
703	int highload;
704	int highcpu;
705	int load;
706	int cpu;
707
708	/* We don't want to be preempted while we're iterating over tdqs */
709	spinlock_enter();
710	tdg = tdq->tdq_group;
711	/*
712	 * If we're in a cpu group, try and steal threads from another cpu in
713	 * the group before idling.
714	 */
715	if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
716		LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
717			if (steal == tdq || steal->tdq_transferable == 0)
718				continue;
719			TDQ_LOCK(steal);
720			ts = tdq_steal(steal, 0);
721			if (ts)
722				goto steal;
723			TDQ_UNLOCK(steal);
724		}
725	}
726	for (;;) {
727		if (steal_idle == 0)
728			break;
729		highcpu = 0;
730		highload = 0;
731		for (cpu = 0; cpu <= mp_maxid; cpu++) {
732			if (CPU_ABSENT(cpu))
733				continue;
734			steal = TDQ_CPU(cpu);
735			load = TDQ_CPU(cpu)->tdq_transferable;
736			if (load < highload)
737				continue;
738			highload = load;
739			highcpu = cpu;
740		}
741		if (highload < steal_thresh)
742			break;
743		steal = TDQ_CPU(highcpu);
744		TDQ_LOCK(steal);
745		if (steal->tdq_transferable >= steal_thresh &&
746		    (ts = tdq_steal(steal, 1)) != NULL)
747			goto steal;
748		TDQ_UNLOCK(steal);
749		break;
750	}
751	spinlock_exit();
752	return (1);
753steal:
754	td = ts->ts_thread;
755	thread_lock(td);
756	spinlock_exit();
757	MPASS(td->td_lock == TDQ_LOCKPTR(steal));
758	TDQ_UNLOCK(steal);
759	sched_rem(td);
760	sched_setcpu(ts, PCPU_GET(cpuid), SRQ_YIELDING);
761	tdq_add(tdq, td, SRQ_YIELDING);
762	MPASS(td->td_lock == curthread->td_lock);
763	mi_switch(SW_VOL, NULL);
764	thread_unlock(curthread);
765
766	return (0);
767}
768
769/*
770 * Notify a remote cpu of new work.  Sends an IPI if criteria are met.
771 */
772static void
773tdq_notify(struct td_sched *ts)
774{
775	struct thread *ctd;
776	struct pcpu *pcpu;
777	int cpri;
778	int pri;
779	int cpu;
780
781	cpu = ts->ts_cpu;
782	pri = ts->ts_thread->td_priority;
783	pcpu = pcpu_find(cpu);
784	ctd = pcpu->pc_curthread;
785	cpri = ctd->td_priority;
786
787	/*
788	 * If our priority is not better than the current priority there is
789	 * nothing to do.
790	 */
791	if (pri > cpri)
792		return;
793	/*
794	 * Always IPI idle.
795	 */
796	if (cpri > PRI_MIN_IDLE)
797		goto sendipi;
798	/*
799	 * If we're realtime or better and there is timeshare or worse running
800	 * send an IPI.
801	 */
802	if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
803		goto sendipi;
804	/*
805	 * Otherwise only IPI if we exceed the threshold.
806	 */
807	if (pri > preempt_thresh)
808		return;
809sendipi:
810	ctd->td_flags |= TDF_NEEDRESCHED;
811	ipi_selected(1 << cpu, IPI_PREEMPT);
812}
813
814/*
815 * Steals load from a timeshare queue.  Honors the rotating queue head
816 * index.
817 */
818static struct td_sched *
819runq_steal_from(struct runq *rq, u_char start)
820{
821	struct td_sched *ts;
822	struct rqbits *rqb;
823	struct rqhead *rqh;
824	int first;
825	int bit;
826	int pri;
827	int i;
828
829	rqb = &rq->rq_status;
830	bit = start & (RQB_BPW -1);
831	pri = 0;
832	first = 0;
833again:
834	for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
835		if (rqb->rqb_bits[i] == 0)
836			continue;
837		if (bit != 0) {
838			for (pri = bit; pri < RQB_BPW; pri++)
839				if (rqb->rqb_bits[i] & (1ul << pri))
840					break;
841			if (pri >= RQB_BPW)
842				continue;
843		} else
844			pri = RQB_FFS(rqb->rqb_bits[i]);
845		pri += (i << RQB_L2BPW);
846		rqh = &rq->rq_queues[pri];
847		TAILQ_FOREACH(ts, rqh, ts_procq) {
848			if (first && THREAD_CAN_MIGRATE(ts->ts_thread))
849				return (ts);
850			first = 1;
851		}
852	}
853	if (start != 0) {
854		start = 0;
855		goto again;
856	}
857
858	return (NULL);
859}
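
/*
 * Note (added for clarity): the 'first' flag above deliberately skips the
 * first thread found at the rotating head, so a steal never takes the
 * victim cpu's next thread to run, only threads queued behind it.
 */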
860
861/*
862 * Steals load from a standard linear queue.
863 */
864static struct td_sched *
865runq_steal(struct runq *rq)
866{
867	struct rqhead *rqh;
868	struct rqbits *rqb;
869	struct td_sched *ts;
870	int word;
871	int bit;
872
873	rqb = &rq->rq_status;
874	for (word = 0; word < RQB_LEN; word++) {
875		if (rqb->rqb_bits[word] == 0)
876			continue;
877		for (bit = 0; bit < RQB_BPW; bit++) {
878			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
879				continue;
880			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
881			TAILQ_FOREACH(ts, rqh, ts_procq)
882				if (THREAD_CAN_MIGRATE(ts->ts_thread))
883					return (ts);
884		}
885	}
886	return (NULL);
887}
888
889/*
890 * Attempt to steal a thread in priority order from a thread queue.
891 */
892static struct td_sched *
893tdq_steal(struct tdq *tdq, int stealidle)
894{
895	struct td_sched *ts;
896
897	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
898	if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
899		return (ts);
900	if ((ts = runq_steal_from(&tdq->tdq_timeshare, tdq->tdq_ridx)) != NULL)
901		return (ts);
902	if (stealidle)
903		return (runq_steal(&tdq->tdq_idle));
904	return (NULL);
905}
906
907/*
908 * Sets the thread lock and ts_cpu to match the requested cpu.  Unlocks the
909 * current lock and returns with the assigned queue locked.  If this is
910 * via sched_switch() we leave the thread in a blocked state as an
911 * optimization.
912 */
913static inline struct tdq *
914sched_setcpu(struct td_sched *ts, int cpu, int flags)
915{
916	struct thread *td;
917	struct tdq *tdq;
918
919	THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
920
921	tdq = TDQ_CPU(cpu);
922	td = ts->ts_thread;
923	ts->ts_cpu = cpu;
924
925	/* If the lock matches just return the queue. */
926	if (td->td_lock == TDQ_LOCKPTR(tdq))
927		return (tdq);
928#ifdef notyet
929	/*
930	 * If the thread isn't running, its lock pointer is a
931	 * turnstile or a sleepqueue.  We can just lock_set without
932	 * blocking.
933	 */
934	if (TD_CAN_RUN(td)) {
935		TDQ_LOCK(tdq);
936		thread_lock_set(td, TDQ_LOCKPTR(tdq));
937		return (tdq);
938	}
939#endif
940	/*
941	 * The hard case, migration: we need to block the thread first to
942	 * prevent lock order reversals with other cpus' locks.
943	 */
944	thread_lock_block(td);
945	TDQ_LOCK(tdq);
946	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
947	return (tdq);
948}
949
950/*
951 * Find the thread queue running the lowest priority thread.
952 */
953static int
954tdq_lowestpri(void)
955{
956	struct tdq *tdq;
957	int lowpri;
958	int lowcpu;
959	int lowload;
960	int load;
961	int cpu;
962	int pri;
963
964	lowload = 0;
965	lowpri = lowcpu = 0;
966	for (cpu = 0; cpu <= mp_maxid; cpu++) {
967		if (CPU_ABSENT(cpu))
968			continue;
969		tdq = TDQ_CPU(cpu);
970		pri = tdq->tdq_lowpri;
971		load = TDQ_CPU(cpu)->tdq_load;
972		CTR4(KTR_ULE,
973		    "cpu %d pri %d lowcpu %d lowpri %d",
974		    cpu, pri, lowcpu, lowpri);
975		if (pri < lowpri)
976			continue;
977		if (lowpri && lowpri == pri && load > lowload)
978			continue;
979		lowpri = pri;
980		lowcpu = cpu;
981		lowload = load;
982	}
983
984	return (lowcpu);
985}
986
987/*
988 * Find the thread queue with the least load.
989 */
990static int
991tdq_lowestload(void)
992{
993	struct tdq *tdq;
994	int lowload;
995	int lowpri;
996	int lowcpu;
997	int load;
998	int cpu;
999	int pri;
1000
1001	lowcpu = 0;
1002	lowload = TDQ_CPU(0)->tdq_load;
1003	lowpri = TDQ_CPU(0)->tdq_lowpri;
1004	for (cpu = 1; cpu <= mp_maxid; cpu++) {
1005		if (CPU_ABSENT(cpu))
1006			continue;
1007		tdq = TDQ_CPU(cpu);
1008		load = tdq->tdq_load;
1009		pri = tdq->tdq_lowpri;
1010		CTR4(KTR_ULE, "cpu %d load %d lowcpu %d lowload %d",
1011		    cpu, load, lowcpu, lowload);
1012		if (load > lowload)
1013			continue;
1014		if (load == lowload && pri < lowpri)
1015			continue;
1016		lowcpu = cpu;
1017		lowload = load;
1018		lowpri = pri;
1019	}
1020
1021	return (lowcpu);
1022}
1023
1024/*
1025 * Pick the destination cpu for sched_add().  Respects affinity and makes
1026 * a determination based on load or priority of available processors.
1027 */
1028static int
1029sched_pickcpu(struct td_sched *ts, int flags)
1030{
1031	struct tdq *tdq;
1032	int self;
1033	int pri;
1034	int cpu;
1035
1036	cpu = self = PCPU_GET(cpuid);
1037	if (smp_started == 0)
1038		return (self);
1039	/*
1040	 * Don't migrate a running thread from sched_switch().
1041	 */
1042	if (flags & SRQ_OURSELF) {
1043		CTR1(KTR_ULE, "YIELDING %d",
1044		    curthread->td_priority);
1045		return (self);
1046	}
1047	pri = ts->ts_thread->td_priority;
1048	cpu = ts->ts_cpu;
1049	/*
1050	 * Regardless of affinity, if the last cpu is idle send it there.
1051	 */
1052	tdq = TDQ_CPU(cpu);
1053	if (tdq->tdq_lowpri > PRI_MIN_IDLE) {
1054		CTR5(KTR_ULE,
1055		    "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d",
1056		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
1057		    tdq->tdq_lowpri);
1058		return (ts->ts_cpu);
1059	}
1060	/*
1061	 * If we have affinity, try to place it on the cpu we last ran on.
1062	 */
1063	if (SCHED_AFFINITY(ts) && tdq->tdq_lowpri > pri) {
1064		CTR5(KTR_ULE,
1065		    "affinity for %d, ltick %d ticks %d pri %d curthread %d",
1066		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
1067		    tdq->tdq_lowpri);
1068		return (ts->ts_cpu);
1069	}
1070	/*
1071	 * Look for an idle group.
1072	 */
1073	CTR1(KTR_ULE, "tdq_idle %X", tdq_idle);
1074	cpu = ffs(tdq_idle);
1075	if (cpu)
1076		return (--cpu);
1077	/*
1078	 * If there are no idle cores, see if we can run the thread locally.  This may
1079	 * improve locality among sleepers and wakers when there is shared data.
1080	 */
1081	if (tryself && pri < curthread->td_priority) {
1082		CTR1(KTR_ULE, "tryself %d",
1083		    curthread->td_priority);
1084		return (self);
1085	}
1086	/*
1087	 * Now search for the cpu running the lowest priority thread with
1088	 * the least load.
1089	 */
1090	if (pick_pri)
1091		cpu = tdq_lowestpri();
1092	else
1093		cpu = tdq_lowestload();
1094	return (cpu);
1095}
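
/*
 * Summary of the selection order above (added for clarity): 1) stay put
 * when yielding from sched_switch(); 2) use the previous cpu if it is now
 * idle; 3) use the previous cpu if affinity still holds and we would
 * preempt there; 4) take any idle group; 5) run locally if tryself is set
 * and we beat curthread; 6) otherwise pick the lowest priority or least
 * loaded cpu depending on pick_pri.
 */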
1096
1097#endif	/* SMP */
1098
1099/*
1100 * Pick the highest priority task we have and return it.
1101 */
1102static struct td_sched *
1103tdq_choose(struct tdq *tdq)
1104{
1105	struct td_sched *ts;
1106
1107	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1108	ts = runq_choose(&tdq->tdq_realtime);
1109	if (ts != NULL)
1110		return (ts);
1111	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
1112	if (ts != NULL) {
1113		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
1114		    ("tdq_choose: Invalid priority on timeshare queue %d",
1115		    ts->ts_thread->td_priority));
1116		return (ts);
1117	}
1118
1119	ts = runq_choose(&tdq->tdq_idle);
1120	if (ts != NULL) {
1121		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
1122		    ("tdq_choose: Invalid priority on idle queue %d",
1123		    ts->ts_thread->td_priority));
1124		return (ts);
1125	}
1126
1127	return (NULL);
1128}
1129
1130/*
1131 * Initialize a thread queue.
1132 */
1133static void
1134tdq_setup(struct tdq *tdq)
1135{
1136
1137	if (bootverbose)
1138		printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
1139	runq_init(&tdq->tdq_realtime);
1140	runq_init(&tdq->tdq_timeshare);
1141	runq_init(&tdq->tdq_idle);
1142	tdq->tdq_load = 0;
1143}
1144
1145#ifdef SMP
1146static void
1147tdg_setup(struct tdq_group *tdg)
1148{
1149	if (bootverbose)
1150		printf("ULE: setup cpu group %d\n", TDG_ID(tdg));
1151	snprintf(tdg->tdg_name, sizeof(tdg->tdg_name),
1152	    "sched lock %d", (int)TDG_ID(tdg));
1153	mtx_init(&tdg->tdg_lock, tdg->tdg_name, "sched lock",
1154	    MTX_SPIN | MTX_RECURSE);
1155	LIST_INIT(&tdg->tdg_members);
1156	tdg->tdg_load = 0;
1157	tdg->tdg_transferable = 0;
1158	tdg->tdg_cpus = 0;
1159	tdg->tdg_mask = 0;
1160	tdg->tdg_cpumask = 0;
1161	tdg->tdg_idlemask = 0;
1162}
1163
1164static void
1165tdg_add(struct tdq_group *tdg, struct tdq *tdq)
1166{
1167	if (tdg->tdg_mask == 0)
1168		tdg->tdg_mask |= 1 << TDQ_ID(tdq);
1169	tdg->tdg_cpumask |= 1 << TDQ_ID(tdq);
1170	tdg->tdg_cpus++;
1171	tdq->tdq_group = tdg;
1172	tdq->tdq_lock = &tdg->tdg_lock;
1173	LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
1174	if (bootverbose)
1175		printf("ULE: adding cpu %d to group %d: cpus %d mask 0x%X\n",
1176		    TDQ_ID(tdq), TDG_ID(tdg), tdg->tdg_cpus, tdg->tdg_cpumask);
1177}
1178
1179static void
1180sched_setup_topology(void)
1181{
1182	struct tdq_group *tdg;
1183	struct cpu_group *cg;
1184	int balance_groups;
1185	struct tdq *tdq;
1186	int i;
1187	int j;
1188
1189	topology = 1;
1190	balance_groups = 0;
1191	for (i = 0; i < smp_topology->ct_count; i++) {
1192		cg = &smp_topology->ct_group[i];
1193		tdg = &tdq_groups[i];
1194		/*
1195		 * Initialize the group.
1196		 */
1197		tdg_setup(tdg);
1198		/*
1199		 * Find all of the group members and add them.
1200		 */
1201		for (j = 0; j < MAXCPU; j++) {
1202			if ((cg->cg_mask & (1 << j)) != 0) {
1203				tdq = TDQ_CPU(j);
1204				tdq_setup(tdq);
1205				tdg_add(tdg, tdq);
1206			}
1207		}
1208		if (tdg->tdg_cpus > 1)
1209			balance_groups = 1;
1210	}
1211	tdg_maxid = smp_topology->ct_count - 1;
1212	if (balance_groups)
1213		sched_balance_groups(NULL);
1214}
1215
1216static void
1217sched_setup_smp(void)
1218{
1219	struct tdq_group *tdg;
1220	struct tdq *tdq;
1221	int cpus;
1222	int i;
1223
1224	for (cpus = 0, i = 0; i < MAXCPU; i++) {
1225		if (CPU_ABSENT(i))
1226			continue;
1227		tdq = &tdq_cpu[i];
1228		tdg = &tdq_groups[i];
1229		/*
1230		 * Setup a tdq group with one member.
1231		 */
1232		tdg_setup(tdg);
1233		tdq_setup(tdq);
1234		tdg_add(tdg, tdq);
1235		cpus++;
1236	}
1237	tdg_maxid = cpus - 1;
1238}
1239
1240/*
1241 * Fake a topology with one group containing all CPUs.
1242 */
1243static void
1244sched_fake_topo(void)
1245{
1246#ifdef SCHED_FAKE_TOPOLOGY
1247	static struct cpu_top top;
1248	static struct cpu_group group;
1249
1250	top.ct_count = 1;
1251	top.ct_group = &group;
1252	group.cg_mask = all_cpus;
1253	group.cg_count = mp_ncpus;
1254	group.cg_children = 0;
1255	smp_topology = &top;
1256#endif
1257}
1258#endif
1259
1260/*
1261 * Setup the thread queues and initialize the topology based on MD
1262 * information.
1263 */
1264static void
1265sched_setup(void *dummy)
1266{
1267	struct tdq *tdq;
1268
1269	tdq = TDQ_SELF();
1270#ifdef SMP
1271	/*
1272	 * Initialize long-term cpu balancing algorithm.
1273	 */
1274	callout_init(&balco, CALLOUT_MPSAFE);
1275	callout_init(&gbalco, CALLOUT_MPSAFE);
1276	sched_fake_topo();
1277	/*
1278	 * Setup tdqs based on a topology configuration or vanilla SMP based
1279	 * on mp_maxid.
1280	 */
1281	if (smp_topology == NULL)
1282		sched_setup_smp();
1283	else
1284		sched_setup_topology();
1285	sched_balance(NULL);
1286#else
1287	tdq_setup(tdq);
1288	mtx_init(&tdq_lock, "sched lock", "sched lock", MTX_SPIN | MTX_RECURSE);
1289	tdq->tdq_lock = &tdq_lock;
1290#endif
1291	/*
1292	 * To avoid divide-by-zero, we set realstathz to a dummy value
1293	 * in case sched_clock() is called before sched_initticks().
1294	 */
1295	realstathz = hz;
1296	sched_slice = (realstathz/10);	/* ~100ms */
1297	tickincr = 1 << SCHED_TICK_SHIFT;
1298
1299	/* Add thread0's load since it's running. */
1300	TDQ_LOCK(tdq);
1301	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1302	tdq_load_add(tdq, &td_sched0);
1303	TDQ_UNLOCK(tdq);
1304}
1305
1306/*
1307 * This routine determines the tickincr after stathz and hz are setup.
1308 */
1309/* ARGSUSED */
1310static void
1311sched_initticks(void *dummy)
1312{
1313	int incr;
1314
1315	realstathz = stathz ? stathz : hz;
1316	sched_slice = (realstathz/10);	/* ~100ms */
1317
1318	/*
1319	 * tickincr is shifted out by 10 to avoid rounding errors due to
1320	 * hz not being evenly divisible by stathz on all platforms.
1321	 */
1322	incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1323	/*
1324	 * This does not work for values of stathz that are more than
1325	 * 1 << SCHED_TICK_SHIFT * hz.  In practice this does not happen.
1326	 */
1327	if (incr == 0)
1328		incr = 1;
1329	tickincr = incr;
1330#ifdef SMP
1331	/*
1332	 * Set steal thresh to roughly log2(mp_ncpus) but no greater than 4.  This
1333	 * prevents excess thrashing on large machines and excess idle on
1334	 * smaller machines.
1335	 */
1336	steal_thresh = min(ffs(mp_ncpus) - 1, 4);
1337	affinity = SCHED_AFFINITY_DEFAULT;
1338#endif
1339}
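
/*
 * Worked example (illustrative): with hz = 1000 and stathz = 133,
 * realstathz = 133, sched_slice = 13 (~98ms) and
 * tickincr = (1000 << 10) / 133 = 7699.  Each stathz tick thus credits
 * 7699 / 1024 ~= 7.5 hz-domain ticks of history, recovering the hz/stathz
 * ratio without floating point.
 */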
1340
1341
1342/*
1343 * This is the core of the interactivity algorithm.  Determines a score based
1344 * on past behavior.  It is the ratio of sleep time to run time scaled to
1345 * a [0, 100] integer.  This is the voluntary sleep time of a process, which
1346 * differs from the cpu usage because it does not account for time spent
1347 * waiting on a run-queue.  Would be prettier if we had floating point.
1348 */
1349static int
1350sched_interact_score(struct thread *td)
1351{
1352	struct td_sched *ts;
1353	int div;
1354
1355	ts = td->td_sched;
1356	/*
1357	 * The score is only needed if this is likely to be an interactive
1358	 * task.  Don't go through the expense of computing it if there's
1359	 * no chance.
1360	 */
1361	if (sched_interact <= SCHED_INTERACT_HALF &&
1362	    ts->ts_runtime >= ts->ts_slptime)
1363		return (SCHED_INTERACT_HALF);
1364
1365	if (ts->ts_runtime > ts->ts_slptime) {
1366		div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1367		return (SCHED_INTERACT_HALF +
1368		    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1369	}
1370	if (ts->ts_slptime > ts->ts_runtime) {
1371		div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1372		return (ts->ts_runtime / div);
1373	}
1374	/* runtime == slptime */
1375	if (ts->ts_runtime)
1376		return (SCHED_INTERACT_HALF);
1377
1378	/*
1379	 * This can happen if slptime and runtime are 0.
1380	 */
1381	return (0);
1382
1383}
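
/*
 * Worked example (illustrative): a thread that voluntarily slept for 3
 * seconds and ran for 1 (both stored shifted by SCHED_TICK_SHIFT) has
 * slptime > runtime, so div = max(1, slptime / 50) and the score is
 * runtime / div ~= 50 * 1 / 3 = 16, comfortably interactive (< 30).
 * Swapping the two values gives 50 + (50 - 16) = 84, firmly batch.
 */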
1384
1385/*
1386 * Scale the scheduling priority according to the "interactivity" of this
1387 * process.
1388 */
1389static void
1390sched_priority(struct thread *td)
1391{
1392	int score;
1393	int pri;
1394
1395	if (td->td_pri_class != PRI_TIMESHARE)
1396		return;
1397	/*
1398	 * If the score is interactive we place the thread in the realtime
1399	 * queue with a priority that is less than kernel and interrupt
1400	 * priorities.  These threads are not subject to nice restrictions.
1401	 *
1402	 * Scores greater than this are placed on the normal timeshare queue
1403	 * where the priority is partially decided by the most recent cpu
1404	 * utilization and the rest is decided by nice value.
1405	 */
1406	score = sched_interact_score(td);
1407	if (score < sched_interact) {
1408		pri = PRI_MIN_REALTIME;
1409		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
1410		    * score;
1411		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
1412		    ("sched_priority: invalid interactive priority %d score %d",
1413		    pri, score));
1414	} else {
1415		pri = SCHED_PRI_MIN;
1416		if (td->td_sched->ts_ticks)
1417			pri += SCHED_PRI_TICKS(td->td_sched);
1418		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
1419		KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
1420		    ("sched_priority: invalid priority %d: nice %d, "
1421		    "ticks %d ftick %d ltick %d tick pri %d",
1422		    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
1423		    td->td_sched->ts_ftick, td->td_sched->ts_ltick,
1424		    SCHED_PRI_TICKS(td->td_sched)));
1425	}
1426	sched_user_prio(td, pri);
1427
1428	return;
1429}
1430
1431/*
1432 * This routine enforces a maximum limit on the amount of scheduling history
1433 * kept.  It is called after either the slptime or runtime is adjusted.  This
1434 * function is ugly due to integer math.
1435 */
1436static void
1437sched_interact_update(struct thread *td)
1438{
1439	struct td_sched *ts;
1440	u_int sum;
1441
1442	ts = td->td_sched;
1443	sum = ts->ts_runtime + ts->ts_slptime;
1444	if (sum < SCHED_SLP_RUN_MAX)
1445		return;
1446	/*
1447	 * This only happens from two places:
1448	 * 1) We have added an unusual amount of run time from fork_exit.
1449	 * 2) We have added an unusual amount of sleep time from sched_sleep().
1450	 */
1451	if (sum > SCHED_SLP_RUN_MAX * 2) {
1452		if (ts->ts_runtime > ts->ts_slptime) {
1453			ts->ts_runtime = SCHED_SLP_RUN_MAX;
1454			ts->ts_slptime = 1;
1455		} else {
1456			ts->ts_slptime = SCHED_SLP_RUN_MAX;
1457			ts->ts_runtime = 1;
1458		}
1459		return;
1460	}
1461	/*
1462	 * If we have exceeded by more than 1/5th then the algorithm below
1463	 * will not bring us back into range.  Dividing by two here forces
1464	 * us back below SCHED_SLP_RUN_MAX.
1465	 */
1466	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1467		ts->ts_runtime /= 2;
1468		ts->ts_slptime /= 2;
1469		return;
1470	}
1471	ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1472	ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1473}
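
/*
 * Illustrative (hz = 1000): once runtime + slptime crosses
 * SCHED_SLP_RUN_MAX (5120000), the 4/5 scaling above decays both terms by
 * 20% per call, e.g. 4000000 + 1400000 becomes 3200000 + 1120000.  The
 * ratio, and hence the interactivity score, is preserved while the
 * history window stays bounded.
 */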
1474
1475/*
1476 * Scale back the interactivity history when a child thread is created.  The
1477 * history is inherited from the parent but the thread may behave totally
1478 * differently.  For example, a shell spawning a compiler process.  We want
1479 * to learn that the compiler is behaving badly very quickly.
1480 */
1481static void
1482sched_interact_fork(struct thread *td)
1483{
1484	int ratio;
1485	int sum;
1486
1487	sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
1488	if (sum > SCHED_SLP_RUN_FORK) {
1489		ratio = sum / SCHED_SLP_RUN_FORK;
1490		td->td_sched->ts_runtime /= ratio;
1491		td->td_sched->ts_slptime /= ratio;
1492	}
1493}
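
/*
 * Illustrative (hz = 1000): a child forked with 3 seconds of combined
 * history (sum = 3072000) against SCHED_SLP_RUN_FORK = 512000 gets
 * ratio = 6, so it starts with a sixth of the parent's history and can
 * establish its own interactivity within a second or so.
 */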
1494
1495/*
1496 * Called from proc0_init() to setup the scheduler fields.
1497 */
1498void
1499schedinit(void)
1500{
1501
1502	/*
1503	 * Set up the scheduler specific parts of proc0.
1504	 */
1505	proc0.p_sched = NULL; /* XXX */
1506	thread0.td_sched = &td_sched0;
1507	td_sched0.ts_ltick = ticks;
1508	td_sched0.ts_ftick = ticks;
1509	td_sched0.ts_thread = &thread0;
1510}
1511
1512/*
1513 * This is only somewhat accurate since, given many processes of the same
1514 * priority, they will switch when their slices run out, which will be
1515 * at most sched_slice stathz ticks.
1516 */
1517int
1518sched_rr_interval(void)
1519{
1520
1521	/* Convert sched_slice to hz */
1522	return (hz/(realstathz/sched_slice));
1523}
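
/*
 * Illustrative: with hz = 1000, realstathz = 127 and sched_slice = 12,
 * sched_rr_interval() returns 1000 / (127 / 12) = 1000 / 10 = 100 hz
 * ticks, the nominal ~100ms round-robin quantum.
 */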
1524
1525/*
1526 * Update the percent cpu tracking information when it is requested or
1527 * the total history exceeds the maximum.  We keep a sliding history of
1528 * tick counts that slowly decays.  This is less precise than the 4BSD
1529 * mechanism since it happens with less regular and frequent events.
1530 */
1531static void
1532sched_pctcpu_update(struct td_sched *ts)
1533{
1534
1535	if (ts->ts_ticks == 0)
1536		return;
1537	if (ticks - (hz / 10) < ts->ts_ltick &&
1538	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
1539		return;
1540	/*
1541	 * Adjust counters and watermark for pctcpu calc.
1542	 */
1543	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
1544		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
1545			    SCHED_TICK_TARG;
1546	else
1547		ts->ts_ticks = 0;
1548	ts->ts_ltick = ticks;
1549	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
1550}
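
/*
 * Illustrative decay (hz = 1000): a thread that accumulated
 * ts_ticks = 4096000 over a 16 second window (ticks - ts_ftick = 16000)
 * is rescaled to (4096000 / 16000) * 10000 = 2560000, the same average
 * rate expressed over exactly SCHED_TICK_TARG ticks, and ts_ftick is
 * pulled up to ticks - 10000 so the window stays 10 seconds wide.
 */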
1551
1552/*
1553 * Adjust the priority of a thread.  Move it to the appropriate run-queue
1554 * if necessary.  This is the back-end for several priority related
1555 * functions.
1556 */
1557static void
1558sched_thread_priority(struct thread *td, u_char prio)
1559{
1560	struct td_sched *ts;
1561
1562	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
1563	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
1564	    curthread->td_proc->p_comm);
1565	ts = td->td_sched;
1566	THREAD_LOCK_ASSERT(td, MA_OWNED);
1567	if (td->td_priority == prio)
1568		return;
1569
1570	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1571		/*
1572		 * If the priority has been elevated due to priority
1573		 * propagation, we may have to move ourselves to a new
1574		 * queue.  This could be optimized to not re-add in some
1575		 * cases.
1576		 */
1577		sched_rem(td);
1578		td->td_priority = prio;
1579		sched_add(td, SRQ_BORROWING);
1580	} else {
1581#ifdef SMP
1582		struct tdq *tdq;
1583
1584		tdq = TDQ_CPU(ts->ts_cpu);
1585		if (prio < tdq->tdq_lowpri)
1586			tdq->tdq_lowpri = prio;
1587#endif
1588		td->td_priority = prio;
1589	}
1590}
1591
1592/*
1593 * Update a thread's priority when it is lent another thread's
1594 * priority.
1595 */
1596void
1597sched_lend_prio(struct thread *td, u_char prio)
1598{
1599
1600	td->td_flags |= TDF_BORROWING;
1601	sched_thread_priority(td, prio);
1602}
1603
1604/*
1605 * Restore a thread's priority when priority propagation is
1606 * over.  The prio argument is the minimum priority the thread
1607 * needs to have to satisfy other possible priority lending
1608 * requests.  If the thread's regular priority is less
1609 * important than prio, the thread will keep a priority boost
1610 * of prio.
1611 */
1612void
1613sched_unlend_prio(struct thread *td, u_char prio)
1614{
1615	u_char base_pri;
1616
1617	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1618	    td->td_base_pri <= PRI_MAX_TIMESHARE)
1619		base_pri = td->td_user_pri;
1620	else
1621		base_pri = td->td_base_pri;
1622	if (prio >= base_pri) {
1623		td->td_flags &= ~TDF_BORROWING;
1624		sched_thread_priority(td, base_pri);
1625	} else
1626		sched_lend_prio(td, prio);
1627}
1628
1629/*
1630 * Standard entry for setting the priority to an absolute value.
1631 */
1632void
1633sched_prio(struct thread *td, u_char prio)
1634{
1635	u_char oldprio;
1636
1637	/* First, update the base priority. */
1638	td->td_base_pri = prio;
1639
1640	/*
1641	 * If the thread is borrowing another thread's priority, don't
1642	 * ever lower the priority.
1643	 */
1644	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1645		return;
1646
1647	/* Change the real priority. */
1648	oldprio = td->td_priority;
1649	sched_thread_priority(td, prio);
1650
1651	/*
1652	 * If the thread is on a turnstile, then let the turnstile update
1653	 * its state.
1654	 */
1655	if (TD_ON_LOCK(td) && oldprio != prio)
1656		turnstile_adjust(td, oldprio);
1657}
1658
1659/*
1660 * Set the base user priority; this does not affect the current running priority.
1661 */
1662void
1663sched_user_prio(struct thread *td, u_char prio)
1664{
1665	u_char oldprio;
1666
1667	td->td_base_user_pri = prio;
1668	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
1669		return;
1670	oldprio = td->td_user_pri;
1671	td->td_user_pri = prio;
1672
1673	if (TD_ON_UPILOCK(td) && oldprio != prio)
1674		umtx_pi_adjust(td, oldprio);
1675}
1676
1677void
1678sched_lend_user_prio(struct thread *td, u_char prio)
1679{
1680	u_char oldprio;
1681
1682	td->td_flags |= TDF_UBORROWING;
1683
1684	oldprio = td->td_user_pri;
1685	td->td_user_pri = prio;
1686
1687	if (TD_ON_UPILOCK(td) && oldprio != prio)
1688		umtx_pi_adjust(td, oldprio);
1689}
1690
1691void
1692sched_unlend_user_prio(struct thread *td, u_char prio)
1693{
1694	u_char base_pri;
1695
1696	base_pri = td->td_base_user_pri;
1697	if (prio >= base_pri) {
1698		td->td_flags &= ~TDF_UBORROWING;
1699		sched_user_prio(td, base_pri);
1700	} else
1701		sched_lend_user_prio(td, prio);
1702}
1703
1704/*
1705 * Add the thread passed as 'newtd' to the run queue before selecting
1706 * the next thread to run.  This is only used for KSE.
1707 */
1708static void
1709sched_switchin(struct tdq *tdq, struct thread *td)
1710{
1711#ifdef SMP
1712	spinlock_enter();
1713	TDQ_UNLOCK(tdq);
1714	thread_lock(td);
1715	spinlock_exit();
1716	sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
1717#else
1718	td->td_lock = TDQ_LOCKPTR(tdq);
1719#endif
1720	tdq_add(tdq, td, SRQ_YIELDING);
1721	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1722}
1723
1724/*
1725 * Handle migration from sched_switch().  This happens only for
1726 * cpu binding.
1727 */
1728static struct mtx *
1729sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1730{
1731	struct tdq *tdn;
1732
1733	tdn = TDQ_CPU(td->td_sched->ts_cpu);
1734#ifdef SMP
1735	/*
1736	 * Do the lock dance required to avoid LOR.  We grab an extra
1737	 * spinlock nesting to prevent preemption while we're
1738	 * not holding either run-queue lock.
1739	 */
1740	spinlock_enter();
1741	thread_block_switch(td);	/* This releases the lock on tdq. */
1742	TDQ_LOCK(tdn);
1743	tdq_add(tdn, td, flags);
1744	tdq_notify(td->td_sched);
1745	/*
1746	 * After we unlock tdn the new cpu still can't switch into this
1747	 * thread until we've unblocked it in cpu_switch().  The lock
1748	 * pointers may match in the case of HTT cores.  Don't unlock here
1749	 * or we can deadlock when the other CPU runs the IPI handler.
1750	 */
1751	if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
1752		TDQ_UNLOCK(tdn);
1753		TDQ_LOCK(tdq);
1754	}
1755	spinlock_exit();
1756#endif
1757	return (TDQ_LOCKPTR(tdn));
1758}
1759
1760/*
1761 * Block a thread for switching.  Similar to thread_block() but does not
1762 * bump the spin count.
1763 */
1764static inline struct mtx *
1765thread_block_switch(struct thread *td)
1766{
1767	struct mtx *lock;
1768
1769	THREAD_LOCK_ASSERT(td, MA_OWNED);
1770	lock = td->td_lock;
1771	td->td_lock = &blocked_lock;
1772	mtx_unlock_spin(lock);
1773
1774	return (lock);
1775}
1776
1777/*
1778 * Release a thread that was blocked with thread_block_switch().
1779 */
1780static inline void
1781thread_unblock_switch(struct thread *td, struct mtx *mtx)
1782{
1783	atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
1784	    (uintptr_t)mtx);
1785}
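
/*
 * Note (added for clarity): the release semantics of
 * atomic_store_rel_ptr() ensure that all of this cpu's prior writes to the
 * thread are visible before another cpu spinning in thread_lock() can
 * observe the new lock pointer and proceed.
 */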
1786
1787/*
1788 * Switch threads.  This function has to handle threads coming in while
1789 * blocked for some reason, running, or idle.  It also must deal with
1790 * migrating a thread from one queue to another as running threads may
1791 * be assigned elsewhere via binding.
1792 */
1793void
1794sched_switch(struct thread *td, struct thread *newtd, int flags)
1795{
1796	struct tdq *tdq;
1797	struct td_sched *ts;
1798	struct mtx *mtx;
1799	int srqflag;
1800	int cpuid;
1801
1802	THREAD_LOCK_ASSERT(td, MA_OWNED);
1803
1804	cpuid = PCPU_GET(cpuid);
1805	tdq = TDQ_CPU(cpuid);
1806	ts = td->td_sched;
1807	mtx = td->td_lock;
1808#ifdef SMP
1809	ts->ts_rltick = ticks;
1810	if (newtd && newtd->td_priority < tdq->tdq_lowpri)
1811		tdq->tdq_lowpri = newtd->td_priority;
1812#endif
1813	td->td_lastcpu = td->td_oncpu;
1814	td->td_oncpu = NOCPU;
1815	td->td_flags &= ~TDF_NEEDRESCHED;
1816	td->td_owepreempt = 0;
1817	/*
1818	 * The lock pointer in an idle thread should never change.  Reset the
1819	 * thread state to CAN_RUN as well.
1820	 */
1821	if (TD_IS_IDLETHREAD(td)) {
1822		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1823		TD_SET_CAN_RUN(td);
1824	} else if (TD_IS_RUNNING(td)) {
1825		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1826		tdq_load_rem(tdq, ts);
1827		srqflag = (flags & SW_PREEMPT) ?
1828		    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1829		    SRQ_OURSELF|SRQ_YIELDING;
1830		if (ts->ts_cpu == cpuid)
1831			tdq_add(tdq, td, srqflag);
1832		else
1833			mtx = sched_switch_migrate(tdq, td, srqflag);
1834	} else {
1835		/* This thread must be going to sleep. */
1836		TDQ_LOCK(tdq);
1837		mtx = thread_block_switch(td);
1838		tdq_load_rem(tdq, ts);
1839	}
1840	/*
1841	 * We enter here with the thread blocked and assigned to the
1842	 * appropriate cpu run-queue or sleep-queue and with the current
1843	 * thread-queue locked.
1844	 */
1845	TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
1846	/*
1847	 * If KSE assigned a new thread just add it here and let choosethread
1848	 * select the best one.
1849	 */
1850	if (newtd != NULL)
1851		sched_switchin(tdq, newtd);
1852	newtd = choosethread();
1853	/*
1854	 * Call the MD code to switch contexts if necessary.
1855	 */
1856	if (td != newtd) {
1857#ifdef	HWPMC_HOOKS
1858		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1859			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1860#endif
1861		cpu_switch(td, newtd, mtx);
1862		/*
1863		 * We may return from cpu_switch on a different cpu.  However,
1864		 * we always return with td_lock pointing to the current cpu's
1865		 * run queue lock.
1866		 */
1867		cpuid = PCPU_GET(cpuid);
1868		tdq = TDQ_CPU(cpuid);
1869		TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)td;
1870#ifdef	HWPMC_HOOKS
1871		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1872			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1873#endif
1874	} else
1875		thread_unblock_switch(td, mtx);
1876	/*
1877	 * Assert that all went well and return.
1878	 */
1879#ifdef SMP
1880	/* We should always get here with the lowest priority td possible */
1881	tdq->tdq_lowpri = td->td_priority;
1882#endif
1883	TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
1884	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1885	td->td_oncpu = cpuid;
1886}
1887
1888/*
1889 * Adjust thread priorities as a result of a nice request.
1890 */
1891void
1892sched_nice(struct proc *p, int nice)
1893{
1894	struct thread *td;
1895
1896	PROC_LOCK_ASSERT(p, MA_OWNED);
1897	PROC_SLOCK_ASSERT(p, MA_OWNED);
1898
1899	p->p_nice = nice;
1900	FOREACH_THREAD_IN_PROC(p, td) {
1901		thread_lock(td);
1902		sched_priority(td);
1903		sched_prio(td, td->td_base_user_pri);
1904		thread_unlock(td);
1905	}
1906}
1907
1908/*
1909 * Record the sleep time for the interactivity scorer.
1910 */
1911void
1912sched_sleep(struct thread *td)
1913{
1914
1915	THREAD_LOCK_ASSERT(td, MA_OWNED);
1916
1917	td->td_sched->ts_slptick = ticks;
1918}
1919
1920/*
1921 * Schedule a thread to resume execution and record how long it voluntarily
1922 * slept.  We also update the pctcpu, interactivity, and priority.
1923 */
1924void
1925sched_wakeup(struct thread *td)
1926{
1927	struct td_sched *ts;
1928	int slptick;
1929
1930	THREAD_LOCK_ASSERT(td, MA_OWNED);
1931	ts = td->td_sched;
1932	/*
1933	 * If we slept for more than a tick update our interactivity and
1934	 * priority.
1935	 */
1936	slptick = ts->ts_slptick;
1937	ts->ts_slptick = 0;
1938	if (slptick && slptick != ticks) {
1939		u_int hzticks;
1940
1941		hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
1942		ts->ts_slptime += hzticks;
1943		sched_interact_update(td);
1944		sched_pctcpu_update(ts);
1945		sched_priority(td);
1946	}
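	/*
	 * Accounting sketch: a thread that slept N ticks is credited with
	 * N << SCHED_TICK_SHIFT units of ts_slptime, the same fixed-point
	 * scale used for ts_runtime, so the interactivity scorer can
	 * compare sleep time and run time directly.
	 */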
1947	/* Reset the slice value after we sleep. */
1948	ts->ts_slice = sched_slice;
1949	sched_add(td, SRQ_BORING);
1950}
1951
1952/*
1953 * Penalize the parent for creating a new child and initialize the child's
1954 * priority.
1955 */
1956void
1957sched_fork(struct thread *td, struct thread *child)
1958{
1959	THREAD_LOCK_ASSERT(td, MA_OWNED);
1960	sched_fork_thread(td, child);
1961	/*
1962	 * Penalize the parent and child for forking.
1963	 */
1964	sched_interact_fork(child);
1965	sched_priority(child);
1966	td->td_sched->ts_runtime += tickincr;
1967	sched_interact_update(td);
1968	sched_priority(td);
1969}
1970
1971/*
1972 * Fork a new thread, may be within the same process.
1973 */
1974void
1975sched_fork_thread(struct thread *td, struct thread *child)
1976{
1977	struct td_sched *ts;
1978	struct td_sched *ts2;
1979
1980	/*
1981	 * Initialize child.
1982	 */
1983	THREAD_LOCK_ASSERT(td, MA_OWNED);
1984	sched_newthread(child);
1985	child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
1986	ts = td->td_sched;
1987	ts2 = child->td_sched;
1988	ts2->ts_cpu = ts->ts_cpu;
1989	ts2->ts_runq = NULL;
1990	/*
1991	 * Grab our parent's cpu estimation information and priority.
1992	 */
1993	ts2->ts_ticks = ts->ts_ticks;
1994	ts2->ts_ltick = ts->ts_ltick;
1995	ts2->ts_ftick = ts->ts_ftick;
1996	child->td_user_pri = td->td_user_pri;
1997	child->td_base_user_pri = td->td_base_user_pri;
1998	/*
1999	 * And update interactivity score.
2000	 */
2001	ts2->ts_slptime = ts->ts_slptime;
2002	ts2->ts_runtime = ts->ts_runtime;
2003	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
2004}
2005
2006/*
2007 * Adjust the priority class of a thread.
2008 */
2009void
2010sched_class(struct thread *td, int class)
2011{
2012
2013	THREAD_LOCK_ASSERT(td, MA_OWNED);
2014	if (td->td_pri_class == class)
2015		return;
2016
2017#ifdef SMP
2018	/*
2019	 * On SMP, if we're on the RUNQ we must adjust the transferable
2020	 * count because we could be changing to or from an interrupt
2021	 * class.
2022	 */
2023	if (TD_ON_RUNQ(td)) {
2024		struct tdq *tdq;
2025
2026		tdq = TDQ_CPU(td->td_sched->ts_cpu);
2027		if (THREAD_CAN_MIGRATE(td)) {
2028			tdq->tdq_transferable--;
2029			tdq->tdq_group->tdg_transferable--;
2030		}
2031		td->td_pri_class = class;
2032		if (THREAD_CAN_MIGRATE(td)) {
2033			tdq->tdq_transferable++;
2034			tdq->tdq_group->tdg_transferable++;
2035		}
2036	}
2037#endif
2038	td->td_pri_class = class;
2039}
2040
2041/*
2042 * Return some of the child's priority and interactivity to the parent.
2043 */
2044void
2045sched_exit(struct proc *p, struct thread *child)
2046{
2047	struct thread *td;
2048
2049	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
2050	    child, child->td_proc->p_comm, child->td_priority);
2051
2052	PROC_SLOCK_ASSERT(p, MA_OWNED);
2053	td = FIRST_THREAD_IN_PROC(p);
2054	sched_exit_thread(td, child);
2055}
2056
2057/*
2058 * Penalize another thread for the time spent on this one.  This worsens
2059 * the priority and interactivity of processes which schedule batch
2060 * jobs such as make.  This has little effect on the make process itself but
2061 * causes new processes spawned by it to receive worse scores immediately.
2062 */
2063void
2064sched_exit_thread(struct thread *td, struct thread *child)
2065{
2066
2067	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
2068	    child, child->td_proc->p_comm, child->td_priority);
2069
2070#ifdef KSE
2071	/*
2072	 * KSE forks and exits so often that this penalty causes short-lived
2073	 * threads to always be non-interactive.  This causes mozilla to
2074	 * crawl under load.
2075	 */
2076	if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
2077		return;
2078#endif
2079	/*
2080	 * Give the child's runtime to the parent without returning the
2081	 * sleep time as a penalty to the parent.  This causes shells that
2082	 * launch expensive things to mark their children as expensive.
2083	 */
2084	thread_lock(td);
2085	td->td_sched->ts_runtime += child->td_sched->ts_runtime;
2086	sched_interact_update(td);
2087	sched_priority(td);
2088	thread_unlock(td);
2089}
2090
2091/*
2092 * Fix priorities on return to user-space.  Priorities may be elevated due
2093 * to static priorities in msleep() or similar.
2094 */
2095void
2096sched_userret(struct thread *td)
2097{
2098	/*
2099	 * XXX we cheat slightly on the locking here to avoid locking in
2100	 * the usual case.  Setting td_priority here is essentially an
2101	 * incomplete workaround for not setting it properly elsewhere.
2102	 * Now that some interrupt handlers are threads, not setting it
2103	 * properly elsewhere can clobber it in the window between setting
2104	 * it here and returning to user mode, so don't waste time setting
2105	 * it perfectly here.
2106	 */
2107	KASSERT((td->td_flags & TDF_BORROWING) == 0,
2108	    ("thread with borrowed priority returning to userland"));
2109	if (td->td_priority != td->td_user_pri) {
2110		thread_lock(td);
2111		td->td_priority = td->td_user_pri;
2112		td->td_base_pri = td->td_user_pri;
2113		thread_unlock(td);
2114	}
2115}
2116
2117/*
2118 * Handle a stathz tick.  This is really only relevant for timeshare
2119 * threads.
2120 */
2121void
2122sched_clock(struct thread *td)
2123{
2124	struct tdq *tdq;
2125	struct td_sched *ts;
2126
2127	THREAD_LOCK_ASSERT(td, MA_OWNED);
2128	tdq = TDQ_SELF();
2129	/*
2130	 * Advance the insert index once for each tick to ensure that all
2131	 * threads get a chance to run.
2132	 */
2133	if (tdq->tdq_idx == tdq->tdq_ridx) {
2134		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
2135		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
2136			tdq->tdq_ridx = tdq->tdq_idx;
2137	}
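	/*
	 * Illustration: newly queued timeshare threads are inserted
	 * relative to tdq_idx while removal happens at tdq_ridx; letting
	 * the removal index chase the insert index around the circular
	 * array of RQ_NQS queues bounds how long any queued thread can be
	 * passed over.
	 */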
2138	ts = td->td_sched;
2139	/*
2140	 * We only do slicing code for TIMESHARE threads.
2141	 */
2142	if (td->td_pri_class != PRI_TIMESHARE)
2143		return;
2144	/*
2145	 * We used a tick; charge it to the thread so that we can compute our
2146	 * interactivity.
2147	 */
2148	td->td_sched->ts_runtime += tickincr;
2149	sched_interact_update(td);
2150	/*
2151	 * We used up one time slice.
2152	 */
2153	if (--ts->ts_slice > 0)
2154		return;
2155	/*
2156	 * We're out of time, recompute priorities and requeue.
2157	 */
2158	sched_priority(td);
2159	td->td_flags |= TDF_NEEDRESCHED;
2160}
2161
2162/*
2163 * Called once per hz tick.  Used for cpu utilization information.  This
2164 * is easier than trying to scale based on stathz.
2165 */
2166void
2167sched_tick(void)
2168{
2169	struct td_sched *ts;
2170
2171	ts = curthread->td_sched;
2172	/* Adjust ticks for pctcpu */
2173	ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2174	ts->ts_ltick = ticks;
2175	/*
2176	 * Update if we've exceeded our desired tick threshold by over one
2177	 * second.
2178	 */
2179	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2180		sched_pctcpu_update(ts);
2181}
2182
2183/*
2184 * Return whether the current CPU has runnable tasks.  Used for in-kernel
2185 * cooperative idle threads.
2186 */
2187int
2188sched_runnable(void)
2189{
2190	struct tdq *tdq;
2191	int load;
2192
2193	load = 1;
2194
2195	tdq = TDQ_SELF();
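	/*
	 * The running thread is included in tdq_load (load is only removed
	 * when a thread sleeps or exits), so a non-idle caller must
	 * discount itself before checking for other runnable work.
	 */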
2196	if ((curthread->td_flags & TDF_IDLETD) != 0) {
2197		if (tdq->tdq_load > 0)
2198			goto out;
2199	} else
2200		if (tdq->tdq_load - 1 > 0)
2201			goto out;
2202	load = 0;
2203out:
2204	return (load);
2205}
2206
2207/*
2208 * Choose the highest priority thread to run.  The thread is removed from
2209 * the run-queue while running; however, the load remains.  For SMP we set
2210 * the tdq in the global idle bitmask if it idles here.
2211 */
2212struct thread *
2213sched_choose(void)
2214{
2215#ifdef SMP
2216	struct tdq_group *tdg;
2217#endif
2218	struct td_sched *ts;
2219	struct tdq *tdq;
2220
2221	tdq = TDQ_SELF();
2222	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2223	ts = tdq_choose(tdq);
2224	if (ts) {
2225		tdq_runq_rem(tdq, ts);
2226		return (ts->ts_thread);
2227	}
2228#ifdef SMP
2229	/*
2230	 * We only set the idled bit when all of the cpus in the group are
2231	 * idle.  Otherwise we could get into a situation where a thread bounces
2232	 * back and forth between two idle cores on separate physical CPUs.
2233	 */
2234	tdg = tdq->tdq_group;
2235	tdg->tdg_idlemask |= PCPU_GET(cpumask);
2236	if (tdg->tdg_idlemask == tdg->tdg_cpumask)
2237		atomic_set_int(&tdq_idle, tdg->tdg_mask);
2238	tdq->tdq_lowpri = PRI_MAX_IDLE;
2239#endif
2240	return (PCPU_GET(idlethread));
2241}
2242
2243/*
2244 * Set owepreempt if necessary.  Preemption never happens directly in ULE,
2245 * we always request it once we exit a critical section.
2246 */
2247static inline void
2248sched_setpreempt(struct thread *td)
2249{
2250	struct thread *ctd;
2251	int cpri;
2252	int pri;
2253
2254	ctd = curthread;
2255	pri = td->td_priority;
2256	cpri = ctd->td_priority;
2257	if (pri < cpri)
2258		ctd->td_flags |= TDF_NEEDRESCHED;
2259	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2260		return;
2261	/*
2262	 * Always preempt IDLE-class threads.  Otherwise preempt only when the
2263	 * new priority is numerically at or below preempt_thresh (an ithread).
2264	 */
2265	if (pri > preempt_thresh && cpri < PRI_MIN_IDLE)
2266		return;
2267	ctd->td_owepreempt = 1;
2268	return;
2269}
2270
2271/*
2272 * Add a thread to a thread queue.  Initialize the priority, slice, and
2273 * runq, and add the thread to the appropriate queue.  This is the
2274 * internal function called when the tdq is predetermined.
2275 */
2276void
2277tdq_add(struct tdq *tdq, struct thread *td, int flags)
2278{
2279	struct td_sched *ts;
2280	int class;
2281#ifdef SMP
2282	int cpumask;
2283#endif
2284
2285	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2286	KASSERT((td->td_inhibitors == 0),
2287	    ("sched_add: trying to run inhibited thread"));
2288	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
2289	    ("sched_add: bad thread state"));
2290	KASSERT(td->td_proc->p_sflag & PS_INMEM,
2291	    ("sched_add: process swapped out"));
2292
2293	ts = td->td_sched;
2294	class = PRI_BASE(td->td_pri_class);
2295	TD_SET_RUNQ(td);
2296	if (ts->ts_slice == 0)
2297		ts->ts_slice = sched_slice;
2298	/*
2299	 * Pick the run queue based on priority.
2300	 */
2301	if (td->td_priority <= PRI_MAX_REALTIME)
2302		ts->ts_runq = &tdq->tdq_realtime;
2303	else if (td->td_priority <= PRI_MAX_TIMESHARE)
2304		ts->ts_runq = &tdq->tdq_timeshare;
2305	else
2306		ts->ts_runq = &tdq->tdq_idle;
2307#ifdef SMP
2308	cpumask = 1 << ts->ts_cpu;
2309	/*
2310	 * If we had been idle, clear our bit in the group and potentially
2311	 * the global bitmap.
2312	 */
2313	if ((class != PRI_IDLE && class != PRI_ITHD) &&
2314	    (tdq->tdq_group->tdg_idlemask & cpumask) != 0) {
2315		/*
2316		 * Check to see if our group is unidling, and if so, remove it
2317		 * from the global idle mask.
2318		 */
2319		if (tdq->tdq_group->tdg_idlemask ==
2320		    tdq->tdq_group->tdg_cpumask)
2321			atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
2322		/*
2323		 * Now remove ourselves from the group specific idle mask.
2324		 */
2325		tdq->tdq_group->tdg_idlemask &= ~cpumask;
2326	}
2327	if (td->td_priority < tdq->tdq_lowpri)
2328		tdq->tdq_lowpri = td->td_priority;
2329#endif
2330	tdq_runq_add(tdq, ts, flags);
2331	tdq_load_add(tdq, ts);
2332}
2333
2334/*
2335 * Select the target thread queue and add a thread to it.  Request
2336 * preemption or IPI a remote processor if required.
2337 */
2338void
2339sched_add(struct thread *td, int flags)
2340{
2341	struct td_sched *ts;
2342	struct tdq *tdq;
2343#ifdef SMP
2344	int cpuid;
2345	int cpu;
2346#endif
2347	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
2348	    td, td->td_proc->p_comm, td->td_priority, curthread,
2349	    curthread->td_proc->p_comm);
2350	THREAD_LOCK_ASSERT(td, MA_OWNED);
2351	ts = td->td_sched;
2352	/*
2353	 * Recalculate the priority before we select the target cpu or
2354	 * run-queue.
2355	 */
2356	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2357		sched_priority(td);
2358#ifdef SMP
2359	cpuid = PCPU_GET(cpuid);
2360	/*
2361	 * Pick the destination cpu and if it isn't ours transfer to the
2362	 * target cpu.
2363	 */
2364	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_MIGRATE(td))
2365		cpu = cpuid;
2366	else if (!THREAD_CAN_MIGRATE(td))
2367		cpu = ts->ts_cpu;
2368	else
2369		cpu = sched_pickcpu(ts, flags);
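	/*
	 * The three cases above: interrupt-priority threads stay on the
	 * current cpu, threads that may not migrate stay on their assigned
	 * ts_cpu, and everything else is placed by sched_pickcpu().
	 */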
2370	tdq = sched_setcpu(ts, cpu, flags);
2371	tdq_add(tdq, td, flags);
2372	if (cpu != cpuid) {
2373		tdq_notify(ts);
2374		return;
2375	}
2376#else
2377	tdq = TDQ_SELF();
2378	TDQ_LOCK(tdq);
2379	/*
2380	 * Now that the thread is moving to the run-queue, set the lock
2381	 * to the scheduler's lock.
2382	 */
2383	thread_lock_set(td, TDQ_LOCKPTR(tdq));
2384	tdq_add(tdq, td, flags);
2385#endif
2386	if (!(flags & SRQ_YIELDING))
2387		sched_setpreempt(td);
2388}
2389
2390/*
2391 * Remove a thread from a run-queue without running it.  This is used
2392 * when we're stealing a thread from a remote queue.  Otherwise all threads
2393 * exit by calling sched_exit_thread() and sched_throw() themselves.
2394 */
2395void
2396sched_rem(struct thread *td)
2397{
2398	struct tdq *tdq;
2399	struct td_sched *ts;
2400
2401	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
2402	    td, td->td_proc->p_comm, td->td_priority, curthread,
2403	    curthread->td_proc->p_comm);
2404	ts = td->td_sched;
2405	tdq = TDQ_CPU(ts->ts_cpu);
2406	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2407	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2408	KASSERT(TD_ON_RUNQ(td),
2409	    ("sched_rem: thread not on run queue"));
2410	tdq_runq_rem(tdq, ts);
2411	tdq_load_rem(tdq, ts);
2412	TD_SET_CAN_RUN(td);
2413}
2414
2415/*
2416 * Fetch cpu utilization information.  Updates on demand.
2417 */
2418fixpt_t
2419sched_pctcpu(struct thread *td)
2420{
2421	fixpt_t pctcpu;
2422	struct td_sched *ts;
2423
2424	pctcpu = 0;
2425	ts = td->td_sched;
2426	if (ts == NULL)
2427		return (0);
2428
2429	thread_lock(td);
2430	if (ts->ts_ticks) {
2431		int rtick;
2432
2433		sched_pctcpu_update(ts);
2434		/* How many rticks per second? */
2435		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
2436		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
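		/*
		 * Worked example: with rtick == hz / 2 the expression
		 * reduces to FSCALE / 2, which userland tools report as
		 * 50% cpu.
		 */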
2437	}
2438	td->td_proc->p_swtime = ts->ts_ltick - ts->ts_ftick;
2439	thread_unlock(td);
2440
2441	return (pctcpu);
2442}
2443
2444/*
2445 * Bind a thread to a target cpu.
2446 */
2447void
2448sched_bind(struct thread *td, int cpu)
2449{
2450	struct td_sched *ts;
2451
2452	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
2453	ts = td->td_sched;
2454	if (ts->ts_flags & TSF_BOUND)
2455		sched_unbind(td);
2456	ts->ts_flags |= TSF_BOUND;
2457#ifdef SMP
2458	sched_pin();
2459	if (PCPU_GET(cpuid) == cpu)
2460		return;
2461	ts->ts_cpu = cpu;
2462	/* When we return from mi_switch we'll be on the correct cpu. */
2463	mi_switch(SW_VOL, NULL);
2464#endif
2465}
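
/*
 * A typical usage sketch (illustrative, not code from this file):
 *
 *	thread_lock(curthread);
 *	sched_bind(curthread, cpu);	-- may switch to the target cpu
 *	thread_unlock(curthread);
 *	-- ... work that must run on 'cpu' ...
 *	thread_lock(curthread);
 *	sched_unbind(curthread);
 *	thread_unlock(curthread);
 */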
2466
2467/*
2468 * Release a bound thread.
2469 */
2470void
2471sched_unbind(struct thread *td)
2472{
2473	struct td_sched *ts;
2474
2475	THREAD_LOCK_ASSERT(td, MA_OWNED);
2476	ts = td->td_sched;
2477	if ((ts->ts_flags & TSF_BOUND) == 0)
2478		return;
2479	ts->ts_flags &= ~TSF_BOUND;
2480#ifdef SMP
2481	sched_unpin();
2482#endif
2483}
2484
2485int
2486sched_is_bound(struct thread *td)
2487{
2488	THREAD_LOCK_ASSERT(td, MA_OWNED);
2489	return (td->td_sched->ts_flags & TSF_BOUND);
2490}
2491
2492/*
2493 * Basic yield call.
2494 */
2495void
2496sched_relinquish(struct thread *td)
2497{
2498	thread_lock(td);
2499	if (td->td_pri_class == PRI_TIMESHARE)
2500		sched_prio(td, PRI_MAX_TIMESHARE);
2501	SCHED_STAT_INC(switch_relinquish);
2502	mi_switch(SW_VOL, NULL);
2503	thread_unlock(td);
2504}
2505
2506/*
2507 * Return the total system load.
2508 */
2509int
2510sched_load(void)
2511{
2512#ifdef SMP
2513	int total;
2514	int i;
2515
2516	total = 0;
2517	for (i = 0; i <= tdg_maxid; i++)
2518		total += TDQ_GROUP(i)->tdg_load;
2519	return (total);
2520#else
2521	return (TDQ_SELF()->tdq_sysload);
2522#endif
2523}
2524
2525int
2526sched_sizeof_proc(void)
2527{
2528	return (sizeof(struct proc));
2529}
2530
2531int
2532sched_sizeof_thread(void)
2533{
2534	return (sizeof(struct thread) + sizeof(struct td_sched));
2535}
2536
2537/*
2538 * The actual idle process.
2539 */
2540void
2541sched_idletd(void *dummy)
2542{
2543	struct thread *td;
2544	struct tdq *tdq;
2545
2546	td = curthread;
2547	tdq = TDQ_SELF();
2548	mtx_assert(&Giant, MA_NOTOWNED);
2549	/* ULE relies on preemption for idle interruption. */
2550	for (;;) {
2551#ifdef SMP
2552		if (tdq_idled(tdq))
2553			cpu_idle();
2554#else
2555		cpu_idle();
2556#endif
2557	}
2558}
2559
2560/*
2561 * A CPU is entering for the first time or a thread is exiting.
2562 */
2563void
2564sched_throw(struct thread *td)
2565{
2566	struct tdq *tdq;
2567
2568	tdq = TDQ_SELF();
2569	if (td == NULL) {
2570		/* Correct spinlock nesting and acquire the correct lock. */
2571		TDQ_LOCK(tdq);
2572		spinlock_exit();
2573	} else {
2574		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2575		tdq_load_rem(tdq, td->td_sched);
2576	}
2577	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
2578	PCPU_SET(switchtime, cpu_ticks());
2579	PCPU_SET(switchticks, ticks);
2580	cpu_throw(td, choosethread());	/* doesn't return */
2581}
2582
2583/*
2584 * This is called from fork_exit().  Just acquire the correct locks and
2585 * let fork do the rest of the work.
2586 */
2587void
2588sched_fork_exit(struct thread *td)
2589{
2590	struct td_sched *ts;
2591	struct tdq *tdq;
2592	int cpuid;
2593
2594	/*
2595	 * Finish setting up thread glue so that it begins execution in a
2596	 * non-nested critical section with the scheduler lock held.
2597	 */
2598	cpuid = PCPU_GET(cpuid);
2599	tdq = TDQ_CPU(cpuid);
2600	ts = td->td_sched;
2601	if (TD_IS_IDLETHREAD(td))
2602		td->td_lock = TDQ_LOCKPTR(tdq);
2603	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2604	td->td_oncpu = cpuid;
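	/*
	 * As in sched_switch(), record this thread as the owner of the tdq
	 * lock that was handed to us across cpu_switch().
	 */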
2605	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)td;
2606	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
2607}
2608
2609static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
2610    "Scheduler");
2611SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
2612    "Scheduler name");
2613SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
2614    "Slice size for timeshare threads");
2615SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
2616    "Interactivity score threshold");
2617SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
2618    0, "Min priority for preemption, lower priorities have greater precedence");
2619#ifdef SMP
2620SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0,
2621    "Pick the target cpu based on priority rather than load.");
2622SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
2623    "Number of hz ticks to keep thread affinity for");
2624SYSCTL_INT(_kern_sched, OID_AUTO, tryself, CTLFLAG_RW, &tryself, 0, "");
2625SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
2626    "Enables the long-term load balancer");
2627SYSCTL_INT(_kern_sched, OID_AUTO, balance_secs, CTLFLAG_RW, &balance_secs, 0,
2628    "Average frequency in seconds to run the long-term balancer");
2629SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
2630    "Steals work from another hyper-threaded core on idle");
2631SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
2632    "Attempts to steal work from other cores before idling");
2633SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
2634    "Minimum load on remote cpu before we'll steal");
2635SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0,
2636    "True when a topology has been specified by the MD code.");
2637#endif
2638
2639/* ps compat */
2640static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
2641SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
2642
2643
2644#define KERN_SWITCH_INCLUDE 1
2645#include "kern/kern_switch.c"
2646