/*-
 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 167012 2007-02-26 08:26:44Z kmacy $");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#ifndef PREEMPTION
#error	"SCHED_ULE requires options PREEMPTION"
#endif

/*
 * TODO:
 *	Pick idle from affinity group or self group first.
 *	Implement pick_score.
 */

#define	KTR_ULE	0x0		/* Enable for pickpri debugging. */

/*
 * Thread scheduler specific section.
 */
struct td_sched {
	TAILQ_ENTRY(td_sched) ts_procq;	/* (j/z) Run queue. */
	int		ts_flags;	/* (j) TSF_* flags. */
	struct thread	*ts_thread;	/* (*) Active associated thread. */
	u_char		ts_rqindex;	/* (j) Run queue index. */
	int		ts_slptime;
	int		ts_slice;
	struct runq	*ts_runq;
	u_char		ts_cpu;		/* CPU that we have affinity for. */
	/* The following variables are only used for pctcpu calculation */
	int		ts_ltick;	/* Last tick that we were running on */
	int		ts_ftick;	/* First tick that we were running on */
	int		ts_ticks;	/* Tick count */
#ifdef SMP
	int		ts_rltick;	/* Real last tick, for affinity. */
#endif

	/* originally from kg_sched */
	u_int	skg_slptime;		/* Number of ticks we vol. slept */
	u_int	skg_runtime;		/* Number of ticks we were running */
};
/* flags kept in ts_flags */
#define	TSF_BOUND	0x0001		/* Thread can not migrate. */
#define	TSF_XFERABLE	0x0002		/* Thread was added as transferable. */

static struct td_sched td_sched0;

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:	Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:	Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:	Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:	Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:	Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:	Gives the amount of time we've been recording ticks.
 */
#define	SCHED_TICK_SECS		10
#define	SCHED_TICK_TARG		(hz * SCHED_TICK_SECS)
#define	SCHED_TICK_MAX		(SCHED_TICK_TARG + hz)
#define	SCHED_TICK_SHIFT	10
#define	SCHED_TICK_HZ(ts)	((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define	SCHED_TICK_TOTAL(ts)	(max((ts)->ts_ltick - (ts)->ts_ftick, hz))
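
/*
 * Worked example (illustrative numbers, not asserted by this file): with
 * hz = 1000, SCHED_TICK_TARG is 10000 ticks (ten seconds) and
 * SCHED_TICK_MAX is 11000.  sched_tick() adds 1 << SCHED_TICK_SHIFT to
 * ts_ticks on every clock tick, so SCHED_TICK_HZ() shifts that scale
 * factor back out: a thread that accumulated 3000 ticks of cpu has
 * ts_ticks = 3000 << 10 and SCHED_TICK_HZ() = 3000.  SCHED_TICK_TOTAL()
 * is the width of the sampling window (ltick - ftick), clamped to at
 * least one second so ratios taken early in a thread's life are not
 * wildly inflated.
 */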

/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:	Priority range for utilization dependent priorities.
 * PRI_NRESV:	Number of nice values.
 * PRI_TICKS:	Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:	Determines the part of the priority inherited from nice.
 */
#define	SCHED_PRI_NRESV		(PRIO_MAX - PRIO_MIN)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_MIN		(PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
#define	SCHED_PRI_MAX		(PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
#define	SCHED_PRI_RANGE		(SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
#define	SCHED_PRI_TICKS(ts)						\
    (SCHED_TICK_HZ((ts)) /						\
    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define	SCHED_PRI_NICE(nice)	(nice)
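
/*
 * Worked example (assumes the stock priority constants, where the
 * timeshare range is 160..223 and nice spans -20..20, so SCHED_PRI_NHALF
 * is 20, SCHED_PRI_MIN is 180, SCHED_PRI_MAX is 203 and SCHED_PRI_RANGE
 * is 24): a thread with SCHED_TICK_HZ() = 5000 over a SCHED_TICK_TOTAL()
 * of 10000 ticks gets 5000 / (roundup(10000, 24) / 24) = 5000 / 417 = 11,
 * so it lands at priority 180 + 11 plus its nice value.
 */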

/*
 * These determine the interactivity of a process.  Interactivity differs from
 * cpu utilization in that it expresses the voluntary time slept vs time ran
 * while cpu utilization includes all time not running.  This more accurately
 * models the intent of the thread.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << SCHED_TICK_SHIFT)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << SCHED_TICK_SHIFT)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * tickincr:		Converts a stathz tick into a hz domain scaled by
 *			the shift factor.  Without the shift the error rate
 *			due to rounding would be unacceptably high.
 * realstathz:		stathz is sometimes 0 and run off of hz.
 * sched_slice:		Runtime of each thread before rescheduling.
 */
static int sched_interact = SCHED_INTERACT_THRESH;
static int realstathz;
static int tickincr;
static int sched_slice;

/*
 * tdq - per processor runqs and statistics.
 */
struct tdq {
	struct runq	tdq_idle;		/* Queue of IDLE threads. */
	struct runq	tdq_timeshare;		/* timeshare run queue. */
	struct runq	tdq_realtime;		/* real-time run queue. */
	u_char		tdq_idx;		/* Current insert index. */
	u_char		tdq_ridx;		/* Current removal index. */
	short		tdq_flags;		/* Thread queue flags */
	int		tdq_load;		/* Aggregate load. */
#ifdef SMP
	int		tdq_transferable;
	LIST_ENTRY(tdq)	tdq_siblings;		/* Next in tdq group. */
	struct tdq_group *tdq_group;		/* Our processor group. */
#else
	int		tdq_sysload;		/* For loadavg, !ITHD load. */
#endif
};

#define	TDQF_BUSY	0x0001			/* Queue is marked as busy */

#ifdef SMP
/*
 * tdq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not
 * NUMA.  In a NUMA environment we'd want an idle bitmap per group and a
 * two-tiered load balancer.
 */
struct tdq_group {
	int	tdg_cpus;		/* Count of CPUs in this tdq group. */
	cpumask_t tdg_cpumask;		/* Mask of cpus in this group. */
	cpumask_t tdg_idlemask;		/* Idle cpus in this group. */
	cpumask_t tdg_mask;		/* Bit mask for first cpu. */
	int	tdg_load;		/* Total load of this group. */
	int	tdg_transferable;	/* Transferable load of this group. */
	LIST_HEAD(, tdq) tdg_members;	/* Linked list of all members. */
};

#define	SCHED_AFFINITY_DEFAULT	(hz / 100)
#define	SCHED_AFFINITY(ts)	((ts)->ts_rltick > ticks - affinity)

/*
 * Run-time tunables.
 */
static int rebalance = 0;
static int pick_pri = 1;
static int affinity;
static int tryself = 1;
static int tryselfidle = 1;
static int ipi_ast = 0;
static int ipi_preempt = 1;
static int ipi_thresh = PRI_MIN_KERN;
static int steal_htt = 1;
static int steal_busy = 1;
static int busy_thresh = 4;

/*
 * One thread queue per processor.
 */
static volatile cpumask_t tdq_idle;
static volatile cpumask_t tdq_busy;
static int tdg_maxid;
static struct tdq	tdq_cpu[MAXCPU];
static struct tdq_group tdq_groups[MAXCPU];
static int bal_tick;
static int gbal_tick;
static int balance_groups;

#define	TDQ_SELF()	(&tdq_cpu[PCPU_GET(cpuid)])
#define	TDQ_CPU(x)	(&tdq_cpu[(x)])
#define	TDQ_ID(x)	((x) - tdq_cpu)
#define	TDQ_GROUP(x)	(&tdq_groups[(x)])
#else	/* !SMP */
static struct tdq	tdq_cpu;

#define	TDQ_SELF()	(&tdq_cpu)
#define	TDQ_CPU(x)	(&tdq_cpu)
#endif

static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct td_sched *);
static inline void sched_pin_td(struct thread *td);
static inline void sched_unpin_td(struct thread *td);

/* Operations on per processor queues */
static struct td_sched * tdq_choose(struct tdq *);
static void tdq_setup(struct tdq *);
static void tdq_load_add(struct tdq *, struct td_sched *);
static void tdq_load_rem(struct tdq *, struct td_sched *);
static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
void tdq_print(int cpu);
static void runq_print(struct runq *rq);
#ifdef SMP
static int tdq_pickidle(struct tdq *, struct td_sched *);
static int tdq_pickpri(struct tdq *, struct td_sched *, int);
static struct td_sched *runq_steal(struct runq *);
static void sched_balance(void);
static void sched_balance_groups(void);
static void sched_balance_group(struct tdq_group *);
static void sched_balance_pair(struct tdq *, struct tdq *);
static void sched_smp_tick(struct thread *);
static void tdq_move(struct tdq *, int);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct td_sched *);
static struct td_sched *tdq_steal(struct tdq *, int);

#define	THREAD_CAN_MIGRATE(td)	 ((td)->td_pinned == 0)
#endif

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static void sched_initticks(void *dummy);
SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)

static inline void
sched_pin_td(struct thread *td)
{
	td->td_pinned++;
}

static inline void
sched_unpin_td(struct thread *td)
{
	td->td_pinned--;
}

static void
runq_print(struct runq *rq)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;
	int j;
	int i;

	for (i = 0; i < RQB_LEN; i++) {
		printf("\t\trunq bits %d 0x%zx\n",
		    i, rq->rq_status.rqb_bits[i]);
		for (j = 0; j < RQB_BPW; j++)
			if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
				pri = j + (i << RQB_L2BPW);
				rqh = &rq->rq_queues[pri];
				TAILQ_FOREACH(ts, rqh, ts_procq) {
					printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
					    ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
				}
			}
	}
}

void
tdq_print(int cpu)
{
	struct tdq *tdq;

	tdq = TDQ_CPU(cpu);

	printf("tdq:\n");
	printf("\tload:           %d\n", tdq->tdq_load);
	printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
	printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
	printf("\trealtime runq:\n");
	runq_print(&tdq->tdq_realtime);
	printf("\ttimeshare runq:\n");
	runq_print(&tdq->tdq_timeshare);
	printf("\tidle runq:\n");
	runq_print(&tdq->tdq_idle);
#ifdef SMP
	printf("\tload transferable: %d\n", tdq->tdq_transferable);
#endif
}

static __inline void
tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
{
#ifdef SMP
	if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
		tdq->tdq_transferable++;
		tdq->tdq_group->tdg_transferable++;
		ts->ts_flags |= TSF_XFERABLE;
		if (tdq->tdq_transferable >= busy_thresh &&
		    (tdq->tdq_flags & TDQF_BUSY) == 0) {
			tdq->tdq_flags |= TDQF_BUSY;
			atomic_set_int(&tdq_busy, 1 << TDQ_ID(tdq));
		}
	}
#endif
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		u_char pri;

		pri = ts->ts_thread->td_priority;
		KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
			("Invalid priority %d on timeshare runq", pri));
		/*
		 * This queue contains only priorities between MIN and MAX
		 * timeshare.  Use the whole queue to represent these values.
		 */
#define	TS_RQ_PPQ	(((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
		if ((flags & SRQ_BORROWING) == 0) {
			pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
			pri = (pri + tdq->tdq_idx) % RQ_NQS;
			/*
			 * This effectively shortens the queue by one so we
			 * can have a one slot difference between idx and
			 * ridx while we wait for threads to drain.
			 */
			if (tdq->tdq_ridx != tdq->tdq_idx &&
			    pri == tdq->tdq_ridx)
				pri = (pri - 1) % RQ_NQS;
		} else
			pri = tdq->tdq_ridx;
		runq_add_pri(ts->ts_runq, ts, pri, flags);
	} else
		runq_add(ts->ts_runq, ts, flags);
}
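
/*
 * Worked example of the circular timeshare queue above (assumes the
 * stock constants: a 64-priority timeshare range and RQ_NQS == 64, so
 * TS_RQ_PPQ == 1).  A priority-190 thread maps to slot 190 - 160 = 30;
 * with tdq_idx == 10 it is inserted at (30 + 10) % 64 == 40.  Because
 * sched_clock() rotates tdq_idx forward one slot per tick, threads with
 * numerically lower (better) priorities land in slots that come up for
 * removal sooner, so a worse priority simply waits more wall-clock time
 * rather than starving.  If the computed slot would collide with
 * tdq_ridx while the old slot is still draining, the thread is bumped
 * back one slot instead of jumping ahead of the drain point.
 */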

static __inline void
tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
{
#ifdef SMP
	if (ts->ts_flags & TSF_XFERABLE) {
		tdq->tdq_transferable--;
		tdq->tdq_group->tdg_transferable--;
		ts->ts_flags &= ~TSF_XFERABLE;
		if (tdq->tdq_transferable < busy_thresh &&
		    (tdq->tdq_flags & TDQF_BUSY)) {
			atomic_clear_int(&tdq_busy, 1 << TDQ_ID(tdq));
			tdq->tdq_flags &= ~TDQF_BUSY;
		}
	}
#endif
	if (ts->ts_runq == &tdq->tdq_timeshare) {
		if (tdq->tdq_idx != tdq->tdq_ridx)
			runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
		else
			runq_remove_idx(ts->ts_runq, ts, NULL);
		/*
		 * For timeshare threads we update the priority here so
		 * the priority reflects the time we've been sleeping.
		 */
		ts->ts_ltick = ticks;
		sched_pctcpu_update(ts);
		sched_priority(ts->ts_thread);
	} else
		runq_remove(ts->ts_runq, ts);
}

static void
tdq_load_add(struct tdq *tdq, struct td_sched *ts)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	tdq->tdq_load++;
	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
	if (class != PRI_ITHD &&
	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		tdq->tdq_group->tdg_load++;
#else
		tdq->tdq_sysload++;
#endif
}

static void
tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
{
	int class;
	mtx_assert(&sched_lock, MA_OWNED);
	class = PRI_BASE(ts->ts_thread->td_pri_class);
	if (class != PRI_ITHD &&
	    (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
		tdq->tdq_group->tdg_load--;
#else
		tdq->tdq_sysload--;
#endif
	tdq->tdq_load--;
	CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
	ts->ts_runq = NULL;
}

#ifdef SMP
static void
sched_smp_tick(struct thread *td)
{
	struct tdq *tdq;

	tdq = TDQ_SELF();
	if (rebalance) {
		if (ticks >= bal_tick)
			sched_balance();
		if (ticks >= gbal_tick && balance_groups)
			sched_balance_groups();
	}
	td->td_sched->ts_rltick = ticks;
}

/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm favors
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
static void
sched_balance(void)
{
	struct tdq_group *high;
	struct tdq_group *low;
	struct tdq_group *tdg;
	int cnt;
	int i;

	bal_tick = ticks + (random() % (hz * 2));
	if (smp_started == 0)
		return;
	low = high = NULL;
	i = random() % (tdg_maxid + 1);
	for (cnt = 0; cnt <= tdg_maxid; cnt++) {
		tdg = TDQ_GROUP(i);
		/*
		 * Find the CPU with the highest load that has some
		 * threads to transfer.
		 */
		if ((high == NULL || tdg->tdg_load > high->tdg_load)
		    && tdg->tdg_transferable)
			high = tdg;
		if (low == NULL || tdg->tdg_load < low->tdg_load)
			low = tdg;
		if (++i > tdg_maxid)
			i = 0;
	}
	if (low != NULL && high != NULL && high != low)
		sched_balance_pair(LIST_FIRST(&high->tdg_members),
		    LIST_FIRST(&low->tdg_members));
}

static void
sched_balance_groups(void)
{
	int i;

	gbal_tick = ticks + (random() % (hz * 2));
	mtx_assert(&sched_lock, MA_OWNED);
	if (smp_started)
		for (i = 0; i <= tdg_maxid; i++)
			sched_balance_group(TDQ_GROUP(i));
}

static void
sched_balance_group(struct tdq_group *tdg)
{
	struct tdq *tdq;
	struct tdq *high;
	struct tdq *low;
	int load;

	if (tdg->tdg_transferable == 0)
		return;
	low = NULL;
	high = NULL;
	LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
		load = tdq->tdq_load;
		if (high == NULL || load > high->tdq_load)
			high = tdq;
		if (low == NULL || load < low->tdq_load)
			low = tdq;
	}
	if (high != NULL && low != NULL && high != low)
		sched_balance_pair(high, low);
}

static void
sched_balance_pair(struct tdq *high, struct tdq *low)
{
	int transferable;
	int high_load;
	int low_load;
	int move;
	int diff;
	int i;

	/*
	 * If we're transferring within a group we have to use this specific
	 * tdq's transferable count, otherwise we can steal from other members
	 * of the group.
	 */
	if (high->tdq_group == low->tdq_group) {
		transferable = high->tdq_transferable;
		high_load = high->tdq_load;
		low_load = low->tdq_load;
	} else {
		transferable = high->tdq_group->tdg_transferable;
		high_load = high->tdq_group->tdg_load;
		low_load = low->tdq_group->tdg_load;
	}
	if (transferable == 0)
		return;
	/*
	 * Determine what the imbalance is and then adjust that to how many
	 * threads we actually have to give up (transferable).
	 */
	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;
	move = min(move, transferable);
	for (i = 0; i < move; i++)
		tdq_move(high, TDQ_ID(low));
	return;
}
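
/*
 * Worked example of the move computation (illustrative numbers): with
 * high_load == 7 and low_load == 2, diff == 5 and move is rounded up
 * from 2 to 3, i.e. ceil(diff / 2); after moving three threads the
 * loads are 4 and 5, as close to equal as an integer split allows.
 * The min() against transferable then caps the move at the number of
 * threads actually eligible to migrate.
 */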

static void
tdq_move(struct tdq *from, int cpu)
{
	struct tdq *tdq;
	struct tdq *to;
	struct td_sched *ts;

	tdq = from;
	to = TDQ_CPU(cpu);
	ts = tdq_steal(tdq, 1);
	if (ts == NULL) {
		struct tdq_group *tdg;

		tdg = tdq->tdq_group;
		LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
			if (tdq == from || tdq->tdq_transferable == 0)
				continue;
			ts = tdq_steal(tdq, 1);
			break;
		}
		if (ts == NULL)
			panic("tdq_move: No threads available with a "
			    "transferable count of %d\n",
			    tdg->tdg_transferable);
	}
	if (tdq == to)
		return;
	sched_rem(ts->ts_thread);
	ts->ts_cpu = cpu;
	sched_pin_td(ts->ts_thread);
	sched_add(ts->ts_thread, SRQ_YIELDING);
	sched_unpin_td(ts->ts_thread);
}

static int
tdq_idled(struct tdq *tdq)
{
	struct tdq_group *tdg;
	struct tdq *steal;
	struct td_sched *ts;

	tdg = tdq->tdq_group;
	/*
	 * If we're in a cpu group, try to steal threads from another cpu in
	 * the group before idling.
	 */
	if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
		LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
			if (steal == tdq || steal->tdq_transferable == 0)
				continue;
			ts = tdq_steal(steal, 0);
			if (ts)
				goto steal;
		}
	}
	if (steal_busy) {
		while (tdq_busy) {
			int cpu;

			cpu = ffs(tdq_busy);
			if (cpu == 0)
				break;
			cpu--;
			steal = TDQ_CPU(cpu);
			if (steal->tdq_transferable == 0)
				continue;
			ts = tdq_steal(steal, 1);
			if (ts == NULL)
				continue;
			CTR5(KTR_ULE,
			    "tdq_idled: stealing td %p(%s) pri %d from %d busy 0x%X",
			    ts->ts_thread, ts->ts_thread->td_proc->p_comm,
			    ts->ts_thread->td_priority, cpu, tdq_busy);
			goto steal;
		}
	}
	/*
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a thread bounces
	 * back and forth between two idle cores on separate physical CPUs.
	 */
	tdg->tdg_idlemask |= PCPU_GET(cpumask);
	if (tdg->tdg_idlemask == tdg->tdg_cpumask)
		atomic_set_int(&tdq_idle, tdg->tdg_mask);
	return (1);
steal:
	sched_rem(ts->ts_thread);
	ts->ts_cpu = PCPU_GET(cpuid);
	sched_pin_td(ts->ts_thread);
	sched_add(ts->ts_thread, SRQ_YIELDING);
	sched_unpin_td(ts->ts_thread);

	return (0);
}

static void
tdq_notify(struct td_sched *ts)
{
	struct thread *ctd;
	struct pcpu *pcpu;
	int cpri;
	int pri;
	int cpu;

	cpu = ts->ts_cpu;
	pri = ts->ts_thread->td_priority;
	pcpu = pcpu_find(cpu);
	ctd = pcpu->pc_curthread;
	cpri = ctd->td_priority;

	/*
	 * If our priority is not better than the current priority there is
	 * nothing to do.
	 */
	if (pri > cpri)
		return;
	/*
	 * Always IPI idle.
	 */
	if (cpri > PRI_MIN_IDLE)
		goto sendipi;
	/*
	 * If we're realtime or better and there is timeshare or worse running
	 * send an IPI.
	 */
	if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
		goto sendipi;
	/*
	 * Otherwise only IPI if we exceed the threshold.
	 */
	if (pri > ipi_thresh)
		return;
sendipi:
	ctd->td_flags |= TDF_NEEDRESCHED;
	if (cpri < PRI_MIN_IDLE) {
		if (ipi_ast)
			ipi_selected(1 << cpu, IPI_AST);
		else if (ipi_preempt)
			ipi_selected(1 << cpu, IPI_PREEMPT);
	} else
		ipi_selected(1 << cpu, IPI_PREEMPT);
}

static struct td_sched *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct td_sched *ts;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ts, rqh, ts_procq) {
				if (THREAD_CAN_MIGRATE(ts->ts_thread))
					return (ts);
			}
		}
	}
	return (NULL);
}

static struct td_sched *
tdq_steal(struct tdq *tdq, int stealidle)
{
	struct td_sched *ts;

	/*
	 * Steal from next first to try to get a non-interactive task that
	 * may not have run for a while.
	 * XXX Need to effect steal order for timeshare threads.
	 */
	if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
		return (ts);
	if ((ts = runq_steal(&tdq->tdq_timeshare)) != NULL)
		return (ts);
	if (stealidle)
		return (runq_steal(&tdq->tdq_idle));
	return (NULL);
}

int
tdq_pickidle(struct tdq *tdq, struct td_sched *ts)
{
	struct tdq_group *tdg;
	int self;
	int cpu;

	self = PCPU_GET(cpuid);
	if (smp_started == 0)
		return (self);
	/*
	 * If the current CPU has idled, just run it here.
	 */
	if ((tdq->tdq_group->tdg_idlemask & PCPU_GET(cpumask)) != 0)
		return (self);
	/*
	 * Try the last group we ran on.
	 */
	tdg = TDQ_CPU(ts->ts_cpu)->tdq_group;
	cpu = ffs(tdg->tdg_idlemask);
	if (cpu)
		return (cpu - 1);
	/*
	 * Search for an idle group.
	 */
	cpu = ffs(tdq_idle);
	if (cpu)
		return (cpu - 1);
	/*
	 * XXX If there are no idle groups, check for an idle core.
	 */
	/*
	 * No idle CPUs?
	 */
	return (self);
}

static int
tdq_pickpri(struct tdq *tdq, struct td_sched *ts, int flags)
{
	struct pcpu *pcpu;
	int lowpri;
	int lowcpu;
	int lowload;
	int load;
	int self;
	int pri;
	int cpu;

	self = PCPU_GET(cpuid);
	if (smp_started == 0)
		return (self);

	pri = ts->ts_thread->td_priority;
	/*
	 * Regardless of affinity, if the last cpu is idle send it there.
	 */
	pcpu = pcpu_find(ts->ts_cpu);
	if (pcpu->pc_curthread->td_priority > PRI_MIN_IDLE) {
		CTR5(KTR_ULE,
		    "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d",
		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
		    pcpu->pc_curthread->td_priority);
		return (ts->ts_cpu);
	}
	/*
	 * If we have affinity, try to place it on the cpu we last ran on.
	 */
	if (SCHED_AFFINITY(ts) && pcpu->pc_curthread->td_priority > pri) {
		CTR5(KTR_ULE,
		    "affinity for %d, ltick %d ticks %d pri %d curthread %d",
		    ts->ts_cpu, ts->ts_rltick, ticks, pri,
		    pcpu->pc_curthread->td_priority);
		return (ts->ts_cpu);
	}
	/*
	 * Try ourself first; if we're running something lower priority this
	 * may have some locality with the waking thread and execute faster
	 * here.
	 */
	if (tryself) {
		/*
		 * If we're being awoken by an interrupt thread or the waker
		 * is going right to sleep run here as well.
		 */
		if ((TDQ_SELF()->tdq_load == 1) && (flags & SRQ_YIELDING ||
		    curthread->td_pri_class == PRI_ITHD)) {
			CTR2(KTR_ULE, "tryself load %d flags %d",
			    TDQ_SELF()->tdq_load, flags);
			return (self);
		}
	}
	/*
	 * Look for an idle group.
	 */
	CTR1(KTR_ULE, "tdq_idle %X", tdq_idle);
	cpu = ffs(tdq_idle);
	if (cpu)
		return (cpu - 1);
	if (tryselfidle && pri < curthread->td_priority) {
		CTR1(KTR_ULE, "tryself %d",
		    curthread->td_priority);
		return (self);
	}
	/*
	 * Now search for the cpu running the lowest priority thread with
	 * the least load.
	 */
	lowload = 0;
	lowpri = lowcpu = 0;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		pri = pcpu->pc_curthread->td_priority;
		CTR4(KTR_ULE,
		    "cpu %d pri %d lowcpu %d lowpri %d",
		    cpu, pri, lowcpu, lowpri);
		if (pri < lowpri)
			continue;
		load = TDQ_CPU(cpu)->tdq_load;
		if (lowpri && lowpri == pri && load > lowload)
			continue;
		lowpri = pri;
		lowcpu = cpu;
		lowload = load;
	}

	return (lowcpu);
}

#endif	/* SMP */

/*
 * Pick the highest priority task we have and return it.
 */

static struct td_sched *
tdq_choose(struct tdq *tdq)
{
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);

	ts = runq_choose(&tdq->tdq_realtime);
	if (ts != NULL) {
		KASSERT(ts->ts_thread->td_priority <= PRI_MAX_REALTIME,
		    ("tdq_choose: Invalid priority on realtime queue %d",
		    ts->ts_thread->td_priority));
		return (ts);
	}
	ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
	if (ts != NULL) {
		KASSERT(ts->ts_thread->td_priority <= PRI_MAX_TIMESHARE &&
		    ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
		    ("tdq_choose: Invalid priority on timeshare queue %d",
		    ts->ts_thread->td_priority));
		return (ts);
	}

	ts = runq_choose(&tdq->tdq_idle);
	if (ts != NULL) {
		KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
		    ("tdq_choose: Invalid priority on idle queue %d",
		    ts->ts_thread->td_priority));
		return (ts);
	}

	return (NULL);
}

static void
tdq_setup(struct tdq *tdq)
{
	runq_init(&tdq->tdq_realtime);
	runq_init(&tdq->tdq_timeshare);
	runq_init(&tdq->tdq_idle);
	tdq->tdq_load = 0;
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int i;
#endif

	/*
	 * To avoid divide-by-zero, we set realstathz to a dummy value
	 * in case sched_clock() is called before sched_initticks().
	 */
	realstathz = hz;
	sched_slice = (realstathz/10);	/* ~100ms */
	tickincr = 1 << SCHED_TICK_SHIFT;

#ifdef SMP
	balance_groups = 0;
	/*
	 * Initialize the tdqs.
	 */
	for (i = 0; i < MAXCPU; i++) {
		struct tdq *tdq;

		tdq = &tdq_cpu[i];
		tdq_setup(&tdq_cpu[i]);
	}
	if (1) {
		struct tdq_group *tdg;
		struct tdq *tdq;
		int cpus;

		for (cpus = 0, i = 0; i < MAXCPU; i++) {
			if (CPU_ABSENT(i))
				continue;
			tdq = &tdq_cpu[i];
			tdg = &tdq_groups[cpus];
			/*
			 * Setup a tdq group with one member.
			 */
			tdq->tdq_transferable = 0;
			tdq->tdq_group = tdg;
			tdg->tdg_cpus = 1;
			tdg->tdg_idlemask = 0;
			tdg->tdg_cpumask = tdg->tdg_mask = 1 << i;
			tdg->tdg_load = 0;
			tdg->tdg_transferable = 0;
			LIST_INIT(&tdg->tdg_members);
			LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
			cpus++;
		}
		tdg_maxid = cpus - 1;
	} else {
		struct tdq_group *tdg;
		struct cpu_group *cg;
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			cg = &smp_topology->ct_group[i];
			tdg = &tdq_groups[i];
			/*
			 * Initialize the group.
			 */
			tdg->tdg_idlemask = 0;
			tdg->tdg_load = 0;
			tdg->tdg_transferable = 0;
			tdg->tdg_cpus = cg->cg_count;
			tdg->tdg_cpumask = cg->cg_mask;
			LIST_INIT(&tdg->tdg_members);
			/*
			 * Find all of the group members and add them.
			 */
			for (j = 0; j < MAXCPU; j++) {
				if ((cg->cg_mask & (1 << j)) != 0) {
					if (tdg->tdg_mask == 0)
						tdg->tdg_mask = 1 << j;
					tdq_cpu[j].tdq_transferable = 0;
					tdq_cpu[j].tdq_group = tdg;
					LIST_INSERT_HEAD(&tdg->tdg_members,
					    &tdq_cpu[j], tdq_siblings);
				}
			}
			if (tdg->tdg_cpus > 1)
				balance_groups = 1;
		}
		tdg_maxid = smp_topology->ct_count - 1;
	}
	/*
	 * Stagger the group and global load balancer so they do not
	 * interfere with each other.
	 */
	bal_tick = ticks + hz;
	if (balance_groups)
		gbal_tick = ticks + (hz / 2);
#else
	tdq_setup(TDQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	tdq_load_add(TDQ_SELF(), &td_sched0);
	mtx_unlock_spin(&sched_lock);
}

/* ARGSUSED */
static void
sched_initticks(void *dummy)
{
	mtx_lock_spin(&sched_lock);
	realstathz = stathz ? stathz : hz;
	sched_slice = (realstathz/10);	/* ~100ms */

	/*
	 * tickincr is shifted out by 10 to avoid rounding errors due to
	 * hz not being evenly divisible by stathz on all platforms.
	 */
	tickincr = (hz << SCHED_TICK_SHIFT) / realstathz;
	/*
	 * This does not work for values of stathz that are more than
	 * 1 << SCHED_TICK_SHIFT * hz.  In practice this does not happen.
	 */
	if (tickincr == 0)
		tickincr = 1;
#ifdef SMP
	affinity = SCHED_AFFINITY_DEFAULT;
#endif
	mtx_unlock_spin(&sched_lock);
}
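
/*
 * Worked example (illustrative values, not asserted by this file): with
 * hz = 1000 and stathz = 128, realstathz is 128, sched_slice is 12
 * stathz ticks (~94ms), and tickincr = (1000 << 10) / 128 = 8000; each
 * stathz tick is thus charged as 8000 in the shifted domain, which
 * unshifts to roughly 7.8 hz ticks.
 */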


/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct thread *td)
{
	int score;
	int pri;

	if (td->td_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * If the score is interactive we place the thread in the realtime
	 * queue with a priority that is less than kernel and interrupt
	 * priorities.  These threads are not subject to nice restrictions.
	 *
	 * Scores greater than this are placed on the normal timeshare queue
	 * where the priority is partially decided by the most recent cpu
	 * utilization and the rest is decided by nice value.
	 */
	score = sched_interact_score(td);
	if (score < sched_interact) {
		pri = PRI_MIN_REALTIME;
		pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
		    * score;
		KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
		    ("sched_priority: invalid interactive priority %d score %d",
		    pri, score));
	} else {
		pri = SCHED_PRI_MIN;
		if (td->td_sched->ts_ticks)
			pri += SCHED_PRI_TICKS(td->td_sched);
		pri += SCHED_PRI_NICE(td->td_proc->p_nice);
		if (!(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE)) {
			static int once = 1;
			if (once) {
				printf("sched_priority: invalid priority %d ",
				    pri);
				printf("nice %d, ticks %d ftick %d ltick %d tick pri %d\n",
				    td->td_proc->p_nice,
				    td->td_sched->ts_ticks,
				    td->td_sched->ts_ftick,
				    td->td_sched->ts_ltick,
				    SCHED_PRI_TICKS(td->td_sched));
				once = 0;
			}
			pri = min(max(pri, PRI_MIN_TIMESHARE),
			    PRI_MAX_TIMESHARE);
		}
	}
	sched_user_prio(td, pri);

	return;
}
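
/*
 * Worked example (assumes the stock constants: realtime range 128..159
 * and sched_interact == 30): an interactive thread with score 15 gets
 * pri = 128 + ((159 - 128) / 30) * 15 = 128 + 15 = 143, squarely in the
 * realtime range.  A non-interactive thread with SCHED_PRI_TICKS() == 11
 * and nice 0 gets 180 + 11 + 0 = 191 in the timeshare range.
 */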

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 */
static void
sched_interact_update(struct thread *td)
{
	struct td_sched *ts;
	u_int sum;

	ts = td->td_sched;
	sum = ts->skg_runtime + ts->skg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * This only happens from two places:
	 * 1) We have added an unusual amount of run time from fork_exit.
	 * 2) We have added an unusual amount of sleep time from sched_sleep().
	 */
	if (sum > SCHED_SLP_RUN_MAX * 2) {
		if (ts->skg_runtime > ts->skg_slptime) {
			ts->skg_runtime = SCHED_SLP_RUN_MAX;
			ts->skg_slptime = 1;
		} else {
			ts->skg_slptime = SCHED_SLP_RUN_MAX;
			ts->skg_runtime = 1;
		}
		return;
	}
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		ts->skg_runtime /= 2;
		ts->skg_slptime /= 2;
		return;
	}
	ts->skg_runtime = (ts->skg_runtime / 5) * 4;
	ts->skg_slptime = (ts->skg_slptime / 5) * 4;
}

static void
sched_interact_fork(struct thread *td)
{
	int ratio;
	int sum;

	sum = td->td_sched->skg_runtime + td->td_sched->skg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		td->td_sched->skg_runtime /= ratio;
		td->td_sched->skg_slptime /= ratio;
	}
}

static int
sched_interact_score(struct thread *td)
{
	int div;

	if (td->td_sched->skg_runtime > td->td_sched->skg_slptime) {
		div = max(1, td->td_sched->skg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (td->td_sched->skg_slptime / div)));
	}
	if (td->td_sched->skg_slptime > td->td_sched->skg_runtime) {
		div = max(1, td->td_sched->skg_slptime / SCHED_INTERACT_HALF);
		return (td->td_sched->skg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);

}
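
/*
 * Worked example of the score (illustrative numbers): a thread that
 * slept 100 units and ran 25 has div = max(1, 100 / 50) = 2 and scores
 * 25 / 2 = 12, well under the default threshold of 30, so it is treated
 * as interactive.  Invert the mix (ran 100, slept 25) and div = 2
 * again, but the score is 50 + (50 - 25 / 2) = 88, firmly
 * non-interactive.  When the two are exactly equal neither branch is
 * taken and the function falls through to return 0.
 */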

/*
 * Called from proc0_init() to bootstrap the scheduler.
 */
void
schedinit(void)
{

	/*
	 * Set up the scheduler specific parts of proc0.
	 */
	proc0.p_sched = NULL; /* XXX */
	thread0.td_sched = &td_sched0;
	td_sched0.ts_ltick = ticks;
	td_sched0.ts_ftick = ticks;
	td_sched0.ts_thread = &thread0;
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most sched_slice stathz ticks.
 */
int
sched_rr_interval(void)
{

	/* Convert sched_slice to hz */
	return (hz/(realstathz/sched_slice));
}
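
/*
 * Worked example (illustrative values): with hz = 1000, realstathz = 128
 * and sched_slice = 12, this returns 1000 / (128 / 12) = 1000 / 10 = 100,
 * i.e. a round-robin interval of roughly 100ms.
 */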

static void
sched_pctcpu_update(struct td_sched *ts)
{

	if (ts->ts_ticks == 0)
		return;
	if (ticks - (hz / 10) < ts->ts_ltick &&
	    SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
		return;
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
		ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
			    SCHED_TICK_TARG;
	else
		ts->ts_ticks = 0;
	ts->ts_ltick = ticks;
	ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
}

static void
sched_thread_priority(struct thread *td, u_char prio)
{
	struct td_sched *ts;

	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
	    curthread->td_proc->p_comm);
	ts = td->td_sched;
	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_priority == prio)
		return;

	if (TD_ON_RUNQ(td) && prio < td->td_priority) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  This could be optimized to not re-add in some
		 * cases.
		 */
		sched_rem(td);
		td->td_priority = prio;
		sched_add(td, SRQ_BORROWING);
	} else
		td->td_priority = prio;
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_thread_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
	    td->td_base_pri <= PRI_MAX_TIMESHARE)
		base_pri = td->td_user_pri;
	else
		base_pri = td->td_base_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_thread_priority(td, base_pri);
	} else
		sched_lend_prio(td, prio);
}

void
sched_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	/* First, update the base priority. */
	td->td_base_pri = prio;

	/*
	 * If the thread is borrowing another thread's priority, don't
	 * ever lower the priority.
	 */
	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
		return;

	/* Change the real priority. */
	oldprio = td->td_priority;
	sched_thread_priority(td, prio);

	/*
	 * If the thread is on a turnstile, then let the turnstile update
	 * its state.
	 */
	if (TD_ON_LOCK(td) && oldprio != prio)
		turnstile_adjust(td, oldprio);
}

void
sched_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	td->td_base_user_pri = prio;
	if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
		return;
	oldprio = td->td_user_pri;
	td->td_user_pri = prio;

	if (TD_ON_UPILOCK(td) && oldprio != prio)
		umtx_pi_adjust(td, oldprio);
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{
	u_char oldprio;

	td->td_flags |= TDF_UBORROWING;

	oldprio = td->td_user_pri;
	td->td_user_pri = prio;

	if (TD_ON_UPILOCK(td) && oldprio != prio)
		umtx_pi_adjust(td, oldprio);
}

void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
	u_char base_pri;

	base_pri = td->td_base_user_pri;
	if (prio >= base_pri) {
		td->td_flags &= ~TDF_UBORROWING;
		sched_user_prio(td, base_pri);
	} else
		sched_lend_user_prio(td, prio);
}

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
	struct tdq *tdq;
	struct td_sched *ts;
	int preempt;

	mtx_assert(&sched_lock, MA_OWNED);

	preempt = flags & SW_PREEMPT;
	tdq = TDQ_SELF();
	ts = td->td_sched;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;
	td->td_owepreempt = 0;
	/*
	 * If the thread has been assigned it may be in the process of switching
	 * to the new cpu.  This is the case in sched_bind().
	 */
	if (td == PCPU_GET(idlethread)) {
		TD_SET_CAN_RUN(td);
	} else {
		tdq_load_rem(tdq, ts);
		if (TD_IS_RUNNING(td)) {
			/*
			 * Don't allow the thread to migrate
			 * from a preemption.
			 */
			if (preempt)
				sched_pin_td(td);
			sched_add(td, preempt ?
			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
			    SRQ_OURSELF|SRQ_YIELDING);
			if (preempt)
				sched_unpin_td(td);
		}
	}
	if (newtd != NULL) {
		/*
		 * If we bring in a thread account for it as if it had been
		 * added to the run queue and then chosen.
		 */
		TD_SET_RUNNING(newtd);
		tdq_load_add(TDQ_SELF(), newtd->td_sched);
	} else
		newtd = choosethread();
	if (td != newtd) {
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif

		cpu_switch(td, newtd);
#ifdef	HWPMC_HOOKS
		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
	}
	sched_lock.mtx_lock = (uintptr_t)td;
	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct proc *p, int nice)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);

	p->p_nice = nice;
	FOREACH_THREAD_IN_PROC(p, td) {
		sched_priority(td);
		sched_prio(td, td->td_base_user_pri);
	}
}

void
sched_sleep(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);

	td->td_sched->ts_slptime = ticks;
}

void
sched_wakeup(struct thread *td)
{
	struct td_sched *ts;
	int slptime;

	mtx_assert(&sched_lock, MA_OWNED);
	ts = td->td_sched;
	/*
	 * If we slept for more than a tick update our interactivity and
	 * priority.
	 */
	slptime = ts->ts_slptime;
	ts->ts_slptime = 0;
	if (slptime && slptime != ticks) {
		u_int hzticks;

		hzticks = (ticks - slptime) << SCHED_TICK_SHIFT;
		ts->skg_slptime += hzticks;
		sched_interact_update(td);
		sched_pctcpu_update(ts);
		sched_priority(td);
	}
	/* Reset the slice value after we sleep. */
	ts->ts_slice = sched_slice;
	sched_add(td, SRQ_BORING);
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct thread *td, struct thread *child)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_fork_thread(td, child);
	/*
	 * Penalize the parent and child for forking.
	 */
	sched_interact_fork(child);
	sched_priority(child);
	td->td_sched->skg_runtime += tickincr;
	sched_interact_update(td);
	sched_priority(td);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
	struct td_sched *ts;
	struct td_sched *ts2;

	/*
	 * Initialize child.
	 */
	sched_newthread(child);
	ts = td->td_sched;
	ts2 = child->td_sched;
	ts2->ts_cpu = ts->ts_cpu;
	ts2->ts_runq = NULL;
	/*
	 * Grab our parent's cpu estimation information and priority.
	 */
	ts2->ts_ticks = ts->ts_ticks;
	ts2->ts_ltick = ts->ts_ltick;
	ts2->ts_ftick = ts->ts_ftick;
	child->td_user_pri = td->td_user_pri;
	child->td_base_user_pri = td->td_base_user_pri;
	/*
	 * And update interactivity score.
	 */
	ts2->skg_slptime = ts->skg_slptime;
	ts2->skg_runtime = ts->skg_runtime;
	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
}

void
sched_class(struct thread *td, int class)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_pri_class == class)
		return;

#ifdef SMP
	/*
	 * On SMP if we're on the RUNQ we must adjust the transferable
	 * count because we could be changing to or from an interrupt
	 * class.
	 */
	if (TD_ON_RUNQ(td)) {
		struct tdq *tdq;

		tdq = TDQ_CPU(td->td_sched->ts_cpu);
		if (THREAD_CAN_MIGRATE(td)) {
			tdq->tdq_transferable--;
			tdq->tdq_group->tdg_transferable--;
		}
		td->td_pri_class = class;
		if (THREAD_CAN_MIGRATE(td)) {
			tdq->tdq_transferable++;
			tdq->tdq_group->tdg_transferable++;
		}
	}
#endif
	td->td_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct thread *child)
{
	struct thread *td;

	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
	    child, child->td_proc->p_comm, child->td_priority);

	td = FIRST_THREAD_IN_PROC(p);
	sched_exit_thread(td, child);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{

	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
	    child, child->td_proc->p_comm, child->td_priority);

	tdq_load_rem(TDQ_CPU(child->td_sched->ts_cpu), child->td_sched);
#ifdef KSE
	/*
	 * KSE forks and exits so often that this penalty causes short-lived
	 * threads to always be non-interactive.  This causes mozilla to
	 * crawl under load.
	 */
	if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
		return;
#endif
	/*
	 * Give the child's runtime to the parent without returning the
	 * sleep time as a penalty to the parent.  This causes shells that
	 * launch expensive things to mark their children as expensive.
	 */
	td->td_sched->skg_runtime += child->td_sched->skg_runtime;
	sched_interact_update(td);
	sched_priority(td);
}

void
sched_userret(struct thread *td)
{
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	if (td->td_priority != td->td_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = td->td_user_pri;
		td->td_base_pri = td->td_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

void
sched_clock(struct thread *td)
{
	struct tdq *tdq;
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef SMP
	sched_smp_tick(td);
#endif
	tdq = TDQ_SELF();
	/*
	 * Advance the insert index once for each tick to ensure that all
	 * threads get a chance to run.
	 */
	if (tdq->tdq_idx == tdq->tdq_ridx) {
		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
			tdq->tdq_ridx = tdq->tdq_idx;
	}
	ts = td->td_sched;
	/*
	 * We only do slicing code for TIMESHARE threads.
	 */
	if (td->td_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the thread so that we can compute our
	 * interactivity.
	 */
	td->td_sched->skg_runtime += tickincr;
	sched_interact_update(td);
	/*
	 * We used up one time slice.
	 */
	if (--ts->ts_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	sched_priority(td);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct tdq *tdq;
	int load;

	load = 1;

	tdq = TDQ_SELF();
#ifdef SMP
	if (tdq_busy)
		goto out;
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (tdq->tdq_load > 0)
			goto out;
	} else
		if (tdq->tdq_load - 1 > 0)
			goto out;
	load = 0;
out:
	return (load);
}

struct thread *
sched_choose(void)
{
	struct tdq *tdq;
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	tdq = TDQ_SELF();
#ifdef SMP
restart:
#endif
	ts = tdq_choose(tdq);
	if (ts) {
#ifdef SMP
		if (ts->ts_thread->td_priority > PRI_MIN_IDLE)
			if (tdq_idled(tdq) == 0)
				goto restart;
#endif
		tdq_runq_rem(tdq, ts);
		return (ts->ts_thread);
	}
#ifdef SMP
	if (tdq_idled(tdq) == 0)
		goto restart;
#endif
	return (PCPU_GET(idlethread));
}

static int
sched_preempt(struct thread *td)
{
	struct thread *ctd;
	int cpri;
	int pri;

	ctd = curthread;
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
		return (0);
	/*
	 * Always preempt IDLE threads.  Otherwise only if the preempting
	 * thread is an ithread.
	 */
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "sched_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
}

void
sched_add(struct thread *td, int flags)
{
	struct tdq *tdq;
	struct td_sched *ts;
	int preemptive;
	int class;
#ifdef SMP
	int cpuid;
	int cpumask;
#endif
	ts = td->td_sched;

	mtx_assert(&sched_lock, MA_OWNED);
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ts->ts_runq == NULL,
	    ("sched_add: thread %p is still assigned to a run queue", td));
	TD_SET_RUNQ(td);
	tdq = TDQ_SELF();
	class = PRI_BASE(td->td_pri_class);
	preemptive = !(flags & SRQ_YIELDING);
	/*
	 * Recalculate the priority before we select the target cpu or
	 * run-queue.
	 */
	if (class == PRI_TIMESHARE)
		sched_priority(td);
	if (ts->ts_slice == 0)
		ts->ts_slice = sched_slice;
#ifdef SMP
	cpuid = PCPU_GET(cpuid);
	/*
	 * Pick the destination cpu and if it isn't ours transfer to the
	 * target cpu.
	 */
	if (THREAD_CAN_MIGRATE(td)) {
		if (td->td_priority <= PRI_MAX_ITHD) {
			CTR2(KTR_ULE, "ithd %d < %d",
			    td->td_priority, PRI_MAX_ITHD);
			ts->ts_cpu = cpuid;
		}
		if (pick_pri)
			ts->ts_cpu = tdq_pickpri(tdq, ts, flags);
		else
			ts->ts_cpu = tdq_pickidle(tdq, ts);
	} else
		CTR1(KTR_ULE, "pinned %d", td->td_pinned);
	if (ts->ts_cpu != cpuid)
		preemptive = 0;
	tdq = TDQ_CPU(ts->ts_cpu);
	cpumask = 1 << ts->ts_cpu;
	/*
	 * If we had been idle, clear our bit in the group and potentially
	 * the global bitmap.
	 */
	if ((class != PRI_IDLE && class != PRI_ITHD) &&
	    (tdq->tdq_group->tdg_idlemask & cpumask) != 0) {
		/*
		 * Check to see if our group is unidling, and if so, remove it
		 * from the global idle mask.
		 */
		if (tdq->tdq_group->tdg_idlemask ==
		    tdq->tdq_group->tdg_cpumask)
			atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
		/*
		 * Now remove ourselves from the group specific idle mask.
		 */
		tdq->tdq_group->tdg_idlemask &= ~cpumask;
	}
#endif
	/*
	 * Pick the run queue based on priority.
	 */
	if (td->td_priority <= PRI_MAX_REALTIME)
		ts->ts_runq = &tdq->tdq_realtime;
	else if (td->td_priority <= PRI_MAX_TIMESHARE)
		ts->ts_runq = &tdq->tdq_timeshare;
	else
		ts->ts_runq = &tdq->tdq_idle;
	if (preemptive && sched_preempt(td))
		return;
	tdq_runq_add(tdq, ts, flags);
	tdq_load_add(tdq, ts);
#ifdef SMP
	if (ts->ts_cpu != cpuid) {
		tdq_notify(ts);
		return;
	}
#endif
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

void
sched_rem(struct thread *td)
{
	struct tdq *tdq;
	struct td_sched *ts;

	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	ts = td->td_sched;
	KASSERT(TD_ON_RUNQ(td),
	    ("sched_rem: thread not on run queue"));

	tdq = TDQ_CPU(ts->ts_cpu);
	tdq_runq_rem(tdq, ts);
	tdq_load_rem(tdq, ts);
	TD_SET_CAN_RUN(td);
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct td_sched *ts;

	pctcpu = 0;
	ts = td->td_sched;
	if (ts == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ts->ts_ticks) {
		int rtick;

		sched_pctcpu_update(ts);
		/* How many rtick per second ? */
		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
		pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
	}
	td->td_proc->p_swtime = ts->ts_ltick - ts->ts_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}
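
/*
 * Worked example of the fixed-point math above (illustrative values):
 * with hz = 1000, a thread that ran 5000 of the last 10000 ticks has
 * SCHED_TICK_HZ() = 5000, so rtick = 5000 / 10 = 500 ticks per second
 * and pctcpu = (FSCALE * ((FSCALE * 500) / 1000)) >> FSHIFT = FSCALE / 2,
 * which userland (e.g. ps/top) reports as 50% cpu.
 */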

void
sched_bind(struct thread *td, int cpu)
{
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	ts = td->td_sched;
	if (ts->ts_flags & TSF_BOUND)
		sched_unbind(td);
	ts->ts_flags |= TSF_BOUND;
#ifdef SMP
	sched_pin();
	if (PCPU_GET(cpuid) == cpu)
		return;
	ts->ts_cpu = cpu;
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL, NULL);
#endif
}

void
sched_unbind(struct thread *td)
{
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	ts = td->td_sched;
	if ((ts->ts_flags & TSF_BOUND) == 0)
		return;
	ts->ts_flags &= ~TSF_BOUND;
#ifdef SMP
	sched_unpin();
#endif
}

int
sched_is_bound(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	return (td->td_sched->ts_flags & TSF_BOUND);
}

void
sched_relinquish(struct thread *td)
{
	mtx_lock_spin(&sched_lock);
	if (td->td_pri_class == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL, NULL);
	mtx_unlock_spin(&sched_lock);
}

int
sched_load(void)
{
#ifdef SMP
	int total;
	int i;

	total = 0;
	for (i = 0; i <= tdg_maxid; i++)
		total += TDQ_GROUP(i)->tdg_load;
	return (total);
#else
	return (TDQ_SELF()->tdq_sysload);
#endif
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}

void
sched_tick(void)
{
	struct td_sched *ts;

	ts = curthread->td_sched;
	/* Adjust ticks for pctcpu */
	ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
	ts->ts_ltick = ticks;
	/*
	 * Update if we've exceeded our desired tick threshold by over one
	 * second.
	 */
	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
		sched_pctcpu_update(ts);
}

/*
 * The actual idle process.
 */
void
sched_idletd(void *dummy)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	/* ULE relies on preemption for idle interruption. */
	for (;;)
		cpu_idle();
}

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
    "Scheduler name");
SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, tickincr, CTLFLAG_RD, &tickincr, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, realstathz, CTLFLAG_RD, &realstathz, 0, "");
#ifdef SMP
SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri_affinity, CTLFLAG_RW,
    &affinity, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri_tryself, CTLFLAG_RW,
    &tryself, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri_tryselfidle, CTLFLAG_RW,
    &tryselfidle, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, ipi_preempt, CTLFLAG_RW, &ipi_preempt, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, ipi_ast, CTLFLAG_RW, &ipi_ast, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, ipi_thresh, CTLFLAG_RW, &ipi_thresh, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_busy, CTLFLAG_RW, &steal_busy, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, busy_thresh, CTLFLAG_RW, &busy_thresh, 0, "");
#endif

/* ps compat */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");


#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"