sched_ule.c revision 137061
1/*-
2 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 137061 2004-10-30 07:35:53Z jeff $");
29
30#include "opt_sched.h"
31
32#define kse td_sched
33
34#include <sys/param.h>
35#include <sys/systm.h>
36#include <sys/kdb.h>
37#include <sys/kernel.h>
38#include <sys/ktr.h>
39#include <sys/lock.h>
40#include <sys/mutex.h>
41#include <sys/proc.h>
42#include <sys/resource.h>
43#include <sys/resourcevar.h>
44#include <sys/sched.h>
45#include <sys/smp.h>
46#include <sys/sx.h>
47#include <sys/sysctl.h>
48#include <sys/sysproto.h>
49#include <sys/vmmeter.h>
50#ifdef KTRACE
51#include <sys/uio.h>
52#include <sys/ktrace.h>
53#endif
54
55#include <machine/cpu.h>
56#include <machine/smp.h>
57
58#define KTR_ULE	KTR_NFS
59
60/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
61/* XXX This is bogus compatibility crap for ps */
62static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
63SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
64
65static void sched_setup(void *dummy);
66SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
67
68static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
69
70SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
71    "Scheduler name");
72
73static int slice_min = 1;
74SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
75
76static int slice_max = 10;
77SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
78
79int realstathz;
80int tickincr = 1;
81
82#ifdef PREEMPTION
83static void
84printf_caddr_t(void *data)
85{
86	printf("%s", (char *)data);
87}
88static char preempt_warning[] =
89    "WARNING: Kernel PREEMPTION is unstable under SCHED_ULE.\n";
90SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
91    preempt_warning)
92#endif
93
94/*
95 * The schedulable entity that can be given a context to run.
96 * A process may have several of these. Probably one per processor
97 * but possibly a few more. In this universe they are grouped
98 * with a KSEG that contains the priority and niceness
99 * for the group.
100 */
101struct kse {
102	TAILQ_ENTRY(kse) ke_kglist;	/* (*) Queue of threads in ke_ksegrp. */
103	TAILQ_ENTRY(kse) ke_kgrlist;	/* (*) Queue of threads in this state.*/
104	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
105	int		ke_flags;	/* (j) KEF_* flags. */
106	struct thread	*ke_thread;	/* (*) Active associated thread. */
107	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
108	u_char		ke_oncpu;	/* (j) Which cpu we are on. */
109	char		ke_rqindex;	/* (j) Run queue index. */
110	enum {
111		KES_THREAD = 0x0,	/* slaved to thread state */
112		KES_ONRUNQ
113	} ke_state;			/* (j) thread sched specific status. */
114	int		ke_slptime;
115	int		ke_slice;
116	struct runq	*ke_runq;
117	u_char		ke_cpu;		/* CPU that we have affinity for. */
118	/* The following variables are only used for pctcpu calculation */
119	int		ke_ltick;	/* Last tick that we were running on */
120	int		ke_ftick;	/* First tick that we were running on */
121	int		ke_ticks;	/* Tick count */
122
123};
124
125
126#define td_kse td_sched
127#define	td_slptime		td_kse->ke_slptime
128#define ke_proc			ke_thread->td_proc
129#define ke_ksegrp		ke_thread->td_ksegrp
130
131/* flags kept in ke_flags */
132#define	KEF_SCHED0	0x00001	/* For scheduler-specific use. */
133#define	KEF_SCHED1	0x00002	/* For scheduler-specific use. */
134#define	KEF_SCHED2	0x00004	/* For scheduler-specific use. */
135#define	KEF_SCHED3	0x00008	/* For scheduler-specific use. */
136#define	KEF_DIDRUN	0x02000	/* Thread actually ran. */
137#define	KEF_EXIT	0x04000	/* Thread is being killed. */
138
139/*
140 * These data structures are allocated within their parent data structure but
141 * are scheduler specific.
142 */
143
144#define	ke_assign	ke_procq.tqe_next
145
146#define	KEF_ASSIGNED	KEF_SCHED0	/* Thread is being migrated. */
147#define	KEF_BOUND	KEF_SCHED1	/* Thread can not migrate. */
148#define	KEF_XFERABLE	KEF_SCHED2	/* Thread was added as transferable. */
149#define	KEF_HOLD	KEF_SCHED3	/* Thread is temporarily bound. */
150
151struct kg_sched {
152	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
153					   /* the system scheduler */
154	int	skg_slptime;		/* Number of ticks we vol. slept */
155	int	skg_runtime;		/* Number of ticks we were running */
156	int	skg_avail_opennings;	/* (j) Num unfilled slots in group.*/
157	int	skg_concurrency;	/* (j) Num threads requested in group.*/
158	int	skg_runq_threads;	/* (j) Num KSEs on runq. */
159};
160#define kg_last_assigned	kg_sched->skg_last_assigned
161#define kg_avail_opennings	kg_sched->skg_avail_opennings
162#define kg_concurrency		kg_sched->skg_concurrency
163#define kg_runq_threads		kg_sched->skg_runq_threads
164#define kg_runtime		kg_sched->skg_runtime
165#define kg_slptime		kg_sched->skg_slptime
166
167#define SLOT_RELEASE(kg)						\
168do {									\
169	kg->kg_avail_opennings++; 					\
170	CTR3(KTR_RUNQ, "kg %p(%d) Slot released (->%d)",		\
171	kg,								\
172	kg->kg_concurrency,						\
173	 kg->kg_avail_opennings);					\
174	/*KASSERT((kg->kg_avail_opennings <= kg->kg_concurrency),	\
175	    ("slots out of whack")); */					\
176} while (0)
177
178#define SLOT_USE(kg)							\
179do {									\
180	kg->kg_avail_opennings--; 					\
181	CTR3(KTR_RUNQ, "kg %p(%d) Slot used (->%d)",			\
182	kg,								\
183	kg->kg_concurrency,						\
184	 kg->kg_avail_opennings);					\
185	/*KASSERT((kg->kg_avail_opennings >= 0),			\
186	    ("slots out of whack"));*/ 					\
187} while (0)
188
189static struct kse kse0;
190static struct kg_sched kg_sched0;
191
192/*
193 * The priority is primarily determined by the interactivity score.  Thus, we
194 * give lower (better) priorities to kse groups that use less CPU.  The nice
195 * value is then directly added to this to allow nice to have some effect
196 * on latency.
197 *
198 * PRI_RANGE:	Total priority range for timeshare threads.
199 * PRI_NRESV:	Number of nice values.
200 * PRI_BASE:	The start of the dynamic range.
201 */
202#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
203#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
204#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
205#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
206#define	SCHED_PRI_INTERACT(score)					\
207    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
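/*
 * Illustrative example (editorial addition, not part of revision 137061):
 * with the usual 5.x priority layout, PRI_MIN_TIMESHARE is 160 and
 * PRI_MAX_TIMESHARE is 223, so SCHED_PRI_RANGE is 64.  An interactivity
 * score of 50 then maps to
 *
 *	SCHED_PRI_INTERACT(50) == 50 * 64 / 100 == 32
 *
 * and sched_priority() would run a nice 0 thread with that score at user
 * priority 160 + 32 == 192, with each point of nice shifting the result by
 * one before clamping to the timeshare range.
 */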
208
209/*
210 * These determine the interactivity of a process.
211 *
212 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
213 *		before throttling back.
214 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
215 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
216 * INTERACT_THRESH:	Threshold for placement on the current runq.
217 */
218#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
219#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
220#define	SCHED_INTERACT_MAX	(100)
221#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
222#define	SCHED_INTERACT_THRESH	(30)
223
224/*
225 * These parameters and macros determine the size of the time slice that is
226 * granted to each thread.
227 *
228 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
229 * SLICE_MAX:	Maximum time slice granted.
230 * SLICE_RANGE:	Range of available time slices scaled by hz.
231 * SLICE_SCALE:	The number of slices granted for a val in the range of [0, max].
232 * SLICE_NICE:  Determines the amount of slice granted to a scaled nice value.
233 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
234 */
235#define	SCHED_SLICE_MIN			(slice_min)
236#define	SCHED_SLICE_MAX			(slice_max)
237#define	SCHED_SLICE_INTERACTIVE		(slice_max)
238#define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
239#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
240#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
241#define	SCHED_SLICE_NICE(nice)						\
242    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
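/*
 * Illustrative example (editorial addition, assuming the hz == 1000 defaults
 * computed in sched_setup(): slice_min == 10 and slice_max == 142 ticks, so
 * SCHED_SLICE_RANGE == 133 and SCHED_SLICE_NTHRESH == 19):
 *
 *	SCHED_SLICE_NICE(0)  == 142 - (0 * 133) / 19  == 142 ticks
 *	SCHED_SLICE_NICE(10) == 142 - (10 * 133) / 19 == 72 ticks
 *	SCHED_SLICE_NICE(19) == 142 - (19 * 133) / 19 == 9 ticks
 *
 * where the argument is the distance of a kse's nice value from ksq_nicemin.
 */
#if 0
/*
 * Editorial sketch (not in revision 137061): the same mapping as a helper
 * with the assumed hz == 1000 constants folded in.  Distances beyond the
 * threshold get no slice in sched_slice(), nice 0 processes excepted.
 */
static int
slice_for_nice_distance(int dist)
{

	if (dist > 19)			/* SCHED_SLICE_NTHRESH */
		return (0);
	return (142 - (dist * 133) / 19);
}
#endif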
243
244/*
245 * This macro determines whether or not the thread belongs on the current or
246 * next run queue.
247 */
248#define	SCHED_INTERACTIVE(kg)						\
249    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
250#define	SCHED_CURR(kg, ke)						\
251    (ke->ke_thread->td_priority < kg->kg_user_pri ||			\
252    SCHED_INTERACTIVE(kg))
253
254/*
255 * Cpu percentage computation macros and defines.
256 *
257 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
258 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
259 */
260
261#define	SCHED_CPU_TIME	10
262#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
263
264/*
265 * kseq - per processor runqs and statistics.
266 */
267struct kseq {
268	struct runq	ksq_idle;		/* Queue of IDLE threads. */
269	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
270	struct runq	*ksq_next;		/* Next timeshare queue. */
271	struct runq	*ksq_curr;		/* Current queue. */
272	int		ksq_load_timeshare;	/* Load for timeshare. */
273	int		ksq_load;		/* Aggregate load. */
274	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
275	short		ksq_nicemin;		/* Least nice. */
276#ifdef SMP
277	int			ksq_transferable;
278	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
279	struct kseq_group	*ksq_group;	/* Our processor group. */
280	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
281#else
282	int		ksq_sysload;		/* For loadavg, !ITHD load. */
283#endif
284};
285
286#ifdef SMP
287/*
288 * kseq groups are groups of processors which can cheaply share threads.  When
289 * one processor in the group goes idle it will check the runqs of the other
290 * processors in its group prior to halting and waiting for an interrupt.
291 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
292 * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
293 * load balancer.
294 */
295struct kseq_group {
296	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
297	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
298	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
299	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
300	int	ksg_load;		/* Total load of this group. */
301	int	ksg_transferable;	/* Transferable load of this group. */
302	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
303};
304#endif
305
306/*
307 * One kse queue per processor.
308 */
309#ifdef SMP
310static cpumask_t kseq_idle;
311static int ksg_maxid;
312static struct kseq	kseq_cpu[MAXCPU];
313static struct kseq_group kseq_groups[MAXCPU];
314static int bal_tick;
315static int gbal_tick;
316
317#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
318#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
319#define	KSEQ_ID(x)	((x) - kseq_cpu)
320#define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
321#else	/* !SMP */
322static struct kseq	kseq_cpu;
323
324#define	KSEQ_SELF()	(&kseq_cpu)
325#define	KSEQ_CPU(x)	(&kseq_cpu)
326#endif
327
328static void	slot_fill(struct ksegrp *kg);
329static struct kse *sched_choose(void);		/* XXX Should be thread * */
330static void sched_add_internal(struct thread *td, int preemptive);
331static void sched_slice(struct kse *ke);
332static void sched_priority(struct ksegrp *kg);
333static int sched_interact_score(struct ksegrp *kg);
334static void sched_interact_update(struct ksegrp *kg);
335static void sched_interact_fork(struct ksegrp *kg);
336static void sched_pctcpu_update(struct kse *ke);
337
338/* Operations on per processor queues */
339static struct kse * kseq_choose(struct kseq *kseq);
340static void kseq_setup(struct kseq *kseq);
341static void kseq_load_add(struct kseq *kseq, struct kse *ke);
342static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
343static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
344static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
345static void kseq_nice_add(struct kseq *kseq, int nice);
346static void kseq_nice_rem(struct kseq *kseq, int nice);
347void kseq_print(int cpu);
348#ifdef SMP
349static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class);
350static struct kse *runq_steal(struct runq *rq);
351static void sched_balance(void);
352static void sched_balance_groups(void);
353static void sched_balance_group(struct kseq_group *ksg);
354static void sched_balance_pair(struct kseq *high, struct kseq *low);
355static void kseq_move(struct kseq *from, int cpu);
356static int kseq_idled(struct kseq *kseq);
357static void kseq_notify(struct kse *ke, int cpu);
358static void kseq_assign(struct kseq *);
359static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
360/*
361 * On P4 Xeons the round-robin interrupt delivery is broken.  As a result of
362 * this, we can't pin ithreads to the cpu that the interrupt was delivered to;
363 * otherwise all ithreads would run only on CPU 0.
364 */
365#ifdef __i386__
366#define	KSE_CAN_MIGRATE(ke, class)					\
367    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
368#else /* !__i386__ */
369#define	KSE_CAN_MIGRATE(ke, class)					\
370    ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 &&		\
371    ((ke)->ke_flags & KEF_BOUND) == 0)
372#endif /* !__i386__ */
373#endif
374
375void
376kseq_print(int cpu)
377{
378	struct kseq *kseq;
379	int i;
380
381	kseq = KSEQ_CPU(cpu);
382
383	printf("kseq:\n");
384	printf("\tload:           %d\n", kseq->ksq_load);
385	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
386#ifdef SMP
387	printf("\tload transferable: %d\n", kseq->ksq_transferable);
388#endif
389	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
390	printf("\tnice counts:\n");
391	for (i = 0; i < SCHED_PRI_NRESV; i++)
392		if (kseq->ksq_nice[i])
393			printf("\t\t%d = %d\n",
394			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
395}
396
397static __inline void
398kseq_runq_add(struct kseq *kseq, struct kse *ke)
399{
400#ifdef SMP
401	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
402		kseq->ksq_transferable++;
403		kseq->ksq_group->ksg_transferable++;
404		ke->ke_flags |= KEF_XFERABLE;
405	}
406#endif
407	runq_add(ke->ke_runq, ke, 0);
408}
409
410static __inline void
411kseq_runq_rem(struct kseq *kseq, struct kse *ke)
412{
413#ifdef SMP
414	if (ke->ke_flags & KEF_XFERABLE) {
415		kseq->ksq_transferable--;
416		kseq->ksq_group->ksg_transferable--;
417		ke->ke_flags &= ~KEF_XFERABLE;
418	}
419#endif
420	runq_remove(ke->ke_runq, ke);
421}
422
423static void
424kseq_load_add(struct kseq *kseq, struct kse *ke)
425{
426	int class;
427	mtx_assert(&sched_lock, MA_OWNED);
428	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
429	if (class == PRI_TIMESHARE)
430		kseq->ksq_load_timeshare++;
431	kseq->ksq_load++;
432	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
433#ifdef SMP
434		kseq->ksq_group->ksg_load++;
435#else
436		kseq->ksq_sysload++;
437#endif
438	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
439		CTR6(KTR_ULE,
440		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
441		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
442		    ke->ke_proc->p_nice, kseq->ksq_nicemin);
443	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
444		kseq_nice_add(kseq, ke->ke_proc->p_nice);
445}
446
447static void
448kseq_load_rem(struct kseq *kseq, struct kse *ke)
449{
450	int class;
451	mtx_assert(&sched_lock, MA_OWNED);
452	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
453	if (class == PRI_TIMESHARE)
454		kseq->ksq_load_timeshare--;
455	if (class != PRI_ITHD  && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
456#ifdef SMP
457		kseq->ksq_group->ksg_load--;
458#else
459		kseq->ksq_sysload--;
460#endif
461	kseq->ksq_load--;
462	ke->ke_runq = NULL;
463	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
464		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
465}
466
467static void
468kseq_nice_add(struct kseq *kseq, int nice)
469{
470	mtx_assert(&sched_lock, MA_OWNED);
471	/* Normalize to zero. */
472	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
473	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
474		kseq->ksq_nicemin = nice;
475}
476
477static void
478kseq_nice_rem(struct kseq *kseq, int nice)
479{
480	int n;
481
482	mtx_assert(&sched_lock, MA_OWNED);
483	/* Normalize to zero. */
484	n = nice + SCHED_PRI_NHALF;
485	kseq->ksq_nice[n]--;
486	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
487
488	/*
489	 * If this wasn't the smallest nice value or there are more in
490	 * this bucket we can just return.  Otherwise we have to recalculate
491	 * the smallest nice.
492	 */
493	if (nice != kseq->ksq_nicemin ||
494	    kseq->ksq_nice[n] != 0 ||
495	    kseq->ksq_load_timeshare == 0)
496		return;
497
498	for (; n < SCHED_PRI_NRESV; n++)
499		if (kseq->ksq_nice[n]) {
500			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
501			return;
502		}
503}
504
505#ifdef SMP
506/*
507 * sched_balance is a simple CPU load balancing algorithm.  It operates by
508 * finding the least loaded and most loaded cpu and equalizing their load
509 * by migrating some processes.
510 *
511 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
512 * installations will only have 2 cpus.  Secondly, load balancing too much at
513 * once can have an unpleasant effect on the system.  The scheduler rarely has
514 * enough information to make perfect decisions.  So this algorithm chooses
515 * simplicity and more gradual effects on load in larger systems.
516 *
517 * It could be improved by considering the priorities and slices assigned to
518 * each task prior to balancing them.  There are many pathological cases with
519 * any approach and so the semi random algorithm below may work as well as any.
520 *
521 */
522static void
523sched_balance(void)
524{
525	struct kseq_group *high;
526	struct kseq_group *low;
527	struct kseq_group *ksg;
528	int cnt;
529	int i;
530
531	if (smp_started == 0)
532		goto out;
533	low = high = NULL;
534	i = random() % (ksg_maxid + 1);
535	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
536		ksg = KSEQ_GROUP(i);
537		/*
538		 * Find the CPU with the highest load that has some
539		 * threads to transfer.
540		 */
541		if ((high == NULL || ksg->ksg_load > high->ksg_load)
542		    && ksg->ksg_transferable)
543			high = ksg;
544		if (low == NULL || ksg->ksg_load < low->ksg_load)
545			low = ksg;
546		if (++i > ksg_maxid)
547			i = 0;
548	}
549	if (low != NULL && high != NULL && high != low)
550		sched_balance_pair(LIST_FIRST(&high->ksg_members),
551		    LIST_FIRST(&low->ksg_members));
552out:
553	bal_tick = ticks + (random() % (hz * 2));
554}
555
556static void
557sched_balance_groups(void)
558{
559	int i;
560
561	mtx_assert(&sched_lock, MA_OWNED);
562	if (smp_started)
563		for (i = 0; i <= ksg_maxid; i++)
564			sched_balance_group(KSEQ_GROUP(i));
565	gbal_tick = ticks + (random() % (hz * 2));
566}
567
568static void
569sched_balance_group(struct kseq_group *ksg)
570{
571	struct kseq *kseq;
572	struct kseq *high;
573	struct kseq *low;
574	int load;
575
576	if (ksg->ksg_transferable == 0)
577		return;
578	low = NULL;
579	high = NULL;
580	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
581		load = kseq->ksq_load;
582		if (high == NULL || load > high->ksq_load)
583			high = kseq;
584		if (low == NULL || load < low->ksq_load)
585			low = kseq;
586	}
587	if (high != NULL && low != NULL && high != low)
588		sched_balance_pair(high, low);
589}
590
591static void
592sched_balance_pair(struct kseq *high, struct kseq *low)
593{
594	int transferable;
595	int high_load;
596	int low_load;
597	int move;
598	int diff;
599	int i;
600
601	/*
602	 * If we're transferring within a group we have to use this specific
603	 * kseq's transferable count, otherwise we can steal from other members
604	 * of the group.
605	 */
606	if (high->ksq_group == low->ksq_group) {
607		transferable = high->ksq_transferable;
608		high_load = high->ksq_load;
609		low_load = low->ksq_load;
610	} else {
611		transferable = high->ksq_group->ksg_transferable;
612		high_load = high->ksq_group->ksg_load;
613		low_load = low->ksq_group->ksg_load;
614	}
615	if (transferable == 0)
616		return;
617	/*
618	 * Determine what the imbalance is and then adjust that to how many
619	 * kses we actually have to give up (transferable).
620	 */
621	diff = high_load - low_load;
622	move = diff / 2;
623	if (diff & 0x1)
624		move++;
625	move = min(move, transferable);
626	for (i = 0; i < move; i++)
627		kseq_move(high, KSEQ_ID(low));
628	return;
629}
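/*
 * Worked example (editorial addition, not part of revision 137061): if the
 * high kseq (or group) carries a load of 7 and the low one a load of 2,
 * diff == 5 and move rounds up to 3; with only 2 transferable kses the loop
 * above moves 2 of them via kseq_move(), leaving loads of roughly 5 and 4.
 */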
630
631static void
632kseq_move(struct kseq *from, int cpu)
633{
634	struct kseq *kseq;
635	struct kseq *to;
636	struct kse *ke;
637
638	kseq = from;
639	to = KSEQ_CPU(cpu);
640	ke = kseq_steal(kseq, 1);
641	if (ke == NULL) {
642		struct kseq_group *ksg;
643
644		ksg = kseq->ksq_group;
645		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
646			if (kseq == from || kseq->ksq_transferable == 0)
647				continue;
648			ke = kseq_steal(kseq, 1);
649			break;
650		}
651		if (ke == NULL)
652			panic("kseq_move: No KSEs available with a "
653			    "transferable count of %d\n",
654			    ksg->ksg_transferable);
655	}
656	if (kseq == to)
657		return;
658	ke->ke_state = KES_THREAD;
659	kseq_runq_rem(kseq, ke);
660	kseq_load_rem(kseq, ke);
661	kseq_notify(ke, cpu);
662}
663
664static int
665kseq_idled(struct kseq *kseq)
666{
667	struct kseq_group *ksg;
668	struct kseq *steal;
669	struct kse *ke;
670
671	ksg = kseq->ksq_group;
672	/*
673	 * If we're in a cpu group, try to steal kses from another cpu in
674	 * the group before idling.
675	 */
676	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
677		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
678			if (steal == kseq || steal->ksq_transferable == 0)
679				continue;
680			ke = kseq_steal(steal, 0);
681			if (ke == NULL)
682				continue;
683			ke->ke_state = KES_THREAD;
684			kseq_runq_rem(steal, ke);
685			kseq_load_rem(steal, ke);
686			ke->ke_cpu = PCPU_GET(cpuid);
687			sched_add_internal(ke->ke_thread, 0);
688			return (0);
689		}
690	}
691	/*
692	 * We only set the idled bit when all of the cpus in the group are
693	 * idle.  Otherwise we could get into a situation where a KSE bounces
694	 * back and forth between two idle cores on separate physical CPUs.
695	 */
696	ksg->ksg_idlemask |= PCPU_GET(cpumask);
697	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
698		return (1);
699	atomic_set_int(&kseq_idle, ksg->ksg_mask);
700	return (1);
701}
702
703static void
704kseq_assign(struct kseq *kseq)
705{
706	struct kse *nke;
707	struct kse *ke;
708
709	do {
710		*(volatile struct kse **)&ke = kseq->ksq_assigned;
711	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
712	for (; ke != NULL; ke = nke) {
713		nke = ke->ke_assign;
714		ke->ke_flags &= ~KEF_ASSIGNED;
715		sched_add_internal(ke->ke_thread, 0);
716	}
717}
718
719static void
720kseq_notify(struct kse *ke, int cpu)
721{
722	struct kseq *kseq;
723	struct thread *td;
724	struct pcpu *pcpu;
725	int prio;
726
727	ke->ke_cpu = cpu;
728	ke->ke_flags |= KEF_ASSIGNED;
729	prio = ke->ke_thread->td_priority;
730
731	kseq = KSEQ_CPU(cpu);
732
733	/*
734	 * Place a KSE on another cpu's queue and force a resched.
735	 */
736	do {
737		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
738	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
739	/*
740	 * Without sched_lock we could lose a race where we set NEEDRESCHED
741	 * on a thread that is switched out before the IPI is delivered.  This
742	 * would lead us to miss the resched.  This will be a problem once
743	 * sched_lock is pushed down.
744	 */
745	pcpu = pcpu_find(cpu);
746	td = pcpu->pc_curthread;
747	if (ke->ke_thread->td_priority < td->td_priority ||
748	    td == pcpu->pc_idlethread) {
749		td->td_flags |= TDF_NEEDRESCHED;
750		ipi_selected(1 << cpu, IPI_AST);
751	}
752}
753
754static struct kse *
755runq_steal(struct runq *rq)
756{
757	struct rqhead *rqh;
758	struct rqbits *rqb;
759	struct kse *ke;
760	int word;
761	int bit;
762
763	mtx_assert(&sched_lock, MA_OWNED);
764	rqb = &rq->rq_status;
765	for (word = 0; word < RQB_LEN; word++) {
766		if (rqb->rqb_bits[word] == 0)
767			continue;
768		for (bit = 0; bit < RQB_BPW; bit++) {
769			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
770				continue;
771			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
772			TAILQ_FOREACH(ke, rqh, ke_procq) {
773				if (KSE_CAN_MIGRATE(ke,
774				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
775					return (ke);
776			}
777		}
778	}
779	return (NULL);
780}
781
782static struct kse *
783kseq_steal(struct kseq *kseq, int stealidle)
784{
785	struct kse *ke;
786
787	/*
788	 * Steal from next first to try to get a non-interactive task that
789	 * may not have run for a while.
790	 */
791	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
792		return (ke);
793	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
794		return (ke);
795	if (stealidle)
796		return (runq_steal(&kseq->ksq_idle));
797	return (NULL);
798}
799
800int
801kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
802{
803	struct kseq_group *ksg;
804	int cpu;
805
806	if (smp_started == 0)
807		return (0);
808	cpu = 0;
809	/*
810	 * If our load exceeds a certain threshold we should attempt to
811	 * reassign this thread.  The first candidate is the cpu that
812	 * originally ran the thread.  If it is idle, assign it there,
813	 * otherwise, pick an idle cpu.
814	 *
815	 * The threshold at which we start to reassign kses has a large impact
816	 * on the overall performance of the system.  Tuned too high and
817	 * some CPUs may idle.  Too low and there will be excess migration
818	 * and context switches.
819	 */
820	ksg = kseq->ksq_group;
821	if (ksg->ksg_load > ksg->ksg_cpus && kseq_idle) {
822		ksg = KSEQ_CPU(ke->ke_cpu)->ksq_group;
823		if (kseq_idle & ksg->ksg_mask) {
824			cpu = ffs(ksg->ksg_idlemask);
825			if (cpu)
826				goto migrate;
827		}
828		/*
829		 * Multiple cpus could find this bit simultaneously
830		 * but the race shouldn't be terrible.
831		 */
832		cpu = ffs(kseq_idle);
833		if (cpu)
834			goto migrate;
835	}
836	/*
837	 * If another cpu in this group has idled, assign a thread over
838	 * to them after checking to see if there are idled groups.
839	 */
840	ksg = kseq->ksq_group;
841	if (ksg->ksg_idlemask) {
842		cpu = ffs(ksg->ksg_idlemask);
843		if (cpu)
844			goto migrate;
845	}
846	/*
847	 * No new CPU was found.
848	 */
849	return (0);
850migrate:
851	/*
852	 * Now that we've found an idle CPU, migrate the thread.
853	 */
854	cpu--;
855	ke->ke_runq = NULL;
856	kseq_notify(ke, cpu);
857
858	return (1);
859}
860
861#endif	/* SMP */
862
863/*
864 * Pick the highest priority task we have and return it.
865 */
866
867static struct kse *
868kseq_choose(struct kseq *kseq)
869{
870	struct kse *ke;
871	struct runq *swap;
872
873	mtx_assert(&sched_lock, MA_OWNED);
874	swap = NULL;
875
876	for (;;) {
877		ke = runq_choose(kseq->ksq_curr);
878		if (ke == NULL) {
879			/*
880			 * We already swapped once and didn't get anywhere.
881			 */
882			if (swap)
883				break;
884			swap = kseq->ksq_curr;
885			kseq->ksq_curr = kseq->ksq_next;
886			kseq->ksq_next = swap;
887			continue;
888		}
889		/*
890		 * If we encounter a slice of 0 the kse is in a
891		 * TIMESHARE kse group and its nice was too far out
892		 * of the range that receives slices.
893		 */
894		if (ke->ke_slice == 0) {
895			runq_remove(ke->ke_runq, ke);
896			sched_slice(ke);
897			ke->ke_runq = kseq->ksq_next;
898			runq_add(ke->ke_runq, ke, 0);
899			continue;
900		}
901		return (ke);
902	}
903
904	return (runq_choose(&kseq->ksq_idle));
905}
906
907static void
908kseq_setup(struct kseq *kseq)
909{
910	runq_init(&kseq->ksq_timeshare[0]);
911	runq_init(&kseq->ksq_timeshare[1]);
912	runq_init(&kseq->ksq_idle);
913	kseq->ksq_curr = &kseq->ksq_timeshare[0];
914	kseq->ksq_next = &kseq->ksq_timeshare[1];
915	kseq->ksq_load = 0;
916	kseq->ksq_load_timeshare = 0;
917}
918
919static void
920sched_setup(void *dummy)
921{
922#ifdef SMP
923	int balance_groups;
924	int i;
925#endif
926
927	slice_min = (hz/100);	/* 10ms */
928	slice_max = (hz/7);	/* ~140ms */
929
930#ifdef SMP
931	balance_groups = 0;
932	/*
933	 * Initialize the kseqs.
934	 */
935	for (i = 0; i < MAXCPU; i++) {
936		struct kseq *ksq;
937
938		ksq = &kseq_cpu[i];
939		ksq->ksq_assigned = NULL;
940		kseq_setup(&kseq_cpu[i]);
941	}
942	if (smp_topology == NULL) {
943		struct kseq_group *ksg;
944		struct kseq *ksq;
945
946		for (i = 0; i < MAXCPU; i++) {
947			ksq = &kseq_cpu[i];
948			ksg = &kseq_groups[i];
949			/*
950			 * Setup a kseq group with one member.
951			 */
952			ksq->ksq_transferable = 0;
953			ksq->ksq_group = ksg;
954			ksg->ksg_cpus = 1;
955			ksg->ksg_idlemask = 0;
956			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
957			ksg->ksg_load = 0;
958			ksg->ksg_transferable = 0;
959			LIST_INIT(&ksg->ksg_members);
960			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
961		}
962	} else {
963		struct kseq_group *ksg;
964		struct cpu_group *cg;
965		int j;
966
967		for (i = 0; i < smp_topology->ct_count; i++) {
968			cg = &smp_topology->ct_group[i];
969			ksg = &kseq_groups[i];
970			/*
971			 * Initialize the group.
972			 */
973			ksg->ksg_idlemask = 0;
974			ksg->ksg_load = 0;
975			ksg->ksg_transferable = 0;
976			ksg->ksg_cpus = cg->cg_count;
977			ksg->ksg_cpumask = cg->cg_mask;
978			LIST_INIT(&ksg->ksg_members);
979			/*
980			 * Find all of the group members and add them.
981			 */
982			for (j = 0; j < MAXCPU; j++) {
983				if ((cg->cg_mask & (1 << j)) != 0) {
984					if (ksg->ksg_mask == 0)
985						ksg->ksg_mask = 1 << j;
986					kseq_cpu[j].ksq_transferable = 0;
987					kseq_cpu[j].ksq_group = ksg;
988					LIST_INSERT_HEAD(&ksg->ksg_members,
989					    &kseq_cpu[j], ksq_siblings);
990				}
991			}
992			if (ksg->ksg_cpus > 1)
993				balance_groups = 1;
994		}
995		ksg_maxid = smp_topology->ct_count - 1;
996	}
997	/*
998	 * Stagger the group and global load balancer so they do not
999	 * interfere with each other.
1000	 */
1001	bal_tick = ticks + hz;
1002	if (balance_groups)
1003		gbal_tick = ticks + (hz / 2);
1004#else
1005	kseq_setup(KSEQ_SELF());
1006#endif
1007	mtx_lock_spin(&sched_lock);
1008	kseq_load_add(KSEQ_SELF(), &kse0);
1009	mtx_unlock_spin(&sched_lock);
1010}
1011
1012/*
1013 * Scale the scheduling priority according to the "interactivity" of this
1014 * process.
1015 */
1016static void
1017sched_priority(struct ksegrp *kg)
1018{
1019	int pri;
1020
1021	if (kg->kg_pri_class != PRI_TIMESHARE)
1022		return;
1023
1024	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
1025	pri += SCHED_PRI_BASE;
1026	pri += kg->kg_proc->p_nice;
1027
1028	if (pri > PRI_MAX_TIMESHARE)
1029		pri = PRI_MAX_TIMESHARE;
1030	else if (pri < PRI_MIN_TIMESHARE)
1031		pri = PRI_MIN_TIMESHARE;
1032
1033	kg->kg_user_pri = pri;
1034
1035	return;
1036}
1037
1038/*
1039 * Calculate a time slice based on the properties of the kseg and the runq
1040 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
1041 */
1042static void
1043sched_slice(struct kse *ke)
1044{
1045	struct kseq *kseq;
1046	struct ksegrp *kg;
1047
1048	kg = ke->ke_ksegrp;
1049	kseq = KSEQ_CPU(ke->ke_cpu);
1050
1051	/*
1052	 * Rationale:
1053	 * KSEs in interactive ksegs get a minimal slice so that we
1054	 * quickly notice if one abuses its advantage.
1055	 *
1056	 * KSEs in non-interactive ksegs are assigned a slice that is
1057	 * based on the kseg's nice value relative to the least nice kseg
1058	 * on the run queue for this cpu.
1059	 *
1060	 * If the KSE is less nice than all others it gets the maximum
1061	 * slice and other KSEs will adjust their slice relative to
1062	 * this when they first expire.
1063	 *
1064	 * There is a 20 point window that starts relative to the least
1065	 * nice kse on the run queue.  Slice size is determined by
1066	 * the kse's distance from the least nice ksegrp.
1067	 *
1068	 * If the kse is outside of the window it will get no slice
1069	 * and will be reevaluated each time it is selected on the
1070	 * run queue.  The exception to this is nice 0 ksegs when
1071	 * a nice -20 is running.  They are always granted a minimum
1072	 * slice.
1073	 */
1074	if (!SCHED_INTERACTIVE(kg)) {
1075		int nice;
1076
1077		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
1078		if (kseq->ksq_load_timeshare == 0 ||
1079		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
1080			ke->ke_slice = SCHED_SLICE_MAX;
1081		else if (nice <= SCHED_SLICE_NTHRESH)
1082			ke->ke_slice = SCHED_SLICE_NICE(nice);
1083		else if (kg->kg_proc->p_nice == 0)
1084			ke->ke_slice = SCHED_SLICE_MIN;
1085		else
1086			ke->ke_slice = 0;
1087	} else
1088		ke->ke_slice = SCHED_SLICE_INTERACTIVE;
1089
1090	CTR6(KTR_ULE,
1091	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
1092	    ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin,
1093	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));
1094
1095	return;
1096}
1097
1098/*
1099 * This routine enforces a maximum limit on the amount of scheduling history
1100 * kept.  It is called after either the slptime or runtime is adjusted.
1101 * This routine will not operate correctly when slp or run times have been
1102 * adjusted to more than double their maximum.
1103 */
1104static void
1105sched_interact_update(struct ksegrp *kg)
1106{
1107	int sum;
1108
1109	sum = kg->kg_runtime + kg->kg_slptime;
1110	if (sum < SCHED_SLP_RUN_MAX)
1111		return;
1112	/*
1113	 * If we have exceeded by more than 1/5th then the algorithm below
1114	 * will not bring us back into range.  Dividing by two here forces
1115	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1116	 */
1117	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1118		kg->kg_runtime /= 2;
1119		kg->kg_slptime /= 2;
1120		return;
1121	}
1122	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
1123	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
1124}
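/*
 * Worked example (editorial addition, assuming hz == 1000 so that
 * SCHED_SLP_RUN_MAX == (1000 * 5) << 10 == 5120000): a kseg with
 * kg_runtime == 4000000 and kg_slptime == 2000000 has sum == 6000000,
 * over the limit but under 6/5 of it, so both values are scaled by 4/5
 * to 3200000 and 1600000.  The 2:1 run/sleep ratio, and therefore the
 * interactivity score, is preserved while the history shrinks back under
 * the cap.
 */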
1125
1126static void
1127sched_interact_fork(struct ksegrp *kg)
1128{
1129	int ratio;
1130	int sum;
1131
1132	sum = kg->kg_runtime + kg->kg_slptime;
1133	if (sum > SCHED_SLP_RUN_FORK) {
1134		ratio = sum / SCHED_SLP_RUN_FORK;
1135		kg->kg_runtime /= ratio;
1136		kg->kg_slptime /= ratio;
1137	}
1138}
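/*
 * Worked example (editorial addition, assuming hz == 1000 so that
 * SCHED_SLP_RUN_FORK == (1000 / 2) << 10 == 512000): a child inheriting
 * kg_runtime == 1200000 and kg_slptime == 400000 has sum == 1600000, giving
 * ratio == 3; both values are divided by 3, keeping the parent's run/sleep
 * mix while limiting how much history the child starts life with.
 */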
1139
1140static int
1141sched_interact_score(struct ksegrp *kg)
1142{
1143	int div;
1144
1145	if (kg->kg_runtime > kg->kg_slptime) {
1146		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
1147		return (SCHED_INTERACT_HALF +
1148		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
1149	} if (kg->kg_slptime > kg->kg_runtime) {
1150		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
1151		return (kg->kg_runtime / div);
1152	}
1153
1154	/*
1155	 * This can happen if slptime and runtime are 0.
1156	 */
1157	return (0);
1158
1159}
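/*
 * Worked example (editorial addition): only the run/sleep ratio really
 * matters here.  With kg_runtime == 80000 and kg_slptime == 20000, div ==
 * 80000 / 50 == 1600 and the score is 50 + (50 - 20000 / 1600) == 88, well
 * above SCHED_INTERACT_THRESH, so the kseg is treated as CPU bound.  With
 * the two values swapped the score is 20000 / 1600 == 12, below the
 * threshold, and the kseg is considered interactive.
 */
#if 0
/*
 * Editorial sketch (not part of revision 137061): a stand-alone model of the
 * score above with SCHED_INTERACT_HALF == 50 folded in, handy for checking
 * how the 0..100 score responds to different run/sleep mixes.
 */
static int
interact_score_model(int runtime, int slptime)
{
	int div;

	if (runtime > slptime) {
		div = runtime / 50;
		if (div < 1)
			div = 1;
		return (50 + (50 - slptime / div));
	}
	if (slptime > runtime) {
		div = slptime / 50;
		if (div < 1)
			div = 1;
		return (runtime / div);
	}
	return (0);
}
#endif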
1160
1161/*
1162 * Very early in the boot some setup of scheduler-specific
1163 * parts of proc0 and of some scheduler resources needs to be done.
1164 * Called from:
1165 *  proc0_init()
1166 */
1167void
1168schedinit(void)
1169{
1170	/*
1171	 * Set up the scheduler specific parts of proc0.
1172	 */
1173	proc0.p_sched = NULL; /* XXX */
1174	ksegrp0.kg_sched = &kg_sched0;
1175	thread0.td_sched = &kse0;
1176	kse0.ke_thread = &thread0;
1177	kse0.ke_oncpu = NOCPU; /* wrong.. can we use PCPU(cpuid) yet? */
1178	kse0.ke_state = KES_THREAD;
1179	kg_sched0.skg_concurrency = 1;
1180	kg_sched0.skg_avail_opennings = 0; /* we are already running */
1181}
1182
1183/*
1184 * This is only somewhat accurate since, given many processes of the same
1185 * priority, they will switch when their slices run out, which will be
1186 * at most SCHED_SLICE_MAX.
1187 */
1188int
1189sched_rr_interval(void)
1190{
1191	return (SCHED_SLICE_MAX);
1192}
1193
1194static void
1195sched_pctcpu_update(struct kse *ke)
1196{
1197	/*
1198	 * Adjust counters and watermark for pctcpu calc.
1199	 */
1200	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
1201		/*
1202		 * Shift the tick count out so that the divide doesn't
1203		 * round away our results.
1204		 */
1205		ke->ke_ticks <<= 10;
1206		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
1207			    SCHED_CPU_TICKS;
1208		ke->ke_ticks >>= 10;
1209	} else
1210		ke->ke_ticks = 0;
1211	ke->ke_ltick = ticks;
1212	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
1213}
1214
1215void
1216sched_prio(struct thread *td, u_char prio)
1217{
1218	struct kse *ke;
1219
1220	ke = td->td_kse;
1221	mtx_assert(&sched_lock, MA_OWNED);
1222	if (TD_ON_RUNQ(td)) {
1223		/*
1224		 * If the priority has been elevated due to priority
1225		 * propagation, we may have to move ourselves to a new
1226		 * queue.  We still call adjustrunqueue below in case kse
1227		 * needs to fix things up.
1228		 */
1229		if (prio < td->td_priority && ke && ke->ke_runq != NULL &&
1230		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
1231		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
1232			runq_remove(ke->ke_runq, ke);
1233			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
1234			runq_add(ke->ke_runq, ke, 0);
1235		}
1236		/*
1237		 * Hold this kse on this cpu so that sched_prio() doesn't
1238		 * cause excessive migration.  We only want migration to
1239		 * happen as the result of a wakeup.
1240		 */
1241		ke->ke_flags |= KEF_HOLD;
1242		adjustrunqueue(td, prio);
1243	} else
1244		td->td_priority = prio;
1245}
1246
1247void
1248sched_switch(struct thread *td, struct thread *newtd, int flags)
1249{
1250	struct kse *ke;
1251
1252	mtx_assert(&sched_lock, MA_OWNED);
1253
1254	ke = td->td_kse;
1255
1256	td->td_lastcpu = td->td_oncpu;
1257	td->td_oncpu = NOCPU;
1258	td->td_flags &= ~TDF_NEEDRESCHED;
1259	td->td_pflags &= ~TDP_OWEPREEMPT;
1260
1261	/*
1262	 * If the KSE has been assigned it may be in the process of switching
1263	 * to the new cpu.  This is the case in sched_bind().
1264	 */
1265	if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
1266		if (td == PCPU_GET(idlethread)) {
1267			TD_SET_CAN_RUN(td);
1268		} else {
1269			/* We are ending our run so make our slot available again */
1270			SLOT_RELEASE(td->td_ksegrp);
1271			if (TD_IS_RUNNING(td)) {
1272				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1273				/*
1274				 * Don't allow the thread to migrate
1275				 * from a preemption.
1276				 */
1277				ke->ke_flags |= KEF_HOLD;
1278				setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
1279			} else {
1280				if (ke->ke_runq) {
1281					kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1282				} else if ((td->td_flags & TDF_IDLETD) == 0)
1283					kdb_backtrace();
1284				/*
1285				 * We will not be on the run queue.
1286				 * So we must be sleeping or similar.
1287				 * Don't use the slot if we will need it
1288				 * for newtd.
1289				 */
1290				if ((td->td_proc->p_flag & P_HADTHREADS) &&
1291				    (newtd == NULL ||
1292				    newtd->td_ksegrp != td->td_ksegrp))
1293					slot_fill(td->td_ksegrp);
1294			}
1295		}
1296	}
1297	if (newtd != NULL) {
1298		/*
1299		 * If we bring in a thread,
1300		 * then account for it as if it had been added to the
1301		 * run queue and then chosen.
1302		 */
1303		newtd->td_kse->ke_flags |= KEF_DIDRUN;
1304		SLOT_USE(newtd->td_ksegrp);
1305		TD_SET_RUNNING(newtd);
1306		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
1307	} else
1308		newtd = choosethread();
1309	if (td != newtd)
1310		cpu_switch(td, newtd);
1311	sched_lock.mtx_lock = (uintptr_t)td;
1312
1313	td->td_oncpu = PCPU_GET(cpuid);
1314}
1315
1316void
1317sched_nice(struct proc *p, int nice)
1318{
1319	struct ksegrp *kg;
1320	struct kse *ke;
1321	struct thread *td;
1322	struct kseq *kseq;
1323
1324	PROC_LOCK_ASSERT(p, MA_OWNED);
1325	mtx_assert(&sched_lock, MA_OWNED);
1326	/*
1327	 * We need to adjust the nice counts for running KSEs.
1328	 */
1329	FOREACH_KSEGRP_IN_PROC(p, kg) {
1330		if (kg->kg_pri_class == PRI_TIMESHARE) {
1331			FOREACH_THREAD_IN_GROUP(kg, td) {
1332				ke = td->td_kse;
1333				if (ke->ke_runq == NULL)
1334					continue;
1335				kseq = KSEQ_CPU(ke->ke_cpu);
1336				kseq_nice_rem(kseq, p->p_nice);
1337				kseq_nice_add(kseq, nice);
1338			}
1339		}
1340	}
1341	p->p_nice = nice;
1342	FOREACH_KSEGRP_IN_PROC(p, kg) {
1343		sched_priority(kg);
1344		FOREACH_THREAD_IN_GROUP(kg, td)
1345			td->td_flags |= TDF_NEEDRESCHED;
1346	}
1347}
1348
1349void
1350sched_sleep(struct thread *td)
1351{
1352	mtx_assert(&sched_lock, MA_OWNED);
1353
1354	td->td_slptime = ticks;
1355	td->td_base_pri = td->td_priority;
1356
1357	CTR2(KTR_ULE, "sleep thread %p (tick: %d)",
1358	    td, td->td_slptime);
1359}
1360
1361void
1362sched_wakeup(struct thread *td)
1363{
1364	mtx_assert(&sched_lock, MA_OWNED);
1365
1366	/*
1367	 * Let the kseg know how long we slept for.  This is because process
1368	 * interactivity behavior is modeled in the kseg.
1369	 */
1370	if (td->td_slptime) {
1371		struct ksegrp *kg;
1372		int hzticks;
1373
1374		kg = td->td_ksegrp;
1375		hzticks = (ticks - td->td_slptime) << 10;
1376		if (hzticks >= SCHED_SLP_RUN_MAX) {
1377			kg->kg_slptime = SCHED_SLP_RUN_MAX;
1378			kg->kg_runtime = 1;
1379		} else {
1380			kg->kg_slptime += hzticks;
1381			sched_interact_update(kg);
1382		}
1383		sched_priority(kg);
1384		sched_slice(td->td_kse);
1385		CTR2(KTR_ULE, "wakeup thread %p (%d ticks)", td, hzticks);
1386		td->td_slptime = 0;
1387	}
1388	setrunqueue(td, SRQ_BORING);
1389}
1390
1391/*
1392 * Penalize the parent for creating a new child and initialize the child's
1393 * priority.
1394 */
1395void
1396sched_fork(struct thread *td, struct thread *childtd)
1397{
1398
1399	mtx_assert(&sched_lock, MA_OWNED);
1400
1401	sched_fork_ksegrp(td, childtd->td_ksegrp);
1402	sched_fork_thread(td, childtd);
1403}
1404
1405void
1406sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
1407{
1408	struct ksegrp *kg = td->td_ksegrp;
1409	mtx_assert(&sched_lock, MA_OWNED);
1410
1411	child->kg_slptime = kg->kg_slptime;
1412	child->kg_runtime = kg->kg_runtime;
1413	child->kg_user_pri = kg->kg_user_pri;
1414	sched_interact_fork(child);
1415	kg->kg_runtime += tickincr << 10;
1416	sched_interact_update(kg);
1417
1418	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
1419	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
1420	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
1421}
1422
1423void
1424sched_fork_thread(struct thread *td, struct thread *child)
1425{
1426	struct kse *ke;
1427	struct kse *ke2;
1428
1429	sched_newthread(child);
1430	ke = td->td_kse;
1431	ke2 = child->td_kse;
1432	ke2->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
1433	ke2->ke_cpu = ke->ke_cpu;
1434	ke2->ke_runq = NULL;
1435
1436	/* Grab our parent's cpu estimation information. */
1437	ke2->ke_ticks = ke->ke_ticks;
1438	ke2->ke_ltick = ke->ke_ltick;
1439	ke2->ke_ftick = ke->ke_ftick;
1440}
1441
1442void
1443sched_class(struct ksegrp *kg, int class)
1444{
1445	struct kseq *kseq;
1446	struct kse *ke;
1447	struct thread *td;
1448	int nclass;
1449	int oclass;
1450
1451	mtx_assert(&sched_lock, MA_OWNED);
1452	if (kg->kg_pri_class == class)
1453		return;
1454
1455	nclass = PRI_BASE(class);
1456	oclass = PRI_BASE(kg->kg_pri_class);
1457	FOREACH_THREAD_IN_GROUP(kg, td) {
1458		ke = td->td_kse;
1459		if (ke->ke_state != KES_ONRUNQ &&
1460		    ke->ke_state != KES_THREAD)
1461			continue;
1462		kseq = KSEQ_CPU(ke->ke_cpu);
1463
1464#ifdef SMP
1465		/*
1466		 * On SMP if we're on the RUNQ we must adjust the transferable
1467		 * count because we could be changing to or from an interrupt
1468		 * class.
1469		 */
1470		if (ke->ke_state == KES_ONRUNQ) {
1471			if (KSE_CAN_MIGRATE(ke, oclass)) {
1472				kseq->ksq_transferable--;
1473				kseq->ksq_group->ksg_transferable--;
1474			}
1475			if (KSE_CAN_MIGRATE(ke, nclass)) {
1476				kseq->ksq_transferable++;
1477				kseq->ksq_group->ksg_transferable++;
1478			}
1479		}
1480#endif
1481		if (oclass == PRI_TIMESHARE) {
1482			kseq->ksq_load_timeshare--;
1483			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
1484		}
1485		if (nclass == PRI_TIMESHARE) {
1486			kseq->ksq_load_timeshare++;
1487			kseq_nice_add(kseq, kg->kg_proc->p_nice);
1488		}
1489	}
1490
1491	kg->kg_pri_class = class;
1492}
1493
1494/*
1495 * Return some of the child's priority and interactivity to the parent.
1496 * Avoid using sched_exit_thread to avoid having to decide which
1497 * thread in the parent gets the honour since it isn't used.
1498 */
1499void
1500sched_exit(struct proc *p, struct thread *childtd)
1501{
1502	mtx_assert(&sched_lock, MA_OWNED);
1503	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd);
1504	kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
1505}
1506
1507void
1508sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
1509{
1510	/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
1511	kg->kg_runtime += td->td_ksegrp->kg_runtime;
1512	sched_interact_update(kg);
1513}
1514
1515void
1516sched_exit_thread(struct thread *td, struct thread *childtd)
1517{
1518	kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
1519}
1520
1521void
1522sched_clock(struct thread *td)
1523{
1524	struct kseq *kseq;
1525	struct ksegrp *kg;
1526	struct kse *ke;
1527
1528	mtx_assert(&sched_lock, MA_OWNED);
1529	kseq = KSEQ_SELF();
1530#ifdef SMP
1531	if (ticks == bal_tick)
1532		sched_balance();
1533	if (ticks == gbal_tick)
1534		sched_balance_groups();
1535	/*
1536	 * We could have been assigned a non real-time thread without an
1537	 * IPI.
1538	 */
1539	if (kseq->ksq_assigned)
1540		kseq_assign(kseq);	/* Potentially sets NEEDRESCHED */
1541#endif
1542	/*
1543	 * sched_setup() apparently happens prior to stathz being set.  We
1544	 * need to resolve the timers earlier in the boot so we can avoid
1545	 * calculating this here.
1546	 */
1547	if (realstathz == 0) {
1548		realstathz = stathz ? stathz : hz;
1549		tickincr = hz / realstathz;
1550		/*
1551		 * XXX This does not work for values of stathz that are much
1552		 * larger than hz.
1553		 */
1554		if (tickincr == 0)
1555			tickincr = 1;
1556	}
1557
1558	ke = td->td_kse;
1559	kg = ke->ke_ksegrp;
1560
1561	/* Adjust ticks for pctcpu */
1562	ke->ke_ticks++;
1563	ke->ke_ltick = ticks;
1564
1565	/* Go up to one second beyond our max and then trim back down */
1566	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1567		sched_pctcpu_update(ke);
1568
1569	if (td->td_flags & TDF_IDLETD)
1570		return;
1571
1572	CTR4(KTR_ULE, "Tick thread %p (slice: %d, slptime: %d, runtime: %d)",
1573	    td, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
1574	/*
1575	 * We only do slicing code for TIMESHARE ksegrps.
1576	 */
1577	if (kg->kg_pri_class != PRI_TIMESHARE)
1578		return;
1579	/*
1580	 * We used a tick; charge it to the ksegrp so that we can compute our
1581	 * interactivity.
1582	 */
1583	kg->kg_runtime += tickincr << 10;
1584	sched_interact_update(kg);
1585
1586	/*
1587	 * We used up one time slice.
1588	 */
1589	if (--ke->ke_slice > 0)
1590		return;
1591	/*
1592	 * We're out of time, recompute priorities and requeue.
1593	 */
1594	kseq_load_rem(kseq, ke);
1595	sched_priority(kg);
1596	sched_slice(ke);
1597	if (SCHED_CURR(kg, ke))
1598		ke->ke_runq = kseq->ksq_curr;
1599	else
1600		ke->ke_runq = kseq->ksq_next;
1601	kseq_load_add(kseq, ke);
1602	td->td_flags |= TDF_NEEDRESCHED;
1603}
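/*
 * Worked example (editorial addition, with assumed clock rates): at
 * hz == 1000 and stathz == 128, realstathz == 128 and tickincr ==
 * 1000 / 128 == 7, so each stat clock tick spent on the CPU adds 7 << 10
 * to kg_runtime before the slice counter is decremented.  The real values
 * depend on the platform and kernel configuration.
 */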
1604
1605int
1606sched_runnable(void)
1607{
1608	struct kseq *kseq;
1609	int load;
1610
1611	load = 1;
1612
1613	kseq = KSEQ_SELF();
1614#ifdef SMP
1615	if (kseq->ksq_assigned) {
1616		mtx_lock_spin(&sched_lock);
1617		kseq_assign(kseq);
1618		mtx_unlock_spin(&sched_lock);
1619	}
1620#endif
1621	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1622		if (kseq->ksq_load > 0)
1623			goto out;
1624	} else
1625		if (kseq->ksq_load - 1 > 0)
1626			goto out;
1627	load = 0;
1628out:
1629	return (load);
1630}
1631
1632void
1633sched_userret(struct thread *td)
1634{
1635	struct ksegrp *kg;
1636
1637	kg = td->td_ksegrp;
1638
1639	if (td->td_priority != kg->kg_user_pri) {
1640		mtx_lock_spin(&sched_lock);
1641		td->td_priority = kg->kg_user_pri;
1642		mtx_unlock_spin(&sched_lock);
1643	}
1644}
1645
1646struct kse *
1647sched_choose(void)
1648{
1649	struct kseq *kseq;
1650	struct kse *ke;
1651
1652	mtx_assert(&sched_lock, MA_OWNED);
1653	kseq = KSEQ_SELF();
1654#ifdef SMP
1655restart:
1656	if (kseq->ksq_assigned)
1657		kseq_assign(kseq);
1658#endif
1659	ke = kseq_choose(kseq);
1660	if (ke) {
1661#ifdef SMP
1662		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1663			if (kseq_idled(kseq) == 0)
1664				goto restart;
1665#endif
1666		kseq_runq_rem(kseq, ke);
1667		ke->ke_state = KES_THREAD;
1668
1669		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1670			CTR4(KTR_ULE, "Run thread %p from %p (slice: %d, pri: %d)",
1671			    ke->ke_thread, ke->ke_runq, ke->ke_slice,
1672			    ke->ke_thread->td_priority);
1673		}
1674		return (ke);
1675	}
1676#ifdef SMP
1677	if (kseq_idled(kseq) == 0)
1678		goto restart;
1679#endif
1680	return (NULL);
1681}
1682
1683void
1684sched_add(struct thread *td, int flags)
1685{
1686
1687	/* let jeff work out how to map the flags better */
1688	/* I'm open to suggestions */
1689	if (flags & SRQ_YIELDING)
1690		/*
1691		 * Preempting during switching can be bad JUJU
1692		 * especially for KSE processes
1693		 */
1694		sched_add_internal(td, 0);
1695	else
1696		sched_add_internal(td, 1);
1697}
1698
1699static void
1700sched_add_internal(struct thread *td, int preemptive)
1701{
1702	struct kseq *kseq;
1703	struct ksegrp *kg;
1704	struct kse *ke;
1705#ifdef SMP
1706	int canmigrate;
1707#endif
1708	int class;
1709
1710	mtx_assert(&sched_lock, MA_OWNED);
1711	ke = td->td_kse;
1712	kg = td->td_ksegrp;
1713	if (ke->ke_flags & KEF_ASSIGNED)
1714		return;
1715	kseq = KSEQ_SELF();
1716	KASSERT(ke->ke_state != KES_ONRUNQ,
1717	    ("sched_add: kse %p (%s) already in run queue", ke,
1718	    ke->ke_proc->p_comm));
1719	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1720	    ("sched_add: process swapped out"));
1721	KASSERT(ke->ke_runq == NULL,
1722	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1723
1724	class = PRI_BASE(kg->kg_pri_class);
1725	switch (class) {
1726	case PRI_ITHD:
1727	case PRI_REALTIME:
1728		ke->ke_runq = kseq->ksq_curr;
1729		ke->ke_slice = SCHED_SLICE_MAX;
1730		ke->ke_cpu = PCPU_GET(cpuid);
1731		break;
1732	case PRI_TIMESHARE:
1733		if (SCHED_CURR(kg, ke))
1734			ke->ke_runq = kseq->ksq_curr;
1735		else
1736			ke->ke_runq = kseq->ksq_next;
1737		break;
1738	case PRI_IDLE:
1739		/*
1740		 * This is for priority prop.
1741		 */
1742		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
1743			ke->ke_runq = kseq->ksq_curr;
1744		else
1745			ke->ke_runq = &kseq->ksq_idle;
1746		ke->ke_slice = SCHED_SLICE_MIN;
1747		break;
1748	default:
1749		panic("Unknown pri class.");
1750		break;
1751	}
1752#ifdef SMP
1753	/*
1754	 * Don't migrate running threads here.  Force the long term balancer
1755	 * to do it.
1756	 */
1757	canmigrate = KSE_CAN_MIGRATE(ke, class);
1758	if (ke->ke_flags & KEF_HOLD) {
1759		ke->ke_flags &= ~KEF_HOLD;
1760		canmigrate = 0;
1761	}
1762	/*
1763	 * If this thread is pinned or bound, notify the target cpu.
1764	 */
1765	if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid) ) {
1766		ke->ke_runq = NULL;
1767		kseq_notify(ke, ke->ke_cpu);
1768		return;
1769	}
1770	/*
1771	 * If we had been idle, clear our bit in the group and potentially
1772	 * the global bitmap.  If not, see if we should transfer this thread.
1773	 */
1774	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
1775	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
1776		/*
1777		 * Check to see if our group is unidling, and if so, remove it
1778		 * from the global idle mask.
1779		 */
1780		if (kseq->ksq_group->ksg_idlemask ==
1781		    kseq->ksq_group->ksg_cpumask)
1782			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
1783		/*
1784		 * Now remove ourselves from the group specific idle mask.
1785		 */
1786		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
1787	} else if (kseq->ksq_load > 1 && canmigrate)
1788		if (kseq_transfer(kseq, ke, class))
1789			return;
1790	ke->ke_cpu = PCPU_GET(cpuid);
1791#endif
1792	/*
1793	 * XXX With preemption this is not necessary.
1794	 */
1795	if (td->td_priority < curthread->td_priority &&
1796	    ke->ke_runq == kseq->ksq_curr)
1797		curthread->td_flags |= TDF_NEEDRESCHED;
1798	if (preemptive && maybe_preempt(td))
1799		return;
1800	SLOT_USE(td->td_ksegrp);
1801	ke->ke_ksegrp->kg_runq_threads++;
1802	ke->ke_state = KES_ONRUNQ;
1803
1804	kseq_runq_add(kseq, ke);
1805	kseq_load_add(kseq, ke);
1806}
1807
1808void
1809sched_rem(struct thread *td)
1810{
1811	struct kseq *kseq;
1812	struct kse *ke;
1813
1814	ke = td->td_kse;
1815	/*
1816	 * It is safe to just return here because sched_rem() is only ever
1817	 * used in places where we're immediately going to add the
1818	 * kse back on again.  In that case it'll be added with the correct
1819	 * thread and priority when the caller drops the sched_lock.
1820	 */
1821	if (ke->ke_flags & KEF_ASSIGNED)
1822		return;
1823	mtx_assert(&sched_lock, MA_OWNED);
1824	KASSERT((ke->ke_state == KES_ONRUNQ),
1825	    ("sched_rem: KSE not on run queue"));
1826
1827	ke->ke_state = KES_THREAD;
1828	SLOT_RELEASE(td->td_ksegrp);
1829	ke->ke_ksegrp->kg_runq_threads--;
1830	kseq = KSEQ_CPU(ke->ke_cpu);
1831	kseq_runq_rem(kseq, ke);
1832	kseq_load_rem(kseq, ke);
1833}
1834
1835fixpt_t
1836sched_pctcpu(struct thread *td)
1837{
1838	fixpt_t pctcpu;
1839	struct kse *ke;
1840
1841	pctcpu = 0;
1842	ke = td->td_kse;
1843	if (ke == NULL)
1844		return (0);
1845
1846	mtx_lock_spin(&sched_lock);
1847	if (ke->ke_ticks) {
1848		int rtick;
1849
1850		/*
1851		 * Don't update more frequently than twice a second.  Allowing
1852		 * this causes the cpu usage to decay away too quickly due to
1853		 * rounding errors.
1854		 */
1855		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
1856		    ke->ke_ltick < (ticks - (hz / 2)))
1857			sched_pctcpu_update(ke);
1858		/* How many rtick per second ? */
1859		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1860		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1861	}
1862
1863	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1864	mtx_unlock_spin(&sched_lock);
1865
1866	return (pctcpu);
1867}
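/*
 * Worked example (editorial addition): SCHED_CPU_TIME is 10, so rtick is
 * roughly the number of stat clock ticks the kse accumulated per second over
 * the sampling window.  If a thread was on the CPU for about half of that
 * window, rtick is close to realstathz / 2 and
 *
 *	pctcpu == (FSCALE * ((FSCALE * rtick) / realstathz)) >> FSHIFT
 *	       == FSCALE / 2
 *
 * i.e. 50% in the fixed point format ps(1) expects.  The exact figures
 * depend on hz and stathz; this only illustrates the scaling.
 */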
1868
1869void
1870sched_bind(struct thread *td, int cpu)
1871{
1872	struct kse *ke;
1873
1874	mtx_assert(&sched_lock, MA_OWNED);
1875	ke = td->td_kse;
1876	ke->ke_flags |= KEF_BOUND;
1877#ifdef SMP
1878	if (PCPU_GET(cpuid) == cpu)
1879		return;
1880	/* sched_rem without the runq_remove */
1881	ke->ke_state = KES_THREAD;
1882	ke->ke_ksegrp->kg_runq_threads--;
1883	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1884	kseq_notify(ke, cpu);
1885	/* When we return from mi_switch we'll be on the correct cpu. */
1886	mi_switch(SW_VOL, NULL);
1887#endif
1888}
1889
1890void
1891sched_unbind(struct thread *td)
1892{
1893	mtx_assert(&sched_lock, MA_OWNED);
1894	td->td_kse->ke_flags &= ~KEF_BOUND;
1895}
1896
1897int
1898sched_load(void)
1899{
1900#ifdef SMP
1901	int total;
1902	int i;
1903
1904	total = 0;
1905	for (i = 0; i <= ksg_maxid; i++)
1906		total += KSEQ_GROUP(i)->ksg_load;
1907	return (total);
1908#else
1909	return (KSEQ_SELF()->ksq_sysload);
1910#endif
1911}
1912
1913int
1914sched_sizeof_ksegrp(void)
1915{
1916	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1917}
1918
1919int
1920sched_sizeof_proc(void)
1921{
1922	return (sizeof(struct proc));
1923}
1924
1925int
1926sched_sizeof_thread(void)
1927{
1928	return (sizeof(struct thread) + sizeof(struct td_sched));
1929}
1930#define KERN_SWITCH_INCLUDE 1
1931#include "kern/kern_switch.c"
1932