sched_ule.c revision 130881
1/*-
2 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 130881 2004-06-21 22:05:46Z scottl $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/ktr.h>
34#include <sys/lock.h>
35#include <sys/mutex.h>
36#include <sys/proc.h>
37#include <sys/resource.h>
38#include <sys/resourcevar.h>
39#include <sys/sched.h>
40#include <sys/smp.h>
41#include <sys/sx.h>
42#include <sys/sysctl.h>
43#include <sys/sysproto.h>
44#include <sys/vmmeter.h>
45#ifdef DDB
46#include <ddb/ddb.h>
47#endif
48#ifdef KTRACE
49#include <sys/uio.h>
50#include <sys/ktrace.h>
51#endif
52
53#include <machine/cpu.h>
54#include <machine/smp.h>
55
56#define KTR_ULE         KTR_NFS
57
58/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
59/* XXX This is bogus compatability crap for ps */
60static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
61SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
62
63static void sched_setup(void *dummy);
64SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
65
66static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");
67
68#define ULE_NAME	"ule"
69#define ULE_NAME_LEN	3
70SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, ULE_NAME, ULE_NAME_LEN,
71	      "System is using the ULE scheduler");
72
73static int slice_min = 1;
74SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
75
76static int slice_max = 10;
77SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
78
79int realstathz;
80int tickincr = 1;
81
82/*
83 * These datastructures are allocated within their parent datastructure but
84 * are scheduler specific.
85 */
86
87struct ke_sched {
88	int		ske_slice;
89	struct runq	*ske_runq;
90	/* The following variables are only used for pctcpu calculation */
91	int		ske_ltick;	/* Last tick that we were running on */
92	int		ske_ftick;	/* First tick that we were running on */
93	int		ske_ticks;	/* Tick count */
94	/* CPU that we have affinity for. */
95	u_char		ske_cpu;
96};
97#define	ke_slice	ke_sched->ske_slice
98#define	ke_runq		ke_sched->ske_runq
99#define	ke_ltick	ke_sched->ske_ltick
100#define	ke_ftick	ke_sched->ske_ftick
101#define	ke_ticks	ke_sched->ske_ticks
102#define	ke_cpu		ke_sched->ske_cpu
103#define	ke_assign	ke_procq.tqe_next
104
105#define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */
106#define	KEF_BOUND	KEF_SCHED1	/* KSE can not migrate. */
107
108struct kg_sched {
109	int	skg_slptime;		/* Number of ticks we vol. slept */
110	int	skg_runtime;		/* Number of ticks we were running */
111};
112#define	kg_slptime	kg_sched->skg_slptime
113#define	kg_runtime	kg_sched->skg_runtime
114
115struct td_sched {
116	int	std_slptime;
117};
118#define	td_slptime	td_sched->std_slptime
119
120struct td_sched td_sched;
121struct ke_sched ke_sched;
122struct kg_sched kg_sched;
123
124struct ke_sched *kse0_sched = &ke_sched;
125struct kg_sched *ksegrp0_sched = &kg_sched;
126struct p_sched *proc0_sched = NULL;
127struct td_sched *thread0_sched = &td_sched;
128
129/*
130 * The priority is primarily determined by the interactivity score.  Thus, we
131 * give lower(better) priorities to kse groups that use less CPU.  The nice
132 * value is then directly added to this to allow nice to have some effect
133 * on latency.
134 *
135 * PRI_RANGE:	Total priority range for timeshare threads.
136 * PRI_NRESV:	Number of nice values.
137 * PRI_BASE:	The start of the dynamic range.
138 */
139#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
140#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
141#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
142#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
143#define	SCHED_PRI_INTERACT(score)					\
144    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
145
146/*
147 * These determine the interactivity of a process.
148 *
149 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
150 *		before throttling back.
151 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
152 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
153 * INTERACT_THRESH:	Threshhold for placement on the current runq.
154 */
155#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
156#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
157#define	SCHED_INTERACT_MAX	(100)
158#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
159#define	SCHED_INTERACT_THRESH	(30)
160
161/*
162 * These parameters and macros determine the size of the time slice that is
163 * granted to each thread.
164 *
165 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
166 * SLICE_MAX:	Maximum time slice granted.
167 * SLICE_RANGE:	Range of available time slices scaled by hz.
168 * SLICE_SCALE:	The number slices granted per val in the range of [0, max].
169 * SLICE_NICE:  Determine the amount of slice granted to a scaled nice.
170 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
171 */
172#define	SCHED_SLICE_MIN			(slice_min)
173#define	SCHED_SLICE_MAX			(slice_max)
174#define	SCHED_SLICE_INTERACTIVE		(slice_max)
175#define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
176#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
177#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
178#define	SCHED_SLICE_NICE(nice)						\
179    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
180
181/*
182 * This macro determines whether or not the kse belongs on the current or
183 * next run queue.
184 */
185#define	SCHED_INTERACTIVE(kg)						\
186    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
187#define	SCHED_CURR(kg, ke)						\
188    (ke->ke_thread->td_priority < kg->kg_user_pri ||			\
189    SCHED_INTERACTIVE(kg))
190
191/*
192 * Cpu percentage computation macros and defines.
193 *
194 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
195 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
196 */
197
198#define	SCHED_CPU_TIME	10
199#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
200
201/*
202 * kseq - per processor runqs and statistics.
203 */
204struct kseq {
205	struct runq	ksq_idle;		/* Queue of IDLE threads. */
206	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
207	struct runq	*ksq_next;		/* Next timeshare queue. */
208	struct runq	*ksq_curr;		/* Current queue. */
209	int		ksq_load_timeshare;	/* Load for timeshare. */
210	int		ksq_load;		/* Aggregate load. */
211	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
212	short		ksq_nicemin;		/* Least nice. */
213#ifdef SMP
214	int			ksq_transferable;
215	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
216	struct kseq_group	*ksq_group;	/* Our processor group. */
217	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
218#else
219	int		ksq_sysload;		/* For loadavg, !ITHD load. */
220#endif
221};
222
223#ifdef SMP
224/*
225 * kseq groups are groups of processors which can cheaply share threads.  When
226 * one processor in the group goes idle it will check the runqs of the other
227 * processors in its group prior to halting and waiting for an interrupt.
228 * These groups are suitable for SMT (Symetric Multi-Threading) and not NUMA.
229 * In a numa environment we'd want an idle bitmap per group and a two tiered
230 * load balancer.
231 */
232struct kseq_group {
233	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
234	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
235	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
236	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
237	int	ksg_load;		/* Total load of this group. */
238	int	ksg_transferable;	/* Transferable load of this group. */
239	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
240};
241#endif
242
243/*
244 * One kse queue per processor.
245 */
246#ifdef SMP
247static cpumask_t kseq_idle;
248static int ksg_maxid;
249static struct kseq	kseq_cpu[MAXCPU];
250static struct kseq_group kseq_groups[MAXCPU];
251static int bal_tick;
252static int gbal_tick;
253
254#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
255#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
256#define	KSEQ_ID(x)	((x) - kseq_cpu)
257#define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
258#else	/* !SMP */
259static struct kseq	kseq_cpu;
260
261#define	KSEQ_SELF()	(&kseq_cpu)
262#define	KSEQ_CPU(x)	(&kseq_cpu)
263#endif
264
265static void sched_slice(struct kse *ke);
266static void sched_priority(struct ksegrp *kg);
267static int sched_interact_score(struct ksegrp *kg);
268static void sched_interact_update(struct ksegrp *kg);
269static void sched_interact_fork(struct ksegrp *kg);
270static void sched_pctcpu_update(struct kse *ke);
271
272/* Operations on per processor queues */
273static struct kse * kseq_choose(struct kseq *kseq);
274static void kseq_setup(struct kseq *kseq);
275static void kseq_load_add(struct kseq *kseq, struct kse *ke);
276static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
277static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
278static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
279static void kseq_nice_add(struct kseq *kseq, int nice);
280static void kseq_nice_rem(struct kseq *kseq, int nice);
281void kseq_print(int cpu);
282#ifdef SMP
283static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class);
284static struct kse *runq_steal(struct runq *rq);
285static void sched_balance(void);
286static void sched_balance_groups(void);
287static void sched_balance_group(struct kseq_group *ksg);
288static void sched_balance_pair(struct kseq *high, struct kseq *low);
289static void kseq_move(struct kseq *from, int cpu);
290static int kseq_idled(struct kseq *kseq);
291static void kseq_notify(struct kse *ke, int cpu);
292static void kseq_assign(struct kseq *);
293static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
294/*
295 * On P4 Xeons the round-robin interrupt delivery is broken.  As a result of
296 * this, we can't pin interrupts to the cpu that they were delivered to,
297 * otherwise all ithreads only run on CPU 0.
298 */
299#ifdef __i386__
300#define	KSE_CAN_MIGRATE(ke, class)					\
301    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
302#else /* !__i386__ */
303#define	KSE_CAN_MIGRATE(ke, class)					\
304    ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 &&		\
305    ((ke)->ke_flags & KEF_BOUND) == 0)
306#endif /* !__i386__ */
307#endif
308
309void
310kseq_print(int cpu)
311{
312	struct kseq *kseq;
313	int i;
314
315	kseq = KSEQ_CPU(cpu);
316
317	printf("kseq:\n");
318	printf("\tload:           %d\n", kseq->ksq_load);
319	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
320#ifdef SMP
321	printf("\tload transferable: %d\n", kseq->ksq_transferable);
322#endif
323	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
324	printf("\tnice counts:\n");
325	for (i = 0; i < SCHED_PRI_NRESV; i++)
326		if (kseq->ksq_nice[i])
327			printf("\t\t%d = %d\n",
328			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
329}
330
331static __inline void
332kseq_runq_add(struct kseq *kseq, struct kse *ke)
333{
334#ifdef SMP
335	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
336		kseq->ksq_transferable++;
337		kseq->ksq_group->ksg_transferable++;
338	}
339#endif
340	runq_add(ke->ke_runq, ke);
341}
342
343static __inline void
344kseq_runq_rem(struct kseq *kseq, struct kse *ke)
345{
346#ifdef SMP
347	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
348		kseq->ksq_transferable--;
349		kseq->ksq_group->ksg_transferable--;
350	}
351#endif
352	runq_remove(ke->ke_runq, ke);
353}
354
355static void
356kseq_load_add(struct kseq *kseq, struct kse *ke)
357{
358	int class;
359	mtx_assert(&sched_lock, MA_OWNED);
360	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
361	if (class == PRI_TIMESHARE)
362		kseq->ksq_load_timeshare++;
363	kseq->ksq_load++;
364	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
365#ifdef SMP
366		kseq->ksq_group->ksg_load++;
367#else
368		kseq->ksq_sysload++;
369#endif
370	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
371		CTR6(KTR_ULE,
372		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
373		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
374		    ke->ke_proc->p_nice, kseq->ksq_nicemin);
375	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
376		kseq_nice_add(kseq, ke->ke_proc->p_nice);
377}
378
379static void
380kseq_load_rem(struct kseq *kseq, struct kse *ke)
381{
382	int class;
383	mtx_assert(&sched_lock, MA_OWNED);
384	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
385	if (class == PRI_TIMESHARE)
386		kseq->ksq_load_timeshare--;
387	if (class != PRI_ITHD  && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
388#ifdef SMP
389		kseq->ksq_group->ksg_load--;
390#else
391		kseq->ksq_sysload--;
392#endif
393	kseq->ksq_load--;
394	ke->ke_runq = NULL;
395	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
396		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
397}
398
399static void
400kseq_nice_add(struct kseq *kseq, int nice)
401{
402	mtx_assert(&sched_lock, MA_OWNED);
403	/* Normalize to zero. */
404	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
405	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
406		kseq->ksq_nicemin = nice;
407}
408
409static void
410kseq_nice_rem(struct kseq *kseq, int nice)
411{
412	int n;
413
414	mtx_assert(&sched_lock, MA_OWNED);
415	/* Normalize to zero. */
416	n = nice + SCHED_PRI_NHALF;
417	kseq->ksq_nice[n]--;
418	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
419
420	/*
421	 * If this wasn't the smallest nice value or there are more in
422	 * this bucket we can just return.  Otherwise we have to recalculate
423	 * the smallest nice.
424	 */
425	if (nice != kseq->ksq_nicemin ||
426	    kseq->ksq_nice[n] != 0 ||
427	    kseq->ksq_load_timeshare == 0)
428		return;
429
430	for (; n < SCHED_PRI_NRESV; n++)
431		if (kseq->ksq_nice[n]) {
432			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
433			return;
434		}
435}
436
437#ifdef SMP
438/*
439 * sched_balance is a simple CPU load balancing algorithm.  It operates by
440 * finding the least loaded and most loaded cpu and equalizing their load
441 * by migrating some processes.
442 *
443 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
444 * installations will only have 2 cpus.  Secondly, load balancing too much at
445 * once can have an unpleasant effect on the system.  The scheduler rarely has
446 * enough information to make perfect decisions.  So this algorithm chooses
447 * algorithm simplicity and more gradual effects on load in larger systems.
448 *
449 * It could be improved by considering the priorities and slices assigned to
450 * each task prior to balancing them.  There are many pathological cases with
451 * any approach and so the semi random algorithm below may work as well as any.
452 *
453 */
454static void
455sched_balance(void)
456{
457	struct kseq_group *high;
458	struct kseq_group *low;
459	struct kseq_group *ksg;
460	int cnt;
461	int i;
462
463	if (smp_started == 0)
464		goto out;
465	low = high = NULL;
466	i = random() % (ksg_maxid + 1);
467	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
468		ksg = KSEQ_GROUP(i);
469		/*
470		 * Find the CPU with the highest load that has some
471		 * threads to transfer.
472		 */
473		if ((high == NULL || ksg->ksg_load > high->ksg_load)
474		    && ksg->ksg_transferable)
475			high = ksg;
476		if (low == NULL || ksg->ksg_load < low->ksg_load)
477			low = ksg;
478		if (++i > ksg_maxid)
479			i = 0;
480	}
481	if (low != NULL && high != NULL && high != low)
482		sched_balance_pair(LIST_FIRST(&high->ksg_members),
483		    LIST_FIRST(&low->ksg_members));
484out:
485	bal_tick = ticks + (random() % (hz * 2));
486}
487
488static void
489sched_balance_groups(void)
490{
491	int i;
492
493	mtx_assert(&sched_lock, MA_OWNED);
494	if (smp_started)
495		for (i = 0; i <= ksg_maxid; i++)
496			sched_balance_group(KSEQ_GROUP(i));
497	gbal_tick = ticks + (random() % (hz * 2));
498}
499
500static void
501sched_balance_group(struct kseq_group *ksg)
502{
503	struct kseq *kseq;
504	struct kseq *high;
505	struct kseq *low;
506	int load;
507
508	if (ksg->ksg_transferable == 0)
509		return;
510	low = NULL;
511	high = NULL;
512	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
513		load = kseq->ksq_load;
514		if (high == NULL || load > high->ksq_load)
515			high = kseq;
516		if (low == NULL || load < low->ksq_load)
517			low = kseq;
518	}
519	if (high != NULL && low != NULL && high != low)
520		sched_balance_pair(high, low);
521}
522
523static void
524sched_balance_pair(struct kseq *high, struct kseq *low)
525{
526	int transferable;
527	int high_load;
528	int low_load;
529	int move;
530	int diff;
531	int i;
532
533	/*
534	 * If we're transfering within a group we have to use this specific
535	 * kseq's transferable count, otherwise we can steal from other members
536	 * of the group.
537	 */
538	if (high->ksq_group == low->ksq_group) {
539		transferable = high->ksq_transferable;
540		high_load = high->ksq_load;
541		low_load = low->ksq_load;
542	} else {
543		transferable = high->ksq_group->ksg_transferable;
544		high_load = high->ksq_group->ksg_load;
545		low_load = low->ksq_group->ksg_load;
546	}
547	if (transferable == 0)
548		return;
549	/*
550	 * Determine what the imbalance is and then adjust that to how many
551	 * kses we actually have to give up (transferable).
552	 */
553	diff = high_load - low_load;
554	move = diff / 2;
555	if (diff & 0x1)
556		move++;
557	move = min(move, transferable);
558	for (i = 0; i < move; i++)
559		kseq_move(high, KSEQ_ID(low));
560	return;
561}
562
563static void
564kseq_move(struct kseq *from, int cpu)
565{
566	struct kseq *kseq;
567	struct kseq *to;
568	struct kse *ke;
569
570	kseq = from;
571	to = KSEQ_CPU(cpu);
572	ke = kseq_steal(kseq, 1);
573	if (ke == NULL) {
574		struct kseq_group *ksg;
575
576		ksg = kseq->ksq_group;
577		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
578			if (kseq == from || kseq->ksq_transferable == 0)
579				continue;
580			ke = kseq_steal(kseq, 1);
581			break;
582		}
583		if (ke == NULL)
584			panic("kseq_move: No KSEs available with a "
585			    "transferable count of %d\n",
586			    ksg->ksg_transferable);
587	}
588	if (kseq == to)
589		return;
590	ke->ke_state = KES_THREAD;
591	kseq_runq_rem(kseq, ke);
592	kseq_load_rem(kseq, ke);
593	kseq_notify(ke, cpu);
594}
595
596static int
597kseq_idled(struct kseq *kseq)
598{
599	struct kseq_group *ksg;
600	struct kseq *steal;
601	struct kse *ke;
602
603	ksg = kseq->ksq_group;
604	/*
605	 * If we're in a cpu group, try and steal kses from another cpu in
606	 * the group before idling.
607	 */
608	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
609		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
610			if (steal == kseq || steal->ksq_transferable == 0)
611				continue;
612			ke = kseq_steal(steal, 0);
613			if (ke == NULL)
614				continue;
615			ke->ke_state = KES_THREAD;
616			kseq_runq_rem(steal, ke);
617			kseq_load_rem(steal, ke);
618			ke->ke_cpu = PCPU_GET(cpuid);
619			sched_add(ke->ke_thread);
620			return (0);
621		}
622	}
623	/*
624	 * We only set the idled bit when all of the cpus in the group are
625	 * idle.  Otherwise we could get into a situation where a KSE bounces
626	 * back and forth between two idle cores on seperate physical CPUs.
627	 */
628	ksg->ksg_idlemask |= PCPU_GET(cpumask);
629	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
630		return (1);
631	atomic_set_int(&kseq_idle, ksg->ksg_mask);
632	return (1);
633}
634
635static void
636kseq_assign(struct kseq *kseq)
637{
638	struct kse *nke;
639	struct kse *ke;
640
641	do {
642		(volatile struct kse *)ke = kseq->ksq_assigned;
643	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
644	for (; ke != NULL; ke = nke) {
645		nke = ke->ke_assign;
646		ke->ke_flags &= ~KEF_ASSIGNED;
647		sched_add(ke->ke_thread);
648	}
649}
650
651static void
652kseq_notify(struct kse *ke, int cpu)
653{
654	struct kseq *kseq;
655	struct thread *td;
656	struct pcpu *pcpu;
657
658	ke->ke_cpu = cpu;
659	ke->ke_flags |= KEF_ASSIGNED;
660
661	kseq = KSEQ_CPU(cpu);
662
663	/*
664	 * Place a KSE on another cpu's queue and force a resched.
665	 */
666	do {
667		(volatile struct kse *)ke->ke_assign = kseq->ksq_assigned;
668	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
669	pcpu = pcpu_find(cpu);
670	td = pcpu->pc_curthread;
671	if (ke->ke_thread->td_priority < td->td_priority ||
672	    td == pcpu->pc_idlethread) {
673		td->td_flags |= TDF_NEEDRESCHED;
674		ipi_selected(1 << cpu, IPI_AST);
675	}
676}
677
678static struct kse *
679runq_steal(struct runq *rq)
680{
681	struct rqhead *rqh;
682	struct rqbits *rqb;
683	struct kse *ke;
684	int word;
685	int bit;
686
687	mtx_assert(&sched_lock, MA_OWNED);
688	rqb = &rq->rq_status;
689	for (word = 0; word < RQB_LEN; word++) {
690		if (rqb->rqb_bits[word] == 0)
691			continue;
692		for (bit = 0; bit < RQB_BPW; bit++) {
693			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
694				continue;
695			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
696			TAILQ_FOREACH(ke, rqh, ke_procq) {
697				if (KSE_CAN_MIGRATE(ke,
698				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
699					return (ke);
700			}
701		}
702	}
703	return (NULL);
704}
705
706static struct kse *
707kseq_steal(struct kseq *kseq, int stealidle)
708{
709	struct kse *ke;
710
711	/*
712	 * Steal from next first to try to get a non-interactive task that
713	 * may not have run for a while.
714	 */
715	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
716		return (ke);
717	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
718		return (ke);
719	if (stealidle)
720		return (runq_steal(&kseq->ksq_idle));
721	return (NULL);
722}
723
724int
725kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
726{
727	struct kseq_group *ksg;
728	int cpu;
729
730	if (smp_started == 0)
731		return (0);
732	cpu = 0;
733	ksg = kseq->ksq_group;
734
735	/*
736	 * If there are any idle groups, give them our extra load.  The
737	 * threshold at which we start to reassign kses has a large impact
738	 * on the overall performance of the system.  Tuned too high and
739	 * some CPUs may idle.  Too low and there will be excess migration
740	 * and context switches.
741	 */
742	if (ksg->ksg_load > (ksg->ksg_cpus * 2) && kseq_idle) {
743		/*
744		 * Multiple cpus could find this bit simultaneously
745		 * but the race shouldn't be terrible.
746		 */
747		cpu = ffs(kseq_idle);
748		if (cpu)
749			atomic_clear_int(&kseq_idle, 1 << (cpu - 1));
750	}
751	/*
752	 * If another cpu in this group has idled, assign a thread over
753	 * to them after checking to see if there are idled groups.
754	 */
755	if (cpu == 0 && kseq->ksq_load > 1 && ksg->ksg_idlemask) {
756		cpu = ffs(ksg->ksg_idlemask);
757		if (cpu)
758			ksg->ksg_idlemask &= ~(1 << (cpu - 1));
759	}
760	/*
761	 * Now that we've found an idle CPU, migrate the thread.
762	 */
763	if (cpu) {
764		cpu--;
765		ke->ke_runq = NULL;
766		kseq_notify(ke, cpu);
767		return (1);
768	}
769	return (0);
770}
771
772#endif	/* SMP */
773
774/*
775 * Pick the highest priority task we have and return it.
776 */
777
778static struct kse *
779kseq_choose(struct kseq *kseq)
780{
781	struct kse *ke;
782	struct runq *swap;
783
784	mtx_assert(&sched_lock, MA_OWNED);
785	swap = NULL;
786
787	for (;;) {
788		ke = runq_choose(kseq->ksq_curr);
789		if (ke == NULL) {
790			/*
791			 * We already swaped once and didn't get anywhere.
792			 */
793			if (swap)
794				break;
795			swap = kseq->ksq_curr;
796			kseq->ksq_curr = kseq->ksq_next;
797			kseq->ksq_next = swap;
798			continue;
799		}
800		/*
801		 * If we encounter a slice of 0 the kse is in a
802		 * TIMESHARE kse group and its nice was too far out
803		 * of the range that receives slices.
804		 */
805		if (ke->ke_slice == 0) {
806			runq_remove(ke->ke_runq, ke);
807			sched_slice(ke);
808			ke->ke_runq = kseq->ksq_next;
809			runq_add(ke->ke_runq, ke);
810			continue;
811		}
812		return (ke);
813	}
814
815	return (runq_choose(&kseq->ksq_idle));
816}
817
818static void
819kseq_setup(struct kseq *kseq)
820{
821	runq_init(&kseq->ksq_timeshare[0]);
822	runq_init(&kseq->ksq_timeshare[1]);
823	runq_init(&kseq->ksq_idle);
824	kseq->ksq_curr = &kseq->ksq_timeshare[0];
825	kseq->ksq_next = &kseq->ksq_timeshare[1];
826	kseq->ksq_load = 0;
827	kseq->ksq_load_timeshare = 0;
828}
829
830static void
831sched_setup(void *dummy)
832{
833#ifdef SMP
834	int balance_groups;
835	int i;
836#endif
837
838	slice_min = (hz/100);	/* 10ms */
839	slice_max = (hz/7);	/* ~140ms */
840
841#ifdef SMP
842	balance_groups = 0;
843	/*
844	 * Initialize the kseqs.
845	 */
846	for (i = 0; i < MAXCPU; i++) {
847		struct kseq *ksq;
848
849		ksq = &kseq_cpu[i];
850		ksq->ksq_assigned = NULL;
851		kseq_setup(&kseq_cpu[i]);
852	}
853	if (smp_topology == NULL) {
854		struct kseq_group *ksg;
855		struct kseq *ksq;
856
857		for (i = 0; i < MAXCPU; i++) {
858			ksq = &kseq_cpu[i];
859			ksg = &kseq_groups[i];
860			/*
861			 * Setup a kseq group with one member.
862			 */
863			ksq->ksq_transferable = 0;
864			ksq->ksq_group = ksg;
865			ksg->ksg_cpus = 1;
866			ksg->ksg_idlemask = 0;
867			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
868			ksg->ksg_load = 0;
869			ksg->ksg_transferable = 0;
870			LIST_INIT(&ksg->ksg_members);
871			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
872		}
873	} else {
874		struct kseq_group *ksg;
875		struct cpu_group *cg;
876		int j;
877
878		for (i = 0; i < smp_topology->ct_count; i++) {
879			cg = &smp_topology->ct_group[i];
880			ksg = &kseq_groups[i];
881			/*
882			 * Initialize the group.
883			 */
884			ksg->ksg_idlemask = 0;
885			ksg->ksg_load = 0;
886			ksg->ksg_transferable = 0;
887			ksg->ksg_cpus = cg->cg_count;
888			ksg->ksg_cpumask = cg->cg_mask;
889			LIST_INIT(&ksg->ksg_members);
890			/*
891			 * Find all of the group members and add them.
892			 */
893			for (j = 0; j < MAXCPU; j++) {
894				if ((cg->cg_mask & (1 << j)) != 0) {
895					if (ksg->ksg_mask == 0)
896						ksg->ksg_mask = 1 << j;
897					kseq_cpu[j].ksq_transferable = 0;
898					kseq_cpu[j].ksq_group = ksg;
899					LIST_INSERT_HEAD(&ksg->ksg_members,
900					    &kseq_cpu[j], ksq_siblings);
901				}
902			}
903			if (ksg->ksg_cpus > 1)
904				balance_groups = 1;
905		}
906		ksg_maxid = smp_topology->ct_count - 1;
907	}
908	/*
909	 * Stagger the group and global load balancer so they do not
910	 * interfere with each other.
911	 */
912	bal_tick = ticks + hz;
913	if (balance_groups)
914		gbal_tick = ticks + (hz / 2);
915#else
916	kseq_setup(KSEQ_SELF());
917#endif
918	mtx_lock_spin(&sched_lock);
919	kseq_load_add(KSEQ_SELF(), &kse0);
920	mtx_unlock_spin(&sched_lock);
921}
922
923/*
924 * Scale the scheduling priority according to the "interactivity" of this
925 * process.
926 */
927static void
928sched_priority(struct ksegrp *kg)
929{
930	int pri;
931
932	if (kg->kg_pri_class != PRI_TIMESHARE)
933		return;
934
935	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
936	pri += SCHED_PRI_BASE;
937	pri += kg->kg_proc->p_nice;
938
939	if (pri > PRI_MAX_TIMESHARE)
940		pri = PRI_MAX_TIMESHARE;
941	else if (pri < PRI_MIN_TIMESHARE)
942		pri = PRI_MIN_TIMESHARE;
943
944	kg->kg_user_pri = pri;
945
946	return;
947}
948
949/*
950 * Calculate a time slice based on the properties of the kseg and the runq
951 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
952 */
953static void
954sched_slice(struct kse *ke)
955{
956	struct kseq *kseq;
957	struct ksegrp *kg;
958
959	kg = ke->ke_ksegrp;
960	kseq = KSEQ_CPU(ke->ke_cpu);
961
962	/*
963	 * Rationale:
964	 * KSEs in interactive ksegs get the minimum slice so that we
965	 * quickly notice if it abuses its advantage.
966	 *
967	 * KSEs in non-interactive ksegs are assigned a slice that is
968	 * based on the ksegs nice value relative to the least nice kseg
969	 * on the run queue for this cpu.
970	 *
971	 * If the KSE is less nice than all others it gets the maximum
972	 * slice and other KSEs will adjust their slice relative to
973	 * this when they first expire.
974	 *
975	 * There is 20 point window that starts relative to the least
976	 * nice kse on the run queue.  Slice size is determined by
977	 * the kse distance from the last nice ksegrp.
978	 *
979	 * If the kse is outside of the window it will get no slice
980	 * and will be reevaluated each time it is selected on the
981	 * run queue.  The exception to this is nice 0 ksegs when
982	 * a nice -20 is running.  They are always granted a minimum
983	 * slice.
984	 */
985	if (!SCHED_INTERACTIVE(kg)) {
986		int nice;
987
988		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
989		if (kseq->ksq_load_timeshare == 0 ||
990		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
991			ke->ke_slice = SCHED_SLICE_MAX;
992		else if (nice <= SCHED_SLICE_NTHRESH)
993			ke->ke_slice = SCHED_SLICE_NICE(nice);
994		else if (kg->kg_proc->p_nice == 0)
995			ke->ke_slice = SCHED_SLICE_MIN;
996		else
997			ke->ke_slice = 0;
998	} else
999		ke->ke_slice = SCHED_SLICE_INTERACTIVE;
1000
1001	CTR6(KTR_ULE,
1002	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
1003	    ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin,
1004	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));
1005
1006	return;
1007}
1008
1009/*
1010 * This routine enforces a maximum limit on the amount of scheduling history
1011 * kept.  It is called after either the slptime or runtime is adjusted.
1012 * This routine will not operate correctly when slp or run times have been
1013 * adjusted to more than double their maximum.
1014 */
1015static void
1016sched_interact_update(struct ksegrp *kg)
1017{
1018	int sum;
1019
1020	sum = kg->kg_runtime + kg->kg_slptime;
1021	if (sum < SCHED_SLP_RUN_MAX)
1022		return;
1023	/*
1024	 * If we have exceeded by more than 1/5th then the algorithm below
1025	 * will not bring us back into range.  Dividing by two here forces
1026	 * us into the range of [3/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX]
1027	 */
1028	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1029		kg->kg_runtime /= 2;
1030		kg->kg_slptime /= 2;
1031		return;
1032	}
1033	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
1034	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
1035}
1036
1037static void
1038sched_interact_fork(struct ksegrp *kg)
1039{
1040	int ratio;
1041	int sum;
1042
1043	sum = kg->kg_runtime + kg->kg_slptime;
1044	if (sum > SCHED_SLP_RUN_FORK) {
1045		ratio = sum / SCHED_SLP_RUN_FORK;
1046		kg->kg_runtime /= ratio;
1047		kg->kg_slptime /= ratio;
1048	}
1049}
1050
1051static int
1052sched_interact_score(struct ksegrp *kg)
1053{
1054	int div;
1055
1056	if (kg->kg_runtime > kg->kg_slptime) {
1057		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
1058		return (SCHED_INTERACT_HALF +
1059		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
1060	} if (kg->kg_slptime > kg->kg_runtime) {
1061		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
1062		return (kg->kg_runtime / div);
1063	}
1064
1065	/*
1066	 * This can happen if slptime and runtime are 0.
1067	 */
1068	return (0);
1069
1070}
1071
1072/*
1073 * This is only somewhat accurate since given many processes of the same
1074 * priority they will switch when their slices run out, which will be
1075 * at most SCHED_SLICE_MAX.
1076 */
1077int
1078sched_rr_interval(void)
1079{
1080	return (SCHED_SLICE_MAX);
1081}
1082
1083static void
1084sched_pctcpu_update(struct kse *ke)
1085{
1086	/*
1087	 * Adjust counters and watermark for pctcpu calc.
1088	 */
1089	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
1090		/*
1091		 * Shift the tick count out so that the divide doesn't
1092		 * round away our results.
1093		 */
1094		ke->ke_ticks <<= 10;
1095		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
1096			    SCHED_CPU_TICKS;
1097		ke->ke_ticks >>= 10;
1098	} else
1099		ke->ke_ticks = 0;
1100	ke->ke_ltick = ticks;
1101	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
1102}
1103
1104void
1105sched_prio(struct thread *td, u_char prio)
1106{
1107	struct kse *ke;
1108
1109	ke = td->td_kse;
1110	mtx_assert(&sched_lock, MA_OWNED);
1111	if (TD_ON_RUNQ(td)) {
1112		/*
1113		 * If the priority has been elevated due to priority
1114		 * propagation, we may have to move ourselves to a new
1115		 * queue.  We still call adjustrunqueue below in case kse
1116		 * needs to fix things up.
1117		 */
1118		if (prio < td->td_priority && ke &&
1119		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
1120		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
1121			runq_remove(ke->ke_runq, ke);
1122			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
1123			runq_add(ke->ke_runq, ke);
1124		}
1125		adjustrunqueue(td, prio);
1126	} else
1127		td->td_priority = prio;
1128}
1129
1130void
1131sched_switch(struct thread *td)
1132{
1133	struct thread *newtd;
1134	struct kse *ke;
1135
1136	mtx_assert(&sched_lock, MA_OWNED);
1137
1138	ke = td->td_kse;
1139
1140	td->td_last_kse = ke;
1141        td->td_lastcpu = td->td_oncpu;
1142	td->td_oncpu = NOCPU;
1143        td->td_flags &= ~TDF_NEEDRESCHED;
1144
1145	/*
1146	 * If the KSE has been assigned it may be in the process of switching
1147	 * to the new cpu.  This is the case in sched_bind().
1148	 */
1149	if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
1150		if (TD_IS_RUNNING(td)) {
1151			kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1152			setrunqueue(td);
1153		} else {
1154			if (ke->ke_runq) {
1155				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1156			} else if ((td->td_flags & TDF_IDLETD) == 0)
1157				backtrace();
1158			/*
1159			 * We will not be on the run queue. So we must be
1160			 * sleeping or similar.
1161			 */
1162			if (td->td_proc->p_flag & P_SA)
1163				kse_reassign(ke);
1164		}
1165	}
1166	newtd = choosethread();
1167	if (td != newtd)
1168		cpu_switch(td, newtd);
1169	sched_lock.mtx_lock = (uintptr_t)td;
1170
1171	td->td_oncpu = PCPU_GET(cpuid);
1172}
1173
1174void
1175sched_nice(struct proc *p, int nice)
1176{
1177	struct ksegrp *kg;
1178	struct kse *ke;
1179	struct thread *td;
1180	struct kseq *kseq;
1181
1182	PROC_LOCK_ASSERT(p, MA_OWNED);
1183	mtx_assert(&sched_lock, MA_OWNED);
1184	/*
1185	 * We need to adjust the nice counts for running KSEs.
1186	 */
1187	FOREACH_KSEGRP_IN_PROC(p, kg) {
1188		if (kg->kg_pri_class == PRI_TIMESHARE) {
1189			FOREACH_KSE_IN_GROUP(kg, ke) {
1190				if (ke->ke_runq == NULL)
1191					continue;
1192				kseq = KSEQ_CPU(ke->ke_cpu);
1193				kseq_nice_rem(kseq, p->p_nice);
1194				kseq_nice_add(kseq, nice);
1195			}
1196		}
1197	}
1198	p->p_nice = nice;
1199	FOREACH_KSEGRP_IN_PROC(p, kg) {
1200		sched_priority(kg);
1201		FOREACH_THREAD_IN_GROUP(kg, td)
1202			td->td_flags |= TDF_NEEDRESCHED;
1203	}
1204}
1205
1206void
1207sched_sleep(struct thread *td)
1208{
1209	mtx_assert(&sched_lock, MA_OWNED);
1210
1211	td->td_slptime = ticks;
1212	td->td_base_pri = td->td_priority;
1213
1214	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
1215	    td->td_kse, td->td_slptime);
1216}
1217
1218void
1219sched_wakeup(struct thread *td)
1220{
1221	mtx_assert(&sched_lock, MA_OWNED);
1222
1223	/*
1224	 * Let the kseg know how long we slept for.  This is because process
1225	 * interactivity behavior is modeled in the kseg.
1226	 */
1227	if (td->td_slptime) {
1228		struct ksegrp *kg;
1229		int hzticks;
1230
1231		kg = td->td_ksegrp;
1232		hzticks = (ticks - td->td_slptime) << 10;
1233		if (hzticks >= SCHED_SLP_RUN_MAX) {
1234			kg->kg_slptime = SCHED_SLP_RUN_MAX;
1235			kg->kg_runtime = 1;
1236		} else {
1237			kg->kg_slptime += hzticks;
1238			sched_interact_update(kg);
1239		}
1240		sched_priority(kg);
1241		if (td->td_kse)
1242			sched_slice(td->td_kse);
1243		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
1244		    td->td_kse, hzticks);
1245		td->td_slptime = 0;
1246	}
1247	setrunqueue(td);
1248}
1249
1250/*
1251 * Penalize the parent for creating a new child and initialize the child's
1252 * priority.
1253 */
1254void
1255sched_fork(struct proc *p, struct proc *p1)
1256{
1257
1258	mtx_assert(&sched_lock, MA_OWNED);
1259
1260	p1->p_nice = p->p_nice;
1261	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
1262	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
1263	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
1264}
1265
1266void
1267sched_fork_kse(struct kse *ke, struct kse *child)
1268{
1269
1270	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
1271	child->ke_cpu = ke->ke_cpu;
1272	child->ke_runq = NULL;
1273
1274	/* Grab our parents cpu estimation information. */
1275	child->ke_ticks = ke->ke_ticks;
1276	child->ke_ltick = ke->ke_ltick;
1277	child->ke_ftick = ke->ke_ftick;
1278}
1279
1280void
1281sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
1282{
1283	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
1284
1285	child->kg_slptime = kg->kg_slptime;
1286	child->kg_runtime = kg->kg_runtime;
1287	child->kg_user_pri = kg->kg_user_pri;
1288	sched_interact_fork(child);
1289	kg->kg_runtime += tickincr << 10;
1290	sched_interact_update(kg);
1291
1292	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
1293	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
1294	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
1295}
1296
1297void
1298sched_fork_thread(struct thread *td, struct thread *child)
1299{
1300}
1301
1302void
1303sched_class(struct ksegrp *kg, int class)
1304{
1305	struct kseq *kseq;
1306	struct kse *ke;
1307	int nclass;
1308	int oclass;
1309
1310	mtx_assert(&sched_lock, MA_OWNED);
1311	if (kg->kg_pri_class == class)
1312		return;
1313
1314	nclass = PRI_BASE(class);
1315	oclass = PRI_BASE(kg->kg_pri_class);
1316	FOREACH_KSE_IN_GROUP(kg, ke) {
1317		if (ke->ke_state != KES_ONRUNQ &&
1318		    ke->ke_state != KES_THREAD)
1319			continue;
1320		kseq = KSEQ_CPU(ke->ke_cpu);
1321
1322#ifdef SMP
1323		/*
1324		 * On SMP if we're on the RUNQ we must adjust the transferable
1325		 * count because could be changing to or from an interrupt
1326		 * class.
1327		 */
1328		if (ke->ke_state == KES_ONRUNQ) {
1329			if (KSE_CAN_MIGRATE(ke, oclass)) {
1330				kseq->ksq_transferable--;
1331				kseq->ksq_group->ksg_transferable--;
1332			}
1333			if (KSE_CAN_MIGRATE(ke, nclass)) {
1334				kseq->ksq_transferable++;
1335				kseq->ksq_group->ksg_transferable++;
1336			}
1337		}
1338#endif
1339		if (oclass == PRI_TIMESHARE) {
1340			kseq->ksq_load_timeshare--;
1341			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
1342		}
1343		if (nclass == PRI_TIMESHARE) {
1344			kseq->ksq_load_timeshare++;
1345			kseq_nice_add(kseq, kg->kg_proc->p_nice);
1346		}
1347	}
1348
1349	kg->kg_pri_class = class;
1350}
1351
1352/*
1353 * Return some of the child's priority and interactivity to the parent.
1354 */
1355void
1356sched_exit(struct proc *p, struct proc *child)
1357{
1358	mtx_assert(&sched_lock, MA_OWNED);
1359	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
1360	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
1361}
1362
1363void
1364sched_exit_kse(struct kse *ke, struct kse *child)
1365{
1366	kseq_load_rem(KSEQ_CPU(child->ke_cpu), child);
1367}
1368
1369void
1370sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
1371{
1372	/* kg->kg_slptime += child->kg_slptime; */
1373	kg->kg_runtime += child->kg_runtime;
1374	sched_interact_update(kg);
1375}
1376
1377void
1378sched_exit_thread(struct thread *td, struct thread *child)
1379{
1380}
1381
1382void
1383sched_clock(struct thread *td)
1384{
1385	struct kseq *kseq;
1386	struct ksegrp *kg;
1387	struct kse *ke;
1388
1389	mtx_assert(&sched_lock, MA_OWNED);
1390#ifdef SMP
1391	if (ticks == bal_tick)
1392		sched_balance();
1393	if (ticks == gbal_tick)
1394		sched_balance_groups();
1395#endif
1396	/*
1397	 * sched_setup() apparently happens prior to stathz being set.  We
1398	 * need to resolve the timers earlier in the boot so we can avoid
1399	 * calculating this here.
1400	 */
1401	if (realstathz == 0) {
1402		realstathz = stathz ? stathz : hz;
1403		tickincr = hz / realstathz;
1404		/*
1405		 * XXX This does not work for values of stathz that are much
1406		 * larger than hz.
1407		 */
1408		if (tickincr == 0)
1409			tickincr = 1;
1410	}
1411
1412	ke = td->td_kse;
1413	kg = ke->ke_ksegrp;
1414
1415	/* Adjust ticks for pctcpu */
1416	ke->ke_ticks++;
1417	ke->ke_ltick = ticks;
1418
1419	/* Go up to one second beyond our max and then trim back down */
1420	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1421		sched_pctcpu_update(ke);
1422
1423	if (td->td_flags & TDF_IDLETD)
1424		return;
1425
1426	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
1427	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
1428	/*
1429	 * We only do slicing code for TIMESHARE ksegrps.
1430	 */
1431	if (kg->kg_pri_class != PRI_TIMESHARE)
1432		return;
1433	/*
1434	 * We used a tick charge it to the ksegrp so that we can compute our
1435	 * interactivity.
1436	 */
1437	kg->kg_runtime += tickincr << 10;
1438	sched_interact_update(kg);
1439
1440	/*
1441	 * We used up one time slice.
1442	 */
1443	if (--ke->ke_slice > 0)
1444		return;
1445	/*
1446	 * We're out of time, recompute priorities and requeue.
1447	 */
1448	kseq = KSEQ_SELF();
1449	kseq_load_rem(kseq, ke);
1450	sched_priority(kg);
1451	sched_slice(ke);
1452	if (SCHED_CURR(kg, ke))
1453		ke->ke_runq = kseq->ksq_curr;
1454	else
1455		ke->ke_runq = kseq->ksq_next;
1456	kseq_load_add(kseq, ke);
1457	td->td_flags |= TDF_NEEDRESCHED;
1458}
1459
1460int
1461sched_runnable(void)
1462{
1463	struct kseq *kseq;
1464	int load;
1465
1466	load = 1;
1467
1468	kseq = KSEQ_SELF();
1469#ifdef SMP
1470	if (kseq->ksq_assigned) {
1471		mtx_lock_spin(&sched_lock);
1472		kseq_assign(kseq);
1473		mtx_unlock_spin(&sched_lock);
1474	}
1475#endif
1476	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1477		if (kseq->ksq_load > 0)
1478			goto out;
1479	} else
1480		if (kseq->ksq_load - 1 > 0)
1481			goto out;
1482	load = 0;
1483out:
1484	return (load);
1485}
1486
1487void
1488sched_userret(struct thread *td)
1489{
1490	struct ksegrp *kg;
1491
1492	kg = td->td_ksegrp;
1493
1494	if (td->td_priority != kg->kg_user_pri) {
1495		mtx_lock_spin(&sched_lock);
1496		td->td_priority = kg->kg_user_pri;
1497		mtx_unlock_spin(&sched_lock);
1498	}
1499}
1500
1501struct kse *
1502sched_choose(void)
1503{
1504	struct kseq *kseq;
1505	struct kse *ke;
1506
1507	mtx_assert(&sched_lock, MA_OWNED);
1508	kseq = KSEQ_SELF();
1509#ifdef SMP
1510restart:
1511	if (kseq->ksq_assigned)
1512		kseq_assign(kseq);
1513#endif
1514	ke = kseq_choose(kseq);
1515	if (ke) {
1516#ifdef SMP
1517		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1518			if (kseq_idled(kseq) == 0)
1519				goto restart;
1520#endif
1521		kseq_runq_rem(kseq, ke);
1522		ke->ke_state = KES_THREAD;
1523
1524		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1525			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
1526			    ke, ke->ke_runq, ke->ke_slice,
1527			    ke->ke_thread->td_priority);
1528		}
1529		return (ke);
1530	}
1531#ifdef SMP
1532	if (kseq_idled(kseq) == 0)
1533		goto restart;
1534#endif
1535	return (NULL);
1536}
1537
1538void
1539sched_add(struct thread *td)
1540{
1541	struct kseq *kseq;
1542	struct ksegrp *kg;
1543	struct kse *ke;
1544	int class;
1545
1546	mtx_assert(&sched_lock, MA_OWNED);
1547	ke = td->td_kse;
1548	kg = td->td_ksegrp;
1549	if (ke->ke_flags & KEF_ASSIGNED)
1550		return;
1551	kseq = KSEQ_SELF();
1552	KASSERT((ke->ke_thread != NULL),
1553	    ("sched_add: No thread on KSE"));
1554	KASSERT((ke->ke_thread->td_kse != NULL),
1555	    ("sched_add: No KSE on thread"));
1556	KASSERT(ke->ke_state != KES_ONRUNQ,
1557	    ("sched_add: kse %p (%s) already in run queue", ke,
1558	    ke->ke_proc->p_comm));
1559	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1560	    ("sched_add: process swapped out"));
1561	KASSERT(ke->ke_runq == NULL,
1562	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1563
1564	class = PRI_BASE(kg->kg_pri_class);
1565	switch (class) {
1566	case PRI_ITHD:
1567	case PRI_REALTIME:
1568		ke->ke_runq = kseq->ksq_curr;
1569		ke->ke_slice = SCHED_SLICE_MAX;
1570		ke->ke_cpu = PCPU_GET(cpuid);
1571		break;
1572	case PRI_TIMESHARE:
1573		if (SCHED_CURR(kg, ke))
1574			ke->ke_runq = kseq->ksq_curr;
1575		else
1576			ke->ke_runq = kseq->ksq_next;
1577		break;
1578	case PRI_IDLE:
1579		/*
1580		 * This is for priority prop.
1581		 */
1582		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
1583			ke->ke_runq = kseq->ksq_curr;
1584		else
1585			ke->ke_runq = &kseq->ksq_idle;
1586		ke->ke_slice = SCHED_SLICE_MIN;
1587		break;
1588	default:
1589		panic("Unknown pri class.");
1590		break;
1591	}
1592#ifdef SMP
1593	if (ke->ke_cpu != PCPU_GET(cpuid)) {
1594		ke->ke_runq = NULL;
1595		kseq_notify(ke, ke->ke_cpu);
1596		return;
1597	}
1598	/*
1599	 * If we had been idle, clear our bit in the group and potentially
1600	 * the global bitmap.  If not, see if we should transfer this thread.
1601	 */
1602	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
1603	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
1604		/*
1605		 * Check to see if our group is unidling, and if so, remove it
1606		 * from the global idle mask.
1607		 */
1608		if (kseq->ksq_group->ksg_idlemask ==
1609		    kseq->ksq_group->ksg_cpumask)
1610			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
1611		/*
1612		 * Now remove ourselves from the group specific idle mask.
1613		 */
1614		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
1615	} else if (kseq->ksq_load > 1 && KSE_CAN_MIGRATE(ke, class))
1616		if (kseq_transfer(kseq, ke, class))
1617			return;
1618#endif
1619        if (td->td_priority < curthread->td_priority)
1620                curthread->td_flags |= TDF_NEEDRESCHED;
1621
1622	ke->ke_ksegrp->kg_runq_kses++;
1623	ke->ke_state = KES_ONRUNQ;
1624
1625	kseq_runq_add(kseq, ke);
1626	kseq_load_add(kseq, ke);
1627}
1628
1629void
1630sched_rem(struct thread *td)
1631{
1632	struct kseq *kseq;
1633	struct kse *ke;
1634
1635	ke = td->td_kse;
1636	/*
1637	 * It is safe to just return here because sched_rem() is only ever
1638	 * used in places where we're immediately going to add the
1639	 * kse back on again.  In that case it'll be added with the correct
1640	 * thread and priority when the caller drops the sched_lock.
1641	 */
1642	if (ke->ke_flags & KEF_ASSIGNED)
1643		return;
1644	mtx_assert(&sched_lock, MA_OWNED);
1645	KASSERT((ke->ke_state == KES_ONRUNQ),
1646	    ("sched_rem: KSE not on run queue"));
1647
1648	ke->ke_state = KES_THREAD;
1649	ke->ke_ksegrp->kg_runq_kses--;
1650	kseq = KSEQ_CPU(ke->ke_cpu);
1651	kseq_runq_rem(kseq, ke);
1652	kseq_load_rem(kseq, ke);
1653}
1654
1655fixpt_t
1656sched_pctcpu(struct thread *td)
1657{
1658	fixpt_t pctcpu;
1659	struct kse *ke;
1660
1661	pctcpu = 0;
1662	ke = td->td_kse;
1663	if (ke == NULL)
1664		return (0);
1665
1666	mtx_lock_spin(&sched_lock);
1667	if (ke->ke_ticks) {
1668		int rtick;
1669
1670		/*
1671		 * Don't update more frequently than twice a second.  Allowing
1672		 * this causes the cpu usage to decay away too quickly due to
1673		 * rounding errors.
1674		 */
1675		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
1676		    ke->ke_ltick < (ticks - (hz / 2)))
1677			sched_pctcpu_update(ke);
1678		/* How many rtick per second ? */
1679		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1680		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1681	}
1682
1683	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1684	mtx_unlock_spin(&sched_lock);
1685
1686	return (pctcpu);
1687}
1688
1689void
1690sched_bind(struct thread *td, int cpu)
1691{
1692	struct kse *ke;
1693
1694	mtx_assert(&sched_lock, MA_OWNED);
1695	ke = td->td_kse;
1696	ke->ke_flags |= KEF_BOUND;
1697#ifdef SMP
1698	if (PCPU_GET(cpuid) == cpu)
1699		return;
1700	/* sched_rem without the runq_remove */
1701	ke->ke_state = KES_THREAD;
1702	ke->ke_ksegrp->kg_runq_kses--;
1703	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1704	kseq_notify(ke, cpu);
1705	/* When we return from mi_switch we'll be on the correct cpu. */
1706	mi_switch(SW_VOL);
1707#endif
1708}
1709
1710void
1711sched_unbind(struct thread *td)
1712{
1713	mtx_assert(&sched_lock, MA_OWNED);
1714	td->td_kse->ke_flags &= ~KEF_BOUND;
1715}
1716
1717int
1718sched_load(void)
1719{
1720#ifdef SMP
1721	int total;
1722	int i;
1723
1724	total = 0;
1725	for (i = 0; i <= ksg_maxid; i++)
1726		total += KSEQ_GROUP(i)->ksg_load;
1727	return (total);
1728#else
1729	return (KSEQ_SELF()->ksq_sysload);
1730#endif
1731}
1732
1733int
1734sched_sizeof_kse(void)
1735{
1736	return (sizeof(struct kse) + sizeof(struct ke_sched));
1737}
1738
1739int
1740sched_sizeof_ksegrp(void)
1741{
1742	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1743}
1744
1745int
1746sched_sizeof_proc(void)
1747{
1748	return (sizeof(struct proc));
1749}
1750
1751int
1752sched_sizeof_thread(void)
1753{
1754	return (sizeof(struct thread) + sizeof(struct td_sched));
1755}
1756