sched_ule.c revision 159570
1/*-
2 * Copyright (c) 2002-2005, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 159570 2006-06-13 13:12:56Z davidxu $");
29
30#include "opt_hwpmc_hooks.h"
31#include "opt_sched.h"
32
33#define kse td_sched
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/kdb.h>
38#include <sys/kernel.h>
39#include <sys/ktr.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/proc.h>
43#include <sys/resource.h>
44#include <sys/resourcevar.h>
45#include <sys/sched.h>
46#include <sys/smp.h>
47#include <sys/sx.h>
48#include <sys/sysctl.h>
49#include <sys/sysproto.h>
50#include <sys/turnstile.h>
51#include <sys/vmmeter.h>
52#ifdef KTRACE
53#include <sys/uio.h>
54#include <sys/ktrace.h>
55#endif
56
57#ifdef HWPMC_HOOKS
58#include <sys/pmckern.h>
59#endif
60
61#include <machine/cpu.h>
62#include <machine/smp.h>
63
64/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
65/* XXX This is bogus compatibility crap for ps */
66static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
67SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
68
69static void sched_setup(void *dummy);
70SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
71
72static void sched_initticks(void *dummy);
73SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)
74
75static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
76
77SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
78    "Scheduler name");
79
80static int slice_min = 1;
81SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
82
83static int slice_max = 10;
84SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
85
86int realstathz;
87int tickincr = 1 << 10;
88
89/*
90 * The following data structures are allocated within their parent structure
91 * but are scheduler-specific.
92 */
93/*
94 * The schedulable entity that can be given a context to run.  A process may
95 * have several of these.
96 */
97struct kse {
98	TAILQ_ENTRY(kse) ke_procq;	/* (j/z) Run queue. */
99	int		ke_flags;	/* (j) KEF_* flags. */
100	struct thread	*ke_thread;	/* (*) Active associated thread. */
101	fixpt_t		ke_pctcpu;	/* (j) %cpu during p_swtime. */
102	u_char		ke_rqindex;	/* (j) Run queue index. */
103	enum {
104		KES_THREAD = 0x0,	/* slaved to thread state */
105		KES_ONRUNQ
106	} ke_state;			/* (j) thread sched specific status. */
107	int		ke_slptime;
108	int		ke_slice;
109	struct runq	*ke_runq;
110	u_char		ke_cpu;		/* CPU that we have affinity for. */
111	/* The following variables are only used for pctcpu calculation */
112	int		ke_ltick;	/* Last tick that we were running on */
113	int		ke_ftick;	/* First tick that we were running on */
114	int		ke_ticks;	/* Tick count */
115
116};
117#define	td_kse			td_sched
118#define	td_slptime		td_kse->ke_slptime
119#define ke_proc			ke_thread->td_proc
120#define ke_ksegrp		ke_thread->td_ksegrp
121#define	ke_assign		ke_procq.tqe_next
122/* flags kept in ke_flags */
123#define	KEF_ASSIGNED	0x0001		/* Thread is being migrated. */
124#define	KEF_BOUND	0x0002		/* Thread can not migrate. */
125#define	KEF_XFERABLE	0x0004		/* Thread was added as transferable. */
126#define	KEF_HOLD	0x0008		/* Thread is temporarily bound. */
127#define	KEF_REMOVED	0x0010		/* Thread was removed while ASSIGNED */
128#define	KEF_INTERNAL	0x0020		/* Thread added due to migration. */
129#define	KEF_PREEMPTED	0x0040		/* Thread was preempted */
130#define	KEF_DIDRUN	0x02000		/* Thread actually ran. */
131#define	KEF_EXIT	0x04000		/* Thread is being killed. */
132
133struct kg_sched {
134	struct thread	*skg_last_assigned; /* (j) Last thread assigned to */
135					   /* the system scheduler */
136	int	skg_slptime;		/* Number of ticks we vol. slept */
137	int	skg_runtime;		/* Number of ticks we were running */
138	int	skg_avail_opennings;	/* (j) Num unfilled slots in group.*/
139	int	skg_concurrency;	/* (j) Num threads requested in group.*/
140};
141#define kg_last_assigned	kg_sched->skg_last_assigned
142#define kg_avail_opennings	kg_sched->skg_avail_opennings
143#define kg_concurrency		kg_sched->skg_concurrency
144#define kg_runtime		kg_sched->skg_runtime
145#define kg_slptime		kg_sched->skg_slptime
146
147#define SLOT_RELEASE(kg)	(kg)->kg_avail_opennings++
148#define	SLOT_USE(kg)		(kg)->kg_avail_opennings--
149
150static struct kse kse0;
151static struct kg_sched kg_sched0;
152
153/*
154 * The priority is primarily determined by the interactivity score.  Thus, we
155 * give lower (better) priorities to kse groups that use less CPU.  The nice
156 * value is then directly added to this to allow nice to have some effect
157 * on latency.
158 *
159 * PRI_RANGE:	Total priority range for timeshare threads.
160 * PRI_NRESV:	Number of nice values.
161 * PRI_BASE:	The start of the dynamic range.
162 */
163#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
164#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
165#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
166#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
167#define	SCHED_PRI_INTERACT(score)					\
168    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
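/*
 * Worked example (illustrative only, assuming the stock priority.h values
 * PRI_MIN_TIMESHARE = 160 and PRI_MAX_TIMESHARE = 223, so SCHED_PRI_RANGE
 * is 64): an interactivity score of 50 maps to 50 * 64 / 100 = 32, and
 * sched_priority() below then yields 160 + 32 + p_nice, i.e. priority 192
 * for a nice 0 process.
 */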
169
170/*
171 * These determine the interactivity of a process.
172 *
173 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
174 *		before throttling back.
175 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
176 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
177 * INTERACT_THRESH:	Threshold for placement on the current runq.
178 */
179#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
180#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
181#define	SCHED_INTERACT_MAX	(100)
182#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
183#define	SCHED_INTERACT_THRESH	(30)
184
185/*
186 * These parameters and macros determine the size of the time slice that is
187 * granted to each thread.
188 *
189 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
190 * SLICE_MAX:	Maximum time slice granted.
191 * SLICE_RANGE:	Range of available time slices scaled by hz.
192 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
193 * SLICE_NICE:  Determines the amount of slice granted for a scaled nice value.
194 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
195 */
196#define	SCHED_SLICE_MIN			(slice_min)
197#define	SCHED_SLICE_MAX			(slice_max)
198#define	SCHED_SLICE_INTERACTIVE		(slice_max)
199#define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
200#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
201#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
202#define	SCHED_SLICE_NICE(nice)						\
203    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
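/*
 * Illustrative sketch, never compiled and with a made-up helper name:
 * SCHED_SLICE_NICE() spelled out using the compiled-in defaults
 * slice_min = 1 and slice_max = 10 (sched_setup()/sched_initticks()
 * rescale both at boot).  With the stock 41 nice values SCHED_PRI_NHALF
 * is 20, so SCHED_SLICE_NTHRESH is 19 and SCHED_SLICE_RANGE is 10.
 */
#if 0
static int
ule_slice_nice_example(int dist)	/* distance above ksq_nicemin */
{
	return (10 - (dist * 10) / 19);	/* 10 at 0, 5 at 10, 0 at 19 */
}
#endif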
204
205/*
206 * This macro determines whether or not the thread belongs on the current or
207 * next run queue.
208 */
209#define	SCHED_INTERACTIVE(kg)						\
210    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
211#define	SCHED_CURR(kg, ke)						\
212    ((ke->ke_thread->td_flags & TDF_BORROWING) ||			\
213     (ke->ke_flags & KEF_PREEMPTED) || SCHED_INTERACTIVE(kg))
214
215/*
216 * Cpu percentage computation macros and defines.
217 *
218 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
219 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
220 */
221
222#define	SCHED_CPU_TIME	10
223#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
224
225/*
226 * kseq - per processor runqs and statistics.
227 */
228struct kseq {
229	struct runq	ksq_idle;		/* Queue of IDLE threads. */
230	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
231	struct runq	*ksq_next;		/* Next timeshare queue. */
232	struct runq	*ksq_curr;		/* Current queue. */
233	int		ksq_load_timeshare;	/* Load for timeshare. */
234	int		ksq_load;		/* Aggregate load. */
235	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
236	short		ksq_nicemin;		/* Least nice. */
237#ifdef SMP
238	int			ksq_transferable;
239	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
240	struct kseq_group	*ksq_group;	/* Our processor group. */
241	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
242#else
243	int		ksq_sysload;		/* For loadavg, !ITHD load. */
244#endif
245};
246
247#ifdef SMP
248/*
249 * kseq groups are groups of processors which can cheaply share threads.  When
250 * one processor in the group goes idle it will check the runqs of the other
251 * processors in its group prior to halting and waiting for an interrupt.
252 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
253 * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
254 * load balancer.
255 */
256struct kseq_group {
257	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
258	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
259	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
260	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
261	int	ksg_load;		/* Total load of this group. */
262	int	ksg_transferable;	/* Transferable load of this group. */
263	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
264};
265#endif
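/*
 * Illustrative only: on a hypothetical two-package machine with two logical
 * CPUs per package (HTT), smp_topology describes two groups.  CPUs 0 and 1
 * share one kseq_group with ksg_cpumask 0x3 and ksg_mask 0x1; CPUs 2 and 3
 * share the other with ksg_cpumask 0xc and ksg_mask 0x4.  An idle CPU first
 * tries to steal from its sibling's runqs; only when every CPU in the group
 * is idle is the group's ksg_mask bit set in kseq_idle.
 */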
266
267/*
268 * One kse queue per processor.
269 */
270#ifdef SMP
271static cpumask_t kseq_idle;
272static int ksg_maxid;
273static struct kseq	kseq_cpu[MAXCPU];
274static struct kseq_group kseq_groups[MAXCPU];
275static int bal_tick;
276static int gbal_tick;
277static int balance_groups;
278
279#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
280#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
281#define	KSEQ_ID(x)	((x) - kseq_cpu)
282#define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
283#else	/* !SMP */
284static struct kseq	kseq_cpu;
285
286#define	KSEQ_SELF()	(&kseq_cpu)
287#define	KSEQ_CPU(x)	(&kseq_cpu)
288#endif
289
290static void slot_fill(struct ksegrp *);
291static struct kse *sched_choose(void);		/* XXX Should be thread * */
292static void sched_slice(struct kse *);
293static void sched_priority(struct ksegrp *);
294static void sched_thread_priority(struct thread *, u_char);
295static int sched_interact_score(struct ksegrp *);
296static void sched_interact_update(struct ksegrp *);
297static void sched_interact_fork(struct ksegrp *);
298static void sched_pctcpu_update(struct kse *);
299
300/* Operations on per processor queues */
301static struct kse * kseq_choose(struct kseq *);
302static void kseq_setup(struct kseq *);
303static void kseq_load_add(struct kseq *, struct kse *);
304static void kseq_load_rem(struct kseq *, struct kse *);
305static __inline void kseq_runq_add(struct kseq *, struct kse *, int);
306static __inline void kseq_runq_rem(struct kseq *, struct kse *);
307static void kseq_nice_add(struct kseq *, int);
308static void kseq_nice_rem(struct kseq *, int);
309void kseq_print(int cpu);
310#ifdef SMP
311static int kseq_transfer(struct kseq *, struct kse *, int);
312static struct kse *runq_steal(struct runq *);
313static void sched_balance(void);
314static void sched_balance_groups(void);
315static void sched_balance_group(struct kseq_group *);
316static void sched_balance_pair(struct kseq *, struct kseq *);
317static void kseq_move(struct kseq *, int);
318static int kseq_idled(struct kseq *);
319static void kseq_notify(struct kse *, int);
320static void kseq_assign(struct kseq *);
321static struct kse *kseq_steal(struct kseq *, int);
322#define	KSE_CAN_MIGRATE(ke)						\
323    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
324#endif
325
326void
327kseq_print(int cpu)
328{
329	struct kseq *kseq;
330	int i;
331
332	kseq = KSEQ_CPU(cpu);
333
334	printf("kseq:\n");
335	printf("\tload:           %d\n", kseq->ksq_load);
336	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
337#ifdef SMP
338	printf("\tload transferable: %d\n", kseq->ksq_transferable);
339#endif
340	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
341	printf("\tnice counts:\n");
342	for (i = 0; i < SCHED_PRI_NRESV; i++)
343		if (kseq->ksq_nice[i])
344			printf("\t\t%d = %d\n",
345			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
346}
347
348static __inline void
349kseq_runq_add(struct kseq *kseq, struct kse *ke, int flags)
350{
351#ifdef SMP
352	if (KSE_CAN_MIGRATE(ke)) {
353		kseq->ksq_transferable++;
354		kseq->ksq_group->ksg_transferable++;
355		ke->ke_flags |= KEF_XFERABLE;
356	}
357#endif
358	if (ke->ke_flags & KEF_PREEMPTED)
359		flags |= SRQ_PREEMPTED;
360	runq_add(ke->ke_runq, ke, flags);
361}
362
363static __inline void
364kseq_runq_rem(struct kseq *kseq, struct kse *ke)
365{
366#ifdef SMP
367	if (ke->ke_flags & KEF_XFERABLE) {
368		kseq->ksq_transferable--;
369		kseq->ksq_group->ksg_transferable--;
370		ke->ke_flags &= ~KEF_XFERABLE;
371	}
372#endif
373	runq_remove(ke->ke_runq, ke);
374}
375
376static void
377kseq_load_add(struct kseq *kseq, struct kse *ke)
378{
379	int class;
380	mtx_assert(&sched_lock, MA_OWNED);
381	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
382	if (class == PRI_TIMESHARE)
383		kseq->ksq_load_timeshare++;
384	kseq->ksq_load++;
385	CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
386	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
387#ifdef SMP
388		kseq->ksq_group->ksg_load++;
389#else
390		kseq->ksq_sysload++;
391#endif
392	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
393		kseq_nice_add(kseq, ke->ke_proc->p_nice);
394}
395
396static void
397kseq_load_rem(struct kseq *kseq, struct kse *ke)
398{
399	int class;
400	mtx_assert(&sched_lock, MA_OWNED);
401	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
402	if (class == PRI_TIMESHARE)
403		kseq->ksq_load_timeshare--;
404	if (class != PRI_ITHD  && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
405#ifdef SMP
406		kseq->ksq_group->ksg_load--;
407#else
408		kseq->ksq_sysload--;
409#endif
410	kseq->ksq_load--;
411	CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
412	ke->ke_runq = NULL;
413	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
414		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
415}
416
417static void
418kseq_nice_add(struct kseq *kseq, int nice)
419{
420	mtx_assert(&sched_lock, MA_OWNED);
421	/* Normalize to zero. */
422	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
423	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
424		kseq->ksq_nicemin = nice;
425}
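/*
 * Worked example (illustrative only, assuming the stock PRIO_MIN/PRIO_MAX of
 * -20/20): SCHED_PRI_NHALF is 20, so nice values -20..20 normalize to
 * ksq_nice[] indices 0..40, and a nice -5 thread is counted in bin 15.
 */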
426
427static void
428kseq_nice_rem(struct kseq *kseq, int nice)
429{
430	int n;
431
432	mtx_assert(&sched_lock, MA_OWNED);
433	/* Normalize to zero. */
434	n = nice + SCHED_PRI_NHALF;
435	kseq->ksq_nice[n]--;
436	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
437
438	/*
439	 * If this wasn't the smallest nice value or there are more in
440	 * this bucket we can just return.  Otherwise we have to recalculate
441	 * the smallest nice.
442	 */
443	if (nice != kseq->ksq_nicemin ||
444	    kseq->ksq_nice[n] != 0 ||
445	    kseq->ksq_load_timeshare == 0)
446		return;
447
448	for (; n < SCHED_PRI_NRESV; n++)
449		if (kseq->ksq_nice[n]) {
450			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
451			return;
452		}
453}
454
455#ifdef SMP
456/*
457 * sched_balance is a simple CPU load balancing algorithm.  It operates by
458 * finding the least loaded and most loaded cpu and equalizing their load
459 * by migrating some processes.
460 *
461 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
462 * installations will only have 2 cpus.  Secondly, load balancing too much at
463 * once can have an unpleasant effect on the system.  The scheduler rarely has
464 * enough information to make perfect decisions.  So this algorithm chooses
465 * algorithmic simplicity and more gradual effects on load in larger systems.
466 *
467 * It could be improved by considering the priorities and slices assigned to
468 * each task prior to balancing them.  There are many pathological cases with
469 * any approach and so the semi-random algorithm below may work as well as any.
470 *
471 */
472static void
473sched_balance(void)
474{
475	struct kseq_group *high;
476	struct kseq_group *low;
477	struct kseq_group *ksg;
478	int cnt;
479	int i;
480
481	bal_tick = ticks + (random() % (hz * 2));
482	if (smp_started == 0)
483		return;
484	low = high = NULL;
485	i = random() % (ksg_maxid + 1);
486	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
487		ksg = KSEQ_GROUP(i);
488		/*
489		 * Find the CPU with the highest load that has some
490		 * threads to transfer.
491		 */
492		if ((high == NULL || ksg->ksg_load > high->ksg_load)
493		    && ksg->ksg_transferable)
494			high = ksg;
495		if (low == NULL || ksg->ksg_load < low->ksg_load)
496			low = ksg;
497		if (++i > ksg_maxid)
498			i = 0;
499	}
500	if (low != NULL && high != NULL && high != low)
501		sched_balance_pair(LIST_FIRST(&high->ksg_members),
502		    LIST_FIRST(&low->ksg_members));
503}
504
505static void
506sched_balance_groups(void)
507{
508	int i;
509
510	gbal_tick = ticks + (random() % (hz * 2));
511	mtx_assert(&sched_lock, MA_OWNED);
512	if (smp_started)
513		for (i = 0; i <= ksg_maxid; i++)
514			sched_balance_group(KSEQ_GROUP(i));
515}
516
517static void
518sched_balance_group(struct kseq_group *ksg)
519{
520	struct kseq *kseq;
521	struct kseq *high;
522	struct kseq *low;
523	int load;
524
525	if (ksg->ksg_transferable == 0)
526		return;
527	low = NULL;
528	high = NULL;
529	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
530		load = kseq->ksq_load;
531		if (high == NULL || load > high->ksq_load)
532			high = kseq;
533		if (low == NULL || load < low->ksq_load)
534			low = kseq;
535	}
536	if (high != NULL && low != NULL && high != low)
537		sched_balance_pair(high, low);
538}
539
540static void
541sched_balance_pair(struct kseq *high, struct kseq *low)
542{
543	int transferable;
544	int high_load;
545	int low_load;
546	int move;
547	int diff;
548	int i;
549
550	/*
551	 * If we're transferring within a group we have to use this specific
552	 * kseq's transferable count, otherwise we can steal from other members
553	 * of the group.
554	 */
555	if (high->ksq_group == low->ksq_group) {
556		transferable = high->ksq_transferable;
557		high_load = high->ksq_load;
558		low_load = low->ksq_load;
559	} else {
560		transferable = high->ksq_group->ksg_transferable;
561		high_load = high->ksq_group->ksg_load;
562		low_load = low->ksq_group->ksg_load;
563	}
564	if (transferable == 0)
565		return;
566	/*
567	 * Determine what the imbalance is and then adjust that to how many
568	 * kses we actually have to give up (transferable).
569	 */
570	diff = high_load - low_load;
571	move = diff / 2;
572	if (diff & 0x1)
573		move++;
574	move = min(move, transferable);
575	for (i = 0; i < move; i++)
576		kseq_move(high, KSEQ_ID(low));
577	return;
578}
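/*
 * Worked example (illustrative only): with high_load = 7 and low_load = 2,
 * diff is 5 and move rounds up to 3, so at most three kses (further bounded
 * by transferable) are pushed toward the low kseq via kseq_move().
 */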
579
580static void
581kseq_move(struct kseq *from, int cpu)
582{
583	struct kseq *kseq;
584	struct kseq *to;
585	struct kse *ke;
586
587	kseq = from;
588	to = KSEQ_CPU(cpu);
589	ke = kseq_steal(kseq, 1);
590	if (ke == NULL) {
591		struct kseq_group *ksg;
592
593		ksg = kseq->ksq_group;
594		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
595			if (kseq == from || kseq->ksq_transferable == 0)
596				continue;
597			ke = kseq_steal(kseq, 1);
598			break;
599		}
600		if (ke == NULL)
601			panic("kseq_move: No KSEs available with a "
602			    "transferable count of %d\n",
603			    ksg->ksg_transferable);
604	}
605	if (kseq == to)
606		return;
607	ke->ke_state = KES_THREAD;
608	kseq_runq_rem(kseq, ke);
609	kseq_load_rem(kseq, ke);
610	kseq_notify(ke, cpu);
611}
612
613static int
614kseq_idled(struct kseq *kseq)
615{
616	struct kseq_group *ksg;
617	struct kseq *steal;
618	struct kse *ke;
619
620	ksg = kseq->ksq_group;
621	/*
622	 * If we're in a cpu group, try and steal kses from another cpu in
623	 * the group before idling.
624	 */
625	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
626		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
627			if (steal == kseq || steal->ksq_transferable == 0)
628				continue;
629			ke = kseq_steal(steal, 0);
630			if (ke == NULL)
631				continue;
632			ke->ke_state = KES_THREAD;
633			kseq_runq_rem(steal, ke);
634			kseq_load_rem(steal, ke);
635			ke->ke_cpu = PCPU_GET(cpuid);
636			ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
637			sched_add(ke->ke_thread, SRQ_YIELDING);
638			return (0);
639		}
640	}
641	/*
642	 * We only set the idled bit when all of the cpus in the group are
643	 * idle.  Otherwise we could get into a situation where a KSE bounces
644	 * back and forth between two idle cores on separate physical CPUs.
645	 */
646	ksg->ksg_idlemask |= PCPU_GET(cpumask);
647	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
648		return (1);
649	atomic_set_int(&kseq_idle, ksg->ksg_mask);
650	return (1);
651}
652
653static void
654kseq_assign(struct kseq *kseq)
655{
656	struct kse *nke;
657	struct kse *ke;
658
659	do {
660		*(volatile struct kse **)&ke = kseq->ksq_assigned;
661	} while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned,
662		(uintptr_t)ke, (uintptr_t)NULL));
663	for (; ke != NULL; ke = nke) {
664		nke = ke->ke_assign;
665		kseq->ksq_group->ksg_load--;
666		kseq->ksq_load--;
667		ke->ke_flags &= ~KEF_ASSIGNED;
668		if (ke->ke_flags & KEF_REMOVED) {
669			ke->ke_flags &= ~KEF_REMOVED;
670			continue;
671		}
672		ke->ke_flags |= KEF_INTERNAL | KEF_HOLD;
673		sched_add(ke->ke_thread, SRQ_YIELDING);
674	}
675}
676
677static void
678kseq_notify(struct kse *ke, int cpu)
679{
680	struct kseq *kseq;
681	struct thread *td;
682	struct pcpu *pcpu;
683	int class;
684	int prio;
685
686	kseq = KSEQ_CPU(cpu);
687	/* XXX */
688	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
689	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
690	    (kseq_idle & kseq->ksq_group->ksg_mask))
691		atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
692	kseq->ksq_group->ksg_load++;
693	kseq->ksq_load++;
694	ke->ke_cpu = cpu;
695	ke->ke_flags |= KEF_ASSIGNED;
696	prio = ke->ke_thread->td_priority;
697
698	/*
699	 * Place a KSE on another cpu's queue and force a resched.
700	 */
701	do {
702		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
703	} while(!atomic_cmpset_ptr((volatile uintptr_t *)&kseq->ksq_assigned,
704		(uintptr_t)ke->ke_assign, (uintptr_t)ke));
705	/*
706	 * Without sched_lock we could lose a race where we set NEEDRESCHED
707	 * on a thread that is switched out before the IPI is delivered.  This
708	 * would lead us to miss the resched.  This will be a problem once
709	 * sched_lock is pushed down.
710	 */
711	pcpu = pcpu_find(cpu);
712	td = pcpu->pc_curthread;
713	if (ke->ke_thread->td_priority < td->td_priority ||
714	    td == pcpu->pc_idlethread) {
715		td->td_flags |= TDF_NEEDRESCHED;
716		ipi_selected(1 << cpu, IPI_AST);
717	}
718}
719
720static struct kse *
721runq_steal(struct runq *rq)
722{
723	struct rqhead *rqh;
724	struct rqbits *rqb;
725	struct kse *ke;
726	int word;
727	int bit;
728
729	mtx_assert(&sched_lock, MA_OWNED);
730	rqb = &rq->rq_status;
731	for (word = 0; word < RQB_LEN; word++) {
732		if (rqb->rqb_bits[word] == 0)
733			continue;
734		for (bit = 0; bit < RQB_BPW; bit++) {
735			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
736				continue;
737			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
738			TAILQ_FOREACH(ke, rqh, ke_procq) {
739				if (KSE_CAN_MIGRATE(ke))
740					return (ke);
741			}
742		}
743	}
744	return (NULL);
745}
746
747static struct kse *
748kseq_steal(struct kseq *kseq, int stealidle)
749{
750	struct kse *ke;
751
752	/*
753	 * Steal from next first to try to get a non-interactive task that
754	 * may not have run for a while.
755	 */
756	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
757		return (ke);
758	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
759		return (ke);
760	if (stealidle)
761		return (runq_steal(&kseq->ksq_idle));
762	return (NULL);
763}
764
765int
766kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
767{
768	struct kseq_group *nksg;
769	struct kseq_group *ksg;
770	struct kseq *old;
771	int cpu;
772	int idx;
773
774	if (smp_started == 0)
775		return (0);
776	cpu = 0;
777	/*
778	 * If our load exceeds a certain threshold we should attempt to
779	 * reassign this thread.  The first candidate is the cpu that
780	 * originally ran the thread.  If it is idle, assign it there,
781	 * otherwise, pick an idle cpu.
782	 *
783	 * The threshold at which we start to reassign kses has a large impact
784	 * on the overall performance of the system.  Tuned too high and
785	 * some CPUs may idle.  Too low and there will be excess migration
786	 * and context switches.
787	 */
788	old = KSEQ_CPU(ke->ke_cpu);
789	nksg = old->ksq_group;
790	ksg = kseq->ksq_group;
791	if (kseq_idle) {
792		if (kseq_idle & nksg->ksg_mask) {
793			cpu = ffs(nksg->ksg_idlemask);
794			if (cpu) {
795				CTR2(KTR_SCHED,
796				    "kseq_transfer: %p found old cpu %X "
797				    "in idlemask.", ke, cpu);
798				goto migrate;
799			}
800		}
801		/*
802		 * Multiple cpus could find this bit simultaneously
803		 * but the race shouldn't be terrible.
804		 */
805		cpu = ffs(kseq_idle);
806		if (cpu) {
807			CTR2(KTR_SCHED, "kseq_transfer: %p found %X "
808			    "in idlemask.", ke, cpu);
809			goto migrate;
810		}
811	}
812	idx = 0;
813#if 0
814	if (old->ksq_load < kseq->ksq_load) {
815		cpu = ke->ke_cpu + 1;
816		CTR2(KTR_SCHED, "kseq_transfer: %p old cpu %X "
817		    "load less than ours.", ke, cpu);
818		goto migrate;
819	}
820	/*
821	 * No new CPU was found, look for one with less load.
822	 */
823	for (idx = 0; idx <= ksg_maxid; idx++) {
824		nksg = KSEQ_GROUP(idx);
825		if (nksg->ksg_load /*+ (nksg->ksg_cpus  * 2)*/ < ksg->ksg_load) {
826			cpu = ffs(nksg->ksg_cpumask);
827			CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X load less "
828			    "than ours.", ke, cpu);
829			goto migrate;
830		}
831	}
832#endif
833	/*
834	 * If another cpu in this group has idled, assign a thread over
835	 * to them after checking to see if there are idled groups.
836	 */
837	if (ksg->ksg_idlemask) {
838		cpu = ffs(ksg->ksg_idlemask);
839		if (cpu) {
840			CTR2(KTR_SCHED, "kseq_transfer: %p cpu %X idle in "
841			    "group.", ke, cpu);
842			goto migrate;
843		}
844	}
845	return (0);
846migrate:
847	/*
848	 * Now that we've found an idle CPU, migrate the thread.
849	 */
850	cpu--;
851	ke->ke_runq = NULL;
852	kseq_notify(ke, cpu);
853
854	return (1);
855}
856
857#endif	/* SMP */
858
859/*
860 * Pick the highest priority task we have and return it.
861 */
862
863static struct kse *
864kseq_choose(struct kseq *kseq)
865{
866	struct runq *swap;
867	struct kse *ke;
868	int nice;
869
870	mtx_assert(&sched_lock, MA_OWNED);
871	swap = NULL;
872
873	for (;;) {
874		ke = runq_choose(kseq->ksq_curr);
875		if (ke == NULL) {
876			/*
877			 * We already swapped once and didn't get anywhere.
878			 */
879			if (swap)
880				break;
881			swap = kseq->ksq_curr;
882			kseq->ksq_curr = kseq->ksq_next;
883			kseq->ksq_next = swap;
884			continue;
885		}
886		/*
887		 * If we encounter a slice of 0 the kse is in a
888		 * TIMESHARE kse group and its nice was too far out
889		 * of the range that receives slices.
890		 */
891		nice = ke->ke_proc->p_nice + (0 - kseq->ksq_nicemin);
892#if 0
893		if (ke->ke_slice == 0 || (nice > SCHED_SLICE_NTHRESH &&
894		    ke->ke_proc->p_nice != 0)) {
895			runq_remove(ke->ke_runq, ke);
896			sched_slice(ke);
897			ke->ke_runq = kseq->ksq_next;
898			runq_add(ke->ke_runq, ke, 0);
899			continue;
900		}
901#endif
902		return (ke);
903	}
904
905	return (runq_choose(&kseq->ksq_idle));
906}
907
908static void
909kseq_setup(struct kseq *kseq)
910{
911	runq_init(&kseq->ksq_timeshare[0]);
912	runq_init(&kseq->ksq_timeshare[1]);
913	runq_init(&kseq->ksq_idle);
914	kseq->ksq_curr = &kseq->ksq_timeshare[0];
915	kseq->ksq_next = &kseq->ksq_timeshare[1];
916	kseq->ksq_load = 0;
917	kseq->ksq_load_timeshare = 0;
918}
919
920static void
921sched_setup(void *dummy)
922{
923#ifdef SMP
924	int i;
925#endif
926
927	/*
928	 * To avoid divide-by-zero, we set realstathz to a dummy value
929	 * in case sched_clock() is called before sched_initticks().
930	 */
931	realstathz = hz;
932	slice_min = (hz/100);	/* 10ms */
933	slice_max = (hz/7);	/* ~140ms */
934
935#ifdef SMP
936	balance_groups = 0;
937	/*
938	 * Initialize the kseqs.
939	 */
940	for (i = 0; i < MAXCPU; i++) {
941		struct kseq *ksq;
942
943		ksq = &kseq_cpu[i];
944		ksq->ksq_assigned = NULL;
945		kseq_setup(&kseq_cpu[i]);
946	}
947	if (smp_topology == NULL) {
948		struct kseq_group *ksg;
949		struct kseq *ksq;
950		int cpus;
951
952		for (cpus = 0, i = 0; i < MAXCPU; i++) {
953			if (CPU_ABSENT(i))
954				continue;
955			ksq = &kseq_cpu[i];
956			ksg = &kseq_groups[cpus];
957			/*
958			 * Setup a kseq group with one member.
959			 */
960			ksq->ksq_transferable = 0;
961			ksq->ksq_group = ksg;
962			ksg->ksg_cpus = 1;
963			ksg->ksg_idlemask = 0;
964			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
965			ksg->ksg_load = 0;
966			ksg->ksg_transferable = 0;
967			LIST_INIT(&ksg->ksg_members);
968			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
969			cpus++;
970		}
971		ksg_maxid = cpus - 1;
972	} else {
973		struct kseq_group *ksg;
974		struct cpu_group *cg;
975		int j;
976
977		for (i = 0; i < smp_topology->ct_count; i++) {
978			cg = &smp_topology->ct_group[i];
979			ksg = &kseq_groups[i];
980			/*
981			 * Initialize the group.
982			 */
983			ksg->ksg_idlemask = 0;
984			ksg->ksg_load = 0;
985			ksg->ksg_transferable = 0;
986			ksg->ksg_cpus = cg->cg_count;
987			ksg->ksg_cpumask = cg->cg_mask;
988			LIST_INIT(&ksg->ksg_members);
989			/*
990			 * Find all of the group members and add them.
991			 */
992			for (j = 0; j < MAXCPU; j++) {
993				if ((cg->cg_mask & (1 << j)) != 0) {
994					if (ksg->ksg_mask == 0)
995						ksg->ksg_mask = 1 << j;
996					kseq_cpu[j].ksq_transferable = 0;
997					kseq_cpu[j].ksq_group = ksg;
998					LIST_INSERT_HEAD(&ksg->ksg_members,
999					    &kseq_cpu[j], ksq_siblings);
1000				}
1001			}
1002			if (ksg->ksg_cpus > 1)
1003				balance_groups = 1;
1004		}
1005		ksg_maxid = smp_topology->ct_count - 1;
1006	}
1007	/*
1008	 * Stagger the group and global load balancer so they do not
1009	 * interfere with each other.
1010	 */
1011	bal_tick = ticks + hz;
1012	if (balance_groups)
1013		gbal_tick = ticks + (hz / 2);
1014#else
1015	kseq_setup(KSEQ_SELF());
1016#endif
1017	mtx_lock_spin(&sched_lock);
1018	kseq_load_add(KSEQ_SELF(), &kse0);
1019	mtx_unlock_spin(&sched_lock);
1020}
1021
1022/* ARGSUSED */
1023static void
1024sched_initticks(void *dummy)
1025{
1026	mtx_lock_spin(&sched_lock);
1027	realstathz = stathz ? stathz : hz;
1028	slice_min = (realstathz/100);	/* 10ms */
1029	slice_max = (realstathz/7);	/* ~140ms */
1030
1031	tickincr = (hz << 10) / realstathz;
1032	/*
1033	 * XXX This does not work for values of stathz that are much
1034	 * larger than hz.
1035	 */
1036	if (tickincr == 0)
1037		tickincr = 1;
1038	mtx_unlock_spin(&sched_lock);
1039}
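/*
 * Worked example (illustrative only, assuming hz = 1000 and stathz = 128):
 * tickincr = (1000 << 10) / 128 = 8000, so each stathz tick charged in
 * sched_clock() adds 8000, i.e. roughly 7.8 hz ticks in the << 10 fixed
 * point used for kg_runtime and kg_slptime.
 */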
1040
1041
1042/*
1043 * Scale the scheduling priority according to the "interactivity" of this
1044 * process.
1045 */
1046static void
1047sched_priority(struct ksegrp *kg)
1048{
1049	int pri;
1050
1051	if (kg->kg_pri_class != PRI_TIMESHARE)
1052		return;
1053
1054	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
1055	pri += SCHED_PRI_BASE;
1056	pri += kg->kg_proc->p_nice;
1057
1058	if (pri > PRI_MAX_TIMESHARE)
1059		pri = PRI_MAX_TIMESHARE;
1060	else if (pri < PRI_MIN_TIMESHARE)
1061		pri = PRI_MIN_TIMESHARE;
1062
1063	kg->kg_user_pri = pri;
1064
1065	return;
1066}
1067
1068/*
1069 * Calculate a time slice based on the properties of the kseg and the runq
1070 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
1071 */
1072static void
1073sched_slice(struct kse *ke)
1074{
1075	struct kseq *kseq;
1076	struct ksegrp *kg;
1077
1078	kg = ke->ke_ksegrp;
1079	kseq = KSEQ_CPU(ke->ke_cpu);
1080
1081	if (ke->ke_thread->td_flags & TDF_BORROWING) {
1082		ke->ke_slice = SCHED_SLICE_MIN;
1083		return;
1084	}
1085
1086	/*
1087	 * Rationale:
1088	 * KSEs in interactive ksegs get a minimal slice so that we
1089	 * quickly notice if they abuse their advantage.
1090	 *
1091	 * KSEs in non-interactive ksegs are assigned a slice that is
1092	 * based on the kseg's nice value relative to the least nice kseg
1093	 * on the run queue for this cpu.
1094	 *
1095	 * If the KSE is less nice than all others it gets the maximum
1096	 * slice and other KSEs will adjust their slice relative to
1097	 * this when they first expire.
1098	 *
1099	 * There is a 20-point window that starts relative to the least
1100	 * nice kse on the run queue.  Slice size is determined by
1101	 * the kse's distance from the least nice ksegrp.
1102	 *
1103	 * If the kse is outside of the window it will get no slice
1104	 * and will be reevaluated each time it is selected on the
1105	 * run queue.  The exception to this is nice 0 ksegs when
1106	 * a nice -20 is running.  They are always granted a minimum
1107	 * slice.
1108	 */
1109	if (!SCHED_INTERACTIVE(kg)) {
1110		int nice;
1111
1112		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
1113		if (kseq->ksq_load_timeshare == 0 ||
1114		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
1115			ke->ke_slice = SCHED_SLICE_MAX;
1116		else if (nice <= SCHED_SLICE_NTHRESH)
1117			ke->ke_slice = SCHED_SLICE_NICE(nice);
1118		else if (kg->kg_proc->p_nice == 0)
1119			ke->ke_slice = SCHED_SLICE_MIN;
1120		else
1121			ke->ke_slice = SCHED_SLICE_MIN; /* 0 */
1122	} else
1123		ke->ke_slice = SCHED_SLICE_INTERACTIVE;
1124
1125	return;
1126}
1127
1128/*
1129 * This routine enforces a maximum limit on the amount of scheduling history
1130 * kept.  It is called after either the slptime or runtime is adjusted.
1131 * This routine will not operate correctly when slp or run times have been
1132 * adjusted to more than double their maximum.
1133 */
1134static void
1135sched_interact_update(struct ksegrp *kg)
1136{
1137	int sum;
1138
1139	sum = kg->kg_runtime + kg->kg_slptime;
1140	if (sum < SCHED_SLP_RUN_MAX)
1141		return;
1142	/*
1143	 * If we have exceeded by more than 1/5th then the algorithm below
1144	 * will not bring us back into range.  Dividing by two here forces
1145	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1146	 */
1147	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1148		kg->kg_runtime /= 2;
1149		kg->kg_slptime /= 2;
1150		return;
1151	}
1152	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
1153	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
1154}
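/*
 * Worked example (illustrative only, assuming hz = 1000): SCHED_SLP_RUN_MAX
 * is (5 * hz) << 10 = 5,120,000.  A sum of 5,200,000 is under 6/5 of the max
 * and takes the 4/5 scaling path; a sum of 6,500,000 exceeds it and is
 * halved instead.
 */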
1155
1156static void
1157sched_interact_fork(struct ksegrp *kg)
1158{
1159	int ratio;
1160	int sum;
1161
1162	sum = kg->kg_runtime + kg->kg_slptime;
1163	if (sum > SCHED_SLP_RUN_FORK) {
1164		ratio = sum / SCHED_SLP_RUN_FORK;
1165		kg->kg_runtime /= ratio;
1166		kg->kg_slptime /= ratio;
1167	}
1168}
1169
1170static int
1171sched_interact_score(struct ksegrp *kg)
1172{
1173	int div;
1174
1175	if (kg->kg_runtime > kg->kg_slptime) {
1176		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
1177		return (SCHED_INTERACT_HALF +
1178		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
1179	} if (kg->kg_slptime > kg->kg_runtime) {
1180		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
1181		return (kg->kg_runtime / div);
1182	}
1183
1184	/*
1185	 * This can happen if slptime and runtime are 0.
1186	 */
1187	return (0);
1188
1189}
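/*
 * Illustrative sketch, never compiled and with a made-up helper name: the
 * score above restated with SCHED_INTERACT_HALF folded in as 50.  A ksegrp
 * that slept twice as long as it ran scores about 25 (interactive, below
 * the threshold of 30); one that ran twice as long as it slept scores
 * about 75.
 */
#if 0
static int
ule_score_example(int runtime, int slptime)
{
	if (runtime > slptime)
		return (50 + (50 - slptime / max(1, runtime / 50)));
	if (slptime > runtime)
		return (runtime / max(1, slptime / 50));
	return (0);
}
#endif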
1190
1191/*
1192 * Very early in the boot some setup of scheduler-specific
1193 * parts of proc0 and of some scheduler resources needs to be done.
1194 * Called from:
1195 *  proc0_init()
1196 */
1197void
1198schedinit(void)
1199{
1200	/*
1201	 * Set up the scheduler specific parts of proc0.
1202	 */
1203	proc0.p_sched = NULL; /* XXX */
1204	ksegrp0.kg_sched = &kg_sched0;
1205	thread0.td_sched = &kse0;
1206	kse0.ke_thread = &thread0;
1207	kse0.ke_state = KES_THREAD;
1208	kg_sched0.skg_concurrency = 1;
1209	kg_sched0.skg_avail_opennings = 0; /* we are already running */
1210}
1211
1212/*
1213 * This is only somewhat accurate since, given many processes of the same
1214 * priority, they will switch when their slices run out, which will be
1215 * at most SCHED_SLICE_MAX.
1216 */
1217int
1218sched_rr_interval(void)
1219{
1220	return (SCHED_SLICE_MAX);
1221}
1222
1223static void
1224sched_pctcpu_update(struct kse *ke)
1225{
1226	/*
1227	 * Adjust counters and watermark for pctcpu calc.
1228	 */
1229	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
1230		/*
1231		 * Shift the tick count out so that the divide doesn't
1232		 * round away our results.
1233		 */
1234		ke->ke_ticks <<= 10;
1235		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
1236			    SCHED_CPU_TICKS;
1237		ke->ke_ticks >>= 10;
1238	} else
1239		ke->ke_ticks = 0;
1240	ke->ke_ltick = ticks;
1241	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
1242}
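/*
 * Worked example (illustrative only): if a kse has accumulated 3000 ticks
 * and its window start ke_ftick is 15 seconds in the past (ticks - ke_ftick
 * = 15 * hz), the count is rescaled to 3000 * SCHED_CPU_TICKS / (15 * hz)
 * = 2000, i.e. the rate over the trailing 10-second window, and ke_ftick is
 * pulled forward to ke_ltick - SCHED_CPU_TICKS.
 */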
1243
1244void
1245sched_thread_priority(struct thread *td, u_char prio)
1246{
1247	struct kse *ke;
1248
1249	CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
1250	    td, td->td_proc->p_comm, td->td_priority, prio, curthread,
1251	    curthread->td_proc->p_comm);
1252	ke = td->td_kse;
1253	mtx_assert(&sched_lock, MA_OWNED);
1254	if (td->td_priority == prio)
1255		return;
1256	if (TD_ON_RUNQ(td)) {
1257		/*
1258		 * If the priority has been elevated due to priority
1259		 * propagation, we may have to move ourselves to a new
1260		 * queue.  We still call adjustrunqueue below in case kse
1261		 * queue.  We still call adjustrunqueue below in case the kse
1262		 */
1263		if (prio < td->td_priority && ke->ke_runq != NULL &&
1264		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
1265		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
1266			runq_remove(ke->ke_runq, ke);
1267			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
1268			runq_add(ke->ke_runq, ke, 0);
1269		}
1270		/*
1271		 * Hold this kse on this cpu so that sched_prio() doesn't
1272		 * cause excessive migration.  We only want migration to
1273		 * happen as the result of a wakeup.
1274		 */
1275		ke->ke_flags |= KEF_HOLD;
1276		adjustrunqueue(td, prio);
1277		ke->ke_flags &= ~KEF_HOLD;
1278	} else
1279		td->td_priority = prio;
1280}
1281
1282/*
1283 * Update a thread's priority when it is lent another thread's
1284 * priority.
1285 */
1286void
1287sched_lend_prio(struct thread *td, u_char prio)
1288{
1289
1290	td->td_flags |= TDF_BORROWING;
1291	sched_thread_priority(td, prio);
1292}
1293
1294/*
1295 * Restore a thread's priority when priority propagation is
1296 * over.  The prio argument is the minimum priority the thread
1297 * needs to have to satisfy other possible priority lending
1298 * requests.  If the thread's regular priority is less
1299 * important than prio, the thread will keep a priority boost
1300 * of prio.
1301 */
1302void
1303sched_unlend_prio(struct thread *td, u_char prio)
1304{
1305	u_char base_pri;
1306
1307	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1308	    td->td_base_pri <= PRI_MAX_TIMESHARE)
1309		base_pri = td->td_ksegrp->kg_user_pri;
1310	else
1311		base_pri = td->td_base_pri;
1312	if (prio >= base_pri) {
1313		td->td_flags &= ~TDF_BORROWING;
1314		sched_thread_priority(td, base_pri);
1315	} else
1316		sched_lend_prio(td, prio);
1317}
1318
1319void
1320sched_prio(struct thread *td, u_char prio)
1321{
1322	u_char oldprio;
1323
1324	/* First, update the base priority. */
1325	td->td_base_pri = prio;
1326
1327	/*
1328	 * If the thread is borrowing another thread's priority, don't
1329	 * ever lower the priority.
1330	 */
1331	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1332		return;
1333
1334	/* Change the real priority. */
1335	oldprio = td->td_priority;
1336	sched_thread_priority(td, prio);
1337
1338	/*
1339	 * If the thread is on a turnstile, then let the turnstile update
1340	 * its state.
1341	 */
1342	if (TD_ON_LOCK(td) && oldprio != prio)
1343		turnstile_adjust(td, oldprio);
1344}
1345
1346void
1347sched_switch(struct thread *td, struct thread *newtd, int flags)
1348{
1349	struct kseq *ksq;
1350	struct kse *ke;
1351
1352	mtx_assert(&sched_lock, MA_OWNED);
1353
1354	ke = td->td_kse;
1355	ksq = KSEQ_SELF();
1356
1357	td->td_lastcpu = td->td_oncpu;
1358	td->td_oncpu = NOCPU;
1359	td->td_flags &= ~TDF_NEEDRESCHED;
1360	td->td_owepreempt = 0;
1361
1362	/*
1363	 * If the KSE has been assigned it may be in the process of switching
1364	 * to the new cpu.  This is the case in sched_bind().
1365	 */
1366	if (td == PCPU_GET(idlethread)) {
1367		TD_SET_CAN_RUN(td);
1368	} else if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
1369		/* We are ending our run so make our slot available again */
1370		SLOT_RELEASE(td->td_ksegrp);
1371		kseq_load_rem(ksq, ke);
1372		if (TD_IS_RUNNING(td)) {
1373			/*
1374			 * Don't allow the thread to migrate
1375			 * from a preemption.
1376			 */
1377			ke->ke_flags |= KEF_HOLD;
1378			setrunqueue(td, (flags & SW_PREEMPT) ?
1379			    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1380			    SRQ_OURSELF|SRQ_YIELDING);
1381			ke->ke_flags &= ~KEF_HOLD;
1382		} else if ((td->td_proc->p_flag & P_HADTHREADS) &&
1383		    (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp))
1384			/*
1385			 * We will not be on the run queue.
1386			 * So we must be sleeping or similar.
1387			 * Don't use the slot if we will need it
1388			 * for newtd.
1389			 */
1390			slot_fill(td->td_ksegrp);
1391	}
1392	if (newtd != NULL) {
1393		/*
1394		 * If we bring in a thread, account for it as if it had been
1395		 * added to the run queue and then chosen.
1396		 */
1397		newtd->td_kse->ke_flags |= KEF_DIDRUN;
1398		newtd->td_kse->ke_runq = ksq->ksq_curr;
1399		TD_SET_RUNNING(newtd);
1400		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
1401		/*
1402		 * XXX When we preempt, we've already consumed a slot because
1403		 * we got here through sched_add().  However, newtd can come
1404		 * from thread_switchout() which can't SLOT_USE() because
1405		 * the SLOT code is scheduler dependent.  We must use the
1406		 * slot here otherwise.
1407		 */
1408		if ((flags & SW_PREEMPT) == 0)
1409			SLOT_USE(newtd->td_ksegrp);
1410	} else
1411		newtd = choosethread();
1412	if (td != newtd) {
1413#ifdef	HWPMC_HOOKS
1414		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1415			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1416#endif
1417		cpu_switch(td, newtd);
1418#ifdef	HWPMC_HOOKS
1419		if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1420			PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1421#endif
1422	}
1423
1424	sched_lock.mtx_lock = (uintptr_t)td;
1425
1426	td->td_oncpu = PCPU_GET(cpuid);
1427}
1428
1429void
1430sched_nice(struct proc *p, int nice)
1431{
1432	struct ksegrp *kg;
1433	struct kse *ke;
1434	struct thread *td;
1435	struct kseq *kseq;
1436
1437	PROC_LOCK_ASSERT(p, MA_OWNED);
1438	mtx_assert(&sched_lock, MA_OWNED);
1439	/*
1440	 * We need to adjust the nice counts for running KSEs.
1441	 */
1442	FOREACH_KSEGRP_IN_PROC(p, kg) {
1443		if (kg->kg_pri_class == PRI_TIMESHARE) {
1444			FOREACH_THREAD_IN_GROUP(kg, td) {
1445				ke = td->td_kse;
1446				if (ke->ke_runq == NULL)
1447					continue;
1448				kseq = KSEQ_CPU(ke->ke_cpu);
1449				kseq_nice_rem(kseq, p->p_nice);
1450				kseq_nice_add(kseq, nice);
1451			}
1452		}
1453	}
1454	p->p_nice = nice;
1455	FOREACH_KSEGRP_IN_PROC(p, kg) {
1456		sched_priority(kg);
1457		FOREACH_THREAD_IN_GROUP(kg, td)
1458			td->td_flags |= TDF_NEEDRESCHED;
1459	}
1460}
1461
1462void
1463sched_sleep(struct thread *td)
1464{
1465	mtx_assert(&sched_lock, MA_OWNED);
1466
1467	td->td_slptime = ticks;
1468}
1469
1470void
1471sched_wakeup(struct thread *td)
1472{
1473	mtx_assert(&sched_lock, MA_OWNED);
1474
1475	/*
1476	 * Let the kseg know how long we slept for.  This is because process
1477	 * interactivity behavior is modeled in the kseg.
1478	 */
1479	if (td->td_slptime) {
1480		struct ksegrp *kg;
1481		int hzticks;
1482
1483		kg = td->td_ksegrp;
1484		hzticks = (ticks - td->td_slptime) << 10;
1485		if (hzticks >= SCHED_SLP_RUN_MAX) {
1486			kg->kg_slptime = SCHED_SLP_RUN_MAX;
1487			kg->kg_runtime = 1;
1488		} else {
1489			kg->kg_slptime += hzticks;
1490			sched_interact_update(kg);
1491		}
1492		sched_priority(kg);
1493		sched_slice(td->td_kse);
1494		td->td_slptime = 0;
1495	}
1496	setrunqueue(td, SRQ_BORING);
1497}
1498
1499/*
1500 * Penalize the parent for creating a new child and initialize the child's
1501 * priority.
1502 */
1503void
1504sched_fork(struct thread *td, struct thread *childtd)
1505{
1506
1507	mtx_assert(&sched_lock, MA_OWNED);
1508
1509	sched_fork_ksegrp(td, childtd->td_ksegrp);
1510	sched_fork_thread(td, childtd);
1511}
1512
1513void
1514sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
1515{
1516	struct ksegrp *kg = td->td_ksegrp;
1517	mtx_assert(&sched_lock, MA_OWNED);
1518
1519	child->kg_slptime = kg->kg_slptime;
1520	child->kg_runtime = kg->kg_runtime;
1521	child->kg_user_pri = kg->kg_user_pri;
1522	sched_interact_fork(child);
1523	kg->kg_runtime += tickincr;
1524	sched_interact_update(kg);
1525}
1526
1527void
1528sched_fork_thread(struct thread *td, struct thread *child)
1529{
1530	struct kse *ke;
1531	struct kse *ke2;
1532
1533	sched_newthread(child);
1534	ke = td->td_kse;
1535	ke2 = child->td_kse;
1536	ke2->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
1537	ke2->ke_cpu = ke->ke_cpu;
1538	ke2->ke_runq = NULL;
1539
1540	/* Grab our parents cpu estimation information. */
1541	ke2->ke_ticks = ke->ke_ticks;
1542	ke2->ke_ltick = ke->ke_ltick;
1543	ke2->ke_ftick = ke->ke_ftick;
1544}
1545
1546void
1547sched_class(struct ksegrp *kg, int class)
1548{
1549	struct kseq *kseq;
1550	struct kse *ke;
1551	struct thread *td;
1552	int nclass;
1553	int oclass;
1554
1555	mtx_assert(&sched_lock, MA_OWNED);
1556	if (kg->kg_pri_class == class)
1557		return;
1558
1559	nclass = PRI_BASE(class);
1560	oclass = PRI_BASE(kg->kg_pri_class);
1561	FOREACH_THREAD_IN_GROUP(kg, td) {
1562		ke = td->td_kse;
1563		if ((ke->ke_state != KES_ONRUNQ &&
1564		    ke->ke_state != KES_THREAD) || ke->ke_runq == NULL)
1565			continue;
1566		kseq = KSEQ_CPU(ke->ke_cpu);
1567
1568#ifdef SMP
1569		/*
1570		 * On SMP if we're on the RUNQ we must adjust the transferable
1571		 * count because we could be changing to or from an interrupt
1572		 * class.
1573		 */
1574		if (ke->ke_state == KES_ONRUNQ) {
1575			if (KSE_CAN_MIGRATE(ke)) {
1576				kseq->ksq_transferable--;
1577				kseq->ksq_group->ksg_transferable--;
1578			}
1579			if (KSE_CAN_MIGRATE(ke)) {
1580				kseq->ksq_transferable++;
1581				kseq->ksq_group->ksg_transferable++;
1582			}
1583		}
1584#endif
1585		if (oclass == PRI_TIMESHARE) {
1586			kseq->ksq_load_timeshare--;
1587			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
1588		}
1589		if (nclass == PRI_TIMESHARE) {
1590			kseq->ksq_load_timeshare++;
1591			kseq_nice_add(kseq, kg->kg_proc->p_nice);
1592		}
1593	}
1594
1595	kg->kg_pri_class = class;
1596}
1597
1598/*
1599 * Return some of the child's priority and interactivity to the parent.
1600 */
1601void
1602sched_exit(struct proc *p, struct thread *childtd)
1603{
1604	mtx_assert(&sched_lock, MA_OWNED);
1605	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd);
1606	sched_exit_thread(NULL, childtd);
1607}
1608
1609void
1610sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
1611{
1612	/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
1613	kg->kg_runtime += td->td_ksegrp->kg_runtime;
1614	sched_interact_update(kg);
1615}
1616
1617void
1618sched_exit_thread(struct thread *td, struct thread *childtd)
1619{
1620	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
1621	    childtd, childtd->td_proc->p_comm, childtd->td_priority);
1622	kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
1623}
1624
1625void
1626sched_clock(struct thread *td)
1627{
1628	struct kseq *kseq;
1629	struct ksegrp *kg;
1630	struct kse *ke;
1631
1632	mtx_assert(&sched_lock, MA_OWNED);
1633	kseq = KSEQ_SELF();
1634#ifdef SMP
1635	if (ticks >= bal_tick)
1636		sched_balance();
1637	if (ticks >= gbal_tick && balance_groups)
1638		sched_balance_groups();
1639	/*
1640	 * We could have been assigned a non real-time thread without an
1641	 * IPI.
1642	 */
1643	if (kseq->ksq_assigned)
1644		kseq_assign(kseq);	/* Potentially sets NEEDRESCHED */
1645#endif
1646	ke = td->td_kse;
1647	kg = ke->ke_ksegrp;
1648
1649	/* Adjust ticks for pctcpu */
1650	ke->ke_ticks++;
1651	ke->ke_ltick = ticks;
1652
1653	/* Go up to one second beyond our max and then trim back down */
1654	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1655		sched_pctcpu_update(ke);
1656
1657	if (td->td_flags & TDF_IDLETD)
1658		return;
1659	/*
1660	 * We only do slicing code for TIMESHARE ksegrps.
1661	 */
1662	if (kg->kg_pri_class != PRI_TIMESHARE)
1663		return;
1664	/*
1665	 * We used a tick; charge it to the ksegrp so that we can compute our
1666	 * interactivity.
1667	 */
1668	kg->kg_runtime += tickincr;
1669	sched_interact_update(kg);
1670
1671	/*
1672	 * We used up one time slice.
1673	 */
1674	if (--ke->ke_slice > 0)
1675		return;
1676	/*
1677	 * We're out of time, recompute priorities and requeue.
1678	 */
1679	kseq_load_rem(kseq, ke);
1680	sched_priority(kg);
1681	sched_slice(ke);
1682	if (SCHED_CURR(kg, ke))
1683		ke->ke_runq = kseq->ksq_curr;
1684	else
1685		ke->ke_runq = kseq->ksq_next;
1686	kseq_load_add(kseq, ke);
1687	td->td_flags |= TDF_NEEDRESCHED;
1688}
1689
1690int
1691sched_runnable(void)
1692{
1693	struct kseq *kseq;
1694	int load;
1695
1696	load = 1;
1697
1698	kseq = KSEQ_SELF();
1699#ifdef SMP
1700	if (kseq->ksq_assigned) {
1701		mtx_lock_spin(&sched_lock);
1702		kseq_assign(kseq);
1703		mtx_unlock_spin(&sched_lock);
1704	}
1705#endif
1706	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1707		if (kseq->ksq_load > 0)
1708			goto out;
1709	} else
1710		if (kseq->ksq_load - 1 > 0)
1711			goto out;
1712	load = 0;
1713out:
1714	return (load);
1715}
1716
1717void
1718sched_userret(struct thread *td)
1719{
1720	struct ksegrp *kg;
1721
1722	KASSERT((td->td_flags & TDF_BORROWING) == 0,
1723	    ("thread with borrowed priority returning to userland"));
1724	kg = td->td_ksegrp;
1725	if (td->td_priority != kg->kg_user_pri) {
1726		mtx_lock_spin(&sched_lock);
1727		td->td_priority = kg->kg_user_pri;
1728		td->td_base_pri = kg->kg_user_pri;
1729		mtx_unlock_spin(&sched_lock);
1730	}
1731}
1732
1733struct kse *
1734sched_choose(void)
1735{
1736	struct kseq *kseq;
1737	struct kse *ke;
1738
1739	mtx_assert(&sched_lock, MA_OWNED);
1740	kseq = KSEQ_SELF();
1741#ifdef SMP
1742restart:
1743	if (kseq->ksq_assigned)
1744		kseq_assign(kseq);
1745#endif
1746	ke = kseq_choose(kseq);
1747	if (ke) {
1748#ifdef SMP
1749		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1750			if (kseq_idled(kseq) == 0)
1751				goto restart;
1752#endif
1753		kseq_runq_rem(kseq, ke);
1754		ke->ke_state = KES_THREAD;
1755		ke->ke_flags &= ~KEF_PREEMPTED;
1756		return (ke);
1757	}
1758#ifdef SMP
1759	if (kseq_idled(kseq) == 0)
1760		goto restart;
1761#endif
1762	return (NULL);
1763}
1764
1765void
1766sched_add(struct thread *td, int flags)
1767{
1768	struct kseq *kseq;
1769	struct ksegrp *kg;
1770	struct kse *ke;
1771	int preemptive;
1772	int canmigrate;
1773	int class;
1774
1775	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
1776	    td, td->td_proc->p_comm, td->td_priority, curthread,
1777	    curthread->td_proc->p_comm);
1778	mtx_assert(&sched_lock, MA_OWNED);
1779	ke = td->td_kse;
1780	kg = td->td_ksegrp;
1781	canmigrate = 1;
1782	preemptive = !(flags & SRQ_YIELDING);
1783	class = PRI_BASE(kg->kg_pri_class);
1784	kseq = KSEQ_SELF();
1785	if ((ke->ke_flags & KEF_INTERNAL) == 0)
1786		SLOT_USE(td->td_ksegrp);
1787	ke->ke_flags &= ~KEF_INTERNAL;
1788#ifdef SMP
1789	if (ke->ke_flags & KEF_ASSIGNED) {
1790		if (ke->ke_flags & KEF_REMOVED)
1791			ke->ke_flags &= ~KEF_REMOVED;
1792		return;
1793	}
1794	canmigrate = KSE_CAN_MIGRATE(ke);
1795	/*
1796	 * Don't migrate running threads here.  Force the long term balancer
1797	 * to do it.
1798	 */
1799	if (ke->ke_flags & KEF_HOLD) {
1800		ke->ke_flags &= ~KEF_HOLD;
1801		canmigrate = 0;
1802	}
1803#endif
1804	KASSERT(ke->ke_state != KES_ONRUNQ,
1805	    ("sched_add: kse %p (%s) already in run queue", ke,
1806	    ke->ke_proc->p_comm));
1807	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1808	    ("sched_add: process swapped out"));
1809	KASSERT(ke->ke_runq == NULL,
1810	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1811	if (flags & SRQ_PREEMPTED)
1812		ke->ke_flags |= KEF_PREEMPTED;
1813	switch (class) {
1814	case PRI_ITHD:
1815	case PRI_REALTIME:
1816		ke->ke_runq = kseq->ksq_curr;
1817		ke->ke_slice = SCHED_SLICE_MAX;
1818		if (canmigrate)
1819			ke->ke_cpu = PCPU_GET(cpuid);
1820		break;
1821	case PRI_TIMESHARE:
1822		if (SCHED_CURR(kg, ke))
1823			ke->ke_runq = kseq->ksq_curr;
1824		else
1825			ke->ke_runq = kseq->ksq_next;
1826		break;
1827	case PRI_IDLE:
1828		/*
1829		 * This is for priority prop.
1830		 */
1831		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
1832			ke->ke_runq = kseq->ksq_curr;
1833		else
1834			ke->ke_runq = &kseq->ksq_idle;
1835		ke->ke_slice = SCHED_SLICE_MIN;
1836		break;
1837	default:
1838		panic("Unknown pri class.");
1839		break;
1840	}
1841#ifdef SMP
1842	/*
1843	 * If this thread is pinned or bound, notify the target cpu.
1844	 */
1845	if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid) ) {
1846		ke->ke_runq = NULL;
1847		kseq_notify(ke, ke->ke_cpu);
1848		return;
1849	}
1850	/*
1851	 * If we had been idle, clear our bit in the group and potentially
1852	 * the global bitmap.  If not, see if we should transfer this thread.
1853	 */
1854	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
1855	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
1856		/*
1857		 * Check to see if our group is unidling, and if so, remove it
1858		 * from the global idle mask.
1859		 */
1860		if (kseq->ksq_group->ksg_idlemask ==
1861		    kseq->ksq_group->ksg_cpumask)
1862			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
1863		/*
1864		 * Now remove ourselves from the group specific idle mask.
1865		 */
1866		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
1867	} else if (canmigrate && kseq->ksq_load > 1 && class != PRI_ITHD)
1868		if (kseq_transfer(kseq, ke, class))
1869			return;
1870	ke->ke_cpu = PCPU_GET(cpuid);
1871#endif
1872	if (td->td_priority < curthread->td_priority &&
1873	    ke->ke_runq == kseq->ksq_curr)
1874		curthread->td_flags |= TDF_NEEDRESCHED;
1875	if (preemptive && maybe_preempt(td))
1876		return;
1877	ke->ke_state = KES_ONRUNQ;
1878
1879	kseq_runq_add(kseq, ke, flags);
1880	kseq_load_add(kseq, ke);
1881}
1882
1883void
1884sched_rem(struct thread *td)
1885{
1886	struct kseq *kseq;
1887	struct kse *ke;
1888
1889	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
1890	    td, td->td_proc->p_comm, td->td_priority, curthread,
1891	    curthread->td_proc->p_comm);
1892	mtx_assert(&sched_lock, MA_OWNED);
1893	ke = td->td_kse;
1894	SLOT_RELEASE(td->td_ksegrp);
1895	ke->ke_flags &= ~KEF_PREEMPTED;
1896	if (ke->ke_flags & KEF_ASSIGNED) {
1897		ke->ke_flags |= KEF_REMOVED;
1898		return;
1899	}
1900	KASSERT((ke->ke_state == KES_ONRUNQ),
1901	    ("sched_rem: KSE not on run queue"));
1902
1903	ke->ke_state = KES_THREAD;
1904	kseq = KSEQ_CPU(ke->ke_cpu);
1905	kseq_runq_rem(kseq, ke);
1906	kseq_load_rem(kseq, ke);
1907}
1908
1909fixpt_t
1910sched_pctcpu(struct thread *td)
1911{
1912	fixpt_t pctcpu;
1913	struct kse *ke;
1914
1915	pctcpu = 0;
1916	ke = td->td_kse;
1917	if (ke == NULL)
1918		return (0);
1919
1920	mtx_lock_spin(&sched_lock);
1921	if (ke->ke_ticks) {
1922		int rtick;
1923
1924		/*
1925		 * Don't update more frequently than twice a second.  Allowing
1926		 * this causes the cpu usage to decay away too quickly due to
1927		 * rounding errors.
1928		 */
1929		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
1930		    ke->ke_ltick < (ticks - (hz / 2)))
1931			sched_pctcpu_update(ke);
1932		/* How many rtick per second ? */
1933		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1934		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1935	}
1936
1937	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1938	mtx_unlock_spin(&sched_lock);
1939
1940	return (pctcpu);
1941}
1942
1943void
1944sched_bind(struct thread *td, int cpu)
1945{
1946	struct kse *ke;
1947
1948	mtx_assert(&sched_lock, MA_OWNED);
1949	ke = td->td_kse;
1950	ke->ke_flags |= KEF_BOUND;
1951#ifdef SMP
1952	if (PCPU_GET(cpuid) == cpu)
1953		return;
1954	/* sched_rem without the runq_remove */
1955	ke->ke_state = KES_THREAD;
1956	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1957	kseq_notify(ke, cpu);
1958	/* When we return from mi_switch we'll be on the correct cpu. */
1959	mi_switch(SW_VOL, NULL);
1960#endif
1961}
1962
1963void
1964sched_unbind(struct thread *td)
1965{
1966	mtx_assert(&sched_lock, MA_OWNED);
1967	td->td_kse->ke_flags &= ~KEF_BOUND;
1968}
1969
1970int
1971sched_is_bound(struct thread *td)
1972{
1973	mtx_assert(&sched_lock, MA_OWNED);
1974	return (td->td_kse->ke_flags & KEF_BOUND);
1975}
1976
1977int
1978sched_load(void)
1979{
1980#ifdef SMP
1981	int total;
1982	int i;
1983
1984	total = 0;
1985	for (i = 0; i <= ksg_maxid; i++)
1986		total += KSEQ_GROUP(i)->ksg_load;
1987	return (total);
1988#else
1989	return (KSEQ_SELF()->ksq_sysload);
1990#endif
1991}
1992
1993int
1994sched_sizeof_ksegrp(void)
1995{
1996	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1997}
1998
1999int
2000sched_sizeof_proc(void)
2001{
2002	return (sizeof(struct proc));
2003}
2004
2005int
2006sched_sizeof_thread(void)
2007{
2008	return (sizeof(struct thread) + sizeof(struct td_sched));
2009}
2010
2011void
2012sched_tick(void)
2013{
2014}
2015#define KERN_SWITCH_INCLUDE 1
2016#include "kern/kern_switch.c"
2017