1/*-
2 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 134586 2004-09-01 02:11:28Z julian $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kdb.h>
33#include <sys/kernel.h>
34#include <sys/ktr.h>
35#include <sys/lock.h>
36#include <sys/mutex.h>
37#include <sys/proc.h>
38#include <sys/resource.h>
39#include <sys/resourcevar.h>
40#include <sys/sched.h>
41#include <sys/smp.h>
42#include <sys/sx.h>
43#include <sys/sysctl.h>
44#include <sys/sysproto.h>
45#include <sys/vmmeter.h>
46#ifdef KTRACE
47#include <sys/uio.h>
48#include <sys/ktrace.h>
49#endif
50
51#include <machine/cpu.h>
52#include <machine/smp.h>
53
54#define KTR_ULE	KTR_NFS
55
56/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
57/* XXX This is bogus compatibility crap for ps */
58static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
59SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
60
61static void sched_setup(void *dummy);
62SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
63
64static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
65
66SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ule", 0,
67    "Scheduler name");
68
69static int slice_min = 1;
70SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
71
72static int slice_max = 10;
73SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
74
75int realstathz;
76int tickincr = 1;
77
78/*
79 * These data structures are allocated within their parent data structure but
80 * are scheduler specific.
81 */
82
83struct ke_sched {
84	int		ske_slice;
85	struct runq	*ske_runq;
86	/* The following variables are only used for pctcpu calculation */
87	int		ske_ltick;	/* Last tick that we were running on */
88	int		ske_ftick;	/* First tick that we were running on */
89	int		ske_ticks;	/* Tick count */
90	/* CPU that we have affinity for. */
91	u_char		ske_cpu;
92};
93#define	ke_slice	ke_sched->ske_slice
94#define	ke_runq		ke_sched->ske_runq
95#define	ke_ltick	ke_sched->ske_ltick
96#define	ke_ftick	ke_sched->ske_ftick
97#define	ke_ticks	ke_sched->ske_ticks
98#define	ke_cpu		ke_sched->ske_cpu
99#define	ke_assign	ke_procq.tqe_next
100
101#define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */
102#define	KEF_BOUND	KEF_SCHED1	/* KSE can not migrate. */
103#define	KEF_XFERABLE	KEF_SCHED2	/* KSE was added as transferable. */
104#define	KEF_HOLD	KEF_SCHED3	/* KSE is temporarily bound. */
105
106struct kg_sched {
107	int	skg_slptime;		/* Number of ticks we voluntarily slept */
108	int	skg_runtime;		/* Number of ticks we were running */
109};
110#define	kg_slptime	kg_sched->skg_slptime
111#define	kg_runtime	kg_sched->skg_runtime
112
113struct td_sched {
114	int	std_slptime;
115};
116#define	td_slptime	td_sched->std_slptime
117
118struct td_sched td_sched;
119struct ke_sched ke_sched;
120struct kg_sched kg_sched;
121
122struct ke_sched *kse0_sched = &ke_sched;
123struct kg_sched *ksegrp0_sched = &kg_sched;
124struct p_sched *proc0_sched = NULL;
125struct td_sched *thread0_sched = &td_sched;
126
127/*
128 * The priority is primarily determined by the interactivity score.  Thus, we
129 * give lower (better) priorities to kse groups that use less CPU.  The nice
130 * value is then directly added to this to allow nice to have some effect
131 * on latency.
132 *
133 * PRI_RANGE:	Total priority range for timeshare threads.
134 * PRI_NRESV:	Number of nice values.
135 * PRI_BASE:	The start of the dynamic range.
136 */
137#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
138#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
139#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
140#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
141#define	SCHED_PRI_INTERACT(score)					\
142    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
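
/*
 * For illustration, assuming the stock <sys/priority.h> values of this era
 * (PRI_MIN_TIMESHARE = 160 and PRI_MAX_TIMESHARE = 223; assumed here, they
 * are not defined in this file), SCHED_PRI_RANGE is 64.  An interactivity
 * score of 50 then maps to SCHED_PRI_INTERACT(50) = 50 * 64 / 100 = 32 and
 * sched_priority() computes 160 + 32 + p_nice before clamping to the
 * timeshare range; a score of 0 lands at PRI_MIN_TIMESHARE + p_nice, and a
 * score of 100 with nice 0 clamps to PRI_MAX_TIMESHARE.
 */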
143
144/*
145 * These determine the interactivity of a process.
146 *
147 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
148 *		before throttling back.
149 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
150 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
151 * INTERACT_THRESH:	Threshold for placement on the current runq.
152 */
153#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
154#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
155#define	SCHED_INTERACT_MAX	(100)
156#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
157#define	SCHED_INTERACT_THRESH	(30)
158
159/*
160 * These parameters and macros determine the size of the time slice that is
161 * granted to each thread.
162 *
163 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
164 * SLICE_MAX:	Maximum time slice granted.
165 * SLICE_RANGE:	Range of available time slices scaled by hz.
166 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
167 * SLICE_NICE:  Determines the amount of slice granted to a scaled nice value.
168 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
169 */
170#define	SCHED_SLICE_MIN			(slice_min)
171#define	SCHED_SLICE_MAX			(slice_max)
172#define	SCHED_SLICE_INTERACTIVE		(slice_max)
173#define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
174#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
175#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
176#define	SCHED_SLICE_NICE(nice)						\
177    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
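
/*
 * For illustration, assuming hz = 1000 so that sched_setup() leaves
 * slice_min = 10 and slice_max = 142 ticks: SCHED_SLICE_RANGE is 133 and
 * SCHED_SLICE_NTHRESH is 19, so a kse whose nice value sits 10 above the
 * queue's ksq_nicemin gets SCHED_SLICE_NICE(10) = 142 - (10 * 133) / 19 =
 * 142 - 70 = 72 ticks, the least nice kse gets the full 142, and offsets
 * beyond 19 fall outside the window that sched_slice() handles below.
 */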
178
179/*
180 * These macros determine whether the kse belongs on the current or the
181 * next run queue.
182 */
183#define	SCHED_INTERACTIVE(kg)						\
184    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
185#define	SCHED_CURR(kg, ke)						\
186    (ke->ke_thread->td_priority < kg->kg_user_pri ||			\
187    SCHED_INTERACTIVE(kg))
188
189/*
190 * Cpu percentage computation macros and defines.
191 *
192 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
193 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
194 */
195
196#define	SCHED_CPU_TIME	10
197#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
198
199/*
200 * kseq - per processor runqs and statistics.
201 */
202struct kseq {
203	struct runq	ksq_idle;		/* Queue of IDLE threads. */
204	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
205	struct runq	*ksq_next;		/* Next timeshare queue. */
206	struct runq	*ksq_curr;		/* Current queue. */
207	int		ksq_load_timeshare;	/* Load for timeshare. */
208	int		ksq_load;		/* Aggregate load. */
209	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
210	short		ksq_nicemin;		/* Least nice. */
211#ifdef SMP
212	int			ksq_transferable;
213	LIST_ENTRY(kseq)	ksq_siblings;	/* Next in kseq group. */
214	struct kseq_group	*ksq_group;	/* Our processor group. */
215	volatile struct kse	*ksq_assigned;	/* assigned by another CPU. */
216#else
217	int		ksq_sysload;		/* For loadavg, !ITHD load. */
218#endif
219};
220
221#ifdef SMP
222/*
223 * kseq groups are groups of processors which can cheaply share threads.  When
224 * one processor in the group goes idle it will check the runqs of the other
225 * processors in its group prior to halting and waiting for an interrupt.
226 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
227 * In a NUMA environment we'd want an idle bitmap per group and a two tiered
228 * load balancer.
229 */
230struct kseq_group {
231	int	ksg_cpus;		/* Count of CPUs in this kseq group. */
232	cpumask_t ksg_cpumask;		/* Mask of cpus in this group. */
233	cpumask_t ksg_idlemask;		/* Idle cpus in this group. */
234	cpumask_t ksg_mask;		/* Bit mask for first cpu. */
235	int	ksg_load;		/* Total load of this group. */
236	int	ksg_transferable;	/* Transferable load of this group. */
237	LIST_HEAD(, kseq)	ksg_members; /* Linked list of all members. */
238};
239#endif
240
241/*
242 * One kse queue per processor.
243 */
244#ifdef SMP
245static cpumask_t kseq_idle;
246static int ksg_maxid;
247static struct kseq	kseq_cpu[MAXCPU];
248static struct kseq_group kseq_groups[MAXCPU];
249static int bal_tick;
250static int gbal_tick;
251
252#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
253#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
254#define	KSEQ_ID(x)	((x) - kseq_cpu)
255#define	KSEQ_GROUP(x)	(&kseq_groups[(x)])
256#else	/* !SMP */
257static struct kseq	kseq_cpu;
258
259#define	KSEQ_SELF()	(&kseq_cpu)
260#define	KSEQ_CPU(x)	(&kseq_cpu)
261#endif
262
263static void sched_add_internal(struct thread *td, int preemptive);
264static void sched_slice(struct kse *ke);
265static void sched_priority(struct ksegrp *kg);
266static int sched_interact_score(struct ksegrp *kg);
267static void sched_interact_update(struct ksegrp *kg);
268static void sched_interact_fork(struct ksegrp *kg);
269static void sched_pctcpu_update(struct kse *ke);
270
271/* Operations on per processor queues */
272static struct kse * kseq_choose(struct kseq *kseq);
273static void kseq_setup(struct kseq *kseq);
274static void kseq_load_add(struct kseq *kseq, struct kse *ke);
275static void kseq_load_rem(struct kseq *kseq, struct kse *ke);
276static __inline void kseq_runq_add(struct kseq *kseq, struct kse *ke);
277static __inline void kseq_runq_rem(struct kseq *kseq, struct kse *ke);
278static void kseq_nice_add(struct kseq *kseq, int nice);
279static void kseq_nice_rem(struct kseq *kseq, int nice);
280void kseq_print(int cpu);
281#ifdef SMP
282static int kseq_transfer(struct kseq *ksq, struct kse *ke, int class);
283static struct kse *runq_steal(struct runq *rq);
284static void sched_balance(void);
285static void sched_balance_groups(void);
286static void sched_balance_group(struct kseq_group *ksg);
287static void sched_balance_pair(struct kseq *high, struct kseq *low);
288static void kseq_move(struct kseq *from, int cpu);
289static int kseq_idled(struct kseq *kseq);
290static void kseq_notify(struct kse *ke, int cpu);
291static void kseq_assign(struct kseq *);
292static struct kse *kseq_steal(struct kseq *kseq, int stealidle);
293/*
294 * On P4 Xeons the round-robin interrupt delivery is broken.  As a result of
295 * this, we can't pin interrupts to the cpu that they were delivered to,
296 * otherwise all ithreads only run on CPU 0.
297 */
298#ifdef __i386__
299#define	KSE_CAN_MIGRATE(ke, class)					\
300    ((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)
301#else /* !__i386__ */
302#define	KSE_CAN_MIGRATE(ke, class)					\
303    ((class) != PRI_ITHD && (ke)->ke_thread->td_pinned == 0 &&		\
304    ((ke)->ke_flags & KEF_BOUND) == 0)
305#endif /* !__i386__ */
306#endif
307
308void
309kseq_print(int cpu)
310{
311	struct kseq *kseq;
312	int i;
313
314	kseq = KSEQ_CPU(cpu);
315
316	printf("kseq:\n");
317	printf("\tload:           %d\n", kseq->ksq_load);
318	printf("\tload TIMESHARE: %d\n", kseq->ksq_load_timeshare);
319#ifdef SMP
320	printf("\tload transferable: %d\n", kseq->ksq_transferable);
321#endif
322	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
323	printf("\tnice counts:\n");
324	for (i = 0; i < SCHED_PRI_NRESV; i++)
325		if (kseq->ksq_nice[i])
326			printf("\t\t%d = %d\n",
327			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
328}
329
330static __inline void
331kseq_runq_add(struct kseq *kseq, struct kse *ke)
332{
333#ifdef SMP
334	if (KSE_CAN_MIGRATE(ke, PRI_BASE(ke->ke_ksegrp->kg_pri_class))) {
335		kseq->ksq_transferable++;
336		kseq->ksq_group->ksg_transferable++;
337		ke->ke_flags |= KEF_XFERABLE;
338	}
339#endif
340	runq_add(ke->ke_runq, ke);
341}
342
343static __inline void
344kseq_runq_rem(struct kseq *kseq, struct kse *ke)
345{
346#ifdef SMP
347	if (ke->ke_flags & KEF_XFERABLE) {
348		kseq->ksq_transferable--;
349		kseq->ksq_group->ksg_transferable--;
350		ke->ke_flags &= ~KEF_XFERABLE;
351	}
352#endif
353	runq_remove(ke->ke_runq, ke);
354}
355
356static void
357kseq_load_add(struct kseq *kseq, struct kse *ke)
358{
359	int class;
360	mtx_assert(&sched_lock, MA_OWNED);
361	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
362	if (class == PRI_TIMESHARE)
363		kseq->ksq_load_timeshare++;
364	kseq->ksq_load++;
365	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
366#ifdef SMP
367		kseq->ksq_group->ksg_load++;
368#else
369		kseq->ksq_sysload++;
370#endif
371	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
372		CTR6(KTR_ULE,
373		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
374		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
375		    ke->ke_proc->p_nice, kseq->ksq_nicemin);
376	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
377		kseq_nice_add(kseq, ke->ke_proc->p_nice);
378}
379
380static void
381kseq_load_rem(struct kseq *kseq, struct kse *ke)
382{
383	int class;
384	mtx_assert(&sched_lock, MA_OWNED);
385	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
386	if (class == PRI_TIMESHARE)
387		kseq->ksq_load_timeshare--;
388	if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
389#ifdef SMP
390		kseq->ksq_group->ksg_load--;
391#else
392		kseq->ksq_sysload--;
393#endif
394	kseq->ksq_load--;
395	ke->ke_runq = NULL;
396	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
397		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
398}
399
400static void
401kseq_nice_add(struct kseq *kseq, int nice)
402{
403	mtx_assert(&sched_lock, MA_OWNED);
404	/* Normalize to zero. */
405	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
406	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
407		kseq->ksq_nicemin = nice;
408}
409
410static void
411kseq_nice_rem(struct kseq *kseq, int nice)
412{
413	int n;
414
415	mtx_assert(&sched_lock, MA_OWNED);
416	/* Normalize to zero. */
417	n = nice + SCHED_PRI_NHALF;
418	kseq->ksq_nice[n]--;
419	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
420
421	/*
422	 * If this wasn't the smallest nice value or there are more in
423	 * this bucket we can just return.  Otherwise we have to recalculate
424	 * the smallest nice.
425	 */
426	if (nice != kseq->ksq_nicemin ||
427	    kseq->ksq_nice[n] != 0 ||
428	    kseq->ksq_load_timeshare == 0)
429		return;
430
431	for (; n < SCHED_PRI_NRESV; n++)
432		if (kseq->ksq_nice[n]) {
433			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
434			return;
435		}
436}
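
/*
 * For illustration: with PRIO_MIN = -20 and PRIO_MAX = 20, SCHED_PRI_NRESV
 * is 41 and SCHED_PRI_NHALF is 20, so nice -5 is counted in ksq_nice[15]
 * and nice +20 in ksq_nice[40].  kseq_nice_rem() only rescans upward for a
 * new ksq_nicemin when the value removed was the minimum, its bucket has
 * gone empty, and there is still timeshare load on the queue.
 */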
437
438#ifdef SMP
439/*
440 * sched_balance is a simple CPU load balancing algorithm.  It operates by
441 * finding the least loaded and most loaded cpus and equalizing their load
442 * by migrating some processes.
443 *
444 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
445 * installations will only have 2 cpus.  Secondly, load balancing too much at
446 * once can have an unpleasant effect on the system.  The scheduler rarely has
447 * enough information to make perfect decisions.  So this algorithm favors
448 * simplicity and more gradual effects on load in larger systems.
449 *
450 * It could be improved by considering the priorities and slices assigned to
451 * each task prior to balancing them.  There are many pathological cases with
452 * any approach and so the semi random algorithm below may work as well as any.
453 *
454 */
455static void
456sched_balance(void)
457{
458	struct kseq_group *high;
459	struct kseq_group *low;
460	struct kseq_group *ksg;
461	int cnt;
462	int i;
463
464	if (smp_started == 0)
465		goto out;
466	low = high = NULL;
467	i = random() % (ksg_maxid + 1);
468	for (cnt = 0; cnt <= ksg_maxid; cnt++) {
469		ksg = KSEQ_GROUP(i);
470		/*
471		 * Find the CPU with the highest load that has some
472		 * threads to transfer.
473		 */
474		if ((high == NULL || ksg->ksg_load > high->ksg_load)
475		    && ksg->ksg_transferable)
476			high = ksg;
477		if (low == NULL || ksg->ksg_load < low->ksg_load)
478			low = ksg;
479		if (++i > ksg_maxid)
480			i = 0;
481	}
482	if (low != NULL && high != NULL && high != low)
483		sched_balance_pair(LIST_FIRST(&high->ksg_members),
484		    LIST_FIRST(&low->ksg_members));
485out:
486	bal_tick = ticks + (random() % (hz * 2));
487}
488
489static void
490sched_balance_groups(void)
491{
492	int i;
493
494	mtx_assert(&sched_lock, MA_OWNED);
495	if (smp_started)
496		for (i = 0; i <= ksg_maxid; i++)
497			sched_balance_group(KSEQ_GROUP(i));
498	gbal_tick = ticks + (random() % (hz * 2));
499}
500
501static void
502sched_balance_group(struct kseq_group *ksg)
503{
504	struct kseq *kseq;
505	struct kseq *high;
506	struct kseq *low;
507	int load;
508
509	if (ksg->ksg_transferable == 0)
510		return;
511	low = NULL;
512	high = NULL;
513	LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
514		load = kseq->ksq_load;
515		if (high == NULL || load > high->ksq_load)
516			high = kseq;
517		if (low == NULL || load < low->ksq_load)
518			low = kseq;
519	}
520	if (high != NULL && low != NULL && high != low)
521		sched_balance_pair(high, low);
522}
523
524static void
525sched_balance_pair(struct kseq *high, struct kseq *low)
526{
527	int transferable;
528	int high_load;
529	int low_load;
530	int move;
531	int diff;
532	int i;
533
534	/*
535	 * If we're transferring within a group we have to use this specific
536	 * kseq's transferable count, otherwise we can steal from other members
537	 * of the group.
538	 */
539	if (high->ksq_group == low->ksq_group) {
540		transferable = high->ksq_transferable;
541		high_load = high->ksq_load;
542		low_load = low->ksq_load;
543	} else {
544		transferable = high->ksq_group->ksg_transferable;
545		high_load = high->ksq_group->ksg_load;
546		low_load = low->ksq_group->ksg_load;
547	}
548	if (transferable == 0)
549		return;
550	/*
551	 * Determine what the imbalance is and then adjust that to how many
552	 * kses we actually have to give up (transferable).
553	 */
554	diff = high_load - low_load;
555	move = diff / 2;
556	if (diff & 0x1)
557		move++;
558	move = min(move, transferable);
559	for (i = 0; i < move; i++)
560		kseq_move(high, KSEQ_ID(low));
561	return;
562}
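
/*
 * For illustration: if the high queue (or group) reports a load of 7 and
 * the low one a load of 2, then diff = 5 and move = 5 / 2 + 1 = 3 because
 * the difference is odd, so up to three kses are pushed toward the low
 * cpu, clamped by the transferable count.
 */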
563
564static void
565kseq_move(struct kseq *from, int cpu)
566{
567	struct kseq *kseq;
568	struct kseq *to;
569	struct kse *ke;
570
571	kseq = from;
572	to = KSEQ_CPU(cpu);
573	ke = kseq_steal(kseq, 1);
574	if (ke == NULL) {
575		struct kseq_group *ksg;
576
577		ksg = kseq->ksq_group;
578		LIST_FOREACH(kseq, &ksg->ksg_members, ksq_siblings) {
579			if (kseq == from || kseq->ksq_transferable == 0)
580				continue;
581			ke = kseq_steal(kseq, 1);
582			break;
583		}
584		if (ke == NULL)
585			panic("kseq_move: No KSEs available with a "
586			    "transferable count of %d\n",
587			    ksg->ksg_transferable);
588	}
589	if (kseq == to)
590		return;
591	ke->ke_state = KES_THREAD;
592	kseq_runq_rem(kseq, ke);
593	kseq_load_rem(kseq, ke);
594	kseq_notify(ke, cpu);
595}
596
597static int
598kseq_idled(struct kseq *kseq)
599{
600	struct kseq_group *ksg;
601	struct kseq *steal;
602	struct kse *ke;
603
604	ksg = kseq->ksq_group;
605	/*
606	 * If we're in a cpu group, try to steal kses from another cpu in
607	 * the group before idling.
608	 */
609	if (ksg->ksg_cpus > 1 && ksg->ksg_transferable) {
610		LIST_FOREACH(steal, &ksg->ksg_members, ksq_siblings) {
611			if (steal == kseq || steal->ksq_transferable == 0)
612				continue;
613			ke = kseq_steal(steal, 0);
614			if (ke == NULL)
615				continue;
616			ke->ke_state = KES_THREAD;
617			kseq_runq_rem(steal, ke);
618			kseq_load_rem(steal, ke);
619			ke->ke_cpu = PCPU_GET(cpuid);
620			sched_add_internal(ke->ke_thread, 0);
621			return (0);
622		}
623	}
624	/*
625	 * We only set the idled bit when all of the cpus in the group are
626	 * idle.  Otherwise we could get into a situation where a KSE bounces
627	 * back and forth between two idle cores on separate physical CPUs.
628	 */
629	ksg->ksg_idlemask |= PCPU_GET(cpumask);
630	if (ksg->ksg_idlemask != ksg->ksg_cpumask)
631		return (1);
632	atomic_set_int(&kseq_idle, ksg->ksg_mask);
633	return (1);
634}
635
636static void
637kseq_assign(struct kseq *kseq)
638{
639	struct kse *nke;
640	struct kse *ke;
641
642	do {
643		*(volatile struct kse **)&ke = kseq->ksq_assigned;
644	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
645	for (; ke != NULL; ke = nke) {
646		nke = ke->ke_assign;
647		ke->ke_flags &= ~KEF_ASSIGNED;
648		sched_add_internal(ke->ke_thread, 0);
649	}
650}
651
652static void
653kseq_notify(struct kse *ke, int cpu)
654{
655	struct kseq *kseq;
656	struct thread *td;
657	struct pcpu *pcpu;
658	int prio;
659
660	ke->ke_cpu = cpu;
661	ke->ke_flags |= KEF_ASSIGNED;
662	prio = ke->ke_thread->td_priority;
663
664	kseq = KSEQ_CPU(cpu);
665
666	/*
667	 * Place a KSE on another cpu's queue and force a resched.
668	 */
669	do {
670		*(volatile struct kse **)&ke->ke_assign = kseq->ksq_assigned;
671	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
672	/*
673	 * Without sched_lock we could lose a race where we set NEEDRESCHED
674	 * on a thread that is switched out before the IPI is delivered.  This
675	 * would lead us to miss the resched.  This will be a problem once
676	 * sched_lock is pushed down.
677	 */
678	pcpu = pcpu_find(cpu);
679	td = pcpu->pc_curthread;
680	if (ke->ke_thread->td_priority < td->td_priority ||
681	    td == pcpu->pc_idlethread) {
682		td->td_flags |= TDF_NEEDRESCHED;
683		ipi_selected(1 << cpu, IPI_AST);
684	}
685}
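
/*
 * A minimal sketch (not compiled, and using illustrative names) of the
 * lock-free hand-off implemented by kseq_notify() and kseq_assign() above:
 * the producer pushes onto a singly linked list head with a cmpset loop,
 * and the consumer drains the whole list by swapping the head to NULL.
 */
#if 0
struct item {
	struct item *next;		/* Plays the role of ke_assign. */
};
static volatile struct item *ihead;	/* Plays the role of ksq_assigned. */

static void
item_push(struct item *it)
{
	do {
		it->next = (struct item *)ihead;
	} while (!atomic_cmpset_ptr(&ihead, it->next, it));
}

static struct item *
item_drain(void)
{
	struct item *list;

	do {
		list = (struct item *)ihead;
	} while (!atomic_cmpset_ptr(&ihead, list, NULL));
	return (list);		/* Walk via 'next', as kseq_assign() does. */
}
#endif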
686
687static struct kse *
688runq_steal(struct runq *rq)
689{
690	struct rqhead *rqh;
691	struct rqbits *rqb;
692	struct kse *ke;
693	int word;
694	int bit;
695
696	mtx_assert(&sched_lock, MA_OWNED);
697	rqb = &rq->rq_status;
698	for (word = 0; word < RQB_LEN; word++) {
699		if (rqb->rqb_bits[word] == 0)
700			continue;
701		for (bit = 0; bit < RQB_BPW; bit++) {
702			if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
703				continue;
704			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
705			TAILQ_FOREACH(ke, rqh, ke_procq) {
706				if (KSE_CAN_MIGRATE(ke,
707				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
708					return (ke);
709			}
710		}
711	}
712	return (NULL);
713}
714
715static struct kse *
716kseq_steal(struct kseq *kseq, int stealidle)
717{
718	struct kse *ke;
719
720	/*
721	 * Steal from next first to try to get a non-interactive task that
722	 * may not have run for a while.
723	 */
724	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
725		return (ke);
726	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
727		return (ke);
728	if (stealidle)
729		return (runq_steal(&kseq->ksq_idle));
730	return (NULL);
731}
732
733int
734kseq_transfer(struct kseq *kseq, struct kse *ke, int class)
735{
736	struct kseq_group *ksg;
737	int cpu;
738
739	if (smp_started == 0)
740		return (0);
741	cpu = 0;
742	/*
743	 * If our load exceeds a certain threshold we should attempt to
744	 * reassign this thread.  The first candidate is the cpu that
745	 * originally ran the thread.  If it is idle, assign it there,
746	 * otherwise, pick an idle cpu.
747	 *
748	 * The threshold at which we start to reassign kses has a large impact
749	 * on the overall performance of the system.  Tuned too high and
750	 * some CPUs may idle.  Too low and there will be excess migration
751	 * and context switches.
752	 */
753	ksg = kseq->ksq_group;
754	if (ksg->ksg_load > ksg->ksg_cpus && kseq_idle) {
755		ksg = KSEQ_CPU(ke->ke_cpu)->ksq_group;
756		if (kseq_idle & ksg->ksg_mask) {
757			cpu = ffs(ksg->ksg_idlemask);
758			if (cpu)
759				goto migrate;
760		}
761		/*
762		 * Multiple cpus could find this bit simultaneously
763		 * but the race shouldn't be terrible.
764		 */
765		cpu = ffs(kseq_idle);
766		if (cpu)
767			goto migrate;
768	}
769	/*
770	 * If another cpu in this group has idled, assign the thread to
771	 * it after checking to see if there are idled groups.
772	 */
773	ksg = kseq->ksq_group;
774	if (ksg->ksg_idlemask) {
775		cpu = ffs(ksg->ksg_idlemask);
776		if (cpu)
777			goto migrate;
778	}
779	/*
780	 * No new CPU was found.
781	 */
782	return (0);
783migrate:
784	/*
785	 * Now that we've found an idle CPU, migrate the thread.
786	 */
787	cpu--;
788	ke->ke_runq = NULL;
789	kseq_notify(ke, cpu);
790
791	return (1);
792}
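
/*
 * Note on the ffs() calls above: ffs() returns the 1-based index of the
 * lowest set bit, so an idle mask of 0x4 yields cpu = 3, and the cpu--
 * at the migrate label converts that back to the 0-based cpu id 2 before
 * kseq_notify() is called.
 */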
793
794#endif	/* SMP */
795
796/*
797 * Pick the highest priority task we have and return it.
798 */
799
800static struct kse *
801kseq_choose(struct kseq *kseq)
802{
803	struct kse *ke;
804	struct runq *swap;
805
806	mtx_assert(&sched_lock, MA_OWNED);
807	swap = NULL;
808
809	for (;;) {
810		ke = runq_choose(kseq->ksq_curr);
811		if (ke == NULL) {
812			/*
813			 * We already swapped once and didn't get anywhere.
814			 */
815			if (swap)
816				break;
817			swap = kseq->ksq_curr;
818			kseq->ksq_curr = kseq->ksq_next;
819			kseq->ksq_next = swap;
820			continue;
821		}
822		/*
823		 * If we encounter a slice of 0 the kse is in a
824		 * TIMESHARE kse group and its nice was too far out
825		 * of the range that receives slices.
826		 */
827		if (ke->ke_slice == 0) {
828			runq_remove(ke->ke_runq, ke);
829			sched_slice(ke);
830			ke->ke_runq = kseq->ksq_next;
831			runq_add(ke->ke_runq, ke);
832			continue;
833		}
834		return (ke);
835	}
836
837	return (runq_choose(&kseq->ksq_idle));
838}
839
840static void
841kseq_setup(struct kseq *kseq)
842{
843	runq_init(&kseq->ksq_timeshare[0]);
844	runq_init(&kseq->ksq_timeshare[1]);
845	runq_init(&kseq->ksq_idle);
846	kseq->ksq_curr = &kseq->ksq_timeshare[0];
847	kseq->ksq_next = &kseq->ksq_timeshare[1];
848	kseq->ksq_load = 0;
849	kseq->ksq_load_timeshare = 0;
850}
851
852static void
853sched_setup(void *dummy)
854{
855#ifdef SMP
856	int balance_groups;
857	int i;
858#endif
859
860	slice_min = (hz/100);	/* 10ms */
861	slice_max = (hz/7);	/* ~140ms */
862
863#ifdef SMP
864	balance_groups = 0;
865	/*
866	 * Initialize the kseqs.
867	 */
868	for (i = 0; i < MAXCPU; i++) {
869		struct kseq *ksq;
870
871		ksq = &kseq_cpu[i];
872		ksq->ksq_assigned = NULL;
873		kseq_setup(&kseq_cpu[i]);
874	}
875	if (smp_topology == NULL) {
876		struct kseq_group *ksg;
877		struct kseq *ksq;
878
879		for (i = 0; i < MAXCPU; i++) {
880			ksq = &kseq_cpu[i];
881			ksg = &kseq_groups[i];
882			/*
883			 * Setup a kseq group with one member.
884			 */
885			ksq->ksq_transferable = 0;
886			ksq->ksq_group = ksg;
887			ksg->ksg_cpus = 1;
888			ksg->ksg_idlemask = 0;
889			ksg->ksg_cpumask = ksg->ksg_mask = 1 << i;
890			ksg->ksg_load = 0;
891			ksg->ksg_transferable = 0;
892			LIST_INIT(&ksg->ksg_members);
893			LIST_INSERT_HEAD(&ksg->ksg_members, ksq, ksq_siblings);
894		}
895	} else {
896		struct kseq_group *ksg;
897		struct cpu_group *cg;
898		int j;
899
900		for (i = 0; i < smp_topology->ct_count; i++) {
901			cg = &smp_topology->ct_group[i];
902			ksg = &kseq_groups[i];
903			/*
904			 * Initialize the group.
905			 */
906			ksg->ksg_idlemask = 0;
907			ksg->ksg_load = 0;
908			ksg->ksg_transferable = 0;
909			ksg->ksg_cpus = cg->cg_count;
910			ksg->ksg_cpumask = cg->cg_mask;
911			LIST_INIT(&ksg->ksg_members);
912			/*
913			 * Find all of the group members and add them.
914			 */
915			for (j = 0; j < MAXCPU; j++) {
916				if ((cg->cg_mask & (1 << j)) != 0) {
917					if (ksg->ksg_mask == 0)
918						ksg->ksg_mask = 1 << j;
919					kseq_cpu[j].ksq_transferable = 0;
920					kseq_cpu[j].ksq_group = ksg;
921					LIST_INSERT_HEAD(&ksg->ksg_members,
922					    &kseq_cpu[j], ksq_siblings);
923				}
924			}
925			if (ksg->ksg_cpus > 1)
926				balance_groups = 1;
927		}
928		ksg_maxid = smp_topology->ct_count - 1;
929	}
930	/*
931	 * Stagger the group and global load balancer so they do not
932	 * interfere with each other.
933	 */
934	bal_tick = ticks + hz;
935	if (balance_groups)
936		gbal_tick = ticks + (hz / 2);
937#else
938	kseq_setup(KSEQ_SELF());
939#endif
940	mtx_lock_spin(&sched_lock);
941	kseq_load_add(KSEQ_SELF(), &kse0);
942	mtx_unlock_spin(&sched_lock);
943}
944
945/*
946 * Scale the scheduling priority according to the "interactivity" of this
947 * process.
948 */
949static void
950sched_priority(struct ksegrp *kg)
951{
952	int pri;
953
954	if (kg->kg_pri_class != PRI_TIMESHARE)
955		return;
956
957	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
958	pri += SCHED_PRI_BASE;
959	pri += kg->kg_proc->p_nice;
960
961	if (pri > PRI_MAX_TIMESHARE)
962		pri = PRI_MAX_TIMESHARE;
963	else if (pri < PRI_MIN_TIMESHARE)
964		pri = PRI_MIN_TIMESHARE;
965
966	kg->kg_user_pri = pri;
967
968	return;
969}
970
971/*
972 * Calculate a time slice based on the properties of the kseg and the runq
973 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
974 */
975static void
976sched_slice(struct kse *ke)
977{
978	struct kseq *kseq;
979	struct ksegrp *kg;
980
981	kg = ke->ke_ksegrp;
982	kseq = KSEQ_CPU(ke->ke_cpu);
983
984	/*
985	 * Rationale:
986	 * KSEs in interactive ksegs get a minimal slice so that we
987	 * quickly notice if it abuses its advantage.
988	 *
989	 * KSEs in non-interactive ksegs are assigned a slice that is
990	 * based on the kseg's nice value relative to the least nice kseg
991	 * on the run queue for this cpu.
992	 *
993	 * If the KSE is less nice than all others it gets the maximum
994	 * slice and other KSEs will adjust their slice relative to
995	 * this when they first expire.
996	 *
997	 * There is a 20 point window that starts relative to the least
998	 * nice kse on the run queue.  Slice size is determined by
999	 * the kse's distance from the least nice ksegrp.
1000	 *
1001	 * If the kse is outside of the window it will get no slice
1002	 * and will be reevaluated each time it is selected on the
1003	 * run queue.  The exception to this is nice 0 ksegs when
1004	 * a nice -20 is running.  They are always granted a minimum
1005	 * slice.
1006	 */
1007	if (!SCHED_INTERACTIVE(kg)) {
1008		int nice;
1009
1010		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
1011		if (kseq->ksq_load_timeshare == 0 ||
1012		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
1013			ke->ke_slice = SCHED_SLICE_MAX;
1014		else if (nice <= SCHED_SLICE_NTHRESH)
1015			ke->ke_slice = SCHED_SLICE_NICE(nice);
1016		else if (kg->kg_proc->p_nice == 0)
1017			ke->ke_slice = SCHED_SLICE_MIN;
1018		else
1019			ke->ke_slice = 0;
1020	} else
1021		ke->ke_slice = SCHED_SLICE_INTERACTIVE;
1022
1023	CTR6(KTR_ULE,
1024	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
1025	    ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin,
1026	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));
1027
1028	return;
1029}
1030
1031/*
1032 * This routine enforces a maximum limit on the amount of scheduling history
1033 * kept.  It is called after either the slptime or runtime is adjusted.
1034 * This routine will not operate correctly when slp or run times have been
1035 * adjusted to more than double their maximum.
1036 */
1037static void
1038sched_interact_update(struct ksegrp *kg)
1039{
1040	int sum;
1041
1042	sum = kg->kg_runtime + kg->kg_slptime;
1043	if (sum < SCHED_SLP_RUN_MAX)
1044		return;
1045	/*
1046	 * If we have exceeded the maximum by more than 1/5th then the algorithm
1047	 * below will not bring us back into range.  Dividing by two here forces
1048	 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
1049	 */
1050	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1051		kg->kg_runtime /= 2;
1052		kg->kg_slptime /= 2;
1053		return;
1054	}
1055	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
1056	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
1057}
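
/*
 * For illustration, assuming hz = 1000: SCHED_SLP_RUN_MAX is
 * (1000 * 5) << 10 = 5120000.  A ksegrp with kg_runtime = 4000000 and
 * kg_slptime = 1500000 has a sum of 5500000, over the maximum but under
 * (5120000 / 5) * 6 = 6144000, so both values are scaled by 4/5 to
 * 3200000 and 1200000.  Had the sum exceeded 6144000, both would have
 * been halved instead.
 */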
1058
1059static void
1060sched_interact_fork(struct ksegrp *kg)
1061{
1062	int ratio;
1063	int sum;
1064
1065	sum = kg->kg_runtime + kg->kg_slptime;
1066	if (sum > SCHED_SLP_RUN_FORK) {
1067		ratio = sum / SCHED_SLP_RUN_FORK;
1068		kg->kg_runtime /= ratio;
1069		kg->kg_slptime /= ratio;
1070	}
1071}
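
/*
 * For illustration, assuming hz = 1000: SCHED_SLP_RUN_FORK is
 * (1000 / 2) << 10 = 512000.  A child inheriting kg_runtime = 900000 and
 * kg_slptime = 600000 has a sum of 1500000, so ratio = 1500000 / 512000 = 2
 * and both values are divided down to 450000 and 300000.
 */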
1072
1073static int
1074sched_interact_score(struct ksegrp *kg)
1075{
1076	int div;
1077
1078	if (kg->kg_runtime > kg->kg_slptime) {
1079		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
1080		return (SCHED_INTERACT_HALF +
1081		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
1082	} else if (kg->kg_slptime > kg->kg_runtime) {
1083		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
1084		return (kg->kg_runtime / div);
1085	}
1086
1087	/*
1088	 * This can happen if slptime and runtime are 0.
1089	 */
1090	return (0);
1091
1092}
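
/*
 * For illustration, with SCHED_INTERACT_HALF = 50: a ksegrp with
 * kg_runtime = 3000000 and kg_slptime = 1000000 gets div = 3000000 / 50 =
 * 60000 and a score of 50 + (50 - 1000000 / 60000) = 84, well above
 * SCHED_INTERACT_THRESH, so it is treated as a cpu hog.  With the two
 * values swapped the score is 1000000 / 60000 = 16, below the threshold
 * of 30, so the ksegrp is considered interactive.
 */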
1093
1094/*
1095 * This is only somewhat accurate since, given many processes of the same
1096 * priority, they will switch when their slices run out, which will be
1097 * at most SCHED_SLICE_MAX.
1098 */
1099int
1100sched_rr_interval(void)
1101{
1102	return (SCHED_SLICE_MAX);
1103}
1104
1105static void
1106sched_pctcpu_update(struct kse *ke)
1107{
1108	/*
1109	 * Adjust counters and watermark for pctcpu calc.
1110	 */
1111	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
1112		/*
1113		 * Shift the tick count out so that the divide doesn't
1114		 * round away our results.
1115		 */
1116		ke->ke_ticks <<= 10;
1117		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
1118			    SCHED_CPU_TICKS;
1119		ke->ke_ticks >>= 10;
1120	} else
1121		ke->ke_ticks = 0;
1122	ke->ke_ltick = ticks;
1123	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
1124}
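
/*
 * For illustration, assuming hz = 1000 (SCHED_CPU_TICKS = 10000): a kse
 * that accumulated ke_ticks = 2500 over the 12000 ticks since ke_ftick is
 * rescaled to ((2500 << 10) / 12000) * 10000 >> 10 = 2080, roughly
 * 2500 * 10000 / 12000, renormalizing the count to a full ten second
 * window.  The << 10 keeps the integer division from discarding most of
 * the precision.
 */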
1125
1126void
1127sched_prio(struct thread *td, u_char prio)
1128{
1129	struct kse *ke;
1130
1131	ke = td->td_kse;
1132	mtx_assert(&sched_lock, MA_OWNED);
1133	if (TD_ON_RUNQ(td)) {
1134		/*
1135		 * If the priority has been elevated due to priority
1136		 * propagation, we may have to move ourselves to a new
1137		 * queue.  We still call adjustrunqueue below in case the kse
1138		 * needs to fix things up.
1139		 */
1140		if (prio < td->td_priority && ke &&
1141		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
1142		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
1143			runq_remove(ke->ke_runq, ke);
1144			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
1145			runq_add(ke->ke_runq, ke);
1146		}
1147		/*
1148		 * Hold this kse on this cpu so that sched_prio() doesn't
1149		 * cause excessive migration.  We only want migration to
1150		 * happen as the result of a wakeup.
1151		 */
1152		ke->ke_flags |= KEF_HOLD;
1153		adjustrunqueue(td, prio);
1154	} else
1155		td->td_priority = prio;
1156}
1157
1158void
1159sched_switch(struct thread *td, struct thread *newtd)
1160{
1161	struct kse *ke;
1162
1163	mtx_assert(&sched_lock, MA_OWNED);
1164
1165	ke = td->td_kse;
1166
1167	td->td_last_kse = ke;
1168	td->td_lastcpu = td->td_oncpu;
1169	td->td_oncpu = NOCPU;
1170	td->td_flags &= ~TDF_NEEDRESCHED;
1171	td->td_pflags &= ~TDP_OWEPREEMPT;
1172
1173	/*
1174	 * If the KSE has been assigned it may be in the process of switching
1175	 * to the new cpu.  This is the case in sched_bind().
1176	 */
1177	if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
1178		if (td == PCPU_GET(idlethread)) {
1179			TD_SET_CAN_RUN(td);
1180		} else if (TD_IS_RUNNING(td)) {
1181			kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1182			/*
1183			 * Don't allow the kse to migrate from a preemption.
1184			 */
1185			ke->ke_flags |= KEF_HOLD;
1186			setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
1187		} else {
1188			if (ke->ke_runq) {
1189				kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1190			} else if ((td->td_flags & TDF_IDLETD) == 0)
1191				kdb_backtrace();
1192			/*
1193			 * We will not be on the run queue. So we must be
1194			 * sleeping or similar.
1195			 */
1196			if (td->td_proc->p_flag & P_SA)
1197				kse_reassign(ke);
1198		}
1199	}
1200	if (newtd != NULL)
1201		kseq_load_add(KSEQ_SELF(), newtd->td_kse);
1202	else
1203		newtd = choosethread();
1204	if (td != newtd)
1205		cpu_switch(td, newtd);
1206	sched_lock.mtx_lock = (uintptr_t)td;
1207
1208	td->td_oncpu = PCPU_GET(cpuid);
1209}
1210
1211void
1212sched_nice(struct proc *p, int nice)
1213{
1214	struct ksegrp *kg;
1215	struct kse *ke;
1216	struct thread *td;
1217	struct kseq *kseq;
1218
1219	PROC_LOCK_ASSERT(p, MA_OWNED);
1220	mtx_assert(&sched_lock, MA_OWNED);
1221	/*
1222	 * We need to adjust the nice counts for running KSEs.
1223	 */
1224	FOREACH_KSEGRP_IN_PROC(p, kg) {
1225		if (kg->kg_pri_class == PRI_TIMESHARE) {
1226			FOREACH_KSE_IN_GROUP(kg, ke) {
1227				if (ke->ke_runq == NULL)
1228					continue;
1229				kseq = KSEQ_CPU(ke->ke_cpu);
1230				kseq_nice_rem(kseq, p->p_nice);
1231				kseq_nice_add(kseq, nice);
1232			}
1233		}
1234	}
1235	p->p_nice = nice;
1236	FOREACH_KSEGRP_IN_PROC(p, kg) {
1237		sched_priority(kg);
1238		FOREACH_THREAD_IN_GROUP(kg, td)
1239			td->td_flags |= TDF_NEEDRESCHED;
1240	}
1241}
1242
1243void
1244sched_sleep(struct thread *td)
1245{
1246	mtx_assert(&sched_lock, MA_OWNED);
1247
1248	td->td_slptime = ticks;
1249	td->td_base_pri = td->td_priority;
1250
1251	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
1252	    td->td_kse, td->td_slptime);
1253}
1254
1255void
1256sched_wakeup(struct thread *td)
1257{
1258	mtx_assert(&sched_lock, MA_OWNED);
1259
1260	/*
1261	 * Let the kseg know how long we slept for.  This is because process
1262	 * interactivity behavior is modeled in the kseg.
1263	 */
1264	if (td->td_slptime) {
1265		struct ksegrp *kg;
1266		int hzticks;
1267
1268		kg = td->td_ksegrp;
1269		hzticks = (ticks - td->td_slptime) << 10;
1270		if (hzticks >= SCHED_SLP_RUN_MAX) {
1271			kg->kg_slptime = SCHED_SLP_RUN_MAX;
1272			kg->kg_runtime = 1;
1273		} else {
1274			kg->kg_slptime += hzticks;
1275			sched_interact_update(kg);
1276		}
1277		sched_priority(kg);
1278		if (td->td_kse)
1279			sched_slice(td->td_kse);
1280		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
1281		    td->td_kse, hzticks);
1282		td->td_slptime = 0;
1283	}
1284	setrunqueue(td, SRQ_BORING);
1285}
1286
1287/*
1288 * Penalize the parent for creating a new child and initialize the child's
1289 * priority.
1290 */
1291void
1292sched_fork(struct thread *td, struct proc *p1)
1293{
1294
1295	mtx_assert(&sched_lock, MA_OWNED);
1296
1297	p1->p_nice = td->td_proc->p_nice;
1298	sched_fork_ksegrp(td, FIRST_KSEGRP_IN_PROC(p1));
1299	sched_fork_kse(td, FIRST_KSE_IN_PROC(p1));
1300	sched_fork_thread(td, FIRST_THREAD_IN_PROC(p1));
1301}
1302
1303void
1304sched_fork_kse(struct thread *td, struct kse *child)
1305{
1306	struct kse *ke = td->td_kse;
1307
1308	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
1309	child->ke_cpu = ke->ke_cpu;
1310	child->ke_runq = NULL;
1311
1312	/* Grab our parent's cpu estimation information. */
1313	child->ke_ticks = ke->ke_ticks;
1314	child->ke_ltick = ke->ke_ltick;
1315	child->ke_ftick = ke->ke_ftick;
1316}
1317
1318void
1319sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
1320{
1321	struct ksegrp *kg = td->td_ksegrp;
1322	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
1323
1324	child->kg_slptime = kg->kg_slptime;
1325	child->kg_runtime = kg->kg_runtime;
1326	child->kg_user_pri = kg->kg_user_pri;
1327	sched_interact_fork(child);
1328	kg->kg_runtime += tickincr << 10;
1329	sched_interact_update(kg);
1330
1331	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
1332	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
1333	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
1334}
1335
1336void
1337sched_fork_thread(struct thread *td, struct thread *child)
1338{
1339}
1340
1341void
1342sched_class(struct ksegrp *kg, int class)
1343{
1344	struct kseq *kseq;
1345	struct kse *ke;
1346	int nclass;
1347	int oclass;
1348
1349	mtx_assert(&sched_lock, MA_OWNED);
1350	if (kg->kg_pri_class == class)
1351		return;
1352
1353	nclass = PRI_BASE(class);
1354	oclass = PRI_BASE(kg->kg_pri_class);
1355	FOREACH_KSE_IN_GROUP(kg, ke) {
1356		if (ke->ke_state != KES_ONRUNQ &&
1357		    ke->ke_state != KES_THREAD)
1358			continue;
1359		kseq = KSEQ_CPU(ke->ke_cpu);
1360
1361#ifdef SMP
1362		/*
1363		 * On SMP if we're on the RUNQ we must adjust the transferable
1364		 * count because we could be changing to or from an interrupt
1365		 * class.
1366		 */
1367		if (ke->ke_state == KES_ONRUNQ) {
1368			if (KSE_CAN_MIGRATE(ke, oclass)) {
1369				kseq->ksq_transferable--;
1370				kseq->ksq_group->ksg_transferable--;
1371			}
1372			if (KSE_CAN_MIGRATE(ke, nclass)) {
1373				kseq->ksq_transferable++;
1374				kseq->ksq_group->ksg_transferable++;
1375			}
1376		}
1377#endif
1378		if (oclass == PRI_TIMESHARE) {
1379			kseq->ksq_load_timeshare--;
1380			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
1381		}
1382		if (nclass == PRI_TIMESHARE) {
1383			kseq->ksq_load_timeshare++;
1384			kseq_nice_add(kseq, kg->kg_proc->p_nice);
1385		}
1386	}
1387
1388	kg->kg_pri_class = class;
1389}
1390
1391/*
1392 * Return some of the child's priority and interactivity to the parent.
1393 */
1394void
1395sched_exit(struct proc *p, struct thread *td)
1396{
1397	mtx_assert(&sched_lock, MA_OWNED);
1398	sched_exit_kse(FIRST_KSE_IN_PROC(p), td);
1399	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
1400}
1401
1402void
1403sched_exit_kse(struct kse *ke, struct thread *td)
1404{
1405	kseq_load_rem(KSEQ_CPU(td->td_kse->ke_cpu), td->td_kse);
1406}
1407
1408void
1409sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
1410{
1411	/* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
1412	kg->kg_runtime += td->td_ksegrp->kg_runtime;
1413	sched_interact_update(kg);
1414}
1415
1416void
1417sched_exit_thread(struct thread *td, struct thread *child)
1418{
1419}
1420
1421void
1422sched_clock(struct thread *td)
1423{
1424	struct kseq *kseq;
1425	struct ksegrp *kg;
1426	struct kse *ke;
1427
1428	mtx_assert(&sched_lock, MA_OWNED);
1429	kseq = KSEQ_SELF();
1430#ifdef SMP
1431	if (ticks == bal_tick)
1432		sched_balance();
1433	if (ticks == gbal_tick)
1434		sched_balance_groups();
1435	/*
1436	 * We could have been assigned a non real-time thread without an
1437	 * IPI.
1438	 */
1439	if (kseq->ksq_assigned)
1440		kseq_assign(kseq);	/* Potentially sets NEEDRESCHED */
1441#endif
1442	/*
1443	 * sched_setup() apparently happens prior to stathz being set.  We
1444	 * need to resolve the timers earlier in the boot so we can avoid
1445	 * calculating this here.
1446	 */
1447	if (realstathz == 0) {
1448		realstathz = stathz ? stathz : hz;
1449		tickincr = hz / realstathz;
1450		/*
1451		 * XXX This does not work for values of stathz that are much
1452		 * larger than hz.
1453		 */
1454		if (tickincr == 0)
1455			tickincr = 1;
1456	}
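
	/*
	 * For illustration, assuming the common values hz = 1000 and
	 * stathz = 128: realstathz becomes 128 and tickincr = 1000 / 128 = 7,
	 * so each stat clock tick charges 7 << 10 units of run time to the
	 * ksegrp below.  If stathz were 0 we would fall back to hz and
	 * tickincr would stay 1.
	 */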
1457
1458	ke = td->td_kse;
1459	kg = ke->ke_ksegrp;
1460
1461	/* Adjust ticks for pctcpu */
1462	ke->ke_ticks++;
1463	ke->ke_ltick = ticks;
1464
1465	/* Go up to one second beyond our max and then trim back down */
1466	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1467		sched_pctcpu_update(ke);
1468
1469	if (td->td_flags & TDF_IDLETD)
1470		return;
1471
1472	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
1473	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
1474	/*
1475	 * We only do slicing code for TIMESHARE ksegrps.
1476	 */
1477	if (kg->kg_pri_class != PRI_TIMESHARE)
1478		return;
1479	/*
1480	 * We used a tick; charge it to the ksegrp so that we can compute our
1481	 * interactivity.
1482	 */
1483	kg->kg_runtime += tickincr << 10;
1484	sched_interact_update(kg);
1485
1486	/*
1487	 * We used up one time slice.
1488	 */
1489	if (--ke->ke_slice > 0)
1490		return;
1491	/*
1492	 * We're out of time, recompute priorities and requeue.
1493	 */
1494	kseq_load_rem(kseq, ke);
1495	sched_priority(kg);
1496	sched_slice(ke);
1497	if (SCHED_CURR(kg, ke))
1498		ke->ke_runq = kseq->ksq_curr;
1499	else
1500		ke->ke_runq = kseq->ksq_next;
1501	kseq_load_add(kseq, ke);
1502	td->td_flags |= TDF_NEEDRESCHED;
1503}
1504
1505int
1506sched_runnable(void)
1507{
1508	struct kseq *kseq;
1509	int load;
1510
1511	load = 1;
1512
1513	kseq = KSEQ_SELF();
1514#ifdef SMP
1515	if (kseq->ksq_assigned) {
1516		mtx_lock_spin(&sched_lock);
1517		kseq_assign(kseq);
1518		mtx_unlock_spin(&sched_lock);
1519	}
1520#endif
1521	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1522		if (kseq->ksq_load > 0)
1523			goto out;
1524	} else
1525		if (kseq->ksq_load - 1 > 0)
1526			goto out;
1527	load = 0;
1528out:
1529	return (load);
1530}
1531
1532void
1533sched_userret(struct thread *td)
1534{
1535	struct ksegrp *kg;
1536
1537	kg = td->td_ksegrp;
1538
1539	if (td->td_priority != kg->kg_user_pri) {
1540		mtx_lock_spin(&sched_lock);
1541		td->td_priority = kg->kg_user_pri;
1542		mtx_unlock_spin(&sched_lock);
1543	}
1544}
1545
1546struct kse *
1547sched_choose(void)
1548{
1549	struct kseq *kseq;
1550	struct kse *ke;
1551
1552	mtx_assert(&sched_lock, MA_OWNED);
1553	kseq = KSEQ_SELF();
1554#ifdef SMP
1555restart:
1556	if (kseq->ksq_assigned)
1557		kseq_assign(kseq);
1558#endif
1559	ke = kseq_choose(kseq);
1560	if (ke) {
1561#ifdef SMP
1562		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1563			if (kseq_idled(kseq) == 0)
1564				goto restart;
1565#endif
1566		kseq_runq_rem(kseq, ke);
1567		ke->ke_state = KES_THREAD;
1568
1569		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1570			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
1571			    ke, ke->ke_runq, ke->ke_slice,
1572			    ke->ke_thread->td_priority);
1573		}
1574		return (ke);
1575	}
1576#ifdef SMP
1577	if (kseq_idled(kseq) == 0)
1578		goto restart;
1579#endif
1580	return (NULL);
1581}
1582
1583void
1584sched_add(struct thread *td, int flags)
1585{
1586
1587	/* let jeff work out how to map the flags better */
1588	/* I'm open to suggestions */
1589	if (flags & SRQ_YIELDING)
1590		/*
1591		 * Preempting during switching can be bad JUJU,
1592		 * especially for KSE processes.
1593		 */
1594		sched_add_internal(td, 0);
1595	else
1596		sched_add_internal(td, 1);
1597}
1598
1599static void
1600sched_add_internal(struct thread *td, int preemptive)
1601{
1602	struct kseq *kseq;
1603	struct ksegrp *kg;
1604	struct kse *ke;
1605#ifdef SMP
1606	int canmigrate;
1607#endif
1608	int class;
1609
1610	mtx_assert(&sched_lock, MA_OWNED);
1611	ke = td->td_kse;
1612	kg = td->td_ksegrp;
1613	if (ke->ke_flags & KEF_ASSIGNED)
1614		return;
1615	kseq = KSEQ_SELF();
1616	KASSERT((ke->ke_thread != NULL),
1617	    ("sched_add: No thread on KSE"));
1618	KASSERT((ke->ke_thread->td_kse != NULL),
1619	    ("sched_add: No KSE on thread"));
1620	KASSERT(ke->ke_state != KES_ONRUNQ,
1621	    ("sched_add: kse %p (%s) already in run queue", ke,
1622	    ke->ke_proc->p_comm));
1623	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1624	    ("sched_add: process swapped out"));
1625	KASSERT(ke->ke_runq == NULL,
1626	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1627
1628	class = PRI_BASE(kg->kg_pri_class);
1629	switch (class) {
1630	case PRI_ITHD:
1631	case PRI_REALTIME:
1632		ke->ke_runq = kseq->ksq_curr;
1633		ke->ke_slice = SCHED_SLICE_MAX;
1634		ke->ke_cpu = PCPU_GET(cpuid);
1635		break;
1636	case PRI_TIMESHARE:
1637		if (SCHED_CURR(kg, ke))
1638			ke->ke_runq = kseq->ksq_curr;
1639		else
1640			ke->ke_runq = kseq->ksq_next;
1641		break;
1642	case PRI_IDLE:
1643		/*
1644		 * This is for priority propagation.
1645		 */
1646		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
1647			ke->ke_runq = kseq->ksq_curr;
1648		else
1649			ke->ke_runq = &kseq->ksq_idle;
1650		ke->ke_slice = SCHED_SLICE_MIN;
1651		break;
1652	default:
1653		panic("Unknown pri class.");
1654		break;
1655	}
1656#ifdef SMP
1657	/*
1658	 * Don't migrate running threads here.  Force the long term balancer
1659	 * to do it.
1660	 */
1661	canmigrate = KSE_CAN_MIGRATE(ke, class);
1662	if (ke->ke_flags & KEF_HOLD) {
1663		ke->ke_flags &= ~KEF_HOLD;
1664		canmigrate = 0;
1665	}
1666	/*
1667	 * If this thread is pinned or bound, notify the target cpu.
1668	 */
1669	if (!canmigrate && ke->ke_cpu != PCPU_GET(cpuid)) {
1670		ke->ke_runq = NULL;
1671		kseq_notify(ke, ke->ke_cpu);
1672		return;
1673	}
1674	/*
1675	 * If we had been idle, clear our bit in the group and potentially
1676	 * the global bitmap.  If not, see if we should transfer this thread.
1677	 */
1678	if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
1679	    (kseq->ksq_group->ksg_idlemask & PCPU_GET(cpumask)) != 0) {
1680		/*
1681		 * Check to see if our group is unidling, and if so, remove it
1682		 * from the global idle mask.
1683		 */
1684		if (kseq->ksq_group->ksg_idlemask ==
1685		    kseq->ksq_group->ksg_cpumask)
1686			atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
1687		/*
1688		 * Now remove ourselves from the group specific idle mask.
1689		 */
1690		kseq->ksq_group->ksg_idlemask &= ~PCPU_GET(cpumask);
1691	} else if (kseq->ksq_load > 1 && canmigrate)
1692		if (kseq_transfer(kseq, ke, class))
1693			return;
1694	ke->ke_cpu = PCPU_GET(cpuid);
1695#endif
1696	/*
1697	 * XXX With preemption this is not necessary.
1698	 */
1699	if (td->td_priority < curthread->td_priority &&
1700	    ke->ke_runq == kseq->ksq_curr)
1701		curthread->td_flags |= TDF_NEEDRESCHED;
1702	if (preemptive && maybe_preempt(td))
1703		return;
1704	ke->ke_ksegrp->kg_runq_kses++;
1705	ke->ke_state = KES_ONRUNQ;
1706
1707	kseq_runq_add(kseq, ke);
1708	kseq_load_add(kseq, ke);
1709}
1710
1711void
1712sched_rem(struct thread *td)
1713{
1714	struct kseq *kseq;
1715	struct kse *ke;
1716
1717	ke = td->td_kse;
1718	/*
1719	 * It is safe to just return here because sched_rem() is only ever
1720	 * used in places where we're immediately going to add the
1721	 * kse back on again.  In that case it'll be added with the correct
1722	 * thread and priority when the caller drops the sched_lock.
1723	 */
1724	if (ke->ke_flags & KEF_ASSIGNED)
1725		return;
1726	mtx_assert(&sched_lock, MA_OWNED);
1727	KASSERT((ke->ke_state == KES_ONRUNQ),
1728	    ("sched_rem: KSE not on run queue"));
1729
1730	ke->ke_state = KES_THREAD;
1731	ke->ke_ksegrp->kg_runq_kses--;
1732	kseq = KSEQ_CPU(ke->ke_cpu);
1733	kseq_runq_rem(kseq, ke);
1734	kseq_load_rem(kseq, ke);
1735}
1736
1737fixpt_t
1738sched_pctcpu(struct thread *td)
1739{
1740	fixpt_t pctcpu;
1741	struct kse *ke;
1742
1743	pctcpu = 0;
1744	ke = td->td_kse;
1745	if (ke == NULL)
1746		return (0);
1747
1748	mtx_lock_spin(&sched_lock);
1749	if (ke->ke_ticks) {
1750		int rtick;
1751
1752		/*
1753		 * Don't update more frequently than twice a second.  Allowing
1754		 * this causes the cpu usage to decay away too quickly due to
1755		 * rounding errors.
1756		 */
1757		if (ke->ke_ftick + SCHED_CPU_TICKS < ke->ke_ltick ||
1758		    ke->ke_ltick < (ticks - (hz / 2)))
1759			sched_pctcpu_update(ke);
1760		/* How many rticks per second? */
1761		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1762		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1763	}
1764
1765	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1766	mtx_unlock_spin(&sched_lock);
1767
1768	return (pctcpu);
1769}
1770
1771void
1772sched_bind(struct thread *td, int cpu)
1773{
1774	struct kse *ke;
1775
1776	mtx_assert(&sched_lock, MA_OWNED);
1777	ke = td->td_kse;
1778	ke->ke_flags |= KEF_BOUND;
1779#ifdef SMP
1780	if (PCPU_GET(cpuid) == cpu)
1781		return;
1782	/* sched_rem without the runq_remove */
1783	ke->ke_state = KES_THREAD;
1784	ke->ke_ksegrp->kg_runq_kses--;
1785	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
1786	kseq_notify(ke, cpu);
1787	/* When we return from mi_switch we'll be on the correct cpu. */
1788	mi_switch(SW_VOL, NULL);
1789#endif
1790}
1791
1792void
1793sched_unbind(struct thread *td)
1794{
1795	mtx_assert(&sched_lock, MA_OWNED);
1796	td->td_kse->ke_flags &= ~KEF_BOUND;
1797}
1798
1799int
1800sched_load(void)
1801{
1802#ifdef SMP
1803	int total;
1804	int i;
1805
1806	total = 0;
1807	for (i = 0; i <= ksg_maxid; i++)
1808		total += KSEQ_GROUP(i)->ksg_load;
1809	return (total);
1810#else
1811	return (KSEQ_SELF()->ksq_sysload);
1812#endif
1813}
1814
1815int
1816sched_sizeof_kse(void)
1817{
1818	return (sizeof(struct kse) + sizeof(struct ke_sched));
1819}
1820
1821int
1822sched_sizeof_ksegrp(void)
1823{
1824	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1825}
1826
1827int
1828sched_sizeof_proc(void)
1829{
1830	return (sizeof(struct proc));
1831}
1832
1833int
1834sched_sizeof_thread(void)
1835{
1836	return (sizeof(struct thread) + sizeof(struct td_sched));
1837}
1838