sched_ule.c revision 121872
/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 121872 2003-11-02 04:25:59Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#define KTR_ULE         KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef SMP
/* Callout to handle load balancing SMP systems. */
static struct callout kseq_lb_callout;
#endif

/*
 * These datastructures are allocated within their parent datastructure but
 * are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu
#define	ke_assign	ke_procq.tqe_next

#define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */

struct kg_sched {
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower(better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(30)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:  Determine the amount of slice granted to a scaled nice.
 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))

/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    (ke->ke_thread->td_priority != kg->kg_user_pri ||			\
    SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	unsigned int	ksq_rslices;	/* Slices on run queue */
	int		ksq_cpus;	/* Count of CPUs in this kseq. */
	struct kse	*ksq_assigned;	/* KSEs assigned by another CPU. */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
static int kseq_idle;
static struct kseq	kseq_cpu[MAXCPU];
static struct kseq	*kseq_idmap[MAXCPU];
#define	KSEQ_SELF()	(kseq_idmap[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(kseq_idmap[(x)])
#else
static struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
static void sched_interact_fork(struct ksegrp *kg);
static void sched_pctcpu_update(struct kse *ke);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
#if 0
static int sched_pickcpu(void);
#endif
static struct kse *runq_steal(struct runq *rq);
static struct kseq *kseq_load_highest(void);
static void kseq_balance(void *arg);
static void kseq_move(struct kseq *from, int cpu);
static int kseq_find(void);
static void kseq_notify(struct kse *ke, int cpu);
static void kseq_assign(struct kseq *);
static struct kse *kseq_steal(struct kseq *kseq);
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload:           %d\n", kseq->ksq_load);
	printf("\tload ITHD:      %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME:  %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE:      %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < SCHED_PRI_NRESV; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}
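
/*
 * Example of the nice bookkeeping above (illustrative): nice values span
 * [-20, 20] and are stored normalized to [0, 40], so a kse at nice -20
 * lands in ksq_nice[0] and one at nice +20 in ksq_nice[40].  ksq_nicemin
 * tracks the smallest (most favourable) nice value currently queued and is
 * only recomputed when the last kse in that bin is removed.
 */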

#ifdef SMP
/*
 * kseq_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
static void
kseq_balance(void *arg)
{
	struct kseq *kseq;
	int high_load;
	int low_load;
	int high_cpu;
	int low_cpu;
	int move;
	int diff;
	int i;

	high_cpu = 0;
	low_cpu = 0;
	high_load = 0;
	low_load = -1;

	mtx_lock_spin(&sched_lock);
	if (smp_started == 0)
		goto out;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || ((1 << i) & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > high_load) {
			high_load = kseq->ksq_load;
			high_cpu = i;
		}
		if (low_load == -1 || kseq->ksq_load < low_load) {
			low_load = kseq->ksq_load;
			low_cpu = i;
		}
	}

	kseq = KSEQ_CPU(high_cpu);

	high_load = kseq->ksq_loads[PRI_IDLE] + kseq->ksq_loads[PRI_TIMESHARE] +
	    kseq->ksq_loads[PRI_REALTIME];
	/*
	 * Nothing to do.
	 */
	if (high_load < kseq->ksq_cpus + 1)
		goto out;

	high_load -= kseq->ksq_cpus;

	if (low_load >= high_load)
		goto out;

	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;

	for (i = 0; i < move; i++)
		kseq_move(kseq, low_cpu);

out:
	mtx_unlock_spin(&sched_lock);
	callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);

	return;
}
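
/*
 * Balance example (illustrative): on a two CPU system with single-core
 * kseqs (ksq_cpus = 1), loads of 5 and 1 give high_load = 5 - 1 = 4,
 * diff = 4 - 1 = 3 and, since diff is odd, move = 2.  Two kses are pushed
 * from the busy kseq to the idle one, leaving both with a load of 3.
 */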

static struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || ((1 << i) & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	kseq = KSEQ_CPU(cpu);

	if ((kseq->ksq_loads[PRI_IDLE] + kseq->ksq_loads[PRI_TIMESHARE] +
	    kseq->ksq_loads[PRI_REALTIME]) > kseq->ksq_cpus)
		return (kseq);

	return (NULL);
}

static void
kseq_move(struct kseq *from, int cpu)
{
	struct kse *ke;

	ke = kseq_steal(from);
	runq_remove(ke->ke_runq, ke);
	ke->ke_state = KES_THREAD;
	kseq_rem(from, ke);

	ke->ke_cpu = cpu;
	sched_add(ke->ke_thread);
}

static int
kseq_find(void)
{
	struct kseq *high;

	if (!smp_started)
		return (0);
	if (kseq_idle & PCPU_GET(cpumask))
		return (0);
	/*
	 * Find the cpu with the highest load and steal one proc.
	 */
	if ((high = kseq_load_highest()) == NULL ||
	    high == KSEQ_SELF()) {
		/*
		 * If we couldn't find one, set ourselves in the
		 * idle map.
		 */
		atomic_set_int(&kseq_idle, PCPU_GET(cpumask));
		return (0);
	}
	/*
	 * Remove this kse from this kseq and runq and then requeue
	 * on the current processor.  We now have a load of one!
	 */
	kseq_move(high, PCPU_GET(cpuid));

	return (1);
}

static void
kseq_assign(struct kseq *kseq)
{
	struct kse *nke;
	struct kse *ke;

	do {
		ke = kseq->ksq_assigned;
	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
	for (; ke != NULL; ke = nke) {
		nke = ke->ke_assign;
		ke->ke_flags &= ~KEF_ASSIGNED;
		sched_add(ke->ke_thread);
	}
}

static void
kseq_notify(struct kse *ke, int cpu)
{
	struct kseq *kseq;
	struct thread *td;
	struct pcpu *pcpu;

	ke->ke_flags |= KEF_ASSIGNED;

	kseq = KSEQ_CPU(cpu);

	/*
	 * Place a KSE on another cpu's queue and force a resched.
	 */
	do {
		ke->ke_assign = kseq->ksq_assigned;
	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
	pcpu = pcpu_find(cpu);
	td = pcpu->pc_curthread;
	if (ke->ke_thread->td_priority < td->td_priority ||
	    td == pcpu->pc_idlethread) {
		td->td_flags |= TDF_NEEDRESCHED;
		ipi_selected(1 << cpu, IPI_AST);
	}
}
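
/*
 * The two routines above implement a simple lock-free LIFO: kseq_notify()
 * pushes a kse onto the remote cpu's ksq_assigned list with a compare and
 * swap, and kseq_assign() detaches the entire list at once by swapping in
 * NULL, then requeues each kse locally under sched_lock.  No ordering is
 * preserved between pushes, which is fine since every detached kse is
 * re-added through sched_add().
 */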

static struct kse *
runq_steal(struct runq *rq)
{
	struct rqhead *rqh;
	struct rqbits *rqb;
	struct kse *ke;
	int word;
	int bit;

	mtx_assert(&sched_lock, MA_OWNED);
	rqb = &rq->rq_status;
	for (word = 0; word < RQB_LEN; word++) {
		if (rqb->rqb_bits[word] == 0)
			continue;
		for (bit = 0; bit < RQB_BPW; bit++) {
			if ((rqb->rqb_bits[word] & (1 << bit)) == 0)
				continue;
			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
			TAILQ_FOREACH(ke, rqh, ke_procq) {
				if (PRI_BASE(ke->ke_ksegrp->kg_pri_class) !=
				    PRI_ITHD)
					return (ke);
			}
		}
	}
	return (NULL);
}

static struct kse *
kseq_steal(struct kseq *kseq)
{
	struct kse *ke;

	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
		return (ke);
	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
		return (ke);
	return (runq_steal(&kseq->ksq_idle));
}
#endif	/* SMP */

/*
 * Pick the highest priority task we have and return it.
 */

static struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
	kseq->ksq_load = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
	kseq->ksq_assigned = NULL;
#endif
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	/* init kseqs */
	/* Create the idmap. */
#ifdef ULE_HTT_EXPERIMENTAL
	if (smp_topology == NULL) {
#else
	if (1) {
#endif
		for (i = 0; i < MAXCPU; i++) {
			kseq_setup(&kseq_cpu[i]);
			kseq_idmap[i] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = 1;
		}
	} else {
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			struct cpu_group *cg;

			cg = &smp_topology->ct_group[i];
			kseq_setup(&kseq_cpu[i]);

			for (j = 0; j < MAXCPU; j++)
				if ((cg->cg_mask & (1 << j)) != 0)
					kseq_idmap[j] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = cg->cg_count;
		}
	}
	callout_init(&kseq_lb_callout, CALLOUT_MPSAFE);
	kseq_balance(NULL);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if it abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the ksegs nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse distance from the least nice ksegrp.
	 *
	 * If the kse is outside of the window it will get no slice
	 * and will be reevaluated each time it is selected on the
	 * run queue.  The exception to this is nice 0 ksegs when
	 * a nice -20 is running.  They are always granted a minimum
	 * slice.
	 */
	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_SLICE_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else if (kg->kg_nice == 0)
			ke->ke_slice = SCHED_SLICE_MIN;
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	return;
}
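
/*
 * Worked example for sched_slice() (illustrative, assuming hz = 1000 so
 * SLICE_MIN = 10 and SLICE_MAX = 142): with a nice +4 kseg as the least
 * nice on the queue (ksq_nicemin = 4),
 *
 *	an interactive kseg at any nice    gets SLICE_MIN (10 ticks),
 *	a non-interactive kseg at nice +4  gets SLICE_MAX (142 ticks),
 *	a non-interactive kseg at nice +14 gets SLICE_NICE(10) = 72 ticks,
 *	a non-interactive kseg at nice +20 gets SLICE_NICE(16) = 30 ticks.
 *
 * If ksq_nicemin were -20, a nice 0 kseg would fall outside the 20 point
 * window and receive SLICE_MIN, while a nice +5 kseg would receive no
 * slice and be re-evaluated by kseq_choose().
 */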

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.
 * This routine will not operate correctly when slp or run times have been
 * adjusted to more than double their maximum.
 */
static void
sched_interact_update(struct ksegrp *kg)
{
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum < SCHED_SLP_RUN_MAX)
		return;
	/*
	 * If we have exceeded by more than 1/5th then the algorithm below
	 * will not bring us back into range.  Dividing by two here forces
	 * us into the range of [3/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
	 */
	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
		kg->kg_runtime /= 2;
		kg->kg_slptime /= 2;
		return;
	}
	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
}
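
/*
 * Worked example (illustrative, hz = 1000 so SCHED_SLP_RUN_MAX is
 * 5,120,000): a kseg at sum = 5,200,000 takes the 4/5 path and drops to
 * roughly 4,160,000, comfortably back under the cap.  A kseg at
 * sum = 6,400,000 exceeds (SCHED_SLP_RUN_MAX / 5) * 6 = 6,144,000, so both
 * components are halved, landing at 3,200,000, i.e. just above
 * 3/5 * SCHED_SLP_RUN_MAX as the comment above describes.
 */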

static void
sched_interact_fork(struct ksegrp *kg)
{
	int ratio;
	int sum;

	sum = kg->kg_runtime + kg->kg_slptime;
	if (sum > SCHED_SLP_RUN_FORK) {
		ratio = sum / SCHED_SLP_RUN_FORK;
		kg->kg_runtime /= ratio;
		kg->kg_slptime /= ratio;
	}
}
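
/*
 * Example (illustrative, hz = 1000 so SCHED_SLP_RUN_FORK is 512,000): a
 * parent kseg carrying runtime = 1,200,000 and slptime = 400,000 has
 * sum = 1,600,000, giving ratio = 3; the child starts with
 * runtime = 400,000 and slptime = 133,333, preserving the parent's
 * run/sleep mix while discarding most of its accumulated history.
 */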

static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} else if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);

}
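
/*
 * Worked examples (illustrative, values in ticks << 10 with hz = 1000):
 *
 *	runtime = 3,072,000, slptime = 1,024,000 (3s running, 1s asleep):
 *	    div = 3,072,000 / 50 = 61,440, score = 50 + (50 - 16) = 84,
 *	    well above SCHED_INTERACT_THRESH, so the kseg is treated as a
 *	    cpu hog.
 *
 *	runtime = 1,024,000, slptime = 3,072,000 (the reverse mix):
 *	    score = 1,024,000 / 61,440 = 16, below the threshold of 30,
 *	    so the kseg is considered interactive.
 */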

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

static void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */
	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
		/*
		 * Shift the tick count out so that the divide doesn't
		 * round away our results.
		 */
		ke->ke_ticks <<= 10;
		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
			    SCHED_CPU_TICKS;
		ke->ke_ticks >>= 10;
	} else
		ke->ke_ticks = 0;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}
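
/*
 * Example of the scaling above (illustrative, hz = 1000 so SCHED_CPU_TICKS
 * is 10,000): a kse that accumulated ke_ticks = 2,000 over a window of
 * ticks - ke_ftick = 5,000 is rescaled to
 * (((2,000 << 10) / 5,000) * 10,000) >> 10 = 3,994, i.e. roughly the 4,000
 * ticks it would have accumulated over the full ten second window.  The
 * shift by 10 just preserves precision across the integer divide.
 */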

#if 0
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (!smp_started)
		return (0);

	load = 0;
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || ((1 << i) & stopped_cpus) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_ULE, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;

	ke = td->td_kse;
	mtx_assert(&sched_lock, MA_OWNED);
	if (TD_ON_RUNQ(td)) {
		/*
		 * If the priority has been elevated due to priority
		 * propagation, we may have to move ourselves to a new
		 * queue.  We still call adjustrunqueue below in case kse
		 * needs to fix things up.
		 */
		if (prio < td->td_priority && ke &&
		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
			runq_remove(ke->ke_runq, ke);
			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
			runq_add(ke->ke_runq, ke);
		}
		adjustrunqueue(td, prio);
	} else
		td->td_priority = prio;
}

void
sched_switch(struct thread *td)
{
	struct thread *newtd;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		if (td->td_proc->p_flag & P_SA) {
			kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
			setrunqueue(td);
		} else {
			/*
			 * This queue is always correct except for idle threads
			 * which have a higher priority due to priority
			 * propagation.
			 */
			if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) {
				if (td->td_priority < PRI_MIN_IDLE)
					ke->ke_runq = KSEQ_SELF()->ksq_curr;
				else
					ke->ke_runq = &KSEQ_SELF()->ksq_idle;
			}
			runq_add(ke->ke_runq, ke);
			/* setrunqueue(td); */
		}
	} else {
		if (ke->ke_runq)
			kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
		/*
		 * We will not be on the run queue. So we must be
		 * sleeping or similar.
		 */
		if (td->td_proc->p_flag & P_SA)
			kse_reassign(ke);
	}
	newtd = choosethread();
	if (td != newtd)
		cpu_switch(td, newtd);
	sched_lock.mtx_lock = (uintptr_t)td;

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_runq == NULL)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = (ticks - td->td_slptime) << 10;
		if (hzticks >= SCHED_SLP_RUN_MAX) {
			kg->kg_slptime = SCHED_SLP_RUN_MAX;
			kg->kg_runtime = 1;
		} else {
			kg->kg_slptime += hzticks;
			sched_interact_update(kg);
		}
		sched_priority(kg);
		if (td->td_kse)
			sched_slice(td->td_kse);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{

	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
	child->ke_runq = NULL;

	/* Grab our parent's cpu estimation information. */
	child->ke_ticks = ke->ke_ticks;
	child->ke_ltick = ke->ke_ltick;
	child->ke_ftick = ke->ke_ftick;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);

	child->kg_slptime = kg->kg_slptime;
	child->kg_runtime = kg->kg_runtime;
	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
	sched_interact_fork(child);
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
		kseq->ksq_loads[PRI_BASE(class)]++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* kg->kg_slptime += child->kg_slptime; */
	kg->kg_runtime += child->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_clock(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	ke = td->td_kse;
	kg = ke->ke_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
	kseq = KSEQ_SELF();
#ifdef SMP
	kseq->ksq_rslices--;
#endif

	if (ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	mtx_lock_spin(&sched_lock);
	kseq = KSEQ_SELF();
#ifdef SMP
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (kseq->ksq_load > 0)
			goto out;
	} else
		if (kseq->ksq_load - 1 > 0)
			goto out;
	load = 0;
out:
	mtx_unlock_spin(&sched_lock);
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	kseq = KSEQ_SELF();
#ifdef SMP
retry:
	if (kseq->ksq_assigned)
		kseq_assign(kseq);
#endif
	ke = kseq_choose(kseq);
	if (ke) {
#ifdef SMP
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
			if (kseq_find())
				goto retry;
#endif
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}
#ifdef SMP
	if (kseq_find())
		goto retry;
#endif

	return (NULL);
}

void
sched_add(struct thread *td)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct kse *ke;
	int class;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	kg = td->td_ksegrp;
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	kseq = KSEQ_SELF();
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	class = PRI_BASE(kg->kg_pri_class);
	switch (class) {
	case PRI_ITHD:
	case PRI_REALTIME:
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
#ifdef SMP
		if (ke->ke_cpu != PCPU_GET(cpuid)) {
			kseq_notify(ke, ke->ke_cpu);
			return;
		}
#endif
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
#ifdef SMP
		if (ke->ke_cpu != PCPU_GET(cpuid)) {
			kseq_notify(ke, ke->ke_cpu);
			return;
		}
#endif
		/*
		 * This is for priority prop.
		 */
		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.");
		break;
	}
#ifdef SMP
	/*
	 * If there are any idle processors, give them our extra load.
	 */
	if (kseq_idle && class != PRI_ITHD &&
	    (kseq->ksq_loads[PRI_IDLE] + kseq->ksq_loads[PRI_TIMESHARE] +
	    kseq->ksq_loads[PRI_REALTIME]) >= kseq->ksq_cpus) {
		int cpu;

		/*
		 * Multiple cpus could find this bit simultaneously but the
		 * race shouldn't be terrible.
		 */
		cpu = ffs(kseq_idle);
		if (cpu) {
			cpu--;
			atomic_clear_int(&kseq_idle, 1 << cpu);
			ke->ke_cpu = cpu;
			ke->ke_runq = NULL;
			kseq_notify(ke, cpu);
			return;
		}
	}
	if (class == PRI_TIMESHARE || class == PRI_REALTIME)
		atomic_clear_int(&kseq_idle, PCPU_GET(cpumask));
#endif
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
	kseq_add(kseq, ke);
}

void
sched_rem(struct thread *td)
{
	struct kseq *kseq;
	struct kse *ke;

	ke = td->td_kse;
	/*
	 * It is safe to just return here because sched_rem() is only ever
	 * used in places where we're immediately going to add the
	 * kse back on again.  In that case it'll be added with the correct
	 * thread and priority when the caller drops the sched_lock.
	 */
	if (ke->ke_flags & KEF_ASSIGNED)
		return;
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct kse *ke;

	pctcpu = 0;
	ke = td->td_kse;
	if (ke == NULL)
		return (0);

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);
		/* How many rtick per second ? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}