/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 119137 2003-08-19 17:51:11Z sam $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#define KTR_ULE         KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 10;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

#ifdef SMP
/* Callout to handle load balancing SMP systems. */
static struct callout kseq_lb_callout;
#endif

/*
 * These datastructures are allocated within their parent datastructure but
 * are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu

struct kg_sched {
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * The priority is primarily determined by the interactivity score.  Thus, we
 * give lower(better) priorities to kse groups that use less CPU.  The nice
 * value is then directly added to this to allow nice to have some effect
 * on latency.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Number of nice values.
 * PRI_BASE:	The start of the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		PRIO_TOTAL
#define	SCHED_PRI_NHALF		(PRIO_TOTAL / 2)
#define	SCHED_PRI_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)

/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time at fork time.
 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz * 2) << 10)
#define	SCHED_SLP_RUN_THROTTLE	(100)
#define	SCHED_INTERACT_MAX	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
#define	SCHED_INTERACT_THRESH	(20)

/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice value.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))

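/*
 * A worked example of the slice macros (assuming hz = 100, so that
 * sched_setup() sets slice_min = 1 and slice_max = 14, and assuming the
 * stock PRIO_MIN/PRIO_MAX of -20/20, so SCHED_PRI_NTHRESH = 19):
 * SCHED_SLICE_RANGE is 14; a kse 5 nice points above ksq_nicemin gets
 * SCHED_SLICE_NICE(5) = 14 - (5 * 14) / 19 = 11 ticks, while a kse 19
 * points above it gets 14 - (19 * 14) / 19 = 0 ticks, i.e. it only runs
 * when kseq_choose() is asked to steal.
 */
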
/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 *
 * XXX nice value should affect how interactive a kg is.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)						\
    (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE || SCHED_INTERACTIVE(kg))

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	int		ksq_cpus;	/* Count of CPUs in this kseq. */
	unsigned int	ksq_rslices;	/* Slices on run queue */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
struct kseq	kseq_cpu[MAXCPU];
struct kseq	*kseq_idmap[MAXCPU];
#define	KSEQ_SELF()	(kseq_idmap[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(kseq_idmap[(x)])
#else
struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

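/*
 * Note on the SMP mapping above: kseq_idmap lets several logical cpus
 * share one kseq.  In the (currently disabled) ULE_HTT_EXPERIMENTAL path
 * of sched_setup(), every cpu in a cpu_group is pointed at the same
 * struct kseq and ksq_cpus records how many cpus drain it, so KSEQ_SELF()
 * on either HTT sibling resolves to the shared queue.
 */
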
static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
static void sched_interact_update(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
int sched_pickcpu(void);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq, int steal);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(int cpu);
#ifdef SMP
struct kseq * kseq_load_highest(void);
void kseq_balance(void *arg);
void kseq_move(struct kseq *from, int cpu);
#endif

void
kseq_print(int cpu)
{
	struct kseq *kseq;
	int i;

	kseq = KSEQ_CPU(cpu);

	printf("kseq:\n");
	printf("\tload:           %d\n", kseq->ksq_load);
	printf("\tload ITHD:      %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME:  %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE:      %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < PRIO_TOTAL + 1; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
		CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
	}
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	mtx_assert(&sched_lock, MA_OWNED);
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 1)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	mtx_assert(&sched_lock, MA_OWNED);
	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV + 1; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

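/*
 * Example of the nice-bin bookkeeping above: with PRIO_MIN = -20 and
 * PRIO_MAX = 20, a nice of -20 is counted in ksq_nice[0], 0 in
 * ksq_nice[SCHED_PRI_NHALF] (20) and +20 in ksq_nice[40].  When the last
 * kse at ksq_nicemin leaves, the loop in kseq_nice_rem() walks upward
 * from the emptied bin to find the new least-nice value.
 */
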
#ifdef SMP
/*
 * kseq_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 *
 * It could be improved by considering the priorities and slices assigned to
 * each task prior to balancing them.  There are many pathological cases with
 * any approach and so the semi random algorithm below may work as well as any.
 *
 */
void
kseq_balance(void *arg)
{
	struct kseq *kseq;
	int high_load;
	int low_load;
	int high_cpu;
	int low_cpu;
	int move;
	int diff;
	int i;

	high_cpu = 0;
	low_cpu = 0;
	high_load = 0;
	low_load = -1;

	mtx_lock_spin(&sched_lock);
	if (smp_started == 0)
		goto out;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (stopped_cpus & (1 << i)) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > high_load) {
			high_load = kseq->ksq_load;
			high_cpu = i;
		}
		if (low_load == -1 || kseq->ksq_load < low_load) {
			low_load = kseq->ksq_load;
			low_cpu = i;
		}
	}

	kseq = KSEQ_CPU(high_cpu);

	/*
	 * Nothing to do.
	 */
	if (high_load < kseq->ksq_cpus + 1)
		goto out;

	high_load -= kseq->ksq_cpus;

	if (low_load >= high_load)
		goto out;

	diff = high_load - low_load;
	move = diff / 2;
	if (diff & 0x1)
		move++;

	for (i = 0; i < move; i++)
		kseq_move(kseq, low_cpu);

out:
	mtx_unlock_spin(&sched_lock);
	callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);

	return;
}

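/*
 * A worked pass of the balancer, assuming two single-cpu kseqs with
 * loads of 5 and 1: high_load is reduced by ksq_cpus to 4, diff is
 * 4 - 1 = 3, and since diff is odd move = 3 / 2 + 1 = 2, so two kses
 * migrate and both queues settle at a load of 3.
 */
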
struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (stopped_cpus & (1 << i)) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	kseq = KSEQ_CPU(cpu);

	if (load > kseq->ksq_cpus)
		return (kseq);

	return (NULL);
}

void
kseq_move(struct kseq *from, int cpu)
{
	struct kse *ke;

	ke = kseq_choose(from, 1);
	runq_remove(ke->ke_runq, ke);
	ke->ke_state = KES_THREAD;
	kseq_rem(from, ke);

	ke->ke_cpu = cpu;
	sched_add(ke);
}
#endif

/*
 * Pick the highest priority task we have and return it.  If steal is 1 we
 * will return kses that have been denied slices due to their nice being too
 * low.  In the future we should prohibit stealing interrupt threads as well.
 */

struct kse *
kseq_choose(struct kseq *kseq, int steal)
{
	struct kse *ke;
	struct runq *swap;

	mtx_assert(&sched_lock, MA_OWNED);
	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0 && steal == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

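/*
 * The two timeshare queues give kseq_choose() a simple epoch scheme:
 * once ksq_curr is empty it is swapped with ksq_next, and a kse that
 * expires onto the next queue therefore waits at most one full drain of
 * the current queue, which bounds starvation for runnable but heavily
 * niced tasks.
 */
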
static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
	kseq->ksq_load = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
#endif
}

static void
sched_setup(void *dummy)
{
#ifdef SMP
	int i;
#endif

	slice_min = (hz/100);	/* 10ms */
	slice_max = (hz/7);	/* ~140ms */

#ifdef SMP
	/* init kseqs */
	/* Create the idmap. */
#ifdef ULE_HTT_EXPERIMENTAL
	if (smp_topology == NULL) {
#else
	if (1) {
#endif
		for (i = 0; i < MAXCPU; i++) {
			kseq_setup(&kseq_cpu[i]);
			kseq_idmap[i] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = 1;
		}
	} else {
		int j;

		for (i = 0; i < smp_topology->ct_count; i++) {
			struct cpu_group *cg;

			cg = &smp_topology->ct_group[i];
			kseq_setup(&kseq_cpu[i]);

			for (j = 0; j < MAXCPU; j++)
				if ((cg->cg_mask & (1 << j)) != 0)
					kseq_idmap[j] = &kseq_cpu[i];
			kseq_cpu[i].ksq_cpus = cg->cg_count;
		}
	}
	callout_init(&kseq_lb_callout, CALLOUT_MPSAFE);
	kseq_balance(NULL);
#else
	kseq_setup(KSEQ_SELF());
#endif
	mtx_lock_spin(&sched_lock);
	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

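/*
 * Example, assuming the stock 5.x timeshare range of 160-223 (so
 * SCHED_PRI_RANGE = 64): an interactive kg with a score of 16 and nice 0
 * gets pri = 16 * 64 / 100 + 160 = 170, while a cpu hog scoring 84 gets
 * 84 * 64 / 100 + 160 = 213, near the bottom of the timeshare range.
 */
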
/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if one abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse distance from the least nice ksegrp.
	 *
	 * If you are outside of the window you will get no slice and
	 * you will be reevaluated each time you are selected on the
	 * run queue.
	 */

	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_PRI_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	/*
	 * Check to see if we need to scale back the slp and run time
	 * in the kg.  This will cause us to forget old interactivity
	 * while maintaining the current ratio.
	 */
	sched_interact_update(kg);

	return;
}

static void
sched_interact_update(struct ksegrp *kg)
{
	/* XXX Fixme, use a linear algorithm and not a while loop. */
	while ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
		kg->kg_runtime = (kg->kg_runtime / 5) * 4;
		kg->kg_slptime = (kg->kg_slptime / 5) * 4;
	}
}

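/*
 * For example, with hz = 100 SCHED_SLP_RUN_MAX is (200 << 10).  A kg at
 * kg_runtime = 150 << 10 and kg_slptime = 100 << 10 exceeds that, so one
 * pass of the loop scales both by 4/5 to 120 << 10 and 80 << 10: the sum
 * drops below the cap while the 3:2 ratio, and hence the interactivity
 * score, is preserved.
 */
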
static int
sched_interact_score(struct ksegrp *kg)
{
	int div;

	if (kg->kg_runtime > kg->kg_slptime) {
		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
		return (SCHED_INTERACT_HALF +
		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
	} else if (kg->kg_slptime > kg->kg_runtime) {
		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
		return (kg->kg_runtime / div);
	}

	/*
	 * This can happen if slptime and runtime are 0.
	 */
	return (0);
}

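/*
 * Two worked scores: a kg with kg_slptime = 3 << 10 and
 * kg_runtime = 1 << 10 gets div = max(1, 3072 / 50) = 61 and a score of
 * 1024 / 61 = 16, below SCHED_INTERACT_THRESH and thus interactive.
 * Swapping the two values gives 50 + (50 - 16) = 84, firmly
 * non-interactive.
 */
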
/*
 * This is only somewhat accurate since, given many processes of the same
 * priority, they will switch when their slices run out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 */

	/*
	 * Shift the tick count out so that the divide doesn't round away
	 * our results.
	 */
	ke->ke_ticks <<= 10;
	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
	ke->ke_ticks >>= 10;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

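/*
 * Example of the scaling above, with hz = 100 (SCHED_CPU_TICKS = 1000):
 * a kse with 500 ticks over a 2000-tick window becomes
 * (((500 << 10) / 2000) * 1000) >> 10 = 250 ticks, the same 25% rate
 * renormalized to the standard window, and the new watermarks leave
 * exactly SCHED_CPU_TICKS of history for the next update.
 */
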
#ifdef SMP
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	mtx_assert(&sched_lock, MA_OWNED);
	if (!smp_started)
		return (0);

	load = -1;
	cpu = 0;

	/* Find the cpu whose kseq carries the lowest load. */
	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i) || (stopped_cpus & (1 << i)) != 0)
			continue;
		kseq = KSEQ_CPU(i);
		if (load == -1 || kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#else
int
sched_pickcpu(void)
{
	return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;
	struct runq *rq;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	td->td_priority = prio;

	if (TD_ON_RUNQ(td)) {
		rq = ke->ke_runq;

		runq_remove(rq, ke);
		runq_add(rq, ke);
	}
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		/*
		 * This queue is always correct except for idle threads which
		 * have a higher priority due to priority propagation.
		 */
		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE &&
		    ke->ke_thread->td_priority > PRI_MIN_IDLE)
			ke->ke_runq = KSEQ_SELF()->ksq_curr;
		runq_add(ke->ke_runq, ke);
		/* setrunqueue(td); */
		return;
	}
	if (ke->ke_runq)
		kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
	/*
	 * We will not be on the run queue. So we must be
	 * sleeping or similar.
	 */
	if (td->td_proc->p_flag & P_SA)
		kse_reassign(ke);
}

void
sched_switchin(struct thread *td)
{
	/* struct kse *ke = td->td_kse; */
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_oncpu = PCPU_GET(cpuid);
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_runq == NULL)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = ticks - td->td_slptime;
		kg->kg_slptime += hzticks << 10;
		sched_interact_update(kg);
		sched_priority(kg);
		if (td->td_kse)
			sched_slice(td->td_kse);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{

	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
	child->ke_runq = NULL;

	/*
	 * Claim that we've been running for one second for statistical
	 * purposes.
	 */
	child->ke_ticks = 0;
	child->ke_ltick = ticks;
	child->ke_ftick = ticks - hz;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{

	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
	/* XXX Need something better here */

	child->kg_slptime = kg->kg_slptime / SCHED_SLP_RUN_THROTTLE;
	child->kg_runtime = kg->kg_runtime / SCHED_SLP_RUN_THROTTLE;
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
}

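/*
 * With SCHED_SLP_RUN_THROTTLE = 100 the child inherits only 1% of the
 * parent's history: a parent at kg_runtime = 100 << 10 forks a child at
 * kg_runtime = 1 << 10, so the child must earn its own interactivity
 * rather than riding on the parent's.
 */
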
void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	if (kg->kg_pri_class == class)
		return;

	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
		kseq->ksq_loads[PRI_BASE(class)]++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	/* XXX Need something better here */
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* kg->kg_slptime += child->kg_slptime; */
	kg->kg_runtime += child->kg_runtime;
	sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_clock(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct thread *td;
#if 0
	struct kse *nke;
#endif

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	td = ke->ke_thread;
	kg = ke->ke_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_flags & TDF_IDLETD)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);

	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * Check for a higher priority task on the run queue.  This can happen
	 * on SMP if another processor woke up a process on our runq.
	 */
	kseq = KSEQ_SELF();
#if 0
	if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq, 0)) != NULL) {
		if (sched_strict &&
		    nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
		else if (nke->ke_thread->td_priority <
		    td->td_priority + SCHED_PRIO_SLOP)
			td->td_flags |= TDF_NEEDRESCHED;
	}
#endif
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;
	sched_interact_update(kg);

	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
#ifdef SMP
	kseq->ksq_rslices--;
#endif

	if (ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;
	int load;

	load = 1;

	mtx_lock_spin(&sched_lock);
	kseq = KSEQ_SELF();

	if (kseq->ksq_load)
		goto out;
#ifdef SMP
	/*
	 * For SMP we may steal another processor's KSEs.  Just search until we
	 * verify that at least one other cpu has a runnable task.
	 */
	if (smp_started) {
		int i;

		for (i = 0; i < mp_maxid; i++) {
			if (CPU_ABSENT(i) || (stopped_cpus & (1 << i)) != 0)
				continue;
			kseq = KSEQ_CPU(i);
			if (kseq->ksq_load > kseq->ksq_cpus)
				goto out;
		}
	}
#endif
	load = 0;
out:
	mtx_unlock_spin(&sched_lock);
	return (load);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;
	struct kseq *kseq;
	struct kse *ke;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		kseq = KSEQ_SELF();
		if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
#ifdef SMP
		    kseq->ksq_load > kseq->ksq_cpus &&
#else
		    kseq->ksq_load > 1 &&
#endif
		    (ke = kseq_choose(kseq, 0)) != NULL &&
		    ke->ke_thread->td_priority < td->td_priority)
			curthread->td_flags |= TDF_NEEDRESCHED;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef SMP
retry:
#endif
	kseq = KSEQ_SELF();
	ke = kseq_choose(kseq, 0);
	if (ke) {
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}

#ifdef SMP
	if (smp_started) {
		/*
		 * Find the cpu with the highest load and steal one proc.
		 */
		if ((kseq = kseq_load_highest()) == NULL)
			return (NULL);

		/*
		 * Remove this kse from this kseq and runq and then requeue
		 * on the current processor.  Then we will dequeue it
		 * normally above.
		 */
		kseq_move(kseq, PCPU_GET(cpuid));
		goto retry;
	}
#endif

	return (NULL);
}

void
sched_add(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	kg = ke->ke_ksegrp;

	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_ITHD:
	case PRI_REALTIME:
		kseq = KSEQ_SELF();
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		ke->ke_cpu = PCPU_GET(cpuid);
		break;
	case PRI_TIMESHARE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		/*
		 * This is for priority prop.
		 */
		if (ke->ke_thread->td_priority > PRI_MIN_IDLE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.\n");
		break;
	}

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
	kseq_add(kseq, ke);
}

void
sched_rem(struct kse *ke)
{
	struct kseq *kseq;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct kse *ke)
{
	fixpt_t pctcpu;

	pctcpu = 0;

	mtx_lock_spin(&sched_lock);
	if (ke->ke_ticks) {
		int rtick;

		/*
		 * Don't update more frequently than twice a second.  Allowing
		 * this causes the cpu usage to decay away too quickly due to
		 * rounding errors.
		 */
		if (ke->ke_ltick < (ticks - (hz / 2)))
			sched_pctcpu_update(ke);

		/* How many rtick per second ? */
		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
	mtx_unlock_spin(&sched_lock);

	return (pctcpu);
}

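/*
 * The pctcpu expression above reduces to (FSCALE * rtick) / realstathz:
 * with realstathz = 100, a kse averaging rtick = 25 stathz ticks per
 * second yields 0.25 * FSCALE, which ps(1) displays as 25%.
 */
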
int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}
