/*-
 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/kern/sched_ule.c 113387 2003-04-12 07:28:36Z jeff $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

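/* XXX ULE has no KTR trace class of its own yet; its tracepoints borrow KTR_NFS. */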
#define KTR_ULE         KTR_NFS

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
/* XXX This is bogus compatibility crap for ps */
static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");

static int sched_strict;
SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");

static int slice_min = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");

static int slice_max = 2;
SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");

int realstathz;
int tickincr = 1;

/*
 * These data structures are allocated within their parent data structure
 * but are scheduler specific.
 */

struct ke_sched {
	int		ske_slice;
	struct runq	*ske_runq;
	/* The following variables are only used for pctcpu calculation */
	int		ske_ltick;	/* Last tick that we were running on */
	int		ske_ftick;	/* First tick that we were running on */
	int		ske_ticks;	/* Tick count */
	/* CPU that we have affinity for. */
	u_char		ske_cpu;
};
#define	ke_slice	ke_sched->ske_slice
#define	ke_runq		ke_sched->ske_runq
#define	ke_ltick	ke_sched->ske_ltick
#define	ke_ftick	ke_sched->ske_ftick
#define	ke_ticks	ke_sched->ske_ticks
#define	ke_cpu		ke_sched->ske_cpu

struct kg_sched {
	int	skg_slptime;		/* Number of ticks we vol. slept */
	int	skg_runtime;		/* Number of ticks we were running */
};
#define	kg_slptime	kg_sched->skg_slptime
#define	kg_runtime	kg_sched->skg_runtime

struct td_sched {
	int	std_slptime;
};
#define	td_slptime	td_sched->std_slptime

struct td_sched td_sched;
struct ke_sched ke_sched;
struct kg_sched kg_sched;

struct ke_sched *kse0_sched = &ke_sched;
struct kg_sched *ksegrp0_sched = &kg_sched;
struct p_sched *proc0_sched = NULL;
struct td_sched *thread0_sched = &td_sched;

/*
 * This priority range has 20 priorities on either end that are reachable
 * only through nice values.
 *
 * PRI_RANGE:	Total priority range for timeshare threads.
 * PRI_NRESV:	Reserved priorities for nice.
 * PRI_NHALF:	Half of the reserved (nice) priorities.
 * PRI_NTHRESH:	Width of the nice window that is considered for slices.
 * PRI_BASE:	The start of the dynamic range.
 * DYN_RANGE:	Number of priorities that are available in the dynamic
 *		priority range.
 * PRI_INTERACT: Maps an interactivity score into the dynamic range.
 */
#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
#define	SCHED_PRI_NRESV		PRIO_TOTAL
#define	SCHED_PRI_NHALF		(PRIO_TOTAL / 2)
#define	SCHED_PRI_NTHRESH	(SCHED_PRI_NHALF - 1)
#define	SCHED_PRI_BASE		((SCHED_PRI_NRESV / 2) + PRI_MIN_TIMESHARE)
#define	SCHED_DYN_RANGE		(SCHED_PRI_RANGE - SCHED_PRI_NRESV)
#define	SCHED_PRI_INTERACT(score)					\
    ((score) * SCHED_DYN_RANGE / SCHED_INTERACT_RANGE)

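/*
 * A worked example, assuming the stock priority constants of this era
 * (PRI_MIN_TIMESHARE = 160, PRI_MAX_TIMESHARE = 223, PRIO_TOTAL = 40):
 *
 *	SCHED_PRI_RANGE   = 223 - 160 + 1 = 64
 *	SCHED_PRI_NRESV   = 40, SCHED_PRI_NHALF = 20, SCHED_PRI_NTHRESH = 19
 *	SCHED_PRI_BASE    = 20 + 160 = 180
 *	SCHED_DYN_RANGE   = 64 - 40 = 24
 *
 * SCHED_PRI_INTERACT() thus maps a score of 0..100 onto 0..24 priority
 * steps above SCHED_PRI_BASE.
 */
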
/*
 * These determine the interactivity of a process.
 *
 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
 *		before throttling back.
 * SLP_RUN_THROTTLE:	Divisor for reducing slp/run time.
 * INTERACT_RANGE:	Range of interactivity values.  Smaller is better.
 * INTERACT_HALF:	Convenience define, half of the interactivity range.
 * INTERACT_THRESH:	Threshold for placement on the current runq.
 */
#define	SCHED_SLP_RUN_MAX	((hz / 10) << 10)
#define	SCHED_SLP_RUN_THROTTLE	(10)
#define	SCHED_INTERACT_RANGE	(100)
#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_RANGE / 2)
#define	SCHED_INTERACT_THRESH	(10)

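/*
 * Example: with hz = 100, SCHED_SLP_RUN_MAX = (100 / 10) << 10, i.e. ten
 * clock ticks of combined sleep + run time in the <<10 fixed point units
 * used by kg_slptime and kg_runtime.  Past that point sched_slice()
 * divides both by SCHED_SLP_RUN_THROTTLE, preserving the sleep/run ratio
 * while forgetting old history.
 */
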
/*
 * These parameters and macros determine the size of the time slice that is
 * granted to each thread.
 *
 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
 * SLICE_MAX:	Maximum time slice granted.
 * SLICE_RANGE:	Range of available time slices scaled by hz.
 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
 * SLICE_NICE:	Determines the amount of slice granted to a scaled nice.
 */
#define	SCHED_SLICE_MIN			(slice_min)
#define	SCHED_SLICE_MAX			(slice_max)
#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
#define	SCHED_SLICE_NICE(nice)						\
    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_PRI_NTHRESH))

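/*
 * Example: sched_setup() sets slice_min = hz / 100 and slice_max = hz / 10,
 * so with hz = 100 slices range from 1 to 10 ticks and SCHED_SLICE_RANGE
 * is 10.  For a kse whose nice value sits `n' above the least nice kse on
 * the queue, SCHED_SLICE_NICE(n) yields 10 - (n * 10 / 19): 10 ticks at
 * n = 0, 6 ticks at n = 9, and 0 at n = 19.
 */
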
/*
 * This macro determines whether or not the kse belongs on the current or
 * next run queue.
 *
 * XXX nice value should affect how interactive a kg is.
 */
#define	SCHED_INTERACTIVE(kg)						\
    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define	SCHED_CURR(kg, ke)	SCHED_INTERACTIVE(kg)
#if 0	/* Alternative definition, kept for reference. */
    (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE || SCHED_INTERACTIVE(kg))
#endif

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
 */

#define	SCHED_CPU_TIME	10
#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)

/*
 * kseq - per processor runqs and statistics.
 */

#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */

struct kseq {
	struct runq	ksq_idle;		/* Queue of IDLE threads. */
	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
	struct runq	*ksq_next;		/* Next timeshare queue. */
	struct runq	*ksq_curr;		/* Current queue. */
	int		ksq_loads[KSEQ_NCLASS];	/* Load for each class */
	int		ksq_load;		/* Aggregate load. */
	short		ksq_nice[PRIO_TOTAL + 1]; /* KSEs in each nice bin. */
	short		ksq_nicemin;		/* Least nice. */
#ifdef SMP
	unsigned int	ksq_rslices;	/* Slices on run queue */
#endif
};

/*
 * One kse queue per processor.
 */
#ifdef SMP
struct kseq	kseq_cpu[MAXCPU];
#define	KSEQ_SELF()	(&kseq_cpu[PCPU_GET(cpuid)])
#define	KSEQ_CPU(x)	(&kseq_cpu[(x)])
#else
struct kseq	kseq_cpu;
#define	KSEQ_SELF()	(&kseq_cpu)
#define	KSEQ_CPU(x)	(&kseq_cpu)
#endif

static void sched_slice(struct kse *ke);
static void sched_priority(struct ksegrp *kg);
static int sched_interact_score(struct ksegrp *kg);
void sched_pctcpu_update(struct kse *ke);
int sched_pickcpu(void);

/* Operations on per processor queues */
static struct kse * kseq_choose(struct kseq *kseq);
static void kseq_setup(struct kseq *kseq);
static void kseq_add(struct kseq *kseq, struct kse *ke);
static void kseq_rem(struct kseq *kseq, struct kse *ke);
static void kseq_nice_add(struct kseq *kseq, int nice);
static void kseq_nice_rem(struct kseq *kseq, int nice);
void kseq_print(struct kseq *kseq);
#ifdef SMP
struct kseq * kseq_load_highest(void);
#endif

void
kseq_print(struct kseq *kseq)
{
	int i;

	if (kseq == NULL)
		kseq = KSEQ_SELF();

	printf("kseq:\n");
	printf("\tload:           %d\n", kseq->ksq_load);
	printf("\tload ITHD:      %d\n", kseq->ksq_loads[PRI_ITHD]);
	printf("\tload REALTIME:  %d\n", kseq->ksq_loads[PRI_REALTIME]);
	printf("\tload TIMESHARE: %d\n", kseq->ksq_loads[PRI_TIMESHARE]);
	printf("\tload IDLE:      %d\n", kseq->ksq_loads[PRI_IDLE]);
	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
	printf("\tnice counts:\n");
	for (i = 0; i < PRIO_TOTAL + 1; i++)
		if (kseq->ksq_nice[i])
			printf("\t\t%d = %d\n",
			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
}

static void
kseq_add(struct kseq *kseq, struct kse *ke)
{
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]++;
	kseq->ksq_load++;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
		CTR6(KTR_ULE,
		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
	}
#ifdef SMP
	kseq->ksq_rslices += ke->ke_slice;
#endif
}

static void
kseq_rem(struct kseq *kseq, struct kse *ke)
{
	kseq->ksq_loads[PRI_BASE(ke->ke_ksegrp->kg_pri_class)]--;
	kseq->ksq_load--;
	ke->ke_runq = NULL;
	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
#ifdef SMP
	kseq->ksq_rslices -= ke->ke_slice;
#endif
}

static void
kseq_nice_add(struct kseq *kseq, int nice)
{
	/* Normalize to zero. */
	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
	if (nice < kseq->ksq_nicemin || kseq->ksq_loads[PRI_TIMESHARE] == 0)
		kseq->ksq_nicemin = nice;
}

static void
kseq_nice_rem(struct kseq *kseq, int nice)
{
	int n;

	/* Normalize to zero. */
	n = nice + SCHED_PRI_NHALF;
	kseq->ksq_nice[n]--;
	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));

	/*
	 * If this wasn't the smallest nice value or there are more in
	 * this bucket we can just return.  Otherwise we have to recalculate
	 * the smallest nice.
	 */
	if (nice != kseq->ksq_nicemin ||
	    kseq->ksq_nice[n] != 0 ||
	    kseq->ksq_loads[PRI_TIMESHARE] == 0)
		return;

	for (; n < SCHED_PRI_NRESV + 1; n++)
		if (kseq->ksq_nice[n]) {
			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
			return;
		}
}

#ifdef SMP
struct kseq *
kseq_load_highest(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	cpu = 0;
	load = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		if (kseq->ksq_load > load) {
			load = kseq->ksq_load;
			cpu = i;
		}
	}
	if (load > 1)
		return (KSEQ_CPU(cpu));

	return (NULL);
}
#endif

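/*
 * Pick the next kse to run.  Timeshare kses are dequeued from ksq_curr;
 * once it is empty the curr and next queues are swapped so that kses
 * which expired onto ksq_next get their turn.  The idle queue is only
 * consulted when both timeshare queues are empty.
 */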
struct kse *
kseq_choose(struct kseq *kseq)
{
	struct kse *ke;
	struct runq *swap;

	swap = NULL;

	for (;;) {
		ke = runq_choose(kseq->ksq_curr);
		if (ke == NULL) {
			/*
			 * We already swapped once and didn't get anywhere.
			 */
			if (swap)
				break;
			swap = kseq->ksq_curr;
			kseq->ksq_curr = kseq->ksq_next;
			kseq->ksq_next = swap;
			continue;
		}
		/*
		 * If we encounter a slice of 0 the kse is in a
		 * TIMESHARE kse group and its nice was too far out
		 * of the range that receives slices.
		 */
		if (ke->ke_slice == 0) {
			runq_remove(ke->ke_runq, ke);
			sched_slice(ke);
			ke->ke_runq = kseq->ksq_next;
			runq_add(ke->ke_runq, ke);
			continue;
		}
		return (ke);
	}

	return (runq_choose(&kseq->ksq_idle));
}

static void
kseq_setup(struct kseq *kseq)
{
	runq_init(&kseq->ksq_timeshare[0]);
	runq_init(&kseq->ksq_timeshare[1]);
	runq_init(&kseq->ksq_idle);

	kseq->ksq_curr = &kseq->ksq_timeshare[0];
	kseq->ksq_next = &kseq->ksq_timeshare[1];

	kseq->ksq_loads[PRI_ITHD] = 0;
	kseq->ksq_loads[PRI_REALTIME] = 0;
	kseq->ksq_loads[PRI_TIMESHARE] = 0;
	kseq->ksq_loads[PRI_IDLE] = 0;
#ifdef SMP
	kseq->ksq_rslices = 0;
#endif
}

static void
sched_setup(void *dummy)
{
	int i;

	slice_min = (hz/100);
	slice_max = (hz/10);

	mtx_lock_spin(&sched_lock);
	/* init kseqs */
	for (i = 0; i < MAXCPU; i++)
		kseq_setup(KSEQ_CPU(i));

	kseq_add(KSEQ_SELF(), &kse0);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct ksegrp *kg)
{
	int pri;

	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;

	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
	pri += SCHED_PRI_BASE;
	pri += kg->kg_nice;

	if (pri > PRI_MAX_TIMESHARE)
		pri = PRI_MAX_TIMESHARE;
	else if (pri < PRI_MIN_TIMESHARE)
		pri = PRI_MIN_TIMESHARE;

	kg->kg_user_pri = pri;

	return;
}

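/*
 * Example, assuming the stock constants noted above (SCHED_PRI_BASE = 180,
 * SCHED_DYN_RANGE = 24): an interactivity score of 5 at nice 0 yields
 * pri = (5 * 24 / 100) + 180 = 181, while a cpu hog scoring 95 yields
 * pri = (95 * 24 / 100) + 180 = 202.
 */
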
/*
 * Calculate a time slice based on the properties of the kseg and the runq
 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
 */
static void
sched_slice(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	kg = ke->ke_ksegrp;
	kseq = KSEQ_CPU(ke->ke_cpu);

	/*
	 * Rationale:
	 * KSEs in interactive ksegs get the minimum slice so that we
	 * quickly notice if one abuses its advantage.
	 *
	 * KSEs in non-interactive ksegs are assigned a slice that is
	 * based on the kseg's nice value relative to the least nice kseg
	 * on the run queue for this cpu.
	 *
	 * If the KSE is less nice than all others it gets the maximum
	 * slice and other KSEs will adjust their slice relative to
	 * this when they first expire.
	 *
	 * There is a 20 point window that starts relative to the least
	 * nice kse on the run queue.  Slice size is determined by
	 * the kse's distance from the least nice ksegrp.
	 *
	 * If you are outside of the window you will get no slice and
	 * you will be reevaluated each time you are selected on the
	 * run queue.
	 */

	if (!SCHED_INTERACTIVE(kg)) {
		int nice;

		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
		if (kseq->ksq_loads[PRI_TIMESHARE] == 0 ||
		    kg->kg_nice < kseq->ksq_nicemin)
			ke->ke_slice = SCHED_SLICE_MAX;
		else if (nice <= SCHED_PRI_NTHRESH)
			ke->ke_slice = SCHED_SLICE_NICE(nice);
		else
			ke->ke_slice = 0;
	} else
		ke->ke_slice = SCHED_SLICE_MIN;

	CTR6(KTR_ULE,
	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
	    kseq->ksq_loads[PRI_TIMESHARE], SCHED_INTERACTIVE(kg));

	/*
	 * Check to see if we need to scale back the slp and run time
	 * in the kg.  This will cause us to forget old interactivity
	 * while maintaining the current ratio.
	 */
	CTR4(KTR_ULE, "Slp vs Run %p (Slp %d, Run %d, Score %d)",
	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
	    sched_interact_score(kg));

	if ((kg->kg_runtime + kg->kg_slptime) > SCHED_SLP_RUN_MAX) {
		kg->kg_runtime /= SCHED_SLP_RUN_THROTTLE;
		kg->kg_slptime /= SCHED_SLP_RUN_THROTTLE;
	}
	CTR4(KTR_ULE, "Slp vs Run(2) %p (Slp %d, Run %d, Score %d)",
	    ke, kg->kg_slptime >> 10, kg->kg_runtime >> 10,
	    sched_interact_score(kg));

	return;
}

static int
sched_interact_score(struct ksegrp *kg)
{
	int big;
	int small;
	int base;

	if (kg->kg_runtime > kg->kg_slptime) {
		big = kg->kg_runtime;
		small = kg->kg_slptime;
		base = SCHED_INTERACT_HALF;
	} else {
		big = kg->kg_slptime;
		small = kg->kg_runtime;
		base = 0;
	}

	big /= SCHED_INTERACT_HALF;
	if (big != 0)
		small /= big;
	else
		small = 0;

	small += base;
	/* XXX Factor in nice */
	return (small);
}

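/*
 * Example: a kseg that has slept ten times as long as it has run, say
 * kg_slptime = 50 << 10 and kg_runtime = 5 << 10, takes the else branch:
 * big = 51200 / 50 = 1024, small = 5120 / 1024 = 5, giving a score of 5,
 * comfortably interactive.  With the two times swapped the first branch
 * gives 50 + 5 = 55, well above SCHED_INTERACT_THRESH.
 */
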
/*
 * This is only somewhat accurate: given many processes of the same
 * priority, each will switch when its slice runs out, which will be
 * at most SCHED_SLICE_MAX.
 */
int
sched_rr_interval(void)
{
	return (SCHED_SLICE_MAX);
}

void
sched_pctcpu_update(struct kse *ke)
{
	/*
	 * Adjust counters and watermark for pctcpu calc.
	 *
	 * Shift the tick count out so that the divide doesn't round away
	 * our results.
	 */
	ke->ke_ticks <<= 10;
	ke->ke_ticks = (ke->ke_ticks / (ke->ke_ltick - ke->ke_ftick)) *
		    SCHED_CPU_TICKS;
	ke->ke_ticks >>= 10;
	ke->ke_ltick = ticks;
	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
}

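/*
 * Example: with hz = 100 (SCHED_CPU_TICKS = 1000), a kse with ke_ftick = 0,
 * ke_ltick = 2000 and ke_ticks = 500 ran 25% of its window; the rescale
 * above produces ((500 << 10) / 2000) * 1000 >> 10 = 250, the same ratio
 * expressed over the new 1000 tick window.
 */
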
#ifdef SMP
/* XXX Should be changed to kseq_load_lowest() */
int
sched_pickcpu(void)
{
	struct kseq *kseq;
	int load;
	int cpu;
	int i;

	if (!smp_started)
		return (0);

	load = -1;	/* Sentinel: accept the first present cpu's load. */
	cpu = 0;

	for (i = 0; i < mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		kseq = KSEQ_CPU(i);
		if (load == -1 || kseq->ksq_load < load) {
			cpu = i;
			load = kseq->ksq_load;
		}
	}

	CTR1(KTR_RUNQ, "sched_pickcpu: %d", cpu);
	return (cpu);
}
#else
int
sched_pickcpu(void)
{
	return (0);
}
#endif

void
sched_prio(struct thread *td, u_char prio)
{
	struct kse *ke;
	struct runq *rq;

	mtx_assert(&sched_lock, MA_OWNED);
	ke = td->td_kse;
	td->td_priority = prio;

	if (TD_ON_RUNQ(td)) {
		rq = ke->ke_runq;

		runq_remove(rq, ke);
		runq_add(rq, ke);
	}
}

void
sched_switchout(struct thread *td)
{
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);

	ke = td->td_kse;

	td->td_last_kse = ke;
	td->td_lastcpu = td->td_oncpu;
	td->td_oncpu = NOCPU;
	td->td_flags &= ~TDF_NEEDRESCHED;

	if (TD_IS_RUNNING(td)) {
		runq_add(ke->ke_runq, ke);
		/* setrunqueue(td); */
		return;
	}
	if (ke->ke_runq)
		kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
	/*
	 * We will not be on the run queue.  So we must be
	 * sleeping or similar.
	 */
	if (td->td_proc->p_flag & P_THREADED)
		kse_reassign(ke);
}

void
sched_switchin(struct thread *td)
{
	/* struct kse *ke = td->td_kse; */
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_oncpu = PCPU_GET(cpuid);

	if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
	    td->td_priority != td->td_ksegrp->kg_user_pri)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

void
sched_nice(struct ksegrp *kg, int nice)
{
	struct kse *ke;
	struct thread *td;
	struct kseq *kseq;

	/*
	 * We need to adjust the nice counts for running KSEs.
	 */
	if (kg->kg_pri_class == PRI_TIMESHARE)
		FOREACH_KSE_IN_GROUP(kg, ke) {
			if (ke->ke_state != KES_ONRUNQ &&
			    ke->ke_state != KES_THREAD)
				continue;
			kseq = KSEQ_CPU(ke->ke_cpu);
			kseq_nice_rem(kseq, kg->kg_nice);
			kseq_nice_add(kseq, nice);
		}
	kg->kg_nice = nice;
	sched_priority(kg);
	FOREACH_THREAD_IN_GROUP(kg, td)
		td->td_flags |= TDF_NEEDRESCHED;
}

void
sched_sleep(struct thread *td, u_char prio)
{
	mtx_assert(&sched_lock, MA_OWNED);

	td->td_slptime = ticks;
	td->td_priority = prio;

	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
	    td->td_kse, td->td_slptime);
}

void
sched_wakeup(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Let the kseg know how long we slept for.  This is because process
	 * interactivity behavior is modeled in the kseg.
	 */
	if (td->td_slptime) {
		struct ksegrp *kg;
		int hzticks;

		kg = td->td_ksegrp;
		hzticks = ticks - td->td_slptime;
		kg->kg_slptime += hzticks << 10;
		sched_priority(kg);
		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
		    td->td_kse, hzticks);
		td->td_slptime = 0;
	}
	setrunqueue(td);
	if (td->td_priority < curthread->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct proc *p, struct proc *p1)
{

	mtx_assert(&sched_lock, MA_OWNED);

	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
}

void
sched_fork_kse(struct kse *ke, struct kse *child)
{
	child->ke_slice = ke->ke_slice;
	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
	child->ke_runq = NULL;

	/*
	 * Claim that we've been running for one second for statistical
	 * purposes.
	 */
	child->ke_ticks = 0;
	child->ke_ltick = ticks;
	child->ke_ftick = ticks - hz;
}

void
sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
	/* XXX Need something better here */
	if (kg->kg_slptime > kg->kg_runtime) {
		child->kg_slptime = SCHED_DYN_RANGE;
		child->kg_runtime = kg->kg_slptime / SCHED_DYN_RANGE;
	} else {
		child->kg_runtime = SCHED_DYN_RANGE;
		child->kg_slptime = kg->kg_runtime / SCHED_DYN_RANGE;
	}

	child->kg_user_pri = kg->kg_user_pri;
	child->kg_nice = kg->kg_nice;
}

void
sched_fork_thread(struct thread *td, struct thread *child)
{
}

void
sched_class(struct ksegrp *kg, int class)
{
	struct kseq *kseq;
	struct kse *ke;

	if (kg->kg_pri_class == class)
		return;

	FOREACH_KSE_IN_GROUP(kg, ke) {
		if (ke->ke_state != KES_ONRUNQ &&
		    ke->ke_state != KES_THREAD)
			continue;
		kseq = KSEQ_CPU(ke->ke_cpu);

		kseq->ksq_loads[PRI_BASE(kg->kg_pri_class)]--;
		kseq->ksq_loads[PRI_BASE(class)]++;

		if (kg->kg_pri_class == PRI_TIMESHARE)
			kseq_nice_rem(kseq, kg->kg_nice);
		else if (class == PRI_TIMESHARE)
			kseq_nice_add(kseq, kg->kg_nice);
	}

	kg->kg_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct proc *child)
{
	/* XXX Need something better here */
	mtx_assert(&sched_lock, MA_OWNED);
	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
}

void
sched_exit_kse(struct kse *ke, struct kse *child)
{
	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
{
}

void
sched_exit_thread(struct thread *td, struct thread *child)
{
}

void
sched_clock(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;
	struct thread *td;
#if 0
	struct kse *nke;
#endif

	/*
	 * sched_setup() apparently happens prior to stathz being set.  We
	 * need to resolve the timers earlier in the boot so we can avoid
	 * calculating this here.
	 */
	if (realstathz == 0) {
		realstathz = stathz ? stathz : hz;
		tickincr = hz / realstathz;
		/*
		 * XXX This does not work for values of stathz that are much
		 * larger than hz.
		 */
		if (tickincr == 0)
			tickincr = 1;
	}

	td = ke->ke_thread;
	kg = ke->ke_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td != NULL), ("schedclock: null thread pointer"));

	/* Adjust ticks for pctcpu */
	ke->ke_ticks++;
	ke->ke_ltick = ticks;

	/* Go up to one second beyond our max and then trim back down */
	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
		sched_pctcpu_update(ke);

	if (td->td_kse->ke_flags & KEF_IDLEKSE)
		return;

	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);

	/*
	 * We only do slicing code for TIMESHARE ksegrps.
	 */
	if (kg->kg_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * Check for a higher priority task on the run queue.  This can happen
	 * on SMP if another processor woke up a process on our runq.
	 */
	kseq = KSEQ_SELF();
#if 0
	/*
	 * Disabled, unfinished preemption check.  SCHED_PRIO_SLOP is not
	 * defined in this file, and the slop term below is a guess at the
	 * intended expression.
	 */
	if (kseq->ksq_load > 1 && (nke = kseq_choose(kseq)) != NULL) {
		if (sched_strict &&
		    nke->ke_thread->td_priority < td->td_priority)
			td->td_flags |= TDF_NEEDRESCHED;
		else if (nke->ke_thread->td_priority <
		    td->td_priority - SCHED_PRIO_SLOP)
			td->td_flags |= TDF_NEEDRESCHED;
	}
#endif
	/*
	 * We used a tick; charge it to the ksegrp so that we can compute our
	 * interactivity.
	 */
	kg->kg_runtime += tickincr << 10;

	/*
	 * We used up one time slice.
	 */
	ke->ke_slice--;
#ifdef SMP
	kseq->ksq_rslices--;
#endif

	if (ke->ke_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	kseq_rem(kseq, ke);
	sched_priority(kg);
	sched_slice(ke);
	if (SCHED_CURR(kg, ke))
		ke->ke_runq = kseq->ksq_curr;
	else
		ke->ke_runq = kseq->ksq_next;
	kseq_add(kseq, ke);
	td->td_flags |= TDF_NEEDRESCHED;
}

int
sched_runnable(void)
{
	struct kseq *kseq;

	kseq = KSEQ_SELF();

	if (kseq->ksq_load)
		return (1);
#ifdef SMP
	/*
	 * For SMP we may steal other processors' KSEs.  Just search until we
	 * verify that at least one other cpu has a runnable task.
	 */
	if (smp_started) {
		int i;

		for (i = 0; i < mp_maxid; i++) {
			if (CPU_ABSENT(i))
				continue;
			kseq = KSEQ_CPU(i);
			if (kseq->ksq_load)
				return (1);
		}
	}
#endif
	return (0);
}

void
sched_userret(struct thread *td)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;

	if (td->td_priority != kg->kg_user_pri) {
		mtx_lock_spin(&sched_lock);
		td->td_priority = kg->kg_user_pri;
		mtx_unlock_spin(&sched_lock);
	}
}

struct kse *
sched_choose(void)
{
	struct kseq *kseq;
	struct kse *ke;

#ifdef SMP
retry:
#endif
	kseq = KSEQ_SELF();
	ke = kseq_choose(kseq);
	if (ke) {
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;

		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
			    ke, ke->ke_runq, ke->ke_slice,
			    ke->ke_thread->td_priority);
		}
		return (ke);
	}

#ifdef SMP
	if (smp_started) {
		/*
		 * Find the cpu with the highest load and steal one kse.
		 */
		if ((kseq = kseq_load_highest()) == NULL)
			return (NULL);

		/*
		 * Remove this kse from this kseq and runq and then requeue
		 * on the current processor.  Then we will dequeue it
		 * normally above.
		 */
		ke = kseq_choose(kseq);
		runq_remove(ke->ke_runq, ke);
		ke->ke_state = KES_THREAD;
		kseq_rem(kseq, ke);

		ke->ke_cpu = PCPU_GET(cpuid);
		sched_add(ke);
		goto retry;
	}
#endif

	return (NULL);
}

void
sched_add(struct kse *ke)
{
	struct kseq *kseq;
	struct ksegrp *kg;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
	KASSERT((ke->ke_thread->td_kse != NULL),
	    ("sched_add: No KSE on thread"));
	KASSERT(ke->ke_state != KES_ONRUNQ,
	    ("sched_add: kse %p (%s) already in run queue", ke,
	    ke->ke_proc->p_comm));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("sched_add: process swapped out"));
	KASSERT(ke->ke_runq == NULL,
	    ("sched_add: KSE %p is still assigned to a run queue", ke));

	kg = ke->ke_ksegrp;

	switch (PRI_BASE(kg->kg_pri_class)) {
	case PRI_ITHD:
	case PRI_REALTIME:
		kseq = KSEQ_SELF();
		ke->ke_runq = kseq->ksq_curr;
		ke->ke_slice = SCHED_SLICE_MAX;
		break;
	case PRI_TIMESHARE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		if (SCHED_CURR(kg, ke))
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = kseq->ksq_next;
		break;
	case PRI_IDLE:
		kseq = KSEQ_CPU(ke->ke_cpu);
		/*
		 * This is for priority propagation.
		 */
		if (ke->ke_thread->td_priority < PRI_MAX_TIMESHARE)
			ke->ke_runq = kseq->ksq_curr;
		else
			ke->ke_runq = &kseq->ksq_idle;
		ke->ke_slice = SCHED_SLICE_MIN;
		break;
	default:
		panic("Unknown pri class.\n");
		break;
	}

	ke->ke_ksegrp->kg_runq_kses++;
	ke->ke_state = KES_ONRUNQ;

	runq_add(ke->ke_runq, ke);
	kseq_add(kseq, ke);
}

void
sched_rem(struct kse *ke)
{
	struct kseq *kseq;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));

	ke->ke_state = KES_THREAD;
	ke->ke_ksegrp->kg_runq_kses--;
	kseq = KSEQ_CPU(ke->ke_cpu);
	runq_remove(ke->ke_runq, ke);
	kseq_rem(kseq, ke);
}

fixpt_t
sched_pctcpu(struct kse *ke)
{
	fixpt_t pctcpu;

	pctcpu = 0;

	if (ke->ke_ticks) {
		int rtick;

		/* Update to account for time potentially spent sleeping */
		ke->ke_ltick = ticks;
		sched_pctcpu_update(ke);

		/* How many rticks per second? */
		rtick = ke->ke_ticks / SCHED_CPU_TIME;
		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
	}

	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;

	return (pctcpu);
}

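/*
 * Example: with realstathz = 128, a kse that accumulated ke_ticks = 640
 * over the SCHED_CPU_TIME = 10 second window averages rtick = 64 stathz
 * ticks per second, and the expression above reduces to
 * (FSCALE * 64) / 128, i.e. 50% in fixpt_t form.
 */
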
int
sched_sizeof_kse(void)
{
	return (sizeof(struct kse) + sizeof(struct ke_sched));
}

int
sched_sizeof_ksegrp(void)
{
	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}