1/*-
2 * Copyright (c) 2002-2003, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 121896 2003-11-02 10:56:48Z jeff $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/kernel.h>
33#include <sys/ktr.h>
34#include <sys/lock.h>
35#include <sys/mutex.h>
36#include <sys/proc.h>
37#include <sys/resource.h>
38#include <sys/sched.h>
39#include <sys/smp.h>
40#include <sys/sx.h>
41#include <sys/sysctl.h>
42#include <sys/sysproto.h>
43#include <sys/vmmeter.h>
44#ifdef DDB
45#include <ddb/ddb.h>
46#endif
47#ifdef KTRACE
48#include <sys/uio.h>
49#include <sys/ktrace.h>
50#endif
51
52#include <machine/cpu.h>
53#include <machine/smp.h>
54
55#define KTR_ULE         KTR_NFS
56
57/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
58/* XXX This is bogus compatibility crap for ps */
59static fixpt_t  ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
60SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
61
62static void sched_setup(void *dummy);
63SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
64
65static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "SCHED");
66
67static int sched_strict;
68SYSCTL_INT(_kern_sched, OID_AUTO, strict, CTLFLAG_RD, &sched_strict, 0, "");
69
70static int slice_min = 1;
71SYSCTL_INT(_kern_sched, OID_AUTO, slice_min, CTLFLAG_RW, &slice_min, 0, "");
72
73static int slice_max = 10;
74SYSCTL_INT(_kern_sched, OID_AUTO, slice_max, CTLFLAG_RW, &slice_max, 0, "");
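
/*
 * Both knobs are CTLFLAG_RW and live under the kern.sched node, so they
 * can be tuned on a running system, e.g. "sysctl kern.sched.slice_max=20".
 * Note that sched_setup() below replaces the compiled-in defaults with
 * hz derived values (hz/100 and hz/7) at boot.
 */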
75
76int realstathz;
77int tickincr = 1;
78
79#ifdef SMP
80/* Callout to handle load balancing SMP systems. */
81static struct callout kseq_lb_callout;
82#endif
83
84/*
85 * These data structures are allocated within their parent data structure but
86 * are scheduler specific.
87 */
88
89struct ke_sched {
90	int		ske_slice;
91	struct runq	*ske_runq;
92	/* The following variables are only used for pctcpu calculation */
93	int		ske_ltick;	/* Last tick that we were running on */
94	int		ske_ftick;	/* First tick that we were running on */
95	int		ske_ticks;	/* Tick count */
96	/* CPU that we have affinity for. */
97	u_char		ske_cpu;
98};
99#define	ke_slice	ke_sched->ske_slice
100#define	ke_runq		ke_sched->ske_runq
101#define	ke_ltick	ke_sched->ske_ltick
102#define	ke_ftick	ke_sched->ske_ftick
103#define	ke_ticks	ke_sched->ske_ticks
104#define	ke_cpu		ke_sched->ske_cpu
105#define	ke_assign	ke_procq.tqe_next
106
107#define	KEF_ASSIGNED	KEF_SCHED0	/* KSE is being migrated. */
108
109struct kg_sched {
110	int	skg_slptime;		/* Number of ticks we vol. slept */
111	int	skg_runtime;		/* Number of ticks we were running */
112};
113#define	kg_slptime	kg_sched->skg_slptime
114#define	kg_runtime	kg_sched->skg_runtime
115
116struct td_sched {
117	int	std_slptime;
118};
119#define	td_slptime	td_sched->std_slptime
120
121struct td_sched td_sched;
122struct ke_sched ke_sched;
123struct kg_sched kg_sched;
124
125struct ke_sched *kse0_sched = &ke_sched;
126struct kg_sched *ksegrp0_sched = &kg_sched;
127struct p_sched *proc0_sched = NULL;
128struct td_sched *thread0_sched = &td_sched;
129
130/*
131 * The priority is primarily determined by the interactivity score.  Thus, we
132 * give lower(better) priorities to kse groups that use less CPU.  The nice
133 * value is then directly added to this to allow nice to have some effect
134 * on latency.
135 *
136 * PRI_RANGE:	Total priority range for timeshare threads.
137 * PRI_NRESV:	Number of nice values.
138 * PRI_BASE:	The start of the dynamic range.
139 */
140#define	SCHED_PRI_RANGE		(PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
141#define	SCHED_PRI_NRESV		((PRIO_MAX - PRIO_MIN) + 1)
142#define	SCHED_PRI_NHALF		(SCHED_PRI_NRESV / 2)
143#define	SCHED_PRI_BASE		(PRI_MIN_TIMESHARE)
144#define	SCHED_PRI_INTERACT(score)					\
145    ((score) * SCHED_PRI_RANGE / SCHED_INTERACT_MAX)
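
/*
 * Worked example (a sketch assuming the stock priority layout of the
 * day: PRI_MIN_TIMESHARE 160, PRI_MAX_TIMESHARE 223, PRIO_MIN -20,
 * PRIO_MAX 20): SCHED_PRI_RANGE is 64 and SCHED_PRI_NRESV is 41, so an
 * interactivity score of 50 maps to
 *
 *	SCHED_PRI_INTERACT(50) = 50 * 64 / 100 = 32
 *
 * and sched_priority() computes 160 + 32 + nice, i.e. priority 192 for
 * a nice 0 ksegrp, before clamping to the timeshare range.
 */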
146
147/*
148 * These determine the interactivity of a process.
149 *
150 * SLP_RUN_MAX:	Maximum amount of sleep time + run time we'll accumulate
151 *		before throttling back.
152 * SLP_RUN_FORK:	Maximum slp+run time to inherit at fork time.
153 * INTERACT_MAX:	Maximum interactivity value.  Smaller is better.
154 * INTERACT_THRESH:	Threshold for placement on the current runq.
155 */
156#define	SCHED_SLP_RUN_MAX	((hz * 5) << 10)
157#define	SCHED_SLP_RUN_FORK	((hz / 2) << 10)
158#define	SCHED_INTERACT_MAX	(100)
159#define	SCHED_INTERACT_HALF	(SCHED_INTERACT_MAX / 2)
160#define	SCHED_INTERACT_THRESH	(30)
161
162/*
163 * These parameters and macros determine the size of the time slice that is
164 * granted to each thread.
165 *
166 * SLICE_MIN:	Minimum time slice granted, in units of ticks.
167 * SLICE_MAX:	Maximum time slice granted.
168 * SLICE_RANGE:	Range of available time slices scaled by hz.
169 * SLICE_SCALE:	The number of slices granted per val in the range of [0, max].
170 * SLICE_NICE:  Determine the amount of slice granted to a scaled nice.
171 * SLICE_NTHRESH:	The nice cutoff point for slice assignment.
172 */
173#define	SCHED_SLICE_MIN			(slice_min)
174#define	SCHED_SLICE_MAX			(slice_max)
175#define	SCHED_SLICE_NTHRESH	(SCHED_PRI_NHALF - 1)
176#define	SCHED_SLICE_RANGE		(SCHED_SLICE_MAX - SCHED_SLICE_MIN + 1)
177#define	SCHED_SLICE_SCALE(val, max)	(((val) * SCHED_SLICE_RANGE) / (max))
178#define	SCHED_SLICE_NICE(nice)						\
179    (SCHED_SLICE_MAX - SCHED_SLICE_SCALE((nice), SCHED_SLICE_NTHRESH))
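
/*
 * Worked example (a sketch assuming hz is 1000): sched_setup() sets
 * slice_min to hz/100 and slice_max to hz/7, giving SLICE_MIN 10,
 * SLICE_MAX 142, SLICE_RANGE 133 and SLICE_NTHRESH 19.  A kse at the
 * same nice value as the least nice kse on the queue (distance 0) gets
 *
 *	SCHED_SLICE_NICE(0)  = 142 - 0 * 133 / 19  = 142 ticks
 *
 * while one ten nice levels away gets
 *
 *	SCHED_SLICE_NICE(10) = 142 - 10 * 133 / 19 = 72 ticks
 *
 * and anything beyond the 19 level window receives no slice at all in
 * sched_slice() unless it is itself at nice 0.
 */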
180
181/*
182 * This macro determines whether or not the kse belongs on the current or
183 * next run queue.
184 */
185#define	SCHED_INTERACTIVE(kg)						\
186    (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
187#define	SCHED_CURR(kg, ke)						\
188    (ke->ke_thread->td_priority != kg->kg_user_pri ||			\
189    SCHED_INTERACTIVE(kg))
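
/*
 * Example: a mostly sleeping, nice 0 shell keeps its interactivity
 * score under SCHED_INTERACT_THRESH and is always queued on ksq_curr,
 * while a cpu bound encoder drifts above the threshold and goes to
 * ksq_next, running only after the current queue drains and the queues
 * are swapped in kseq_choose().  The first clause of SCHED_CURR also
 * keeps threads whose priority has been elevated above kg_user_pri
 * (e.g. by priority propagation) on the current queue regardless of
 * their score.
 */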
190
191/*
192 * Cpu percentage computation macros and defines.
193 *
194 * SCHED_CPU_TIME:	Number of seconds to average the cpu usage across.
195 * SCHED_CPU_TICKS:	Number of hz ticks to average the cpu usage across.
196 */
197
198#define	SCHED_CPU_TIME	10
199#define	SCHED_CPU_TICKS	(hz * SCHED_CPU_TIME)
200
201/*
202 * kseq - per processor runqs and statistics.
203 */
204
205#define	KSEQ_NCLASS	(PRI_IDLE + 1)	/* Number of run classes. */
206
207struct kseq {
208	struct runq	ksq_idle;		/* Queue of IDLE threads. */
209	struct runq	ksq_timeshare[2];	/* Run queues for !IDLE. */
210	struct runq	*ksq_next;		/* Next timeshare queue. */
211	struct runq	*ksq_curr;		/* Current queue. */
212	int		ksq_load_timeshare;	/* Load for timeshare. */
213	int		ksq_load;		/* Aggregate load. */
214	short		ksq_nice[SCHED_PRI_NRESV]; /* KSEs in each nice bin. */
215	short		ksq_nicemin;		/* Least nice. */
216#ifdef SMP
217	int		ksq_load_transferable;	/* kses that may be migrated. */
218	unsigned int	ksq_rslices;	/* Slices on run queue */
219	int		ksq_cpus;	/* Count of CPUs in this kseq. */
220	struct kse 	*ksq_assigned;	/* KSEs assigned by another CPU. */
221#endif
222};
223
224/*
225 * One kse queue per processor.
226 */
227#ifdef SMP
228static int kseq_idle;
229static struct kseq	kseq_cpu[MAXCPU];
230static struct kseq	*kseq_idmap[MAXCPU];
231#define	KSEQ_SELF()	(kseq_idmap[PCPU_GET(cpuid)])
232#define	KSEQ_CPU(x)	(kseq_idmap[(x)])
233#else
234static struct kseq	kseq_cpu;
235#define	KSEQ_SELF()	(&kseq_cpu)
236#define	KSEQ_CPU(x)	(&kseq_cpu)
237#endif
238
239static void sched_slice(struct kse *ke);
240static void sched_priority(struct ksegrp *kg);
241static int sched_interact_score(struct ksegrp *kg);
242static void sched_interact_update(struct ksegrp *kg);
243static void sched_interact_fork(struct ksegrp *kg);
244static void sched_pctcpu_update(struct kse *ke);
245
246/* Operations on per processor queues */
247static struct kse * kseq_choose(struct kseq *kseq);
248static void kseq_setup(struct kseq *kseq);
249static void kseq_add(struct kseq *kseq, struct kse *ke);
250static void kseq_rem(struct kseq *kseq, struct kse *ke);
251static void kseq_nice_add(struct kseq *kseq, int nice);
252static void kseq_nice_rem(struct kseq *kseq, int nice);
253void kseq_print(int cpu);
254#ifdef SMP
255#if 0
256static int sched_pickcpu(void);
257#endif
258static struct kse *runq_steal(struct runq *rq);
259static struct kseq *kseq_load_highest(void);
260static void kseq_balance(void *arg);
261static void kseq_move(struct kseq *from, int cpu);
262static int kseq_find(void);
263static void kseq_notify(struct kse *ke, int cpu);
264static void kseq_assign(struct kseq *);
265static struct kse *kseq_steal(struct kseq *kseq);
266#define	KSE_CAN_MIGRATE(ke, class)	((class) != PRI_ITHD)
267#endif
268
269void
270kseq_print(int cpu)
271{
272	struct kseq *kseq;
273	int i;
274
275	kseq = KSEQ_CPU(cpu);
276
277	printf("kseq:\n");
278	printf("\tload:           %d\n", kseq->ksq_load);
279	printf("\tload REALTIME:  %d\n", kseq->ksq_load_timeshare);
280#ifdef SMP
281	printf("\tload transferable: %d\n", kseq->ksq_load_transferable);
282#endif
283	printf("\tnicemin:\t%d\n", kseq->ksq_nicemin);
284	printf("\tnice counts:\n");
285	for (i = 0; i < SCHED_PRI_NRESV; i++)
286		if (kseq->ksq_nice[i])
287			printf("\t\t%d = %d\n",
288			    i - SCHED_PRI_NHALF, kseq->ksq_nice[i]);
289}
290
291static void
292kseq_add(struct kseq *kseq, struct kse *ke)
293{
294	int class;
295	mtx_assert(&sched_lock, MA_OWNED);
296	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
297	if (class == PRI_TIMESHARE)
298		kseq->ksq_load_timeshare++;
299#ifdef SMP
300	if (KSE_CAN_MIGRATE(ke, class))
301		kseq->ksq_load_transferable++;
302	kseq->ksq_rslices += ke->ke_slice;
303#endif
304	kseq->ksq_load++;
305	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
306	CTR6(KTR_ULE, "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
307	    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
308	    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
309	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
310		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
311}
312
313static void
314kseq_rem(struct kseq *kseq, struct kse *ke)
315{
316	int class;
317	mtx_assert(&sched_lock, MA_OWNED);
318	class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
319	if (class == PRI_TIMESHARE)
320		kseq->ksq_load_timeshare--;
321#ifdef SMP
322	if (KSE_CAN_MIGRATE(ke, class))
323		kseq->ksq_load_transferable--;
324	kseq->ksq_rslices -= ke->ke_slice;
325#endif
326	kseq->ksq_load--;
327	ke->ke_runq = NULL;
328	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
329		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
330}
331
332static void
333kseq_nice_add(struct kseq *kseq, int nice)
334{
335	mtx_assert(&sched_lock, MA_OWNED);
336	/* Normalize to zero. */
337	kseq->ksq_nice[nice + SCHED_PRI_NHALF]++;
338	if (nice < kseq->ksq_nicemin || kseq->ksq_load_timeshare == 1)
339		kseq->ksq_nicemin = nice;
340}
341
342static void
343kseq_nice_rem(struct kseq *kseq, int nice)
344{
345	int n;
346
347	mtx_assert(&sched_lock, MA_OWNED);
348	/* Normalize to zero. */
349	n = nice + SCHED_PRI_NHALF;
350	kseq->ksq_nice[n]--;
351	KASSERT(kseq->ksq_nice[n] >= 0, ("Negative nice count."));
352
353	/*
354	 * If this wasn't the smallest nice value or there are more in
355	 * this bucket we can just return.  Otherwise we have to recalculate
356	 * the smallest nice.
357	 */
358	if (nice != kseq->ksq_nicemin ||
359	    kseq->ksq_nice[n] != 0 ||
360	    kseq->ksq_load_timeshare == 0)
361		return;
362
363	for (; n < SCHED_PRI_NRESV; n++)
364		if (kseq->ksq_nice[n]) {
365			kseq->ksq_nicemin = n - SCHED_PRI_NHALF;
366			return;
367		}
368}
369
370#ifdef SMP
371/*
372 * kseq_balance is a simple CPU load balancing algorithm.  It operates by
373 * finding the least loaded and most loaded cpu and equalizing their load
374 * by migrating some processes.
375 *
376 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
377 * installations will only have 2 cpus.  Secondly, load balancing too much at
378 * once can have an unpleasant effect on the system.  The scheduler rarely has
379 * enough information to make perfect decisions.  So this algorithm favors
380 * simplicity and more gradual effects on load in larger systems.
381 *
382 * It could be improved by considering the priorities and slices assigned to
383 * each task prior to balancing them.  There are many pathological cases with
384 * any approach and so the semi random algorithm below may work as well as any.
385 *
386 */
387static void
388kseq_balance(void *arg)
389{
390	struct kseq *kseq;
391	int high_load;
392	int low_load;
393	int high_cpu;
394	int low_cpu;
395	int move;
396	int diff;
397	int i;
398
399	high_cpu = 0;
400	low_cpu = 0;
401	high_load = 0;
402	low_load = -1;
403
404	mtx_lock_spin(&sched_lock);
405	if (smp_started == 0)
406		goto out;
407
408	for (i = 0; i < mp_maxid; i++) {
409		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
410			continue;
411		kseq = KSEQ_CPU(i);
412		if (kseq->ksq_load > high_load) {
413			high_load = kseq->ksq_load;
414			high_cpu = i;
415		}
416		if (low_load == -1 || kseq->ksq_load < low_load) {
417			low_load = kseq->ksq_load;
418			low_cpu = i;
419		}
420	}
421
422	kseq = KSEQ_CPU(high_cpu);
423
424	high_load = kseq->ksq_load_transferable;
425	/*
426	 * Nothing to do.
427	 */
428	if (high_load < kseq->ksq_cpus + 1)
429		goto out;
430
431	high_load -= kseq->ksq_cpus;
432
433	if (low_load >= high_load)
434		goto out;
435
436	diff = high_load - low_load;
437	move = diff / 2;
438	if (diff & 0x1)
439		move++;
440
441	for (i = 0; i < move; i++)
442		kseq_move(kseq, low_cpu);
443
444out:
445	mtx_unlock_spin(&sched_lock);
446	callout_reset(&kseq_lb_callout, hz, kseq_balance, NULL);
447
448	return;
449}
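
/*
 * Worked example (a sketch): on a two cpu system where CPU 0 has the
 * higher load with 6 transferable kses (ksq_cpus 1) and CPU 1 has a
 * load of 2, the code above computes high_load = 6 - 1 = 5 and
 * diff = 5 - 2 = 3; since diff is odd, move = 3 / 2 + 1 = 2, so two
 * kses are pushed to CPU 1 on this balancing pass.
 */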
450
451static struct kseq *
452kseq_load_highest(void)
453{
454	struct kseq *kseq;
455	int load;
456	int cpu;
457	int i;
458
459	mtx_assert(&sched_lock, MA_OWNED);
460	cpu = 0;
461	load = 0;
462
463	for (i = 0; i < mp_maxid; i++) {
464		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
465			continue;
466		kseq = KSEQ_CPU(i);
467		if (kseq->ksq_load > load) {
468			load = kseq->ksq_load;
469			cpu = i;
470		}
471	}
472	kseq = KSEQ_CPU(cpu);
473
474	if (kseq->ksq_load_transferable > kseq->ksq_cpus)
475		return (kseq);
476
477	return (NULL);
478}
479
480static void
481kseq_move(struct kseq *from, int cpu)
482{
483	struct kse *ke;
484
485	ke = kseq_steal(from);
486	runq_remove(ke->ke_runq, ke);
487	ke->ke_state = KES_THREAD;
488	kseq_rem(from, ke);
489
490	ke->ke_cpu = cpu;
491	sched_add(ke->ke_thread);
492}
493
494static int
495kseq_find(void)
496{
497	struct kseq *high;
498
499	if (!smp_started)
500		return (0);
501	if (kseq_idle & PCPU_GET(cpumask))
502		return (0);
503	/*
504	 * Find the cpu with the highest load and steal one proc.
505	 */
506	if ((high = kseq_load_highest()) == NULL ||
507	    high == KSEQ_SELF()) {
508		/*
509		 * If we couldn't find one, set ourselves in the
510		 * idle map.
511		 */
512		atomic_set_int(&kseq_idle, PCPU_GET(cpumask));
513		return (0);
514	}
515	/*
516	 * Remove this kse from this kseq and runq and then requeue
517	 * on the current processor.  We now have a load of one!
518	 */
519	kseq_move(high, PCPU_GET(cpuid));
520
521	return (1);
522}
523
524static void
525kseq_assign(struct kseq *kseq)
526{
527	struct kse *nke;
528	struct kse *ke;
529
530	do {
531		ke = kseq->ksq_assigned;
532	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke, NULL));
533	for (; ke != NULL; ke = nke) {
534		nke = ke->ke_assign;
535		ke->ke_flags &= ~KEF_ASSIGNED;
536		sched_add(ke->ke_thread);
537	}
538}
539
540static void
541kseq_notify(struct kse *ke, int cpu)
542{
543	struct kseq *kseq;
544	struct thread *td;
545	struct pcpu *pcpu;
546
547	ke->ke_flags |= KEF_ASSIGNED;
548
549	kseq = KSEQ_CPU(cpu);
550
551	/*
552	 * Place a KSE on another cpu's queue and force a resched.
553	 */
554	do {
555		ke->ke_assign = kseq->ksq_assigned;
556	} while(!atomic_cmpset_ptr(&kseq->ksq_assigned, ke->ke_assign, ke));
557	pcpu = pcpu_find(cpu);
558	td = pcpu->pc_curthread;
559	if (ke->ke_thread->td_priority < td->td_priority ||
560	    td == pcpu->pc_idlethread) {
561		td->td_flags |= TDF_NEEDRESCHED;
562		ipi_selected(1 << cpu, IPI_AST);
563	}
564}
565
566static struct kse *
567runq_steal(struct runq *rq)
568{
569	struct rqhead *rqh;
570	struct rqbits *rqb;
571	struct kse *ke;
572	int word;
573	int bit;
574
575	mtx_assert(&sched_lock, MA_OWNED);
576	rqb = &rq->rq_status;
577	for (word = 0; word < RQB_LEN; word++) {
578		if (rqb->rqb_bits[word] == 0)
579			continue;
580		for (bit = 0; bit < RQB_BPW; bit++) {
581			if ((rqb->rqb_bits[word] & (1 << bit)) == 0)
582				continue;
583			rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
584			TAILQ_FOREACH(ke, rqh, ke_procq) {
585				if (KSE_CAN_MIGRATE(ke,
586				    PRI_BASE(ke->ke_ksegrp->kg_pri_class)))
587					return (ke);
588			}
589		}
590	}
591	return (NULL);
592}
593
594static struct kse *
595kseq_steal(struct kseq *kseq)
596{
597	struct kse *ke;
598
599	if ((ke = runq_steal(kseq->ksq_curr)) != NULL)
600		return (ke);
601	if ((ke = runq_steal(kseq->ksq_next)) != NULL)
602		return (ke);
603	return (runq_steal(&kseq->ksq_idle));
604}
605#endif	/* SMP */
606
607/*
608 * Pick the highest priority task we have and return it.
609 */
610
611static struct kse *
612kseq_choose(struct kseq *kseq)
613{
614	struct kse *ke;
615	struct runq *swap;
616
617	mtx_assert(&sched_lock, MA_OWNED);
618	swap = NULL;
619
620	for (;;) {
621		ke = runq_choose(kseq->ksq_curr);
622		if (ke == NULL) {
623			/*
624			 * We already swapped once and didn't get anywhere.
625			 */
626			if (swap)
627				break;
628			swap = kseq->ksq_curr;
629			kseq->ksq_curr = kseq->ksq_next;
630			kseq->ksq_next = swap;
631			continue;
632		}
633		/*
634		 * If we encounter a slice of 0 the kse is in a
635		 * TIMESHARE kse group and its nice was too far out
636		 * of the range that receives slices.
637		 */
638		if (ke->ke_slice == 0) {
639			runq_remove(ke->ke_runq, ke);
640			sched_slice(ke);
641			ke->ke_runq = kseq->ksq_next;
642			runq_add(ke->ke_runq, ke);
643			continue;
644		}
645		return (ke);
646	}
647
648	return (runq_choose(&kseq->ksq_idle));
649}
650
651static void
652kseq_setup(struct kseq *kseq)
653{
654	runq_init(&kseq->ksq_timeshare[0]);
655	runq_init(&kseq->ksq_timeshare[1]);
656	runq_init(&kseq->ksq_idle);
657	kseq->ksq_curr = &kseq->ksq_timeshare[0];
658	kseq->ksq_next = &kseq->ksq_timeshare[1];
659	kseq->ksq_load = 0;
660	kseq->ksq_load_timeshare = 0;
661#ifdef SMP
662	kseq->ksq_load_transferable = 0;
663	kseq->ksq_rslices = 0;
664	kseq->ksq_assigned = NULL;
665#endif
666}
667
668static void
669sched_setup(void *dummy)
670{
671#ifdef SMP
672	int i;
673#endif
674
675	slice_min = (hz/100);	/* 10ms */
676	slice_max = (hz/7);	/* ~140ms */
677
678#ifdef SMP
679	/* init kseqs */
680	/* Create the idmap. */
681#ifdef ULE_HTT_EXPERIMENTAL
682	if (smp_topology == NULL) {
683#else
684	if (1) {
685#endif
686		for (i = 0; i < MAXCPU; i++) {
687			kseq_setup(&kseq_cpu[i]);
688			kseq_idmap[i] = &kseq_cpu[i];
689			kseq_cpu[i].ksq_cpus = 1;
690		}
691	} else {
692		int j;
693
694		for (i = 0; i < smp_topology->ct_count; i++) {
695			struct cpu_group *cg;
696
697			cg = &smp_topology->ct_group[i];
698			kseq_setup(&kseq_cpu[i]);
699
700			for (j = 0; j < MAXCPU; j++)
701				if ((cg->cg_mask & (1 << j)) != 0)
702					kseq_idmap[j] = &kseq_cpu[i];
703			kseq_cpu[i].ksq_cpus = cg->cg_count;
704		}
705	}
706	callout_init(&kseq_lb_callout, CALLOUT_MPSAFE);
707	kseq_balance(NULL);
708#else
709	kseq_setup(KSEQ_SELF());
710#endif
711	mtx_lock_spin(&sched_lock);
712	kseq_add(KSEQ_SELF(), &kse0);
713	mtx_unlock_spin(&sched_lock);
714}
715
716/*
717 * Scale the scheduling priority according to the "interactivity" of this
718 * process.
719 */
720static void
721sched_priority(struct ksegrp *kg)
722{
723	int pri;
724
725	if (kg->kg_pri_class != PRI_TIMESHARE)
726		return;
727
728	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
729	pri += SCHED_PRI_BASE;
730	pri += kg->kg_nice;
731
732	if (pri > PRI_MAX_TIMESHARE)
733		pri = PRI_MAX_TIMESHARE;
734	else if (pri < PRI_MIN_TIMESHARE)
735		pri = PRI_MIN_TIMESHARE;
736
737	kg->kg_user_pri = pri;
738
739	return;
740}
741
742/*
743 * Calculate a time slice based on the properties of the kseg and the runq
744 * that we're on.  This is only for PRI_TIMESHARE ksegrps.
745 */
746static void
747sched_slice(struct kse *ke)
748{
749	struct kseq *kseq;
750	struct ksegrp *kg;
751
752	kg = ke->ke_ksegrp;
753	kseq = KSEQ_CPU(ke->ke_cpu);
754
755	/*
756	 * Rationale:
757	 * KSEs in interactive ksegs get the minimum slice so that we
758	 * quickly notice if it abuses its advantage.
759	 *
760	 * KSEs in non-interactive ksegs are assigned a slice that is
761	 * based on the ksegs nice value relative to the least nice kseg
762	 * on the run queue for this cpu.
763	 *
764	 * If the KSE is less nice than all others it gets the maximum
765	 * slice and other KSEs will adjust their slice relative to
766	 * this when they first expire.
767	 *
768	 * There is a 20 point window that starts relative to the least
769	 * nice kse on the run queue.  Slice size is determined by
770	 * the kse distance from the least nice ksegrp.
771	 *
772	 * If the kse is outside of the window it will get no slice
773	 * and will be reevaluated each time it is selected on the
774	 * run queue.  The exception to this is nice 0 ksegs when
775	 * a nice -20 is running.  They are always granted a minimum
776	 * slice.
777	 */
778	if (!SCHED_INTERACTIVE(kg)) {
779		int nice;
780
781		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
782		if (kseq->ksq_load_timeshare == 0 ||
783		    kg->kg_nice < kseq->ksq_nicemin)
784			ke->ke_slice = SCHED_SLICE_MAX;
785		else if (nice <= SCHED_SLICE_NTHRESH)
786			ke->ke_slice = SCHED_SLICE_NICE(nice);
787		else if (kg->kg_nice == 0)
788			ke->ke_slice = SCHED_SLICE_MIN;
789		else
790			ke->ke_slice = 0;
791	} else
792		ke->ke_slice = SCHED_SLICE_MIN;
793
794	CTR6(KTR_ULE,
795	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
796	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
797	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));
798
799	return;
800}
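
/*
 * Worked example (a sketch assuming hz is 1000, so SLICE_MAX is 142 and
 * SLICE_NTHRESH is 19): with ksq_nicemin at -5, a non-interactive kseg
 * at nice -5 gets the full 142 tick slice, one at nice 0 (distance 5)
 * gets 142 - 5 * 133 / 19 = 107 ticks, and one at nice +15 (distance
 * 20) falls outside the window and gets a zero slice, so kseq_choose()
 * recomputes its slice and requeues it on ksq_next each time it comes
 * up.
 */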
801
802/*
803 * This routine enforces a maximum limit on the amount of scheduling history
804 * kept.  It is called after either the slptime or runtime is adjusted.
805 * This routine will not operate correctly when slp or run times have been
806 * adjusted to more than double their maximum.
807 */
808static void
809sched_interact_update(struct ksegrp *kg)
810{
811	int sum;
812
813	sum = kg->kg_runtime + kg->kg_slptime;
814	if (sum < SCHED_SLP_RUN_MAX)
815		return;
816	/*
817	 * If we have exceeded by more than 1/5th then the algorithm below
818	 * will not bring us back into range.  Dividing by two here forces
819	 * us into the range of [3/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
820	 */
821	if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
822		kg->kg_runtime /= 2;
823		kg->kg_slptime /= 2;
824		return;
825	}
826	kg->kg_runtime = (kg->kg_runtime / 5) * 4;
827	kg->kg_slptime = (kg->kg_slptime / 5) * 4;
828}
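
/*
 * Worked example (a sketch assuming hz is 1000): SCHED_SLP_RUN_MAX is
 * (5 * hz) << 10 = 5,120,000 and the cutoff above is 6,144,000.  A kseg
 * with kg_runtime 4,000,000 and kg_slptime 2,000,000 (sum 6,000,000)
 * takes the 4/5 path and ends up at 3,200,000 and 1,600,000, back under
 * the cap, while a sum above 6,144,000 is simply halved.  Either way
 * the runtime to slptime ratio, and therefore the interactivity score,
 * is roughly preserved.
 */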
829
830static void
831sched_interact_fork(struct ksegrp *kg)
832{
833	int ratio;
834	int sum;
835
836	sum = kg->kg_runtime + kg->kg_slptime;
837	if (sum > SCHED_SLP_RUN_FORK) {
838		ratio = sum / SCHED_SLP_RUN_FORK;
839		kg->kg_runtime /= ratio;
840		kg->kg_slptime /= ratio;
841	}
842}
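
/*
 * Worked example (a sketch assuming hz is 1000): SCHED_SLP_RUN_FORK is
 * (hz / 2) << 10 = 512,000.  A child inheriting kg_runtime 1,536,000
 * and kg_slptime 512,000 (sum 2,048,000) gets ratio 4 and starts out
 * with 384,000 and 128,000: the parent's behaviour pattern, but only
 * half a second's worth of history to outgrow.
 */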
843
844static int
845sched_interact_score(struct ksegrp *kg)
846{
847	int div;
848
849	if (kg->kg_runtime > kg->kg_slptime) {
850		div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
851		return (SCHED_INTERACT_HALF +
852		    (SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
853	} else if (kg->kg_slptime > kg->kg_runtime) {
854		div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
855		return (kg->kg_runtime / div);
856	}
857
858	/*
859	 * This can happen if slptime and runtime are 0.
860	 */
861	return (0);
862
863}
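
/*
 * Worked example: with SCHED_INTERACT_HALF at 50, a kseg that has slept
 * four times as long as it has run (say kg_slptime 4,000,000 versus
 * kg_runtime 1,000,000) scores 1,000,000 / (4,000,000 / 50) = 12, well
 * under SCHED_INTERACT_THRESH, while the mirror image (run four times
 * as long as slept) scores 50 + (50 - 12) = 88 and is treated as a
 * batch job.
 */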
864
865/*
866 * This is only somewhat accurate since given many processes of the same
867 * priority they will switch when their slices run out, which will be
868 * at most SCHED_SLICE_MAX.
869 */
870int
871sched_rr_interval(void)
872{
873	return (SCHED_SLICE_MAX);
874}
875
876static void
877sched_pctcpu_update(struct kse *ke)
878{
879	/*
880	 * Adjust counters and watermark for pctcpu calc.
881	 */
882	if (ke->ke_ltick > ticks - SCHED_CPU_TICKS) {
883		/*
884		 * Shift the tick count out so that the divide doesn't
885		 * round away our results.
886		 */
887		ke->ke_ticks <<= 10;
888		ke->ke_ticks = (ke->ke_ticks / (ticks - ke->ke_ftick)) *
889			    SCHED_CPU_TICKS;
890		ke->ke_ticks >>= 10;
891	} else
892		ke->ke_ticks = 0;
893	ke->ke_ltick = ticks;
894	ke->ke_ftick = ke->ke_ltick - SCHED_CPU_TICKS;
895}
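
/*
 * Worked example (a sketch assuming hz is 1000, so SCHED_CPU_TICKS is
 * 10,000): for a kse that ran recently and accumulated ke_ticks 3,000
 * over a window that has stretched to 12,000 ticks, the rescale above
 * yields ((3000 << 10) / 12000) * 10000 >> 10 = 2,500, the same 25%
 * rate re-expressed over the standard ten second window, and the
 * window is then snapped back to exactly SCHED_CPU_TICKS wide.
 */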
896
897#if 0
898/* XXX Should be changed to kseq_load_lowest() */
899int
900sched_pickcpu(void)
901{
902	struct kseq *kseq;
903	int load;
904	int cpu;
905	int i;
906
907	mtx_assert(&sched_lock, MA_OWNED);
908	if (!smp_started)
909		return (0);
910
911	load = 0;
912	cpu = 0;
913
914	for (i = 0; i < mp_maxid; i++) {
915		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
916			continue;
917		kseq = KSEQ_CPU(i);
918		if (kseq->ksq_load < load) {
919			cpu = i;
920			load = kseq->ksq_load;
921		}
922	}
923
924	CTR1(KTR_ULE, "sched_pickcpu: %d", cpu);
925	return (cpu);
926}
927#endif
928
929void
930sched_prio(struct thread *td, u_char prio)
931{
932	struct kse *ke;
933
934	ke = td->td_kse;
935	mtx_assert(&sched_lock, MA_OWNED);
936	if (TD_ON_RUNQ(td)) {
937		/*
938		 * If the priority has been elevated due to priority
939		 * propagation, we may have to move ourselves to a new
940		 * queue.  We still call adjustrunqueue below in case kse
941		 * needs to fix things up.
942		 */
943		if (prio < td->td_priority && ke &&
944		    (ke->ke_flags & KEF_ASSIGNED) == 0 &&
945		    ke->ke_runq != KSEQ_CPU(ke->ke_cpu)->ksq_curr) {
946			runq_remove(ke->ke_runq, ke);
947			ke->ke_runq = KSEQ_CPU(ke->ke_cpu)->ksq_curr;
948			runq_add(ke->ke_runq, ke);
949		}
950		adjustrunqueue(td, prio);
951	} else
952		td->td_priority = prio;
953}
954
955void
956sched_switch(struct thread *td)
957{
958	struct thread *newtd;
959	struct kse *ke;
960
961	mtx_assert(&sched_lock, MA_OWNED);
962
963	ke = td->td_kse;
964
965	td->td_last_kse = ke;
966        td->td_lastcpu = td->td_oncpu;
967	td->td_oncpu = NOCPU;
968        td->td_flags &= ~TDF_NEEDRESCHED;
969
970	if (TD_IS_RUNNING(td)) {
971		if (td->td_proc->p_flag & P_SA) {
972			kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
973			setrunqueue(td);
974		} else {
975			/*
976			 * This queue is always correct except for idle threads
977			 * which have a higher priority due to priority
978			 * propagation.
979			 */
980			if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE) {
981				if (td->td_priority < PRI_MIN_IDLE)
982					ke->ke_runq = KSEQ_SELF()->ksq_curr;
983				else
984					ke->ke_runq = &KSEQ_SELF()->ksq_idle;
985			}
986			runq_add(ke->ke_runq, ke);
987			/* setrunqueue(td); */
988		}
989	} else {
990		if (ke->ke_runq)
991			kseq_rem(KSEQ_CPU(ke->ke_cpu), ke);
992		/*
993		 * We will not be on the run queue. So we must be
994		 * sleeping or similar.
995		 */
996		if (td->td_proc->p_flag & P_SA)
997			kse_reassign(ke);
998	}
999	newtd = choosethread();
1000	if (td != newtd)
1001		cpu_switch(td, newtd);
1002	sched_lock.mtx_lock = (uintptr_t)td;
1003
1004	td->td_oncpu = PCPU_GET(cpuid);
1005}
1006
1007void
1008sched_nice(struct ksegrp *kg, int nice)
1009{
1010	struct kse *ke;
1011	struct thread *td;
1012	struct kseq *kseq;
1013
1014	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
1015	mtx_assert(&sched_lock, MA_OWNED);
1016	/*
1017	 * We need to adjust the nice counts for running KSEs.
1018	 */
1019	if (kg->kg_pri_class == PRI_TIMESHARE)
1020		FOREACH_KSE_IN_GROUP(kg, ke) {
1021			if (ke->ke_runq == NULL)
1022				continue;
1023			kseq = KSEQ_CPU(ke->ke_cpu);
1024			kseq_nice_rem(kseq, kg->kg_nice);
1025			kseq_nice_add(kseq, nice);
1026		}
1027	kg->kg_nice = nice;
1028	sched_priority(kg);
1029	FOREACH_THREAD_IN_GROUP(kg, td)
1030		td->td_flags |= TDF_NEEDRESCHED;
1031}
1032
1033void
1034sched_sleep(struct thread *td, u_char prio)
1035{
1036	mtx_assert(&sched_lock, MA_OWNED);
1037
1038	td->td_slptime = ticks;
1039	td->td_priority = prio;
1040
1041	CTR2(KTR_ULE, "sleep kse %p (tick: %d)",
1042	    td->td_kse, td->td_slptime);
1043}
1044
1045void
1046sched_wakeup(struct thread *td)
1047{
1048	mtx_assert(&sched_lock, MA_OWNED);
1049
1050	/*
1051	 * Let the kseg know how long we slept for.  This is because process
1052	 * interactivity behavior is modeled in the kseg.
1053	 */
1054	if (td->td_slptime) {
1055		struct ksegrp *kg;
1056		int hzticks;
1057
1058		kg = td->td_ksegrp;
1059		hzticks = (ticks - td->td_slptime) << 10;
1060		if (hzticks >= SCHED_SLP_RUN_MAX) {
1061			kg->kg_slptime = SCHED_SLP_RUN_MAX;
1062			kg->kg_runtime = 1;
1063		} else {
1064			kg->kg_slptime += hzticks;
1065			sched_interact_update(kg);
1066		}
1067		sched_priority(kg);
1068		if (td->td_kse)
1069			sched_slice(td->td_kse);
1070		CTR2(KTR_ULE, "wakeup kse %p (%d ticks)",
1071		    td->td_kse, hzticks);
1072		td->td_slptime = 0;
1073	}
1074	setrunqueue(td);
1075}
1076
1077/*
1078 * Penalize the parent for creating a new child and initialize the child's
1079 * priority.
1080 */
1081void
1082sched_fork(struct proc *p, struct proc *p1)
1083{
1084
1085	mtx_assert(&sched_lock, MA_OWNED);
1086
1087	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
1088	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
1089	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
1090}
1091
1092void
1093sched_fork_kse(struct kse *ke, struct kse *child)
1094{
1095
1096	child->ke_slice = 1;	/* Attempt to quickly learn interactivity. */
1097	child->ke_cpu = ke->ke_cpu; /* sched_pickcpu(); */
1098	child->ke_runq = NULL;
1099
1100	/* Grab our parents cpu estimation information. */
1101	child->ke_ticks = ke->ke_ticks;
1102	child->ke_ltick = ke->ke_ltick;
1103	child->ke_ftick = ke->ke_ftick;
1104}
1105
1106void
1107sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
1108{
1109	PROC_LOCK_ASSERT(child->kg_proc, MA_OWNED);
1110
1111	child->kg_slptime = kg->kg_slptime;
1112	child->kg_runtime = kg->kg_runtime;
1113	child->kg_user_pri = kg->kg_user_pri;
1114	child->kg_nice = kg->kg_nice;
1115	sched_interact_fork(child);
1116	kg->kg_runtime += tickincr << 10;
1117	sched_interact_update(kg);
1118
1119	CTR6(KTR_ULE, "sched_fork_ksegrp: %d(%d, %d) - %d(%d, %d)",
1120	    kg->kg_proc->p_pid, kg->kg_slptime, kg->kg_runtime,
1121	    child->kg_proc->p_pid, child->kg_slptime, child->kg_runtime);
1122}
1123
1124void
1125sched_fork_thread(struct thread *td, struct thread *child)
1126{
1127}
1128
1129void
1130sched_class(struct ksegrp *kg, int class)
1131{
1132	struct kseq *kseq;
1133	struct kse *ke;
1134	int nclass;
1135	int oclass;
1136
1137	mtx_assert(&sched_lock, MA_OWNED);
1138	if (kg->kg_pri_class == class)
1139		return;
1140
1141	nclass = PRI_BASE(class);
1142	oclass = PRI_BASE(kg->kg_pri_class);
1143	FOREACH_KSE_IN_GROUP(kg, ke) {
1144		if (ke->ke_state != KES_ONRUNQ &&
1145		    ke->ke_state != KES_THREAD)
1146			continue;
1147		kseq = KSEQ_CPU(ke->ke_cpu);
1148
1149#ifdef SMP
1150		if (KSE_CAN_MIGRATE(ke, oclass))
1151			kseq->ksq_load_transferable--;
1152		if (KSE_CAN_MIGRATE(ke, nclass))
1153			kseq->ksq_load_transferable++;
1154#endif
1155		if (oclass == PRI_TIMESHARE)
1156			kseq->ksq_load_timeshare--;
1157		if (nclass == PRI_TIMESHARE)
1158			kseq->ksq_load_timeshare++;
1159
1160		if (kg->kg_pri_class == PRI_TIMESHARE)
1161			kseq_nice_rem(kseq, kg->kg_nice);
1162		else if (class == PRI_TIMESHARE)
1163			kseq_nice_add(kseq, kg->kg_nice);
1164	}
1165
1166	kg->kg_pri_class = class;
1167}
1168
1169/*
1170 * Return some of the child's priority and interactivity to the parent.
1171 */
1172void
1173sched_exit(struct proc *p, struct proc *child)
1174{
1175	mtx_assert(&sched_lock, MA_OWNED);
1176	sched_exit_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(child));
1177	sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(child));
1178}
1179
1180void
1181sched_exit_kse(struct kse *ke, struct kse *child)
1182{
1183	kseq_rem(KSEQ_CPU(child->ke_cpu), child);
1184}
1185
1186void
1187sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child)
1188{
1189	/* kg->kg_slptime += child->kg_slptime; */
1190	kg->kg_runtime += child->kg_runtime;
1191	sched_interact_update(kg);
1192}
1193
1194void
1195sched_exit_thread(struct thread *td, struct thread *child)
1196{
1197}
1198
1199void
1200sched_clock(struct thread *td)
1201{
1202	struct kseq *kseq;
1203	struct ksegrp *kg;
1204	struct kse *ke;
1205
1206	/*
1207	 * sched_setup() apparently happens prior to stathz being set.  We
1208	 * need to resolve the timers earlier in the boot so we can avoid
1209	 * calculating this here.
1210	 */
1211	if (realstathz == 0) {
1212		realstathz = stathz ? stathz : hz;
1213		tickincr = hz / realstathz;
1214		/*
1215		 * XXX This does not work for values of stathz that are much
1216		 * larger than hz.
1217		 */
1218		if (tickincr == 0)
1219			tickincr = 1;
1220	}
1221
1222	ke = td->td_kse;
1223	kg = ke->ke_ksegrp;
1224
1225	mtx_assert(&sched_lock, MA_OWNED);
1226	KASSERT((td != NULL), ("schedclock: null thread pointer"));
1227
1228	/* Adjust ticks for pctcpu */
1229	ke->ke_ticks++;
1230	ke->ke_ltick = ticks;
1231
1232	/* Go up to one second beyond our max and then trim back down */
1233	if (ke->ke_ftick + SCHED_CPU_TICKS + hz < ke->ke_ltick)
1234		sched_pctcpu_update(ke);
1235
1236	if (td->td_flags & TDF_IDLETD)
1237		return;
1238
1239	CTR4(KTR_ULE, "Tick kse %p (slice: %d, slptime: %d, runtime: %d)",
1240	    ke, ke->ke_slice, kg->kg_slptime >> 10, kg->kg_runtime >> 10);
1241	/*
1242	 * We only do slicing code for TIMESHARE ksegrps.
1243	 */
1244	if (kg->kg_pri_class != PRI_TIMESHARE)
1245		return;
1246	/*
1247	 * We used a tick; charge it to the ksegrp so that we can compute our
1248	 * interactivity.
1249	 */
1250	kg->kg_runtime += tickincr << 10;
1251	sched_interact_update(kg);
1252
1253	/*
1254	 * We used up one time slice.
1255	 */
1256	ke->ke_slice--;
1257	kseq = KSEQ_SELF();
1258#ifdef SMP
1259	kseq->ksq_rslices--;
1260#endif
1261
1262	if (ke->ke_slice > 0)
1263		return;
1264	/*
1265	 * We're out of time, recompute priorities and requeue.
1266	 */
1267	kseq_rem(kseq, ke);
1268	sched_priority(kg);
1269	sched_slice(ke);
1270	if (SCHED_CURR(kg, ke))
1271		ke->ke_runq = kseq->ksq_curr;
1272	else
1273		ke->ke_runq = kseq->ksq_next;
1274	kseq_add(kseq, ke);
1275	td->td_flags |= TDF_NEEDRESCHED;
1276}
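
/*
 * Worked example (a sketch assuming hz 1000 and stathz 128): tickincr
 * is hz / realstathz = 7, so each stat clock tick charges 7 << 10 =
 * 7,168 to kg_runtime; a kseg that stays runnable for a full second
 * accumulates roughly 128 * 7,168 = 917,504 of runtime history, still
 * well below the SCHED_SLP_RUN_MAX clamp applied by
 * sched_interact_update().
 */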
1277
1278int
1279sched_runnable(void)
1280{
1281	struct kseq *kseq;
1282	int load;
1283
1284	load = 1;
1285
1286	mtx_lock_spin(&sched_lock);
1287	kseq = KSEQ_SELF();
1288#ifdef SMP
1289	if (kseq->ksq_assigned)
1290		kseq_assign(kseq);
1291#endif
1292	if ((curthread->td_flags & TDF_IDLETD) != 0) {
1293		if (kseq->ksq_load > 0)
1294			goto out;
1295	} else
1296		if (kseq->ksq_load - 1 > 0)
1297			goto out;
1298	load = 0;
1299out:
1300	mtx_unlock_spin(&sched_lock);
1301	return (load);
1302}
1303
1304void
1305sched_userret(struct thread *td)
1306{
1307	struct ksegrp *kg;
1308
1309	kg = td->td_ksegrp;
1310
1311	if (td->td_priority != kg->kg_user_pri) {
1312		mtx_lock_spin(&sched_lock);
1313		td->td_priority = kg->kg_user_pri;
1314		mtx_unlock_spin(&sched_lock);
1315	}
1316}
1317
1318struct kse *
1319sched_choose(void)
1320{
1321	struct kseq *kseq;
1322	struct kse *ke;
1323
1324	mtx_assert(&sched_lock, MA_OWNED);
1325	kseq = KSEQ_SELF();
1326#ifdef SMP
1327retry:
1328	if (kseq->ksq_assigned)
1329		kseq_assign(kseq);
1330#endif
1331	ke = kseq_choose(kseq);
1332	if (ke) {
1333#ifdef SMP
1334		if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
1335			if (kseq_find())
1336				goto retry;
1337#endif
1338		runq_remove(ke->ke_runq, ke);
1339		ke->ke_state = KES_THREAD;
1340
1341		if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE) {
1342			CTR4(KTR_ULE, "Run kse %p from %p (slice: %d, pri: %d)",
1343			    ke, ke->ke_runq, ke->ke_slice,
1344			    ke->ke_thread->td_priority);
1345		}
1346		return (ke);
1347	}
1348#ifdef SMP
1349	if (kseq_find())
1350		goto retry;
1351#endif
1352
1353	return (NULL);
1354}
1355
1356void
1357sched_add(struct thread *td)
1358{
1359	struct kseq *kseq;
1360	struct ksegrp *kg;
1361	struct kse *ke;
1362	int class;
1363
1364	mtx_assert(&sched_lock, MA_OWNED);
1365	ke = td->td_kse;
1366	kg = td->td_ksegrp;
1367	if (ke->ke_flags & KEF_ASSIGNED)
1368		return;
1369	kseq = KSEQ_SELF();
1370	KASSERT((ke->ke_thread != NULL), ("sched_add: No thread on KSE"));
1371	KASSERT((ke->ke_thread->td_kse != NULL),
1372	    ("sched_add: No KSE on thread"));
1373	KASSERT(ke->ke_state != KES_ONRUNQ,
1374	    ("sched_add: kse %p (%s) already in run queue", ke,
1375	    ke->ke_proc->p_comm));
1376	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
1377	    ("sched_add: process swapped out"));
1378	KASSERT(ke->ke_runq == NULL,
1379	    ("sched_add: KSE %p is still assigned to a run queue", ke));
1380
1381	class = PRI_BASE(kg->kg_pri_class);
1382	switch (class) {
1383	case PRI_ITHD:
1384	case PRI_REALTIME:
1385		ke->ke_runq = kseq->ksq_curr;
1386		ke->ke_slice = SCHED_SLICE_MAX;
1387		ke->ke_cpu = PCPU_GET(cpuid);
1388		break;
1389	case PRI_TIMESHARE:
1390#ifdef SMP
1391		if (ke->ke_cpu != PCPU_GET(cpuid)) {
1392			kseq_notify(ke, ke->ke_cpu);
1393			return;
1394		}
1395#endif
1396		if (SCHED_CURR(kg, ke))
1397			ke->ke_runq = kseq->ksq_curr;
1398		else
1399			ke->ke_runq = kseq->ksq_next;
1400		break;
1401	case PRI_IDLE:
1402#ifdef SMP
1403		if (ke->ke_cpu != PCPU_GET(cpuid)) {
1404			kseq_notify(ke, ke->ke_cpu);
1405			return;
1406		}
1407#endif
1408		/*
1409		 * This is for priority prop.
1410		 */
1411		if (ke->ke_thread->td_priority < PRI_MIN_IDLE)
1412			ke->ke_runq = kseq->ksq_curr;
1413		else
1414			ke->ke_runq = &kseq->ksq_idle;
1415		ke->ke_slice = SCHED_SLICE_MIN;
1416		break;
1417	default:
1418		panic("Unknown pri class.");
1419		break;
1420	}
1421#ifdef SMP
1422	/*
1423	 * If there are any idle processors, give them our extra load.
1424	 */
1425	if (kseq_idle && class != PRI_ITHD &&
1426	    kseq->ksq_load_transferable >= kseq->ksq_cpus) {
1427		int cpu;
1428
1429		/*
1430		 * Multiple cpus could find this bit simultaneously but the
1431		 * race shouldn't be terrible.
1432		 */
1433		cpu = ffs(kseq_idle);
1434		if (cpu) {
1435			cpu--;
1436			atomic_clear_int(&kseq_idle, 1 << cpu);
1437			ke->ke_cpu = cpu;
1438			ke->ke_runq = NULL;
1439			kseq_notify(ke, cpu);
1440			return;
1441		}
1442	}
1443	if (class == PRI_TIMESHARE || class == PRI_REALTIME)
1444		atomic_clear_int(&kseq_idle, PCPU_GET(cpumask));
1445#endif
1446        if (td->td_priority < curthread->td_priority)
1447                curthread->td_flags |= TDF_NEEDRESCHED;
1448
1449	ke->ke_ksegrp->kg_runq_kses++;
1450	ke->ke_state = KES_ONRUNQ;
1451
1452	runq_add(ke->ke_runq, ke);
1453	kseq_add(kseq, ke);
1454}
1455
1456void
1457sched_rem(struct thread *td)
1458{
1459	struct kseq *kseq;
1460	struct kse *ke;
1461
1462	ke = td->td_kse;
1463	/*
1464	 * It is safe to just return here because sched_rem() is only ever
1465	 * used in places where we're immediately going to add the
1466	 * kse back on again.  In that case it'll be added with the correct
1467	 * thread and priority when the caller drops the sched_lock.
1468	 */
1469	if (ke->ke_flags & KEF_ASSIGNED)
1470		return;
1471	mtx_assert(&sched_lock, MA_OWNED);
1472	KASSERT((ke->ke_state == KES_ONRUNQ), ("KSE not on run queue"));
1473
1474	ke->ke_state = KES_THREAD;
1475	ke->ke_ksegrp->kg_runq_kses--;
1476	kseq = KSEQ_CPU(ke->ke_cpu);
1477	runq_remove(ke->ke_runq, ke);
1478	kseq_rem(kseq, ke);
1479}
1480
1481fixpt_t
1482sched_pctcpu(struct thread *td)
1483{
1484	fixpt_t pctcpu;
1485	struct kse *ke;
1486
1487	pctcpu = 0;
1488	ke = td->td_kse;
1489	if (ke == NULL)
1490		return (0);
1491
1492	mtx_lock_spin(&sched_lock);
1493	if (ke->ke_ticks) {
1494		int rtick;
1495
1496		/*
1497		 * Don't update more frequently than twice a second.  Allowing
1498		 * this causes the cpu usage to decay away too quickly due to
1499		 * rounding errors.
1500		 */
1501		if (ke->ke_ltick < (ticks - (hz / 2)))
1502			sched_pctcpu_update(ke);
1503		/* How many rtick per second ? */
1504		rtick = min(ke->ke_ticks / SCHED_CPU_TIME, SCHED_CPU_TICKS);
1505		pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
1506	}
1507
1508	ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
1509	mtx_unlock_spin(&sched_lock);
1510
1511	return (pctcpu);
1512}
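
/*
 * Worked example (a sketch assuming stathz is 128): a kse that ran for
 * half of the last ten seconds accumulates roughly 640 stat clock
 * ticks, so rtick = 640 / SCHED_CPU_TIME = 64 and
 * pctcpu = (FSCALE * ((FSCALE * 64) / 128)) >> FSHIFT = FSCALE / 2,
 * which ps(1) reports as 50% cpu.
 */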
1513
1514int
1515sched_sizeof_kse(void)
1516{
1517	return (sizeof(struct kse) + sizeof(struct ke_sched));
1518}
1519
1520int
1521sched_sizeof_ksegrp(void)
1522{
1523	return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
1524}
1525
1526int
1527sched_sizeof_proc(void)
1528{
1529	return (sizeof(struct proc));
1530}
1531
1532int
1533sched_sizeof_thread(void)
1534{
1535	return (sizeof(struct thread) + sizeof(struct td_sched));
1536}
1537