/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 170293 2007-06-04 23:50:30Z jeff $");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#include <machine/cpu.h>

/* Change the #if 0 below to #if 1 to log critical_enter/exit with KTR. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
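
/*
 * A worked example of the status bitmap layout (a sketch assuming the
 * standard <sys/runq.h> definitions: RQ_NQS == 64 queues and 32-bit
 * status words, so RQB_BPW == 32, RQB_L2BPW == 5 and RQB_LEN == 2):
 * queue 37 maps to word RQB_WORD(37) == 37 >> 5 == 1, and within that
 * word to bit RQB_BIT(37) == 1ul << (37 & 31) == 1ul << 5.
 */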

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
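
/*
 * For example, reading the knob from user space (a usage sketch):
 *
 *	$ sysctl kern.sched.preemption
 *	kern.sched.preemption: 1
 */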

#ifdef SCHED_STATS
long switch_preempt;
long switch_owepreempt;
long switch_turnstile;
long switch_sleepq;
long switch_sleepqtimo;
long switch_relinquish;
long switch_needresched;
static SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, preempt, CTLFLAG_RD, &switch_preempt, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, owepreempt, CTLFLAG_RD, &switch_owepreempt, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, turnstile, CTLFLAG_RD, &switch_turnstile, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, sleepq, CTLFLAG_RD, &switch_sleepq, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, sleepqtimo, CTLFLAG_RD, &switch_sleepqtimo, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, relinquish, CTLFLAG_RD, &switch_relinquish, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, needresched, CTLFLAG_RD, &switch_needresched, 0, "");
static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	switch_preempt = 0;
	switch_owepreempt = 0;
	switch_turnstile = 0;
	switch_sleepq = 0;
	switch_sleepqtimo = 0;
	switch_relinquish = 0;
	switch_needresched = 0;

	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
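
/*
 * Writing any nonzero value clears the counters, e.g. (a usage sketch):
 *
 *	# sysctl kern.sched.stats.reset=1
 */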
#endif

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
struct thread *
choosethread(void)
{
	struct thread *td;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs. */
		td = PCPU_GET(idlethread);
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	td = sched_choose();

	/*
	 * If we are panicking, only allow system threads and the thread
	 * we are running in to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* Note that it is no longer on the run queue. */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
#ifdef PREEMPTION
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		if (td->td_owepreempt) {
			td->td_critnest = 1;
			thread_lock(td);
			td->td_critnest--;
			SCHED_STAT_INC(switch_owepreempt);
			mi_switch(SW_INVOL, NULL);
			thread_unlock(td);
		}
	} else
#endif
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}
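
#if 0
/*
 * Example usage (an illustrative sketch, not compiled into the kernel):
 * critical sections nest, so code that must not be preempted simply
 * brackets the region; preemption deferred inside an inner section is
 * not acted on until the outermost critical_exit().
 */
static void
critical_nesting_example(void)
{

	critical_enter();		/* td_critnest: 0 -> 1 */
	critical_enter();		/* td_critnest: 1 -> 2 (nested) */
	/* ... touch state that must not be preempted away ... */
	critical_exit();		/* td_critnest: 2 -> 1, no switch yet */
	critical_exit();		/* 1 -> 0; deferred preemption happens */
}
#endif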

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether we should immediately preempt to the new thread.  If
 * so, it switches to it and eventually returns true.  If not, it returns
 * false so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(ctd->td_lock == &sched_lock);
	MPASS(td->td_lock == &sched_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	SCHED_STAT_INC(switch_preempt);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}
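
#if 0
/*
 * A sketch (not compiled) of the caller protocol described above.  The
 * real callers are the schedulers' sched_add() implementations; the run
 * queue parameter and function name here are hypothetical, and locking
 * is elided (callers hold the thread lock).
 */
static void
sched_add_sketch(struct runq *rq, struct thread *td, int flags)
{

	TD_SET_RUNQ(td);		/* Mark runnable; not queued yet. */
	if (maybe_preempt(td))
		return;			/* Preempted directly to td. */
	runq_add(rq, td->td_sched, flags);	/* Otherwise, queue it. */
}
#endif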

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run queue structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
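
/*
 * Find the index of the first non-empty run queue at or after the queue
 * indexed by start, wrapping around to the front of the bitmap if nothing
 * is found in the later queues.  For example (assuming 32-bit status
 * words), start == 37 begins the scan at bit 5 of word 1 and, failing
 * that, restarts from queue 0.
 */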

static __inline int
runq_findbit_from(struct runq *rq, u_char start)
{
	struct rqbits *rqb;
	int bit;
	int pri;
	int i;

	rqb = &rq->rq_status;
	bit = start & (RQB_BPW - 1);
	pri = 0;
	CTR1(KTR_RUNQ, "runq_findbit_from: start %d", start);
again:
	for (i = RQB_WORD(start); i < RQB_LEN; i++) {
		CTR3(KTR_RUNQ, "runq_findbit_from: bits %d = %#x bit = %d",
		    i, rqb->rqb_bits[i], bit);
		if (rqb->rqb_bits[i]) {
			if (bit != 0) {
				for (pri = bit; pri < RQB_BPW; pri++)
					if (rqb->rqb_bits[i] & (1ul << pri))
						break;
				bit = 0;
				if (pri >= RQB_BPW)
					continue;
			} else
				pri = RQB_FFS(rqb->rqb_bits[i]);
			pri += (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}
		bit = 0;
	}
	if (start != 0) {
		CTR0(KTR_RUNQ, "runq_findbit_from: restarting");
		start = 0;
		goto again;
	}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct td_sched *ts, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ts->ts_thread->td_priority / RQ_PPQ;
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}
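
/*
 * For example (assuming the standard <sys/runq.h> value RQ_PPQ == 4):
 * a thread at priority 100 lands on queue 100 / 4 == 25, which it shares
 * with priorities 100 through 103.
 */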

/*
 * Like runq_add(), but insert at the queue given by the index pri instead
 * of deriving it from the thread's priority.
 */
void
runq_add_pri(struct runq *rq, struct td_sched *ts, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add_pri: td=%p ts=%p pri=%d idx=%d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}

/*
 * Return true if there are runnable threads of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority thread on the run queue.
 */
struct td_sched *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; 0 or less are ignored. */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct td_sched *ts2;
			ts2 = ts = TAILQ_FIRST(rqh);

			while (count-- && ts2) {
				if (ts2->ts_thread->td_lastcpu == cpu) {
					ts = ts2;
					break;
				}
				ts2 = TAILQ_NEXT(ts2, ts_procq);
			}
		} else
#endif
			ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

struct td_sched *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose_from: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d ts=%p idx=%d rqh=%p",
		    pri, ts, ts->ts_rqindex, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set state afterwards.
 */
void
runq_remove(struct runq *rq, struct td_sched *ts)
{

	runq_remove_idx(rq, ts, NULL);
}

void
runq_remove_idx(struct runq *rq, struct td_sched *ts, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
	    ("runq_remove_idx: process swapped out"));
	pri = ts->ts_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d", pri));
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove_idx: td=%p, ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	{
		/* Sanity check: ts must be on the queue its rqindex names. */
		struct td_sched *nts;

		TAILQ_FOREACH(nts, rqh, ts_procq)
			if (nts == ts)
				break;
		if (ts != nts)
			panic("runq_remove_idx: ts %p not on rqindex %d",
			    ts, pri);
	}
	TAILQ_REMOVE(rqh, ts, ts_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}

/****** Functions that are temporarily here ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler-specific per-process resources.
 * The thread and proc have already been linked in.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct thread *td)
{
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ts;

	ts = (struct td_sched *)(td + 1);
	bzero(ts, sizeof(*ts));
	td->td_sched = ts;
	ts->ts_thread = td;
}

/*
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct proc *p)
{
}

/*
 * Change the concurrency of an existing proc to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct proc *p, int concurrency)
{
}

#endif /* KERN_SWITCH_INCLUDE */