/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 164936 2006-12-06 06:34:57Z julian $");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

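/*
 * The run-queue status bitmap must provide exactly one bit per run queue:
 * RQB_LEN words of RQB_BPW bits each have to cover all RQ_NQS queues.
 */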
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
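/* From user space this reads back as 1 or 0 via "sysctl kern.sched.preemption". */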
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
struct thread *
choosethread(void)
{
	struct td_sched *ts;
	struct thread *td;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on the APs */
		td = PCPU_GET(idlethread);
		ts = td->td_sched;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ts->ts_flags |= TSF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ts = sched_choose();
	if (ts) {
		td = ts->ts_thread;
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ts = td->td_sched;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ts->ts_flags |= TSF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}


#if 0
/*
 * Currently not used: threads remove themselves from the
 * run queue by running.
 */
static void
remrunqueue(struct thread *td)
{
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/* remove from sys run queue */
	sched_rem(td);
	return;
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct td_sched *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ts = td->td_sched;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/* We only care about the td_sched in the run queue. */
	td->td_priority = newpri;
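	/*
	 * Only requeue the thread if its queue index actually changes: the
	 * default schedulers map RQ_PPQ priority levels onto each queue,
	 * while SCHED_CORE uses one queue per priority level.
	 */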
#ifndef SCHED_CORE
	if (ts->ts_rqindex != (newpri / RQ_PPQ))
#else
	if (ts->ts_rqindex != newpri)
#endif
	{
		sched_rem(td);
		sched_add(td, SRQ_BORING);
	}
}

void
setrunqueue(struct thread *td, int flags)
{

	CTR2(KTR_RUNQ, "setrunqueue: td:%p pid:%d",
	    td, td->td_proc->p_pid);
	CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	sched_add(td, flags);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
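/*
 * Nesting is tracked in the per-thread counter td_critnest; only the
 * owning thread ever modifies its own count, so entering and leaving a
 * critical section needs no locking.
 */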
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
#ifdef PREEMPTION
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_owepreempt) {
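			/*
			 * Raise td_critnest again while acquiring sched_lock
			 * so that a preemption attempted from an interrupt in
			 * this window is deferred rather than taken
			 * immediately; the count is dropped once the lock is
			 * held and mi_switch() then performs the switch.
			 */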
			td->td_critnest = 1;
			mtx_lock_spin(&sched_lock);
			td->td_critnest--;
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
	} else
#endif
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

/*
 * This function is called when a thread is about to be put on run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether we should immediately preempt to the new thread.  If so,
 * it switches to it and eventually returns true.  If not, it returns false
 * so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd) || td->td_sched->ts_state != TSS_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
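	/*
	 * Without FULL_PREEMPTION, preempt only to interrupt-priority
	 * threads, or when the current thread runs at idle priority.
	 */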
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}

	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ts_state != TSS_ONRUNQ);
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run queue structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
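/*
 * For example, with 32-bit status words a queue at index 35 sets bit 3 of
 * word 1; RQB_FFS() yields the zero-based bit position (3) and the
 * (i << RQB_L2BPW) term adds back the 32 queues covered by word 0.
 */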
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct td_sched *ts, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ts->ts_thread->td_priority / RQ_PPQ;
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
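	/*
	 * A preempted thread goes to the head of its queue so it resumes
	 * ahead of other threads at the same priority; ordinary additions
	 * go to the tail.
	 */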
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority thread on the run queue.
 */
struct td_sched *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; values of 0 or less are ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct td_sched *ts2;
			ts2 = ts = TAILQ_FIRST(rqh);

			while (count-- && ts2) {
				if (ts2->ts_thread->td_lastcpu == cpu) {
					ts = ts2;
					break;
				}
				ts2 = TAILQ_NEXT(ts2, ts_procq);
			}
		} else
#endif
			ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ts->ts_state afterwards.
 */
void
runq_remove(struct runq *rq, struct td_sched *ts)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ts->ts_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	KASSERT(ts != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ts, ts_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler-specific per-process resources.
 * The thread and proc have already been linked in.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct thread *td)
{
}

/*
 * A thread is either being created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ts;

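	/*
	 * The scheduler-private data sits immediately after struct thread
	 * in the same allocation, so no separate allocation is needed here.
	 */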
	ts = (struct td_sched *) (td + 1);
	bzero(ts, sizeof(*ts));
	td->td_sched     = ts;
	ts->ts_thread	= td;
	ts->ts_state	= TSS_THREAD;
}

/*
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct proc *p)
{
}

/*
 * Change the concurrency of an existing proc to N
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct proc *p, int concurrency)
{
}

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which is only called from
 * thread_exit() when a thread exits without the rest of its process,
 * because sched_exit_thread() is also called from sched_exit() and we
 * would not want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{
}

#endif /* KERN_SWITCH_INCLUDE */