kern_switch.c revision 135051
/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg).  If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running.  The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned.  If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is taken off the run queue to be run, we know it was
associated with the highest priority thread in the queue (at the head
of the queue).  If it is also the last assigned, we know M was 1 and must
now be 0.  Since the thread is no longer queued, the pointer must no
longer refer to it.  Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available; we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, so we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, its
priority can be compared with that of the last assigned thread to see
whether it should 'steal' its KSE or not, i.e. whether it is 'earlier'
on the list than that thread or later.  If it is earlier, the KSE is
removed from the last assigned thread (which then no longer has a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available, and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP.  If this situation changes, the KSEs are
reassigned to keep this true.
***/

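/*
 * Illustrative sketch (hypothetical, not part of this revision): a sanity
 * check of the invariant described above.  Every thread from the head of
 * kg_runq up to and including kg_last_assigned holds a slot (KSE); every
 * thread after it is waiting for one.
 */
#if 0
static void
ksegrp_check_last_assigned(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_FOREACH(td, &kg->kg_runq, td_runq) {
		if (td == kg->kg_last_assigned)
			return;	/* On the queue, as required. */
	}
	/* Ran off the end: no queued thread may hold a slot. */
	KASSERT(kg->kg_last_assigned == NULL,
	    ("kg_last_assigned points off the ksegrp run queue"));
}
#endif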
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 135051 2004-09-10 21:04:38Z julian $");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that, find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this is where it would be done.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run the idle thread on the APs. */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread. */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* Note that it is no longer on the run queue. */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread.
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			kg->kg_avail_opennings--;
			sched_add(td, SRQ_BORING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* No threads to use up the slots; quit now. */
			break;
		}
	}
}

#ifdef SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* Remove from the system run queue and free up a slot. */
		sched_rem(td);
		kg->kg_avail_opennings++;
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		kg->kg_avail_opennings++;
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process. */
	kg = td->td_ksegrp;
	TD_SET_CAN_RUN(td);
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
		kg->kg_avail_opennings++;
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}
int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;
	int count;

	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);
			}
			kg->kg_avail_opennings = 1;
		}
		kg->kg_avail_opennings--;
		sched_add(td, flags);
		return;
	}

	tda = kg->kg_last_assigned;
	if ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
		kg->kg_avail_opennings++;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	count = 0;
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			kg->kg_runnable++;
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
		/* XXX Debugging hack */
		if (++count > 10000) {
			printf("setrunqueue(): corrupt kg_runq, td= %p\n", td);
			panic("deadlock in setrunqueue");
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		kg->kg_runnable++;
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and if needed, readjust the last_assigned pointer.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned, so whoever is first
			 * gets the KSE we brought in (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		kg->kg_avail_opennings--;
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
		    td, td->td_ksegrp, td->td_proc->p_pid);
	}
}
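
/*
 * Worked example of the above (lower priority number is better):
 * kg_avail_opennings is 0 and kg_runq holds A(4) B(8) C(12) with
 * kg_last_assigned == B, i.e. A and B hold the two slots.  setrunqueue()
 * of a new thread D(6) commandeers B's slot (sched_rem(B), last_assigned
 * backs up to A, one opening freed), queues D between A and B, and then
 * hands the freed slot to TAILQ_NEXT(A) == D, leaving last_assigned == D.
 * The two slots again cover the two highest-priority queued threads.
 */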

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter(td);
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_pflags & TDP_OWEPREEMPT) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
#endif
		td->td_critnest = 0;
		cpu_critical_exit(td);
	} else {
		td->td_critnest--;
	}
}
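
/*
 * Illustrative sketch (hypothetical; my_pcpu_work() is a made-up name):
 * the usual pattern for the pair above.  Code that must not be preempted,
 * e.g. while it manipulates per-CPU state, brackets the work with
 * critical_enter()/critical_exit(); a preemption that arrives meanwhile
 * is deferred via TDP_OWEPREEMPT and performed in critical_exit() when
 * td_critnest drops back to the outermost level.
 */
#if 0
static void
my_pcpu_work(void)
{
	critical_enter();	/* Preemption deferred from here... */
	/* ... manipulate per-CPU state safely ... */
	critical_exit();	/* ...to here; may mi_switch() if one is owed. */
}
#endif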

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the kernel should immediately preempt to the new
 * thread.  If so, it switches to it and eventually returns true.  If not,
 * it returns false so that the caller may place the thread on an
 * appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a realtime priority, the
	 *    current thread's priority is not an idle priority, and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
	    td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_pflags |= TDP_OWEPREEMPT;
		return (0);
	}

	/*
	 * Our thread state says that we are already on a run queue, so
	 * update our state as if we had been dequeued by choosethread().
	 */
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL, td);
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
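
/*
 * For example, with RQB_BPW bits per status word, priority queue index
 * pri has its status bit in word RQB_WORD(pri) == pri >> RQB_L2BPW at
 * bit RQB_BIT(pri) == 1 << (pri & (RQB_BPW - 1)), so the two-level scan
 * above (first non-zero word, then RQB_FFS within it) yields the
 * lowest-numbered, i.e. highest-priority, non-empty queue.
 */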

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects and does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; 0 or less is ignored. */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;
			ke2 = ke = TAILQ_FIRST(rqh);

			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
			ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * The caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}
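
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * add/remove pair above is all that is needed to move a KSE between
 * priority levels.  Because RQ_PPQ priorities share each queue, a move
 * is only needed when the queue index changes, which is exactly the test
 * adjustrunqueue() makes for unthreaded processes.
 */
#if 0
static void
runq_requeue(struct runq *rq, struct kse *ke, int newpri)
{

	mtx_assert(&sched_lock, MA_OWNED);
	ke->ke_thread->td_priority = newpri;
	if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
		runq_remove(rq, ke);	/* Clears the bit if the queue empties. */
		runq_add(rq, ke);	/* Recomputes the index from td_priority. */
	}
}
#endif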

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}

/*
 * Called by the UMA process fini routine.
 * Undo anything we may have done in the uma_init method.
 * Panic if it's not all 1:1:1:1.
 * Called from:
 *  proc_fini() (UMA method)
 */
void
sched_destroyproc(struct proc *p)
{

	/* This function is slated for destruction. */
	KASSERT((p->p_numthreads == 1), ("Cached proc with > 1 thread "));
	KASSERT((p->p_numksegrps == 1), ("Cached proc with > 1 ksegrp "));
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (may go away)
 *  thread_init()  (may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

	ke = (struct td_sched *)(td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched	= ke;
	ke->ke_thread	= td;
	ke->ke_oncpu	= NOCPU;
	ke->ke_state	= KES_THREAD;
}
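
/*
 * Note: the "td + 1" above relies on the thread allocator having reserved
 * room for the scheduler-private td_sched directly after the struct
 * thread in the same allocation, i.e. the thread zone must be sized for
 * both structures.
 */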

/*
 * Set up an initial concurrency of 1 and set the given thread
 * (if given) to be using that concurrency slot.
 * May be used "offline", i.e. before the ksegrp is attached to the world,
 * and thus wouldn't need sched_lock in that case.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	/* Handle the case for a declining concurrency */
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}
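
/*
 * Example of the arithmetic above: a ksegrp with kg_concurrency == 4 and
 * kg_avail_opennings == 1 (three slots in use) that is reduced to a
 * concurrency of 2 ends up with kg_avail_opennings == 1 + (2 - 4) == -1;
 * the count climbs back toward zero as the running threads return their
 * slots (e.g. via sched_thread_exit() below).
 */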

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which is called from
 * thread_exit() only for threads exiting without the rest of the process
 * exiting, because it is also called from sched_exit() and we wouldn't
 * want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	td->td_ksegrp->kg_avail_opennings++;
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */