kern_switch.c revision 136170
/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If X of these KSEs are actually running at the
moment in question, then at most M (= N - X) of these KSEs are on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If M or more threads are runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to be run, we know
it was associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned we know M was 1 and must
now be 0. Since the thread is no longer queued, the last assigned pointer
must no longer point at it. Since we know there were no more KSEs
available (M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available; we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, so we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, its
priority can be compared with that of the last assigned thread to see
whether it should 'steal' that thread's KSE or not, i.e. whether it lands
'earlier' on the list than that thread or later. If it's earlier, the KSE
is removed from the last assigned thread (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/
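
/*
 * A minimal, non-compiled sketch (hence the #if 0) of the slot-stealing
 * step described above, using the real field and helper names from this
 * file but omitting all locking and bookkeeping.  The authoritative
 * version is setrunqueue() below.
 */
#if 0
static void
sketch_steal_slot(struct ksegrp *kg, struct thread *td)
{
	struct thread *tda = kg->kg_last_assigned;

	if (kg->kg_avail_opennings <= 0 &&
	    tda != NULL && tda->td_priority > td->td_priority) {
		/* New thread outranks the lowest assigned one: take its slot. */
		sched_rem(tda);		/* off the system run queue */
		kg->kg_last_assigned = TAILQ_PREV(tda, threadqueue, td_runq);
		SLOT_RELEASE(kg);	/* the slot is now free for td */
	}
}
#endif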

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 136170 2004-10-05 22:03:10Z julian $");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
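
/*
 * The assertion above checks that the status-bit array covers every run
 * queue.  With the stock runq.h parameters (illustrative values: RQ_NQS
 * = 64 queues and, for a 32-bit status word, RQB_BPW = 32 bits per word,
 * RQB_LEN = 2 words), 32 * 2 == 64 and the assertion holds.  The
 * authoritative definitions live in <sys/runq.h> and <machine/runq.h>.
 */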

#define td_kse td_sched

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			sched_add(td, SRQ_BORING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* no threads to use up the slots. quit now */
			break;
		}
	}
}
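
/*
 * Worked example for slot_fill() (hypothetical threads): with a ksegrp
 * queue T1-T2-T3 where kg_last_assigned points at T2, a freed slot goes
 * to T3 (the first unassigned thread) and kg_last_assigned advances to
 * T3.  With kg_last_assigned NULL, the head of the queue gets the slot.
 */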

#ifdef	SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* remove from sys run queue and free up a slot */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}

int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
			("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);
			}
			kg->kg_avail_opennings = 1;
		}
		sched_add(td, flags);
		return;
	}

	/*
	 * If the concurrency has been reduced, and we would go in the
	 * assigned section, then keep removing entries from the
	 * system run queue, until we are not in that section
	 * or there is room for us to be put in that section.
	 * What we MUST avoid is the case where there are threads of lower
	 * priority than the new one scheduled, but the new one cannot
	 * be scheduled itself. That would lead to a non-contiguous set
	 * of scheduled threads, and everything would break.
	 */
	tda = kg->kg_last_assigned;
	while ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
		SLOT_RELEASE(kg);
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			kg->kg_runnable++;
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		kg->kg_runnable++;
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and if needed, readjust the last_assigned pointer.
	 * It may be that we need to schedule something anyhow
	 * even if the available slot count is negative, so that
	 * all the items earlier than last_assigned are scheduled.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the slot.. (maybe us)
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
			td, td->td_ksegrp, td->td_proc->p_pid);
	}
}
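
/*
 * Worked trace of the slot stealing above (hypothetical threads; lower
 * numbers mean higher priority): ksegrp queue A(pri 5)-B(pri 10), both
 * assigned, last_assigned = B, no openings.  A new thread C(pri 7)
 * arrives: B's slot is stolen (sched_rem(B), last_assigned backs up to
 * A, SLOT_RELEASE), C is inserted between A and B, and since A's
 * priority beats C's we are past last_assigned, so the freed slot goes
 * to TAILQ_NEXT(A) == C, which is sched_add()ed and becomes the new
 * last_assigned.  B stays queued off the ksegrp, waiting for a slot.
 */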

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter(td);
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_pflags & TDP_OWEPREEMPT) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
#endif
		td->td_critnest = 0;
		cpu_critical_exit(td);
	} else {
		td->td_critnest--;
	}
}
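
/*
 * Hypothetical usage of the pair above.  Critical sections nest: only
 * the outermost critical_exit() re-enables preemption and performs any
 * context switch deferred via TDP_OWEPREEMPT.  Sketch only.
 */
#if 0
	critical_enter();	/* td_critnest 0 -> 1; preemption deferred */
	critical_enter();	/* nested: td_critnest 1 -> 2 */
	/* ... access per-CPU state safely ... */
	critical_exit();	/* td_critnest 2 -> 1; still critical */
	critical_exit();	/* 1 -> 0; any deferred switch happens here */
#endif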

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether we should immediately preempt to the new thread.  If so,
 * it switches to it and eventually returns true.  If not, it returns false
 * so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a realtime priority, the
	 *    current thread's priority is not an idle priority, and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	  ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
			("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
	    td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_pflags |= TDP_OWEPREEMPT;
		return (0);
	}

	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
	if (td->td_proc->p_flag & P_HADTHREADS) {
		/*
		 * If this is a threaded process we actually ARE on the
		 * ksegrp run queue so take it off that first.
		 * Also undo any damage done to the last_assigned pointer.
		 * XXX Fix setrunqueue so this isn't needed
		 */
		struct ksegrp *kg;

		kg = td->td_ksegrp;
		if (kg->kg_last_assigned == td)
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	}

	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
#else
	return (0);
#endif
}
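
/*
 * Example of the policy above (illustrative priorities): without
 * FULL_PREEMPTION, a newly-runnable interrupt thread (priority within
 * PRI_MIN_ITHD..PRI_MAX_ITHD) preempts a timeshare thread immediately,
 * while a timeshare thread waking at priority 120 does not preempt one
 * running at priority 140; it is simply queued for later.
 */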

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}

static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
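
/*
 * Worked example of the index arithmetic above (illustrative values,
 * assuming 32-bit status words, i.e. RQB_L2BPW == 5, and RQB_FFS
 * returning the zero-based index of the lowest set bit): if word 0 is
 * empty and word 1 holds 0x00000009, the lowest set bit is bit 0, so
 * the result is 0 + (1 << 5) = 32: queue 32 is the highest-priority
 * non-empty queue.
 */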

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
	}
}
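
/*
 * Illustrative mapping done by runq_add() (assuming the stock RQ_PPQ of
 * 4 priorities per queue): a thread at priority 100 lands on queue
 * 100 / 4 = 25, sharing it with priorities 100-103.  Preempted threads
 * go to the head of their queue so they resume before their peers.
 */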

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal.. 0 or less are ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;
			ke2 = ke = TAILQ_FIRST(rqh);

			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
			ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke != NULL, ("runq_remove: no KSE given"));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler-specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}
/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

	ke = (struct td_sched *) (td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched	= ke;
	ke->ke_thread	= td;
	ke->ke_oncpu	= NOCPU;
	ke->ke_state	= KES_THREAD;
}

/*
 * Set up an initial concurrency of 1.
 * May be used "offline" (before the ksegrp is attached to the world)
 * and thus wouldn't need schedlock in that case.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	CTR1(KTR_RUNQ, "kg %p init slots and concurrency to 1", kg);
	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	CTR4(KTR_RUNQ, "kg %p set concurrency to %d, slots %d -> %d",
	    kg,
	    concurrency,
	    kg->kg_avail_opennings,
	    kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}
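
/*
 * Worked example (hypothetical numbers): a ksegrp with kg_concurrency 2
 * and kg_avail_opennings 1 (one slot in use) that is resized to a
 * concurrency of 4 gains 4 - 2 = 2 openings, ending with
 * kg_avail_opennings 3.  The delta form preserves slots already in use.
 */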

/*
 * Called from thread_exit() for every exiting thread.
 *
 * Not to be confused with sched_exit_thread(), which is only called from
 * thread_exit() for threads exiting without the rest of the process
 * exiting, because it is also called from sched_exit() and we wouldn't
 * want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	SLOT_RELEASE(td->td_ksegrp);
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */