/*
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (N-X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to become runnable, we know
it was associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned we know M was 1 and must
now be 0. Since the thread is no longer queued, that pointer must be
removed from it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so we can
prove that the next thread in the ksegrp list will not have a KSE to
assign to it, and thus that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not, i.e. whether it is 'earlier'
on the list than that thread or later. If it is earlier, then the KSE is
removed from the last assigned thread (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/
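
/*
 * Worked example (illustrative numbers only, not taken from the code
 * below): with N = 4 processors and X = 3 KSEs currently running, at
 * most M = N - X = 1 KSE can be on the run queue.  If five runnable
 * threads are queued off the ksegrp, only the highest-priority one is
 * preassigned that KSE and kg->kg_last_assigned points at it; the
 * other four wait on the ksegrp queue with no KSE.
 */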

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 135295 2004-09-16 07:12:59Z julian $");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in a panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			sched_add(td, SRQ_BORING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* No threads to use up the slots; quit now. */
			break;
		}
	}
}

#ifdef	SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* Remove from the system run queue and free up a slot. */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}
int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
			("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);
			}
			kg->kg_avail_opennings = 1;
		}
		sched_add(td, flags);
		return;
	}

	/*
	 * If the concurrency has been reduced, and we would go in the
	 * assigned section, then keep removing entries from the
	 * system run queue, until we are not in that section
	 * or there is room for us to be put in that section.
	 * What we MUST avoid is the case where there are threads of lower
	 * priority than the new one already scheduled, but the new one
	 * cannot be scheduled itself. That would lead to a non-contiguous
	 * set of scheduled threads, and everything would break.
	 */
	tda = kg->kg_last_assigned;
	while ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
		kg->kg_avail_opennings++;
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			kg->kg_runnable++;
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		kg->kg_runnable++;
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and if needed, readjust the last_assigned pointer.
	 * It may be that we need to schedule something anyhow
	 * even if the available slots are negative, so that
	 * all the items < last_assigned are scheduled.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned, so whoever is
			 * first gets the slot (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
			td, td->td_ksegrp, td->td_proc->p_pid);
	}
}
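
/*
 * Illustrative scenario (assumed numbers, not traced from a real run):
 * with no free slots and kg_last_assigned pointing at a thread B of
 * priority 120, a newly runnable thread C of priority 100 (numerically
 * lower, i.e. better) triggers the commandeering loop above: B is
 * removed from the system run queue, last_assigned backs up, and the
 * freed slot goes to C, keeping the assigned set contiguous.
 */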

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter(td);
	td->td_critnest++;
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_pflags & TDP_OWEPREEMPT) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
#endif
		td->td_critnest = 0;
		cpu_critical_exit(td);
	} else {
		td->td_critnest--;
	}
}
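
/*
 * Typical usage sketch (illustrative; touch_pcpu_data() is a
 * hypothetical caller-side function, not part of this file):
 *
 *	critical_enter();
 *	touch_pcpu_data();	(safe from preemption here)
 *	critical_exit();	(any deferred switch happens here)
 *
 * Sections nest: only the outermost critical_exit() re-enables
 * preemption and honours a pending TDP_OWEPREEMPT switch.
 */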

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the new thread should preempt the current thread
 * immediately.  If so, it switches to it and eventually returns true.  If
 * not, it returns false so that the caller may place the thread on an
 * appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed either, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
			("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
	    td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_pflags |= TDP_OWEPREEMPT;
		return (0);
	}

	/*
	 * Our thread state says that we are already on a run queue, so
	 * update our state as if we had been dequeued by choosethread().
	 * However we must not actually be on the system run queue yet.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
	if (td->td_proc->p_flag & P_HADTHREADS) {
		/*
		 * If this is a threaded process we actually ARE on the
		 * ksegrp run queue, so take it off that first.
		 * Also undo any damage done to the last_assigned pointer.
		 * XXX Fix setrunqueue so this isn't needed.
		 */
		struct ksegrp *kg;

		kg = td->td_ksegrp;
		if (kg->kg_last_assigned == td)
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	}

	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL, td);
	return (1);
#else
	return (0);
#endif
}
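
/*
 * Concrete reading of the FULL_PREEMPTION check above (illustrative,
 * assuming the stock priority ranges): without FULL_PREEMPTION, a new
 * interrupt thread (pri in PRI_MIN_ITHD..PRI_MAX_ITHD) may preempt a
 * timeshare thread, and anything may preempt an idle-class thread
 * (cpri >= PRI_MIN_IDLE), but one timeshare thread never preempts
 * another.
 */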

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
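
/*
 * Worked example (assuming 32-bit status words, i.e. RQB_BPW == 32,
 * RQB_L2BPW == 5, and a zero-based RQB_FFS): if only queue 33 is
 * non-empty, word 1 has bit 1 set, so the scan above computes
 * pri = RQB_FFS(bits) + (1 << 5) = 1 + 32 = 33.
 */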

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
}
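
/*
 * Worked example (assuming the stock value RQ_PPQ == 4): a thread at
 * td_priority 130 lands on queue 130 / 4 == 32, sharing it with
 * priorities 128-131; within a queue, KSEs are served FIFO, since
 * runq_add() inserts at the tail and runq_choose() takes the head.
 */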

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; 0 or less is ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;
			ke2 = ke = TAILQ_FIRST(rqh);

			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
			ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

/****** Functions that are temporarily here ***********/
#include <vm/uma.h>
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler-specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}

/*
 * Called by the UMA process fini routine.
 * Undo anything we may have done in the uma_init method.
 * Panic if it's not all 1:1:1:1.
 * Called from:
 *  proc_fini() (UMA method)
 */
void
sched_destroyproc(struct proc *p)
{

	/* This function is slated for destruction. */
	KASSERT((p->p_numthreads == 1), ("Cached proc with > 1 thread "));
	KASSERT((p->p_numksegrps == 1), ("Cached proc with > 1 ksegrp "));
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

	ke = (struct td_sched *) (td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched	= ke;
	ke->ke_thread	= td;
	ke->ke_oncpu	= NOCPU;
	ke->ke_state	= KES_THREAD;
}

/*
 * Set up an initial concurrency of 1 and set the given thread (if given)
 * to be using that concurrency slot.
 * May be used "offline", before the ksegrp is attached to the world,
 * in which case it would not need sched_lock.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	/* Handle the case of a declining concurrency */
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}
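
/*
 * Arithmetic example (assumed numbers): if concurrency drops from 3 to
 * 2 while one opening is still available, kg_avail_opennings becomes
 * 1 + (2 - 3) = 0; with all three slots in use it would go to -1,
 * which setrunqueue() above tolerates until slots are freed again.
 */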

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which is only called from
 * thread_exit() for threads exiting without the rest of the process
 * exiting, because it is also called from sched_exit() and we wouldn't
 * want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	td->td_ksegrp->kg_avail_opennings++;
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */