/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to run, we know it was
associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned we know M was 1 and must
now be 0. Since the thread is no longer queued, the pointer must no
longer refer to it. Since we know there were no more KSEs available
(M was 1 and is now 0), and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available, so we can
prove that the next thread in the ksegrp list will not have a KSE to
assign to it, and therefore that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not, i.e. whether it is 'earlier'
on the list than that thread or later. If it is earlier, the KSE is
removed from the last assigned thread (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/
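
/*
 * A small worked example of the invariant above (illustrative only; the
 * numbers are made up): with N = 2 processors and X = 1 KSE already
 * running, M = 1 KSE is left for the run queue.  If the ksegrp has three
 * runnable threads T1, T2, T3 queued in priority order (T1 highest), only
 * T1 is preassigned a KSE, that KSE is queued at T1's priority, and
 * kg->kg_last_assigned points at T1.  When that KSE is taken off the run
 * queue to run T1, M drops to 0 and the pointer becomes NULL; when a KSE
 * later becomes available again, T2 inherits it and the pointer is moved
 * to T2.
 */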

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 146554 2005-05-23 23:01:53Z ups $");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
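
/*
 * The knob can be inspected from user space (example output only; the
 * value reported depends on how the kernel was built):
 *
 *	$ sysctl kern.sched.preemption
 *	kern.sched.preemption: 1
 */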

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			sched_add(td, SRQ_YIELDING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* No threads to use up the slots.  Quit now. */
			break;
		}
	}
}

#ifdef	SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* Remove from the system run queue and free up a slot. */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	TD_SET_CAN_RUN(td);
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}

/*
 * This function is called when a thread is about to be put on a
 * ksegrp run queue because it has been made runnable or its
 * priority has been adjusted and the ksegrp does not have a
 * free kse slot.  It determines if a thread from the same ksegrp
 * should be preempted.  If so, it switches threads if the preempted
 * thread is running on the current cpu, or notifies the other cpu that
 * it should switch threads.
 */

static void
maybe_preempt_in_ksegrp(struct thread *td)
#if !defined(SMP)
{
	struct thread *running_thread;

#ifndef FULL_PREEMPTION
	int pri;
	pri = td->td_priority;
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
		return;
#endif
	mtx_assert(&sched_lock, MA_OWNED);
	running_thread = curthread;

	if (running_thread->td_ksegrp != td->td_ksegrp)
		return;

	if (td->td_priority >= running_thread->td_priority)
		return;
#ifdef PREEMPTION
	if (running_thread->td_critnest > 1)
		running_thread->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL, NULL);

#else
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif
	return;
}

#else /* SMP */
{
	struct thread *running_thread;
	int worst_pri;
	struct ksegrp *kg;
	cpumask_t cpumask, dontuse;
	struct pcpu *pc;
	struct pcpu *best_pcpu;
	struct thread *cputhread;

#ifndef FULL_PREEMPTION
	int pri;
	pri = td->td_priority;
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
		return;
#endif

	mtx_assert(&sched_lock, MA_OWNED);

	running_thread = curthread;

#if !defined(KSEG_PEEMPT_BEST_CPU)
	if (running_thread->td_ksegrp != td->td_ksegrp) {
#endif
		kg = td->td_ksegrp;

		/* If someone is ahead of this thread, wait our turn. */
		if (td != TAILQ_FIRST(&kg->kg_runq))
			return;

		worst_pri = td->td_priority;
		best_pcpu = NULL;
		dontuse   = stopped_cpus | idle_cpus_mask;

		/*
		 * Find a cpu with the worst priority that runs a thread from
		 * the same ksegrp - if multiple exist, prefer first the cpu
		 * the thread last ran on, then the current cpu.
		 */
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			cpumask   = pc->pc_cpumask;
			cputhread = pc->pc_curthread;

			if ((cpumask & dontuse) ||
			    cputhread->td_ksegrp != kg)
				continue;

			if (cputhread->td_priority > worst_pri) {
				worst_pri = cputhread->td_priority;
				best_pcpu = pc;
				continue;
			}

			if (cputhread->td_priority == worst_pri &&
			    best_pcpu != NULL &&
			    (td->td_lastcpu == pc->pc_cpuid ||
				(PCPU_GET(cpumask) == cpumask &&
				    td->td_lastcpu != best_pcpu->pc_cpuid)))
				best_pcpu = pc;
		}

		/* Check if we need to preempt someone. */
		if (best_pcpu == NULL)
			return;

		if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
			best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
			ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
			return;
		}
#if !defined(KSEG_PEEMPT_BEST_CPU)
	}
#endif

	if (td->td_priority >= running_thread->td_priority)
		return;
#ifdef PREEMPTION
	if (running_thread->td_critnest > 1)
		running_thread->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL, NULL);

#else
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif
	return;
}
#endif /* !SMP */


int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);
			}
			kg->kg_avail_opennings = 1;
		}
		sched_add(td, flags);
		return;
	}

	/*
	 * If the concurrency has been reduced, and we would land in the
	 * 'assigned' section, then keep removing entries from the
	 * system run queue until we are no longer in that section
	 * or there is room for us to be put in that section.
	 * What we MUST avoid is the case where threads of lower
	 * priority than the new one are scheduled, but the new one can not
	 * be scheduled itself.  That would lead to a non-contiguous set
	 * of scheduled threads, and everything would break.
	 */
	tda = kg->kg_last_assigned;
	while ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and if needed, readjust the last_assigned pointer.
	 * It may be that we need to schedule something anyhow,
	 * even if the available slots are negative, so that
	 * all the items before last_assigned are scheduled.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned, so whoever is first
			 * gets the slot (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
		    td, td->td_ksegrp, td->td_proc->p_pid);
		if ((flags & SRQ_YIELDING) == 0)
			maybe_preempt_in_ksegrp(td);
	}
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
#ifdef PREEMPTION
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_owepreempt) {
			td->td_critnest = 1;
			mtx_lock_spin(&sched_lock);
			td->td_critnest--;
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
	} else
#endif
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}
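
/*
 * Typical usage of the pair above (an illustrative sketch, not code taken
 * from this file): bracket a short region that must not be preempted,
 * for example while working on per-CPU data:
 *
 *	critical_enter();
 *	... touch PCPU data that must not be preempted or migrated ...
 *	critical_exit();
 *
 * A preemption requested while td_critnest is elevated is recorded in
 * td_owepreempt and performed by critical_exit() once the nesting count
 * returns to zero.
 */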

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines if the new thread should preempt the current thread immediately.
 * If so, it switches to it and eventually returns true.  If not, it returns
 * false so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not an interrupt thread priority,
	 *    the current thread's priority is not an idle priority, and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}

	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
	if (td->td_proc->p_flag & P_HADTHREADS) {
		/*
		 * If this is a threaded process we actually ARE on the
		 * ksegrp run queue so take it off that first.
		 * Also undo any damage done to the last_assigned pointer.
		 * XXX Fix setrunqueue so this isn't needed.
		 */
		struct ksegrp *kg;

		kg = td->td_ksegrp;
		if (kg->kg_last_assigned == td)
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	}

	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
	}
}
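
/*
 * For illustration (the exact constants live in <sys/runq.h>): with the
 * usual RQ_PPQ of 4 and RQ_NQS of 64, a thread priority of 130 maps to
 * queue index 130 / 4 = 32, and runq_setbit() marks that queue non-empty
 * by setting bit RQB_BIT(32) in word RQB_WORD(32) of the status bitmap.
 */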

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif
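
/*
 * kern.sched.runq_fuzz bounds how many entries at the head of the chosen
 * queue runq_choose() below will scan for a KSE whose thread last ran on
 * the current CPU.  A value of 1 is the normal "just take the head" case;
 * larger values trade strict FIFO order within one priority queue for a
 * little CPU affinity.
 */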

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; 0 or less are ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;
			ke2 = ke = TAILQ_FIRST(rqh);

			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
			ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

/****** Functions that are temporarily here ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

	ke = (struct td_sched *) (td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched	= ke;
	ke->ke_thread	= td;
	ke->ke_state	= KES_THREAD;
}

/*
 * Set up an initial concurrency of 1 and set the given thread
 * (if given) to be using that concurrency slot.
 * May be used "offline", i.e. before the ksegrp is attached to the world,
 * in which case schedlock is not needed.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	CTR1(KTR_RUNQ, "kg %p init slots and concurrency to 1", kg);
	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	CTR4(KTR_RUNQ, "kg %p set concurrency to %d, slots %d -> %d",
	    kg,
	    concurrency,
	    kg->kg_avail_opennings,
	    kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}
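
/*
 * For example (illustrative numbers only): a ksegrp with kg_concurrency 1
 * and kg_avail_opennings 1 that is switched to a concurrency of 3 gains
 * two openings (1 + (3 - 1) = 3); dropping back to a concurrency of 1
 * later removes them again, and kg_avail_opennings may go negative for a
 * while, as described in setrunqueue() above.
 */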

/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(), which is only called from
 * thread_exit() for threads exiting without the rest of the process exiting,
 * because sched_exit_thread() is also called from sched_exit() and we
 * wouldn't want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	SLOT_RELEASE(td->td_ksegrp);
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */