/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg). If there are X of these KSEs actually running at the
moment in question, then there are at most M (M = N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is removed from the run queue to be run, we know
it was associated with the highest priority thread in the queue (at the head
of the queue). If it is also the last assigned we know M was 1 and must
now be 0. Since the thread is no longer queued, that pointer must be
removed from it. Since we know there were no more KSEs available
(M was 1 and is now 0) and since we are not FREEING our KSE
but using it, we know there are STILL no more KSEs available; we can prove
that the next thread in the ksegrp list will not have a KSE to assign to
it, so we can show that the pointer must be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, it can
have its priority compared with the last assigned thread to see if
it should 'steal' its KSE or not, i.e. is it 'earlier'
on the list than that thread or later. If it's earlier, then the KSE is
removed from the last assigned (which is now not assigned a KSE)
and reassigned to the new thread, which is placed earlier in the list.
The pointer is then backed up to the previous thread (which may or may not
be the new thread).

When a thread sleeps or is removed, the KSE becomes available and if there
are queued threads that are not assigned KSEs, the highest priority one of
them is assigned the KSE, which is then placed back on the run queue at
the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/

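/*
 * As an illustration of the invariant described above, the sketch below
 * (not compiled, and assuming only the field names used elsewhere in this
 * file) walks a ksegrp's run queue: every thread from the head up to and
 * including kg_last_assigned has its KSE on the system run queue, and
 * every thread after it does not.
 */
#if 0
static void
ksegrp_check_last_assigned(struct ksegrp *kg)
{
	struct thread *td;
	int assigned;

	/* If nothing is assigned, no queued thread may hold a slot. */
	assigned = (kg->kg_last_assigned != NULL);
	TAILQ_FOREACH(td, &kg->kg_runq, td_runq) {
		if (assigned)
			KASSERT(td->td_kse->ke_state == KES_ONRUNQ,
			    ("thread at or before last_assigned lacks a KSE"));
		else
			KASSERT(td->td_kse->ke_state != KES_ONRUNQ,
			    ("thread after last_assigned holds a KSE"));
		if (td == kg->kg_last_assigned)
			assigned = 0;	/* everything beyond here is unassigned */
	}
}
#endif
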
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 147216 2005-06-10 03:00:29Z ups $");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

#define td_kse td_sched

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");

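/*
 * For example (illustrative, using the standard sysctl(8) utility),
 * a user can check the setting with:
 *
 *	$ sysctl kern.sched.preemption
 *	kern.sched.preemption: 1
 *
 * A value of 1 means the kernel was built with "options PREEMPTION".
 */
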
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			sched_add(td, SRQ_YIELDING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* No threads to use up the slots; quit now. */
			break;
		}
	}
}

#ifdef	SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* remove from sys run queue and free up a slot */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread. Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	TD_SET_CAN_RUN(td);
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}

/*
 * This function is called when a thread is about to be put on a
 * ksegrp run queue because it has been made runnable or its
 * priority has been adjusted and the ksegrp does not have a
 * free kse slot.  It determines if a thread from the same ksegrp
 * should be preempted.  If so, it tries to switch threads
 * if the thread is on the same cpu or notifies another cpu that
 * it should switch threads.
 */

static void
maybe_preempt_in_ksegrp(struct thread *td)
#if !defined(SMP)
{
	struct thread *running_thread;

	mtx_assert(&sched_lock, MA_OWNED);
	running_thread = curthread;

	if (running_thread->td_ksegrp != td->td_ksegrp)
		return;

	if (td->td_priority >= running_thread->td_priority)
		return;
#ifdef PREEMPTION
#ifndef FULL_PREEMPTION
	if (td->td_priority > PRI_MAX_ITHD) {
		running_thread->td_flags |= TDF_NEEDRESCHED;
		return;
	}
#endif /* FULL_PREEMPTION */

	if (running_thread->td_critnest > 1)
		running_thread->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL, NULL);

#else /* PREEMPTION */
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif /* PREEMPTION */
	return;
}

#else /* SMP */
{
	struct thread *running_thread;
	int worst_pri;
	struct ksegrp *kg;
	cpumask_t cpumask, dontuse;
	struct pcpu *pc;
	struct pcpu *best_pcpu;
	struct thread *cputhread;

	mtx_assert(&sched_lock, MA_OWNED);

	running_thread = curthread;

#if !defined(KSEG_PEEMPT_BEST_CPU)
	if (running_thread->td_ksegrp != td->td_ksegrp) {
#endif
		kg = td->td_ksegrp;

		/* If someone is ahead of this thread, wait our turn. */
		if (td != TAILQ_FIRST(&kg->kg_runq))
			return;

		worst_pri = td->td_priority;
		best_pcpu = NULL;
		dontuse   = stopped_cpus | idle_cpus_mask;

		/*
		 * Find the cpu running the worst-priority thread from the
		 * same ksegrp - if several qualify, prefer first the cpu
		 * the thread last ran on and then the current cpu.
		 */

		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			cpumask   = pc->pc_cpumask;
			cputhread = pc->pc_curthread;

			if ((cpumask & dontuse) ||
			    cputhread->td_ksegrp != kg)
				continue;

			if (cputhread->td_priority > worst_pri) {
				worst_pri = cputhread->td_priority;
				best_pcpu = pc;
				continue;
			}

			if (cputhread->td_priority == worst_pri &&
			    best_pcpu != NULL &&
			    (td->td_lastcpu == pc->pc_cpuid ||
				(PCPU_GET(cpumask) == cpumask &&
				    td->td_lastcpu != best_pcpu->pc_cpuid)))
			    best_pcpu = pc;
		}

		/* Check if we need to preempt someone. */
		if (best_pcpu == NULL)
			return;

#if defined(IPI_PREEMPTION) && defined(PREEMPTION)
#if !defined(FULL_PREEMPTION)
		if (td->td_priority <= PRI_MAX_ITHD)
#endif /* ! FULL_PREEMPTION */
			{
				ipi_selected(best_pcpu->pc_cpumask, IPI_PREEMPT);
				return;
			}
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */

		if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
			best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
			ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
			return;
		}
#if !defined(KSEG_PEEMPT_BEST_CPU)
	}
#endif

	if (td->td_priority >= running_thread->td_priority)
		return;
#ifdef PREEMPTION

#if !defined(FULL_PREEMPTION)
	if (td->td_priority > PRI_MAX_ITHD) {
		running_thread->td_flags |= TDF_NEEDRESCHED;
		return;
	}
#endif /* ! FULL_PREEMPTION */

	if (running_thread->td_critnest > 1)
		running_thread->td_owepreempt = 1;
	else
		mi_switch(SW_INVOL, NULL);

#else /* PREEMPTION */
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif /* PREEMPTION */
	return;
}
#endif /* !SMP */

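/*
 * A worked example of the selection above (illustrative): suppose the new
 * thread has priority 120 and two CPUs are running threads from its
 * ksegrp, one at priority 140 and one at priority 160 (larger numbers are
 * worse).  Both beat worst_pri (initially 120), so best_pcpu ends up as
 * the CPU running the priority-160 thread, and that CPU is the one asked
 * to reschedule (or sent IPI_PREEMPT when IPI_PREEMPTION is configured).
 */
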
int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
            td, td->td_proc->p_comm, td->td_priority, curthread,
            curthread->td_proc->p_comm);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
			("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);
			}
			kg->kg_avail_opennings = 1;
		}
		sched_add(td, flags);
		return;
	}

	/*
	 * If the concurrency has reduced, and we would go in the
	 * assigned section, then keep removing entries from the
	 * system run queue, until we are not in that section
	 * or there is room for us to be put in that section.
	 * What we MUST avoid is the case where threads of lower
	 * priority than the new one are already scheduled, but the new
	 * one can not be scheduled itself. That would lead to a non
	 * contiguous set of scheduled threads, and everything would break.
	 */
	tda = kg->kg_last_assigned;
	while ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and if needed, readjust the last_assigned pointer.
	 * It may be that we need to schedule something anyhow
	 * even if the available slots are negative, so that
	 * all the items before last_assigned are scheduled.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned so whoever is first
			 * gets the slot.. (maybe us)
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
			td, td->td_ksegrp, td->td_proc->p_pid);
		if ((flags & SRQ_YIELDING) == 0)
			maybe_preempt_in_ksegrp(td);
	}
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
#ifdef PREEMPTION
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_owepreempt) {
			td->td_critnest = 1;
			mtx_lock_spin(&sched_lock);
			td->td_critnest--;
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
	} else
#endif
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

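/*
 * Illustrative only (not compiled): the typical way kernel code uses the
 * functions above to keep a short region from being preempted.  Critical
 * sections nest, so a deferred preemption only happens when the outermost
 * critical_exit() drops td_critnest back to zero.
 */
#if 0
	critical_enter();
	/* ... touch per-CPU state that must not be preempted away ... */
	critical_exit();	/* may perform a deferred preemption here */
#endif
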
/*
 * This function is called when a thread is about to be put on run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines if the new thread should be immediately preempted to.  If so,
 * it switches to it and eventually returns true.  If not, it returns false
 * so that the caller may place the thread on an appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyways, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed as well, so
	 *    just avoid that whole landmine.
	 *  - If the new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
			("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}

	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
	if (td->td_proc->p_flag & P_HADTHREADS) {
		/*
		 * If this is a threaded process we actually ARE on the
		 * ksegrp run queue so take it off that first.
		 * Also undo any damage done to the last_assigned pointer.
		 * XXX Fix setrunqueue so this isn't needed
		 */
		struct ksegrp *kg;

		kg = td->td_ksegrp;
		if (kg->kg_last_assigned == td)
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	}

	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
#else
	return (0);
#endif
}

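/*
 * Illustrative only (not compiled): how a caller is expected to use
 * maybe_preempt(), per the comment above.  A scheduler's add-to-run-queue
 * path (hypothetical names below) would try to preempt first and only
 * queue the thread's KSE when no switch took place.
 */
#if 0
	if (maybe_preempt(td))
		return;			/* already switched to td */
	runq_add(rq, td->td_kse, flags);	/* rq: the chosen run queue */
#endif
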
#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
	}
}

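/*
 * A worked example of the mapping used above and in the bit helpers,
 * assuming the usual definitions from <sys/runq.h> (RQ_PPQ == 4,
 * RQB_BPW == 32, so RQB_LEN == RQ_NQS / 32 == 2):
 *
 *	td_priority = 130  =>  pri = 130 / 4 = 32
 *	RQB_WORD(32) = 32 >> 5 = 1		(second status word)
 *	RQB_BIT(32)  = 1 << (32 % 32) = 0x00000001
 *
 * so a thread at priority 130 is queued on rq_queues[32] and is
 * advertised by bit 0 of rq_status.rqb_bits[1].
 */
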
/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects, does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal.. 0 or less are ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;
			ke2 = ke = TAILQ_FIRST(rqh);

			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
			ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
		("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler-specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

	/* The scheduler-private data sits directly after the thread itself. */
	ke = (struct td_sched *) (td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched     = ke;
	ke->ke_thread	= td;
	ke->ke_state	= KES_THREAD;
}

/*
 * Set up an initial concurrency of 1
 * and set the given thread (if given) to be using that
 * concurrency slot.
 * May be used "offline" before the ksegrp is attached to the world
 * and thus wouldn't need schedlock in that case.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg);
	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	CTR4(KTR_RUNQ,"kg %p set concurrency to %d, slots %d -> %d",
	    kg,
	    concurrency,
	    kg->kg_avail_opennings,
	    kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}

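/*
 * For example (illustrative): a ksegrp created with concurrency 1 that
 * currently has 0 available openings (its one slot is in use) and is then
 * set to a concurrency of 3 ends up with 0 + (3 - 1) = 2 available
 * openings; the delta is applied to the free count, so slots already in
 * use stay accounted for.
 */
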
/*
 * Called from thread_exit() for all exiting threads.
 *
 * Not to be confused with sched_exit_thread(),
 * which is only called from thread_exit() for threads exiting
 * without the rest of the process exiting, because it is also called from
 * sched_exit() and we wouldn't want to call it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	SLOT_RELEASE(td->td_ksegrp);
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */