kern_switch.c revision 136583
1/*
2 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27/***
28Here is the logic..
29
30If there are N processors, then there are at most N KSEs (kernel
31schedulable entities) working to process threads that belong to a
32KSEGROUP (kg). If there are X of these KSEs actually running at the
33moment in question, then there are at most M (where M = N - X) of these
34KSEs on the run queue, as running KSEs are not on the queue.
35
36Runnable threads are queued off the KSEGROUP in priority order.
37If there are M or more threads runnable, the top M threads
38(by priority) are 'preassigned' to the M KSEs not running. The KSEs take
39their priority from those threads and are put on the run queue.
40
41The last thread that had a priority high enough to have a KSE associated
42with it, AND IS ON THE RUN QUEUE, is pointed to by
43kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
44assigned, either because all the available KSEs are actively running or
45because there are no threads queued, that pointer is NULL.
46
47When a KSE is removed from the run queue to become runnable, we know
48it was associated with the highest priority thread in the queue (at the head
49of the queue). If it is also the last assigned, we know M was 1 and must
50now be 0. Since the thread is no longer queued, that pointer must be
51moved off it. Since we know there were no more KSEs available
52(M was 1 and is now 0), and since we are not FREEING our KSE
53but using it, we know there are STILL no more KSEs available, so we can
54prove that the next thread in the ksegrp list will not have a KSE to assign
55to it, and thus that the pointer must be made 'invalid' (NULL).
56
57The pointer exists so that when a new thread is made runnable, it can
58have its priority compared with the last assigned thread to see if
59it should 'steal' its KSE or not, i.e. whether it is 'earlier'
60on the list than that thread or later. If it's earlier, then the KSE is
61taken away from the last assigned thread (which is now not assigned a KSE)
62and reassigned to the new thread, which is placed earlier in the list.
63The pointer is then backed up to the previous thread (which may or may not
64be the new thread).
65
66When a thread sleeps or is removed, the KSE becomes available and if there
67are queued threads that are not assigned KSEs, the highest priority one of
68them is assigned the KSE, which is then placed back on the run queue at
69the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
70to point to it.
71
72The following diagram shows 2 KSEs and 3 threads from a single process.
73
74 RUNQ: --->KSE---KSE--...    (KSEs queued at priorities from threads)
75              \    \____
76               \        \
77    KSEGROUP---thread--thread--thread    (queued in priority order)
78        \                 /
79         \_______________/
80          (last_assigned)
81
82The result of this scheme is that the M available KSEs are always
83queued at the priorities they have inherited from the M highest priority
84threads for that KSEGROUP. If this situation changes, the KSEs are
85reassigned to keep this true.
86***/
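
/*
 * A purely illustrative example of the diagram above: with 2 KSEs and 3
 * runnable threads at priorities 10, 20 and 30, the two KSEs sit on the
 * system run queue at priorities 10 and 20, kg_last_assigned points at the
 * priority 20 thread, and the priority 30 thread waits on the ksegrp queue
 * with no KSE.  If a thread at priority 15 now becomes runnable,
 * setrunqueue() takes the slot away from the priority 20 thread (the last
 * assigned), hands it to the new thread, and kg_last_assigned ends up
 * pointing at the new thread.
 */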
87
88#include <sys/cdefs.h>
89__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 136583 2004-10-16 06:38:22Z scottl $");
90
91#include "opt_sched.h"
92
93#ifndef KERN_SWITCH_INCLUDE
94#include <sys/param.h>
95#include <sys/systm.h>
96#include <sys/kdb.h>
97#include <sys/kernel.h>
98#include <sys/ktr.h>
99#include <sys/lock.h>
100#include <sys/mutex.h>
101#include <sys/proc.h>
102#include <sys/queue.h>
103#include <sys/sched.h>
104#else  /* KERN_SWITCH_INCLUDE */
105#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
106#include <sys/smp.h>
107#endif
108#include <machine/critical.h>
109#if defined(SMP) && defined(SCHED_4BSD)
110#include <sys/sysctl.h>
111#endif
112
113#ifdef FULL_PREEMPTION
114#ifndef PREEMPTION
115#error "The FULL_PREEMPTION option requires the PREEMPTION option"
116#endif
117#endif
118
119CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
120
121#define td_kse td_sched
122
123/************************************************************************
124 * Functions that manipulate runnability from a thread perspective.	*
125 ************************************************************************/
126/*
127 * Select the KSE that will be run next.  From that find the thread, and
128 * remove it from the KSEGRP's run queue.  If there is thread clustering,
129 * this will be what does it.
130 */
131struct thread *
132choosethread(void)
133{
134	struct kse *ke;
135	struct thread *td;
136	struct ksegrp *kg;
137
138#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
139	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
140		/* Shutting down, run idlethread on AP's */
141		td = PCPU_GET(idlethread);
142		ke = td->td_kse;
143		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
144		ke->ke_flags |= KEF_DIDRUN;
145		TD_SET_RUNNING(td);
146		return (td);
147	}
148#endif
149
150retry:
151	ke = sched_choose();
152	if (ke) {
153		td = ke->ke_thread;
154		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
155		kg = ke->ke_ksegrp;
156		if (td->td_proc->p_flag & P_HADTHREADS) {
157			if (kg->kg_last_assigned == td) {
158				kg->kg_last_assigned = TAILQ_PREV(td,
159				    threadqueue, td_runq);
160			}
161			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
162			kg->kg_runnable--;
163		}
164		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
165		    td, td->td_priority);
166	} else {
167		/* Simulate runq_choose() having returned the idle thread */
168		td = PCPU_GET(idlethread);
169		ke = td->td_kse;
170		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
171	}
172	ke->ke_flags |= KEF_DIDRUN;
173
174	/*
175	 * If we are in panic, only allow system threads,
176	 * plus the one we are running in, to be run.
177	 */
178	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
179	    (td->td_flags & TDF_INPANIC) == 0)) {
180		/* note that it is no longer on the run queue */
181		TD_SET_CAN_RUN(td);
182		goto retry;
183	}
184
185	TD_SET_RUNNING(td);
186	return (td);
187}
188
189/*
190 * Given a surplus system slot, try to assign a new runnable thread to it.
191 * Called from:
192 *  sched_thread_exit()  (local)
193 *  sched_switch()  (local)
195 *  remrunqueue()  (local)  (not at the moment)
196 */
197static void
198slot_fill(struct ksegrp *kg)
199{
200	struct thread *td;
201
202	mtx_assert(&sched_lock, MA_OWNED);
203	while (kg->kg_avail_opennings > 0) {
204		/*
205		 * Find the first unassigned thread
206		 */
207		if ((td = kg->kg_last_assigned) != NULL)
208			td = TAILQ_NEXT(td, td_runq);
209		else
210			td = TAILQ_FIRST(&kg->kg_runq);
211
212		/*
213		 * If we found one, send it to the system scheduler.
214		 */
215		if (td) {
216			kg->kg_last_assigned = td;
217			sched_add(td, SRQ_YIELDING);
218			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
219		} else {
220			/* no threads to use up the slots. quit now */
221			break;
222		}
223	}
224}
225
226#ifdef	SCHED_4BSD
227/*
228 * Remove a thread from its KSEGRP's run queue.
229 * This in turn may remove it from a KSE if it was already assigned
230 * to one, possibly causing a new thread to be assigned to the KSE
231 * and the KSE getting a new priority.
232 */
233static void
234remrunqueue(struct thread *td)
235{
236	struct thread *td2, *td3;
237	struct ksegrp *kg;
238	struct kse *ke;
239
240	mtx_assert(&sched_lock, MA_OWNED);
241	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
242	kg = td->td_ksegrp;
243	ke = td->td_kse;
244	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
245	TD_SET_CAN_RUN(td);
246	/*
247	 * If it is not a threaded process, take the shortcut.
248	 */
249	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
250		/* remove from the sys run queue and free up a slot */
251		sched_rem(td);
252		ke->ke_state = KES_THREAD;
253		return;
254	}
255   	td3 = TAILQ_PREV(td, threadqueue, td_runq);
256	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
257	kg->kg_runnable--;
258	if (ke->ke_state == KES_ONRUNQ) {
259		/*
260		 * This thread has been assigned to the system run queue.
261		 * We need to dissociate it and try to assign the
262		 * KSE to the next available thread. Then, we should
263		 * see if we need to move the KSE in the run queues.
264		 */
265		sched_rem(td);
266		ke->ke_state = KES_THREAD;
267		td2 = kg->kg_last_assigned;
268		KASSERT((td2 != NULL), ("last assigned has wrong value"));
269		if (td2 == td)
270			kg->kg_last_assigned = td3;
271		/* slot_fill(kg); */ /* will replace it with another */
272	}
273}
274#endif
275
276/*
277 * Change the priority of a thread that is on the run queue.
278 */
279void
280adjustrunqueue(struct thread *td, int newpri)
281{
282	struct ksegrp *kg;
283	struct kse *ke;
284
285	mtx_assert(&sched_lock, MA_OWNED);
286	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
287
288	ke = td->td_kse;
289	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
290	/*
291	 * If it is not a threaded process, take the shortcut.
292	 */
293	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
294		/* We only care about the kse in the run queue. */
295		td->td_priority = newpri;
296		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
297			sched_rem(td);
298			sched_add(td, SRQ_BORING);
299		}
300		return;
301	}
302
303	/* It is a threaded process */
304	kg = td->td_ksegrp;
305	if (ke->ke_state == KES_ONRUNQ) {
306		if (kg->kg_last_assigned == td) {
307			kg->kg_last_assigned =
308			    TAILQ_PREV(td, threadqueue, td_runq);
309		}
310		sched_rem(td);
311	}
312	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
313	kg->kg_runnable--;
314	TD_SET_CAN_RUN(td);
315	td->td_priority = newpri;
316	setrunqueue(td, SRQ_BORING);
317}
318
319/*
320 * This function is called when a thread is about to be put on a
321 * ksegrp run queue because it has been made runnable or its
322 * priority has been adjusted and the ksegrp does not have a
323 * free kse slot.  It determines if a thread from the same ksegrp
324 * should be preempted.  If so, it tries to switch threads
325 * if the thread is on the same cpu or notifies another cpu that
326 * it should switch threads.
327 */
328
329static void
330maybe_preempt_in_ksegrp(struct thread *td)
331#if  !defined(SMP)
332{
333	struct thread *running_thread;
334
335#ifndef FULL_PREEMPTION
336	int pri;
337	pri = td->td_priority;
338	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
339		return;
340#endif
341	mtx_assert(&sched_lock, MA_OWNED);
342	running_thread = curthread;
343
344	if (running_thread->td_ksegrp != td->td_ksegrp)
345		return;
346
347	if (td->td_priority > running_thread->td_priority)
348		return;
349#ifdef PREEMPTION
350	if (running_thread->td_critnest > 1)
351		running_thread->td_pflags |= TDP_OWEPREEMPT;
352	else
353		mi_switch(SW_INVOL, NULL);
354
355#else
356	running_thread->td_flags |= TDF_NEEDRESCHED;
357#endif
358	return;
359}
360
361#else /* SMP */
362{
363	struct thread *running_thread;
364	int worst_pri;
365	struct ksegrp *kg;
366	cpumask_t cpumask, dontuse;
367	struct pcpu *pc;
368	struct pcpu *best_pcpu;
369	struct thread *cputhread;
370
371#ifndef FULL_PREEMPTION
372	int pri;
373	pri = td->td_priority;
374	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
375		return;
376#endif
377
378	mtx_assert(&sched_lock, MA_OWNED);
379
380	running_thread = curthread;
381
382#if !defined(KSEG_PEEMPT_BEST_CPU)
383	if (running_thread->td_ksegrp != td->td_ksegrp) {
384#endif
385		kg = td->td_ksegrp;
386
387		/* if someone is ahead of this thread, wait our turn */
388		if (td != TAILQ_FIRST(&kg->kg_runq))
389			return;
390
391		worst_pri = td->td_priority;
392		best_pcpu = NULL;
393		dontuse   = stopped_cpus | idle_cpus_mask;
394
395		/*
396		 * Find the cpu that runs a thread from the same ksegrp with the
397		 * worst (numerically highest) priority - if several tie, prefer
398		 * first the cpu this thread last ran on and then the current cpu.
399		 */
400
401		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
402			cpumask   = pc->pc_cpumask;
403			cputhread = pc->pc_curthread;
404
405			if ((cpumask & dontuse)  ||
406			    cputhread->td_ksegrp != kg)
407				continue;
408
409			if (cputhread->td_priority > worst_pri) {
410				worst_pri = cputhread->td_priority;
411				best_pcpu = pc;
412				continue;
413			}
414
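			/*
			 * Tie-break between equally bad candidates: prefer
			 * the cpu this thread last ran on and, failing that,
			 * the current cpu (unless the cpu already chosen is
			 * the last-run one).
			 */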
415			if (cputhread->td_priority == worst_pri &&
416			    best_pcpu != NULL &&
417			    (td->td_lastcpu == pc->pc_cpuid ||
418				(PCPU_GET(cpumask) == cpumask &&
419				    td->td_lastcpu != best_pcpu->pc_cpuid)))
420			    best_pcpu = pc;
421		}
422
423		/* Check if we need to preempt someone */
424		if (best_pcpu == NULL)
425			return;
426
427		if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
428			best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
429			ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
430			return;
431		}
432#if !defined(KSEG_PEEMPT_BEST_CPU)
433	}
434#endif
435
436	if (td->td_priority > running_thread->td_priority)
437		return;
438#ifdef PREEMPTION
439	if (running_thread->td_critnest > 1)
440		running_thread->td_pflags |= TDP_OWEPREEMPT;
441	else
442		mi_switch(SW_INVOL, NULL);
443
444#else
445	running_thread->td_flags |= TDF_NEEDRESCHED;
446#endif
447	return;
448}
449#endif /* !SMP */
450
451
452int limitcount;
453void
454setrunqueue(struct thread *td, int flags)
455{
456	struct ksegrp *kg;
457	struct thread *td2;
458	struct thread *tda;
459
460	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
461	    td, td->td_ksegrp, td->td_proc->p_pid);
462	mtx_assert(&sched_lock, MA_OWNED);
463	KASSERT((td->td_inhibitors == 0),
464			("setrunqueue: trying to run inhibited thread"));
465	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
466	    ("setrunqueue: bad thread state"));
467	TD_SET_RUNQ(td);
468	kg = td->td_ksegrp;
469	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
470		/*
471		 * Common path optimisation: Only one of everything
472		 * and the KSE is always already attached.
473		 * Totally ignore the ksegrp run queue.
474		 */
475		if (kg->kg_avail_opennings != 1) {
476			if (limitcount < 1) {
477				limitcount++;
478				printf("pid %d: corrected slot count (%d->1)\n",
479				    td->td_proc->p_pid, kg->kg_avail_opennings);
480
481			}
482			kg->kg_avail_opennings = 1;
483		}
484		sched_add(td, flags);
485		return;
486	}
487
488	/*
489	 * If the concurrency has been reduced, and we would land in the
490	 * assigned section, then keep removing entries from the
491	 * system run queue until we are no longer in that section
492	 * or until there is room for us to be put in that section.
493	 * What we MUST avoid is the case where threads of lower
494	 * priority than the new one are scheduled, but the new one can not
495	 * be scheduled itself. That would lead to a non-contiguous set
496	 * of scheduled threads, and everything would break.
497	 */
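	/*
	 * Illustrative restatement of the loop below: while there is no
	 * free slot and the last assigned thread has a worse (higher)
	 * priority than the new one, pull that thread back off the system
	 * run queue and back kg_last_assigned up, until the new thread
	 * either has room or belongs in the unassigned tail.
	 */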
498	tda = kg->kg_last_assigned;
499	while ((kg->kg_avail_opennings <= 0) &&
500	    (tda && (tda->td_priority > td->td_priority))) {
501		/*
502		 * None free, but there is one we can commandeer.
503		 */
504		CTR2(KTR_RUNQ,
505		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
506		sched_rem(tda);
507		tda = kg->kg_last_assigned =
508		    TAILQ_PREV(tda, threadqueue, td_runq);
509	}
510
511	/*
512	 * Add the thread to the ksegrp's run queue at
513	 * the appropriate place.
514	 */
515	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
516		if (td2->td_priority > td->td_priority) {
517			kg->kg_runnable++;
518			TAILQ_INSERT_BEFORE(td2, td, td_runq);
519			break;
520		}
521	}
522	if (td2 == NULL) {
523		/* We ran off the end of the TAILQ or it was empty. */
524		kg->kg_runnable++;
525		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
526	}
527
528	/*
529	 * If we have a slot to use, then put the thread on the system
530	 * run queue and if needed, readjust the last_assigned pointer.
531	 * It may be that we need to schedule something anyhow
532	 * even if the available slots are negative, so that
533	 * all the items up to last_assigned are scheduled.
534	 */
535	if (kg->kg_avail_opennings > 0) {
536		if (tda == NULL) {
537			/*
538			 * No pre-existing last assigned so whoever is first
539			 * gets the slot.. (maybe us)
540			 */
541			td2 = TAILQ_FIRST(&kg->kg_runq);
542			kg->kg_last_assigned = td2;
543		} else if (tda->td_priority > td->td_priority) {
544			td2 = td;
545		} else {
546			/*
547			 * We are past last_assigned, so
548			 * give the next slot to whatever is next,
549			 * which may or may not be us.
550			 */
551			td2 = TAILQ_NEXT(tda, td_runq);
552			kg->kg_last_assigned = td2;
553		}
554		sched_add(td2, flags);
555	} else {
556		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
557			td, td->td_ksegrp, td->td_proc->p_pid);
558		if ((flags & SRQ_YIELDING) == 0)
559			maybe_preempt_in_ksegrp(td);
560	}
561}
562
563/*
564 * Kernel thread preemption implementation.  Critical sections mark
565 * regions of code in which preemptions are not allowed.
566 */
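/*
 * A minimal usage sketch (illustrative only): code that must not be
 * preempted brackets the region with a critical section, e.g.
 *
 *	critical_enter();
 *	... manipulate per-CPU state that must not be preempted ...
 *	critical_exit();
 *
 * Sections nest via td_critnest; only the outermost critical_exit()
 * re-enables preemption and pays any preemption deferred while inside
 * (TDP_OWEPREEMPT).
 */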
567void
568critical_enter(void)
569{
570	struct thread *td;
571
572	td = curthread;
573	if (td->td_critnest == 0)
574		cpu_critical_enter(td);
575	td->td_critnest++;
576}
577
578void
579critical_exit(void)
580{
581	struct thread *td;
582
583	td = curthread;
584	KASSERT(td->td_critnest != 0,
585	    ("critical_exit: td_critnest == 0"));
586	if (td->td_critnest == 1) {
587		if (td->td_pflags & TDP_WAKEPROC0) {
588			td->td_pflags &= ~TDP_WAKEPROC0;
589			wakeup(&proc0);
590		}
591#ifdef PREEMPTION
592		mtx_assert(&sched_lock, MA_NOTOWNED);
593		if (td->td_pflags & TDP_OWEPREEMPT) {
594			mtx_lock_spin(&sched_lock);
595			mi_switch(SW_INVOL, NULL);
596			mtx_unlock_spin(&sched_lock);
597		}
598#endif
599		td->td_critnest = 0;
600		cpu_critical_exit(td);
601	} else {
602		td->td_critnest--;
603	}
604}
605
606/*
607 * This function is called when a thread is about to be put on a run queue
608 * because it has been made runnable or its priority has been adjusted.  It
609 * determines whether we should immediately preempt to the new thread.  If so,
610 * it switches to it and eventually returns true.  If not, it returns false
611 * so that the caller may place the thread on an appropriate run queue.
612 */
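/*
 * A minimal sketch (illustrative only) of the calling pattern the comment
 * above implies: the caller holds sched_lock and only queues the thread
 * itself when maybe_preempt() declines to switch, e.g.
 *
 *	if (!maybe_preempt(td))
 *		sched_add(td, flags);
 */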
613int
614maybe_preempt(struct thread *td)
615{
616#ifdef PREEMPTION
617	struct thread *ctd;
618	int cpri, pri;
619#endif
620
621	mtx_assert(&sched_lock, MA_OWNED);
622#ifdef PREEMPTION
623	/*
624	 * The new thread should not preempt the current thread if any of the
625	 * following conditions are true:
626	 *
627	 *  - The current thread has a higher (numerically lower) or
628	 *    equivalent priority.  Note that this prevents curthread from
629	 *    trying to preempt to itself.
630	 *  - It is too early in the boot for context switches (cold is set).
631	 *  - The current thread has an inhibitor set or is in the process of
632	 *    exiting.  In this case, the current thread is about to switch
633	 *    out anyways, so there's no point in preempting.  If we did,
634	 *    the current thread would not be properly resumed as well, so
635	 *    just avoid that whole landmine.
636	 *  - If the new thread's priority is not a realtime priority and
637	 *    the current thread's priority is not an idle priority and
638	 *    FULL_PREEMPTION is disabled.
639	 *
640	 * If all of these conditions are false, but the current thread is in
641	 * a nested critical section, then we have to defer the preemption
642	 * until we exit the critical section.  Otherwise, switch immediately
643	 * to the new thread.
644	 */
645	ctd = curthread;
646	KASSERT ((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
647	  ("thread has no (or wrong) sched-private part."));
648	KASSERT((td->td_inhibitors == 0),
649			("maybe_preempt: trying to run inhibited thread"));
650	pri = td->td_priority;
651	cpri = ctd->td_priority;
652	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
653	    td->td_kse->ke_state != KES_THREAD)
654		return (0);
655#ifndef FULL_PREEMPTION
656	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
657	    !(cpri >= PRI_MIN_IDLE))
658		return (0);
659#endif
660	if (ctd->td_critnest > 1) {
661		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
662		    ctd->td_critnest);
663		ctd->td_pflags |= TDP_OWEPREEMPT;
664		return (0);
665	}
666
667	/*
668	 * Thread is runnable but not yet put on system run queue.
669	 */
670	MPASS(TD_ON_RUNQ(td));
671	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
672	if (td->td_proc->p_flag & P_HADTHREADS) {
673		/*
674		 * If this is a threaded process we actually ARE on the
675		 * ksegrp run queue so take it off that first.
676		 * Also undo any damage done to the last_assigned pointer.
677		 * XXX Fix setrunqueue so this isn't needed
678		 */
679		struct ksegrp *kg;
680
681		kg = td->td_ksegrp;
682		if (kg->kg_last_assigned == td)
683			kg->kg_last_assigned =
684			    TAILQ_PREV(td, threadqueue, td_runq);
685		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
686	}
687
688	TD_SET_RUNNING(td);
689	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
690	    td->td_proc->p_pid, td->td_proc->p_comm);
691	mi_switch(SW_INVOL|SW_PREEMPT, td);
692	return (1);
693#else
694	return (0);
695#endif
696}
697
698#if 0
699#ifndef PREEMPTION
700/* XXX: There should be a non-static version of this. */
701static void
702printf_caddr_t(void *data)
703{
704	printf("%s", (char *)data);
705}
706static char preempt_warning[] =
707    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
708SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
709    preempt_warning)
710#endif
711#endif
712
713/************************************************************************
714 * SYSTEM RUN QUEUE manipulations and tests				*
715 ************************************************************************/
716/*
717 * Initialize a run structure.
718 */
719void
720runq_init(struct runq *rq)
721{
722	int i;
723
724	bzero(rq, sizeof *rq);
725	for (i = 0; i < RQ_NQS; i++)
726		TAILQ_INIT(&rq->rq_queues[i]);
727}
728
729/*
730 * Clear the status bit of the queue corresponding to priority level pri,
731 * indicating that it is empty.
732 */
733static __inline void
734runq_clrbit(struct runq *rq, int pri)
735{
736	struct rqbits *rqb;
737
738	rqb = &rq->rq_status;
739	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
740	    rqb->rqb_bits[RQB_WORD(pri)],
741	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
742	    RQB_BIT(pri), RQB_WORD(pri));
743	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
744}
745
746/*
747 * Find the index of the first non-empty run queue.  This is done by
748 * scanning the status bits, a set bit indicates a non-empty queue.
749 */
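/*
 * Illustrative note: RQB_WORD(pri) selects the status word holding the
 * bit for 'pri' and RQB_BIT(pri) the bit within that word, so with
 * 32-bit status words priority 37 would live in word 1, bit 5.  Since
 * RQB_FFS() returns the lowest set bit, the numerically lowest (i.e.
 * best) priority with a non-empty queue is found first.
 */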
750static __inline int
751runq_findbit(struct runq *rq)
752{
753	struct rqbits *rqb;
754	int pri;
755	int i;
756
757	rqb = &rq->rq_status;
758	for (i = 0; i < RQB_LEN; i++)
759		if (rqb->rqb_bits[i]) {
760			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
761			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
762			    rqb->rqb_bits[i], i, pri);
763			return (pri);
764		}
765
766	return (-1);
767}
768
769/*
770 * Set the status bit of the queue corresponding to priority level pri,
771 * indicating that it is non-empty.
772 */
773static __inline void
774runq_setbit(struct runq *rq, int pri)
775{
776	struct rqbits *rqb;
777
778	rqb = &rq->rq_status;
779	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
780	    rqb->rqb_bits[RQB_WORD(pri)],
781	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
782	    RQB_BIT(pri), RQB_WORD(pri));
783	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
784}
785
786/*
787 * Add the KSE to the queue specified by its priority, and set the
788 * corresponding status bit.
789 */
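/*
 * Illustrative note: the queue index is td_priority / RQ_PPQ, so if
 * RQ_PPQ were 4, priorities 0-3 would share queue 0, priorities 4-7
 * queue 1, and so on.  SRQ_PREEMPTED inserts at the head so a preempted
 * KSE runs again before its equal-priority peers.
 */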
790void
791runq_add(struct runq *rq, struct kse *ke, int flags)
792{
793	struct rqhead *rqh;
794	int pri;
795
796	pri = ke->ke_thread->td_priority / RQ_PPQ;
797	ke->ke_rqindex = pri;
798	runq_setbit(rq, pri);
799	rqh = &rq->rq_queues[pri];
800	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
801	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
802	if (flags & SRQ_PREEMPTED) {
803		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
804	} else {
805		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
806	}
807}
808
809/*
810 * Return true if there are runnable processes of any priority on the run
811 * queue, false otherwise.  Has no side effects, does not modify the run
812 * queue structure.
813 */
814int
815runq_check(struct runq *rq)
816{
817	struct rqbits *rqb;
818	int i;
819
820	rqb = &rq->rq_status;
821	for (i = 0; i < RQB_LEN; i++)
822		if (rqb->rqb_bits[i]) {
823			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
824			    rqb->rqb_bits[i], i);
825			return (1);
826		}
827	CTR0(KTR_RUNQ, "runq_check: empty");
828
829	return (0);
830}
831
832#if defined(SMP) && defined(SCHED_4BSD)
833int runq_fuzz = 1;
834SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
835#endif
836
837/*
838 * Find the highest priority process on the run queue.
839 */
840struct kse *
841runq_choose(struct runq *rq)
842{
843	struct rqhead *rqh;
844	struct kse *ke;
845	int pri;
846
847	mtx_assert(&sched_lock, MA_OWNED);
848	while ((pri = runq_findbit(rq)) != -1) {
849		rqh = &rq->rq_queues[pri];
850#if defined(SMP) && defined(SCHED_4BSD)
851		/* fuzz == 1 is normal; 0 or less is ignored */
852		if (runq_fuzz > 1) {
853			/*
854			 * In the first couple of entries, check if
855			 * there is one for our CPU as a preference.
856			 */
857			int count = runq_fuzz;
858			int cpu = PCPU_GET(cpuid);
859			struct kse *ke2;
860			ke2 = ke = TAILQ_FIRST(rqh);
861
862			while (count-- && ke2) {
863				if (ke2->ke_thread->td_lastcpu == cpu) {
864					ke = ke2;
865					break;
866				}
867				ke2 = TAILQ_NEXT(ke2, ke_procq);
868			}
869		} else
870#endif
871			ke = TAILQ_FIRST(rqh);
872		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
873		CTR3(KTR_RUNQ,
874		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
875		return (ke);
876	}
877	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
878
879	return (NULL);
880}
881
882/*
883 * Remove the KSE from the queue specified by its priority, and clear the
884 * corresponding status bit if the queue becomes empty.
885 * Caller must set ke->ke_state afterwards.
886 */
887void
888runq_remove(struct runq *rq, struct kse *ke)
889{
890	struct rqhead *rqh;
891	int pri;
892
893	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
894		("runq_remove: process swapped out"));
895	pri = ke->ke_rqindex;
896	rqh = &rq->rq_queues[pri];
897	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
898	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
899	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
900	TAILQ_REMOVE(rqh, ke, ke_procq);
901	if (TAILQ_EMPTY(rqh)) {
902		CTR0(KTR_RUNQ, "runq_remove: empty");
903		runq_clrbit(rq, pri);
904	}
905}
906
907/****** functions that are temporarily here ***********/
908#include <vm/uma.h>
909#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
910extern struct mtx kse_zombie_lock;
911
912/*
913 * Allocate scheduler-specific per-process resources.
914 * The thread and ksegrp have already been linked in.
915 * In this case just set the default concurrency value.
916 *
917 * Called from:
918 *  proc_init() (UMA init method)
919 */
920void
921sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
922{
923
924	/* This can go in sched_fork */
925	sched_init_concurrency(kg);
926}
927
928#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
929/*
930 * A thread is being either created or recycled.
931 * Fix up the per-scheduler resources associated with it.
932 * Called from:
933 *  sched_fork_thread()
934 *  thread_dtor()  (*may go away)
935 *  thread_init()  (*may go away)
936 */
937void
938sched_newthread(struct thread *td)
939{
940	struct td_sched *ke;
941
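	/* The td_sched area is laid out directly after the struct thread. */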
942	ke = (struct td_sched *) (td + 1);
943	bzero(ke, sizeof(*ke));
944	td->td_sched     = ke;
945	ke->ke_thread	= td;
946	ke->ke_oncpu	= NOCPU;
947	ke->ke_state	= KES_THREAD;
948}
949
950/*
951 * Set up an initial concurrency of 1
952 * and set the given thread (if given) to be using that
953 * concurrency slot.
954 * May be used "offline", before the ksegrp is attached to the world,
955 * in which case schedlock would not be needed.
956 * Called from:
957 *  thr_create()
958 *  proc_init() (UMA) via sched_newproc()
959 */
960void
961sched_init_concurrency(struct ksegrp *kg)
962{
963
964	CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg);
965	kg->kg_concurrency = 1;
966	kg->kg_avail_opennings = 1;
967}
968
969/*
970 * Change the concurrency of an existing ksegrp to N
971 * Called from:
972 *  kse_create()
973 *  kse_exit()
974 *  thread_exit()
975 *  thread_single()
976 */
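/*
 * Illustrative example: raising the concurrency from 1 to 3 with no
 * slots in use takes kg_avail_opennings from 1 to 3; dropping it back
 * to 1 while two threads hold slots leaves kg_avail_opennings negative,
 * which setrunqueue() explicitly tolerates.
 */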
977void
978sched_set_concurrency(struct ksegrp *kg, int concurrency)
979{
980
981	CTR4(KTR_RUNQ,"kg %p set concurrency to %d, slots %d -> %d",
982	    kg,
983	    concurrency,
984	    kg->kg_avail_opennings,
985	    kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
986	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
987	kg->kg_concurrency = concurrency;
988}
989
990/*
991 * Called from thread_exit() for all exiting threads.
992 *
993 * Not to be confused with sched_exit_thread(),
994 * which is only called from thread_exit() for threads exiting
995 * without the rest of the process exiting, because it is also called from
996 * sched_exit() and we wouldn't want to call it twice.
997 * XXX This can probably be fixed.
998 */
999void
1000sched_thread_exit(struct thread *td)
1001{
1002
1003	SLOT_RELEASE(td->td_ksegrp);
1004	slot_fill(td->td_ksegrp);
1005}
1006
1007#endif /* KERN_SWITCH_INCLUDE */
1008