kern_switch.c revision 170293
/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 170293 2007-06-04 23:50:30Z jeff $");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else  /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#include <machine/cpu.h>

/* Uncomment this to enable logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
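
/*
 * Illustrative note (added; not part of the original file): the status bit
 * array must cover exactly RQ_NQS priority queues.  With the customary
 * values of RQ_NQS == 64 and 32-bit status words (RQB_BPW == 32,
 * RQB_LEN == 2), the assertion above reduces to 32 * 2 == 64, and a
 * priority index such as 37 maps to word RQB_WORD(37) == 1 and bit
 * 37 & 31 == 5 within that word.
 */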

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");
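
/*
 * Usage sketch (added for illustration; not part of the original file):
 * the flag can be read from user space with sysctl(8), e.g.
 * "sysctl kern.sched.preemption", or programmatically (user code, with
 * <sys/types.h> and <sys/sysctl.h>) along these lines:
 *
 *	int enabled;
 *	size_t len = sizeof(enabled);
 *
 *	if (sysctlbyname("kern.sched.preemption", &enabled, &len,
 *	    NULL, 0) == 0)
 *		printf("preemption %s\n", enabled ? "enabled" : "disabled");
 */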

#ifdef SCHED_STATS
long switch_preempt;
long switch_owepreempt;
long switch_turnstile;
long switch_sleepq;
long switch_sleepqtimo;
long switch_relinquish;
long switch_needresched;
static SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, preempt, CTLFLAG_RD, &switch_preempt, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, owepreempt, CTLFLAG_RD, &switch_owepreempt, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, turnstile, CTLFLAG_RD, &switch_turnstile, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, sleepq, CTLFLAG_RD, &switch_sleepq, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, sleepqtimo, CTLFLAG_RD, &switch_sleepqtimo, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, relinquish, CTLFLAG_RD, &switch_relinquish, 0, "");
SYSCTL_LONG(_kern_sched_stats, OID_AUTO, needresched, CTLFLAG_RD, &switch_needresched, 0, "");
static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	int error;
	int val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	switch_preempt = 0;
	switch_owepreempt = 0;
	switch_turnstile = 0;
	switch_sleepq = 0;
	switch_sleepqtimo = 0;
	switch_relinquish = 0;
	switch_needresched = 0;

	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
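
/*
 * Usage note (added for illustration): writing any non-zero value to the
 * kern.sched.stats.reset node, e.g. "sysctl kern.sched.stats.reset=1",
 * zeroes all of the switch counters above; writing zero is a no-op.
 */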
#endif

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
struct thread *
choosethread(void)
{
	struct thread *td;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down; run the idle thread on the APs. */
		td = PCPU_GET(idlethread);
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	td = sched_choose();

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
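/*
 * Usage sketch (added for illustration; not part of the original file):
 * critical sections nest via td_critnest, so a consumer simply brackets
 * the region that must not be preempted:
 *
 *	critical_enter();
 *	... access per-CPU state that must not be preempted away ...
 *	critical_exit();
 *
 * A preemption requested while td_critnest > 0 is recorded in
 * td_owepreempt and carried out by the outermost critical_exit().
 */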
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
#ifdef PREEMPTION
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		if (td->td_owepreempt) {
			td->td_critnest = 1;
			thread_lock(td);
			td->td_critnest--;
			SCHED_STAT_INC(switch_owepreempt);
			mi_switch(SW_INVOL, NULL);
			thread_unlock(td);
		}
	} else
#endif
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

/*
 * This function is called when a thread is about to be put on a run queue
 * because it has been made runnable or its priority has been adjusted.  It
 * determines whether the new thread should immediately preempt the current
 * one.  If so, it switches to the new thread and eventually returns true.
 * If not, it returns false so that the caller may place the thread on an
 * appropriate run queue.
 */
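/*
 * Caller pattern (sketch added for illustration, assuming a 4BSD-style
 * sched_add(); details vary by scheduler): the caller typically tries
 * preemption first and only queues the thread when maybe_preempt()
 * declines:
 *
 *	if (maybe_preempt(td))
 *		return;
 *	runq_add(rq, td->td_sched, flags);
 */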
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The kernel is in the throes of crashing (panicstr).
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed either, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not an interrupt-thread priority,
	 *    the current thread's priority is not an idle priority, and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT((ctd->td_sched != NULL && ctd->td_sched->ts_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
	    TD_IS_INHIBITED(ctd))
		return (0);
#ifndef FULL_PREEMPTION
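	/*
	 * Without FULL_PREEMPTION, only preempt when the new thread runs at
	 * interrupt-thread priority or better, unless the current thread is
	 * running at an idle-class priority.
	 */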
	if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
		return (0);
#endif

	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_owepreempt = 1;
		return (0);
	}
	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(ctd->td_lock == &sched_lock);
	MPASS(td->td_lock == &sched_lock);
	MPASS(TD_ON_RUNQ(td));
	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	SCHED_STAT_INC(switch_preempt);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	/*
	 * td's lock pointer may have changed.  We have to return with it
	 * locked.
	 */
	spinlock_enter();
	thread_unlock(ctd);
	thread_lock(td);
	spinlock_exit();
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
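/*
 * Worked example (added for illustration, assuming 32-bit status words):
 * if rqb_bits[0] == 0 and rqb_bits[1] == 0x10, the scan stops at word 1,
 * where RQB_FFS(0x10) == 4, yielding pri = 4 + (1 << RQB_L2BPW) = 36.
 */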
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

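/*
 * Find the index of the first non-empty run queue at or above priority
 * index "start", wrapping around to the beginning of the bit array if
 * nothing is found in the remainder.  Returns -1 if every queue is empty.
 */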
static __inline int
runq_findbit_from(struct runq *rq, u_char start)
{
	struct rqbits *rqb;
	int bit;
	int pri;
	int i;

	rqb = &rq->rq_status;
	bit = start & (RQB_BPW - 1);
	pri = 0;
	CTR1(KTR_RUNQ, "runq_findbit_from: start %d", start);
again:
	for (i = RQB_WORD(start); i < RQB_LEN; i++) {
		CTR3(KTR_RUNQ, "runq_findbit_from: bits %d = %#x bit = %d",
		    i, rqb->rqb_bits[i], bit);
		if (rqb->rqb_bits[i]) {
			if (bit != 0) {
				for (pri = bit; pri < RQB_BPW; pri++)
					if (rqb->rqb_bits[i] & (1ul << pri))
						break;
				bit = 0;
				if (pri >= RQB_BPW)
					continue;
			} else
				pri = RQB_FFS(rqb->rqb_bits[i]);
			pri += (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}
		bit = 0;
	}
	if (start != 0) {
		CTR0(KTR_RUNQ, "runq_findbit_from: restarting");
		start = 0;
		goto again;
	}

	return (-1);
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
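/*
 * Illustrative note (added; not part of the original file): with the
 * customary RQ_PPQ of 4, the 0..255 priority space collapses onto the
 * RQ_NQS == 64 queues, e.g. a td_priority of 100 lands on run queue
 * 100 / 4 == 25.
 */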
void
runq_add(struct runq *rq, struct td_sched *ts, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ts->ts_thread->td_priority / RQ_PPQ;
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}

void
runq_add_pri(struct runq *rq, struct td_sched *ts, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	ts->ts_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add_pri: td=%p ts=%p pri=%d idx=%d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ts, ts_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ts, ts_procq);
	}
}

/*
 * Return true if there are runnable threads of any priority on the run
 * queue, false otherwise.  It has no side effects and does not modify the
 * run queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest-priority thread on the run queue.
 */
struct td_sched *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; 0 or less is ignored. */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct td_sched *ts2;
			ts2 = ts = TAILQ_FIRST(rqh);

			while (count-- && ts2) {
				if (ts2->ts_thread->td_lastcpu == cpu) {
					ts = ts2;
					break;
				}
				ts2 = TAILQ_NEXT(ts2, ts_procq);
			}
		} else
#endif
			ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d td_sched=%p rqh=%p", pri, ts, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

struct td_sched *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct td_sched *ts;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		ts = TAILQ_FIRST(rqh);
		KASSERT(ts != NULL, ("runq_choose_from: no proc on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d ts=%p idx=%d rqh=%p",
		    pri, ts, ts->ts_rqindex, rqh);
		return (ts);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set state afterwards.
 */
void
runq_remove(struct runq *rq, struct td_sched *ts)
{

	runq_remove_idx(rq, ts, NULL);
}

void
runq_remove_idx(struct runq *rq, struct td_sched *ts, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(ts->ts_thread->td_proc->p_sflag & PS_INMEM,
	    ("runq_remove_idx: process swapped out"));
	pri = ts->ts_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: Invalid index %d\n", pri));
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove_idx: td=%p, ts=%p pri=%d %d rqh=%p",
	    ts->ts_thread, ts, ts->ts_thread->td_priority, pri, rqh);
	{
		struct td_sched *nts;

		TAILQ_FOREACH(nts, rqh, ts_procq)
			if (nts == ts)
				break;
		if (ts != nts)
			panic("runq_remove_idx: ts %p not on rqindex %d",
			    ts, pri);
	}
	TAILQ_REMOVE(rqh, ts, ts_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler-specific per-process resources.
 * The thread and proc have already been linked in.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct thread *td)
{
}

/*
 * A thread is either being created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor()  (*may go away)
 *  thread_init()  (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ts;

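	/*
	 * Descriptive note (added): the scheduler-private td_sched lives in
	 * the same allocation as the thread, immediately after struct
	 * thread; the per-thread allocation is sized for both structures
	 * (see sched_sizeof_thread()).
	 */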
	ts = (struct td_sched *)(td + 1);
	bzero(ts, sizeof(*ts));
	td->td_sched = ts;
	ts->ts_thread = td;
}

/*
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct proc *p)
{
}

/*
 * Change the concurrency of an existing proc to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct proc *p, int concurrency)
{
}

#endif /* KERN_SWITCH_INCLUDE */
