/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_switch.c 178961 2008-05-12 06:42:06Z julian $");

#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>

/* Change "#if 0" below to "#if 1" to enable KTR logging of critical_enter/exit. */
#if 0
#define	KTR_CRITICAL	KTR_SCHED
#else
#define	KTR_CRITICAL	0
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

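/*
 * The run queue status words must exactly cover the RQ_NQS queues: e.g.,
 * with 32-bit status words (RQB_BPW == 32) and RQ_NQS == 64, the bitmap
 * spans RQB_LEN == 2 words, and priority level 37 maps to bit 5 of word 1.
 */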
CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);

/*
 * kern.sched.preemption allows user space to determine if preemption support
 * is compiled in or not.  It is not currently a boot or runtime flag that
 * can be changed.
 */
#ifdef PREEMPTION
static int kern_sched_preemption = 1;
#else
static int kern_sched_preemption = 0;
#endif
SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
    &kern_sched_preemption, 0, "Kernel preemption enabled");

/*
 * Support for scheduler stats exported via kern.sched.stats.  All stats may
 * be reset with kern.sched.stats.reset = 1.  Stats may be defined elsewhere
 * with SCHED_STAT_DEFINE().
 */
#ifdef SCHED_STATS
long sched_switch_stats[SWT_COUNT];	/* Switch reasons from mi_switch(). */

SYSCTL_NODE(_kern_sched, OID_AUTO, stats, CTLFLAG_RW, 0, "switch stats");
SCHED_STAT_DEFINE_VAR(uncategorized, &sched_switch_stats[SWT_NONE], "");
SCHED_STAT_DEFINE_VAR(preempt, &sched_switch_stats[SWT_PREEMPT], "");
SCHED_STAT_DEFINE_VAR(owepreempt, &sched_switch_stats[SWT_OWEPREEMPT], "");
SCHED_STAT_DEFINE_VAR(turnstile, &sched_switch_stats[SWT_TURNSTILE], "");
SCHED_STAT_DEFINE_VAR(sleepq, &sched_switch_stats[SWT_SLEEPQ], "");
SCHED_STAT_DEFINE_VAR(sleepqtimo, &sched_switch_stats[SWT_SLEEPQTIMO], "");
SCHED_STAT_DEFINE_VAR(relinquish, &sched_switch_stats[SWT_RELINQUISH], "");
SCHED_STAT_DEFINE_VAR(needresched, &sched_switch_stats[SWT_NEEDRESCHED], "");
SCHED_STAT_DEFINE_VAR(idle, &sched_switch_stats[SWT_IDLE], "");
SCHED_STAT_DEFINE_VAR(iwait, &sched_switch_stats[SWT_IWAIT], "");
SCHED_STAT_DEFINE_VAR(suspend, &sched_switch_stats[SWT_SUSPEND], "");
SCHED_STAT_DEFINE_VAR(remotepreempt, &sched_switch_stats[SWT_REMOTEPREEMPT],
    "");
SCHED_STAT_DEFINE_VAR(remotewakeidle, &sched_switch_stats[SWT_REMOTEWAKEIDLE],
    "");

static int
sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *p;
	int error;
	int val;

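	/*
	 * sysctl_handle_int() copies the old value out and, on a write,
	 * copies the user-supplied value into 'val'; a NULL newptr means
	 * this request was a read, so there is nothing to reset.
	 */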
	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (val == 0)
		return (0);
	/*
	 * Traverse the list of children of _kern_sched_stats and reset each
	 * to 0.  Skip the reset entry.
	 */
	SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
		if (p == oidp || p->oid_arg1 == NULL)
			continue;
		*(long *)p->oid_arg1 = 0;
	}
	return (0);
}

SYSCTL_PROC(_kern_sched_stats, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_WR, NULL,
    0, sysctl_stats_reset, "I", "Reset scheduler statistics");
#endif

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the thread that will be run next.
 */
struct thread *
choosethread(void)
{
	struct thread *td;

retry:
	td = sched_choose();

	/*
	 * If we are in a panic, only allow system threads and the thread
	 * that initiated the panic to run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
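	/* Only curthread modifies its own td_critnest, so no lock is needed. */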
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;
	int flags;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));

	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		if (td->td_owepreempt) {
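			/*
			 * Re-raise td_critnest around thread_lock() so a
			 * preemption cannot recurse while the lock is being
			 * acquired; drop it again once the lock is held.
			 */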
			td->td_critnest = 1;
			thread_lock(td);
			td->td_critnest--;
			flags = SW_INVOL | SW_PREEMPT;
			if (TD_IS_IDLETHREAD(td))
				flags |= SWT_IDLE;
			else
				flags |= SWT_OWEPREEMPT;
			mi_switch(flags, NULL);
			thread_unlock(td);
		}
	} else
		td->td_critnest--;

	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
}

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
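/*
 * Typical usage (a sketch; the real schedulers, sched_4bsd and sched_ule,
 * layer locking and per-CPU queue selection on top of these primitives):
 *
 *	struct runq rq;
 *
 *	runq_init(&rq);
 *	runq_add(&rq, td, 0);		enqueue td at td->td_priority
 *	td = runq_choose(&rq);		pick the best runnable thread
 *	if (td != NULL)
 *		runq_remove(&rq, td);	dequeue it before switching to it
 */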
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits; a set bit indicates a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
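			/* RQB_FFS() is the zero-based index of the lowest set bit. */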
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}

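/*
 * Find the index of the first non-empty run queue at or above priority
 * 'pri', wrapping around to priority zero if necessary.  Returns -1 only
 * if every queue is empty.
 */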
static __inline int
runq_findbit_from(struct runq *rq, u_char pri)
{
	struct rqbits *rqb;
	rqb_word_t mask;
	int i;

	/*
	 * Set the mask for the first word so we ignore priorities before 'pri'.
	 */
	mask = (rqb_word_t)-1 << (pri & (RQB_BPW - 1));
	rqb = &rq->rq_status;
again:
	for (i = RQB_WORD(pri); i < RQB_LEN; mask = -1, i++) {
		mask = rqb->rqb_bits[i] & mask;
		if (mask == 0)
			continue;
		pri = RQB_FFS(mask) + (i << RQB_L2BPW);
		CTR3(KTR_RUNQ, "runq_findbit_from: bits=%#x i=%d pri=%d",
		    mask, i, pri);
		return (pri);
	}
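	/*
	 * 'pri' is only reassigned on a successful find, so if it is still
	 * zero here we started at the beginning and have scanned everything.
	 */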
	if (pri == 0)
		return (-1);
	/*
	 * Wrap back around to the beginning of the list just once so we
	 * scan the whole thing.
	 */
	pri = 0;
	goto again;
}

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the thread to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct thread *td, int flags)
{
	struct rqhead *rqh;
	int pri;

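	/* Each queue holds RQ_PPQ adjacent priority levels. */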
	pri = td->td_priority / RQ_PPQ;
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add: td=%p pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
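	/*
	 * Preempted threads are inserted at the head of their queue so they
	 * are the first to run when their priority level is next selected.
	 */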
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}

void
runq_add_pri(struct runq *rq, struct thread *td, u_char pri, int flags)
{
	struct rqhead *rqh;

	KASSERT(pri < RQ_NQS, ("runq_add_pri: %d out of range", pri));
	td->td_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_add_pri: td=%p pri=%d idx=%d rqh=%p",
	    td, td->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, td, td_runq);
	} else {
		TAILQ_INSERT_TAIL(rqh, td, td_runq);
	}
}

/*
 * Return true if there are any runnable threads on the run queue and
 * false otherwise.  Has no side effects and does not modify the run
 * queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

/*
 * Find the highest priority thread on the run queue, preferring one that
 * last ran on the current CPU when 'fuzz' allows it.
 */
struct thread *
runq_choose_fuzz(struct runq *rq, int fuzz)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		/* fuzz == 1 is normal; 0 or less is ignored. */
		if (fuzz > 1) {
			/*
			 * In the first 'fuzz' entries, look for a thread
			 * that last ran on our CPU and prefer it.
			 */
			int count = fuzz;
			int cpu = PCPU_GET(cpuid);
			struct thread *td2;
			td2 = td = TAILQ_FIRST(rqh);

			while (count-- && td2) {
				if (td2->td_lastcpu == cpu) {
					td = td2;
					break;
				}
				td2 = TAILQ_NEXT(td2, td_runq);
			}
		} else
			td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_fuzz: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose_fuzz: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_fuzz: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Find the highest priority thread on the run queue.
 */
struct thread *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose: no thread on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d thread=%p rqh=%p", pri, td, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose: idlethread pri=%d", pri);

	return (NULL);
}

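/*
 * Find the highest priority thread at or after the queue index 'idx',
 * wrapping around to the start of the run queue if necessary.  This lets
 * a scheduler implement a circular scan of its queues.
 */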
struct thread *
runq_choose_from(struct runq *rq, u_char idx)
{
	struct rqhead *rqh;
	struct thread *td;
	int pri;

	if ((pri = runq_findbit_from(rq, idx)) != -1) {
		rqh = &rq->rq_queues[pri];
		td = TAILQ_FIRST(rqh);
		KASSERT(td != NULL, ("runq_choose_from: no thread on busy queue"));
		CTR4(KTR_RUNQ,
		    "runq_choose_from: pri=%d thread=%p idx=%d rqh=%p",
		    pri, td, td->td_rqindex, rqh);
		return (td);
	}
	CTR1(KTR_RUNQ, "runq_choose_from: idlethread pri=%d", pri);

	return (NULL);
}

/*
 * Remove the thread from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set state afterwards.
 */
void
runq_remove(struct runq *rq, struct thread *td)
{

	runq_remove_idx(rq, td, NULL);
}

void
runq_remove_idx(struct runq *rq, struct thread *td, u_char *idx)
{
	struct rqhead *rqh;
	u_char pri;

	KASSERT(td->td_flags & TDF_INMEM,
		("runq_remove_idx: thread swapped out"));
	pri = td->td_rqindex;
	KASSERT(pri < RQ_NQS, ("runq_remove_idx: invalid index %d", pri));
	rqh = &rq->rq_queues[pri];
	CTR4(KTR_RUNQ, "runq_remove_idx: td=%p, pri=%d %d rqh=%p",
	    td, td->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, td, td_runq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove_idx: empty");
		runq_clrbit(rq, pri);
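		/*
		 * If the caller's search index pointed at the queue that
		 * just went empty, advance it (with wraparound) so a
		 * subsequent circular scan does not restart here.
		 */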
		if (idx != NULL && *idx == pri)
			*idx = (pri + 1) % RQ_NQS;
	}
}
491