/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_synch.c 248470 2013-03-18 17:23:58Z jhb $");

#include "opt_kdtrace.h"
#include "opt_ktrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#ifdef XEN
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

#define	KTDSTATE(td)							\
	(((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep"  :		\
	((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" :	\
	((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" :		\
	((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" :		\
	((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")

static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
    NULL);

int	hogticks;
static uint8_t pause_wchan[MAXCPU];

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
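
/*
 * How the constants above fall out (editor's sketch, not in the original
 * file): with one sample every 5 seconds, an N-minute average decays by
 * exp(-5 / (60 * N)) per sample, so:
 *
 *	exp(-5.0 / (60.0 * 1.0))  == exp(-1/12)  ~= 0.92004
 *	exp(-5.0 / (60.0 * 5.0))  == exp(-1/60)  ~= 0.98347
 *	exp(-5.0 / (60.0 * 15.0)) == exp(-1/180) ~= 0.99446
 *
 * Each value is scaled by FSCALE (1 << FSHIFT) to give the fixed-point
 * coefficients stored in cexp[] above.
 */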

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int      fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

static void	loadav(void *arg);

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE(sched, , , preempt, preempt);

/*
 * These probes reference Solaris features that are not implemented in FreeBSD.
 * Create the probes anyway for compatibility with existing D scripts; they'll
 * just never fire.
 */
SDT_PROBE_DEFINE(sched, , , cpucaps_sleep, cpucaps-sleep);
SDT_PROBE_DEFINE(sched, , , cpucaps_wakeup, cpucaps-wakeup);
SDT_PROBE_DEFINE(sched, , , schedctl_nopreempt, schedctl-nopreempt);
SDT_PROBE_DEFINE(sched, , , schedctl_preempt, schedctl-preempt);
SDT_PROBE_DEFINE(sched, , , schedctl_yield, schedctl-yield);

void
sleepinit(void)
{

	hogticks = (hz / 10) * 2;	/* Default only. */
	init_sleepqueues();
}

/*
 * General sleep call.  Suspends the current thread until a wakeup is
 * performed on the specified identifier.  The thread will then be made
 * runnable with the specified priority.  Sleeps at most sbt units of
 * time (0 means no timeout).  If pri includes the PCATCH flag, let signals
 * interrupt the sleep, otherwise ignore them while sleeping.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal becomes pending, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the
 * system call should be interrupted by the signal.
 *
 * The lock argument is unlocked before the caller is suspended, and
 * re-locked before _sleep() returns.  If priority includes the PDROP
 * flag the lock is not re-locked before returning.
 */
int
_sleep(void *ident, struct lock_object *lock, int priority,
    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	struct proc *p;
	struct lock_class *class;
	int catch, lock_state, pri, rval, sleepq_flags;
	WITNESS_SAVE_DECL(lock_witness);

	td = curthread;
	p = td->td_proc;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0, wmesg);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
	    "Sleeping on \"%s\"", wmesg);
	KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL,
	    ("sleeping without a lock"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
	if (priority & PDROP)
		KASSERT(lock != NULL && lock != &Giant.lock_object,
		    ("PDROP requires a non-Giant lock"));
	if (lock != NULL)
		class = LOCK_CLASS(lock);
	else
		class = NULL;

	if (cold || SCHEDULER_STOPPED()) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		if (lock != NULL && priority & PDROP)
			class->lc_unlock(lock);
		return (0);
	}
	catch = priority & PCATCH;
	pri = priority & PRIMASK;

	/*
	 * If we are already on a sleep queue, then remove us from that
	 * sleep queue first.  We have to do this to handle recursive
	 * sleeps.
	 */
	if (TD_ON_SLEEPQ(td))
		sleepq_remove(td, td->td_wchan);

	if ((uint8_t *)ident >= &pause_wchan[0] &&
	    (uint8_t *)ident <= &pause_wchan[MAXCPU - 1])
		sleepq_flags = SLEEPQ_PAUSE;
	else
		sleepq_flags = SLEEPQ_SLEEP;
	if (catch)
		sleepq_flags |= SLEEPQ_INTERRUPTIBLE;

	sleepq_lock(ident);
	CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	if (lock == &Giant.lock_object)
		mtx_assert(&Giant, MA_OWNED);
	DROP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object &&
	    !(class->lc_flags & LC_SLEEPABLE)) {
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
	} else
		/* GCC needs to follow the Yellow Brick Road */
		lock_state = -1;

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there,
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us.  Thus, we must be ready for sleep
	 * when cursig() is called.  If the wakeup happens while we're
	 * stopped, then td will no longer be on a sleep queue upon
	 * return from cursig().
	 */
	sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
	if (sbt != 0)
		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
	if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
		sleepq_release(ident);
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
		sleepq_lock(ident);
	}
	if (sbt != 0 && catch)
		rval = sleepq_timedwait_sig(ident, pri);
	else if (sbt != 0)
		rval = sleepq_timedwait(ident, pri);
	else if (catch)
		rval = sleepq_wait_sig(ident, pri);
	else {
		sleepq_wait(ident, pri);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0, wmesg);
#endif
	PICKUP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) {
		class->lc_lock(lock, lock_state);
		WITNESS_RESTORE(lock, lock_witness);
	}
	return (rval);
}
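
/*
 * Usage sketch (editor's illustration; "sc" and its fields are invented):
 * callers normally reach _sleep() through wrappers such as msleep() or
 * tsleep().  A consumer waiting for work under a mutex re-checks its
 * predicate in a loop to guard against spurious or stolen wakeups:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (STAILQ_EMPTY(&sc->sc_queue)) {
 *		error = msleep(&sc->sc_queue, &sc->sc_mtx, PCATCH,
 *		    "qwait", 0);
 *		if (error != 0)
 *			break;
 *	}
 *	mtx_unlock(&sc->sc_mtx);
 *
 * A nonzero error here is EINTR or ERESTART from a pending signal, since
 * PCATCH was passed and no timeout was set.
 */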

int
msleep_spin_sbt(void *ident, struct mtx *mtx, const char *wmesg,
    sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	struct proc *p;
	int rval;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	p = td->td_proc;
	KASSERT(mtx != NULL, ("sleeping without a mutex"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	if (cold || SCHEDULER_STOPPED()) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		return (0);
	}

	sleepq_lock(ident);
	CTR5(KTR_PROC, "msleep_spin: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	DROP_GIANT();
	mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
	WITNESS_SAVE(&mtx->lock_object, mtx);
	mtx_unlock_spin(mtx);

	/*
	 * We put ourselves on the sleep queue and start our timeout.
	 */
	sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
	if (sbt != 0)
		sleepq_set_timeout_sbt(ident, sbt, pr, flags);

	/*
	 * We can't call ktrace with any spin locks held, since it may need
	 * to acquire the ktrace_mtx lock, and WITNESS_WARN considers it an
	 * error to hold any spin lock.  Thus, we have to drop the sleepq
	 * spin lock while we handle those requests.  This is safe since we
	 * have placed our thread on the sleep queue already.
	 */
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW)) {
		sleepq_release(ident);
		ktrcsw(1, 0, wmesg);
		sleepq_lock(ident);
	}
#endif
#ifdef WITNESS
	sleepq_release(ident);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"",
	    wmesg);
	sleepq_lock(ident);
#endif
	if (sbt != 0)
		rval = sleepq_timedwait(ident, 0);
	else {
		sleepq_wait(ident, 0);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0, wmesg);
#endif
	PICKUP_GIANT();
	mtx_lock_spin(mtx);
	WITNESS_RESTORE(&mtx->lock_object, mtx);
	return (rval);
}
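
/*
 * Caller sketch (editor's illustration): msleep_spin_sbt() is normally
 * reached via the msleep_spin() wrapper and is the spin-mutex analogue of
 * msleep(); note there is no priority argument and no PCATCH handling:
 *
 *	mtx_lock_spin(&sc->sc_spin_mtx);
 *	while (sc->sc_busy)
 *		msleep_spin(sc, &sc->sc_spin_mtx, "spnwt", hz);
 *	mtx_unlock_spin(&sc->sc_spin_mtx);
 *
 * As above, "sc" is an invented example softc.
 */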

/*
 * pause() delays the calling thread by the given number of system ticks.
 * During cold bootup, pause() uses the DELAY() function instead of
 * the tsleep() function to do the waiting. The "timo" argument must be
 * greater than or equal to zero. A "timo" value of zero is equivalent
 * to a "timo" value of one.
 */
int
pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
	int sbt_sec;

	sbt_sec = sbintime_getsec(sbt);
	KASSERT(sbt_sec >= 0, ("pause: timo must be >= 0"));

	/* silently convert invalid timeouts */
	if (sbt == 0)
		sbt = tick_sbt;

	if (cold) {
		/*
		 * We delay one second at a time to avoid overflowing the
		 * system specific DELAY() function(s):
		 */
		while (sbt_sec > 0) {
			DELAY(1000000);
			sbt_sec--;
		}
		DELAY((sbt & 0xffffffff) / SBT_1US);
		return (0);
	}
	return (_sleep(&pause_wchan[curcpu], NULL, 0, wmesg, sbt, pr, flags));
}
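
/*
 * Example (editor's sketch): a driver that must wait roughly 100ms and
 * has no wakeup channel can simply do
 *
 *	pause("reset", hz / 10);
 *
 * which is safe both before and after timers are running, since the cold
 * path above falls back to DELAY().
 */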

/*
 * Make all threads sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper) {
		KASSERT(ident != &proc0,
		    ("wakeup and wakeup_swapper and proc0"));
		kick_proc0();
	}
}

/*
 * Make a thread sleeping on the specified identifier runnable.
 * May wake more than one thread if a target thread is currently
 * swapped out.
 */
void
wakeup_one(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}
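
/*
 * Pairing sketch (editor's illustration, continuing the invented msleep()
 * example after _sleep() above): the producer queues work and then wakes a
 * sleeper, using wakeup_one() when a single consumer can make progress and
 * wakeup() when all waiters must re-evaluate:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	STAILQ_INSERT_TAIL(&sc->sc_queue, item, next);
 *	wakeup_one(&sc->sc_queue);
 *	mtx_unlock(&sc->sc_mtx);
 */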

static void
kdb_switch(void)
{
	thread_unlock(curthread);
	kdb_backtrace();
	kdb_reenter();
	panic("%s: did not reenter debugger", __func__);
}

/*
 * The machine independent parts of context switching.
 */
void
mi_switch(int flags, struct thread *newtd)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct proc *p;

	td = curthread;			/* XXX */
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
	p = td->td_proc;		/* XXX */
	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1 || panicstr,
	    ("mi_switch: switch in a critical section"));
	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
	    ("mi_switch: switch must be voluntary or involuntary"));
	KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));

	/*
	 * Don't perform context switches from the debugger.
	 */
	if (kdb_active)
		kdb_switch();
	if (SCHEDULER_STOPPED())
		return;
	if (flags & SW_VOL) {
		td->td_ru.ru_nvcsw++;
		td->td_swvoltick = ticks;
	} else
		td->td_ru.ru_nivcsw++;
#ifdef SCHED_STATS
	SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
#endif
	/*
	 * Compute the amount of time during which the current
	 * thread was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	td->td_generation++;	/* bump preempt-detect counter */
	PCPU_INC(cnt.v_swtch);
	PCPU_SET(switchticks, ticks);
	CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td->td_sched, p->p_pid, td->td_name);
#if (KTR_COMPILE & KTR_SCHED) != 0
	if (TD_IS_IDLETHREAD(td))
		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
		    "prio:%d", td->td_priority);
	else
		KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
		    "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
		    "lockname:\"%s\"", td->td_lockname);
#endif
	SDT_PROBE0(sched, , , preempt);
#ifdef XEN
	PT_UPDATES_FLUSH();
#endif
	sched_switch(td, newtd, flags);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
	    "prio:%d", td->td_priority);

	CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td->td_sched, p->p_pid, td->td_name);

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change thread state to be runnable, placing it on the run queue if
 * it is in memory.  If it is swapped out, return true so our caller
 * will know to awaken the swapper.
 */
int
setrunnable(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(td->td_proc->p_state != PRS_ZOMBIE,
	    ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return (0);
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arrange to swap in this process. Otherwise just return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return (0);
		/* FALLTHROUGH */
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((td->td_flags & TDF_INMEM) == 0) {
		if ((td->td_flags & TDF_SWAPINREQ) == 0) {
			td->td_flags |= TDF_SWAPINREQ;
			return (1);
		}
	} else
		sched_wakeup(td);
	return (0);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;

	nrun = sched_load();
	avg = &averunnable;

	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset_sbt(&loadav_callout,
	    tick_sbt * (hz * 4 + (int)(random() % (hz * 2 + 1))), 0,
	    loadav, NULL, C_DIRECT_EXEC | C_HARDCLOCK);
}
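
/*
 * Worked example of the fixed-point update above (editor's arithmetic):
 * with FSHIFT = 11 and FSCALE = 2048, a 1-minute average starting at 0
 * and nrun = 1 gives
 *
 *	cexp[0]  ~= 0.92004 * 2048 ~= 1884
 *	ldavg[0]  = (1884 * 0 + 1 * 2048 * (2048 - 1884)) >> 11
 *	          = 164, i.e. 164 / 2048 ~= 0.08
 *
 * matching the continuous form avg = avg * e + nrun * (1 - e).
 */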

/* ARGSUSED */
static void
synch_setup(void *dummy)
{
	callout_init(&loadav_callout, CALLOUT_MPSAFE);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
}

int
should_yield(void)
{

	return (ticks - curthread->td_swvoltick >= hogticks);
}

void
maybe_yield(void)
{

	if (should_yield())
		kern_yield(PRI_USER);
}

void
kern_yield(int prio)
{
	struct thread *td;

	td = curthread;
	DROP_GIANT();
	thread_lock(td);
	if (prio == PRI_USER)
		prio = td->td_user_pri;
	if (prio >= 0)
		sched_prio(td, prio);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
}
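
/*
 * Usage sketch (editor's illustration; process_item() and huge_count are
 * invented names): long-running kernel loops call maybe_yield() so that a
 * thread which has run for at least hogticks gives up the CPU voluntarily:
 *
 *	for (i = 0; i < huge_count; i++) {
 *		process_item(i);
 *		maybe_yield();
 *	}
 */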

/*
 * General purpose yield system call.
 */
int
sys_yield(struct thread *td, struct yield_args *uap)
{

	thread_lock(td);
	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	td->td_retval[0] = 0;
	return (0);
}