/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_synch.c 181921 2008-08-20 12:20:22Z ed $");

#include "opt_ktrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
    NULL);

int	hogticks;
static int pause_wchan;

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int      fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

static void	loadav(void *arg);

void
sleepinit(void)
{

	hogticks = (hz / 10) * 2;	/* Default only. */
	init_sleepqueues();
}
/*
 * General sleep call.  Suspends the current thread until a wakeup is
 * performed on the specified identifier.  The thread will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping; otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The lock argument is unlocked before the caller is suspended, and
 * re-locked before _sleep() returns.  If priority includes the PDROP
 * flag the lock is not re-locked before returning.
 */
int
_sleep(void *ident, struct lock_object *lock, int priority,
    const char *wmesg, int timo)
{
	struct thread *td;
	struct proc *p;
	struct lock_class *class;
	int catch, flags, lock_state, pri, rval;
	WITNESS_SAVE_DECL(lock_witness);

	td = curthread;
	p = td->td_proc;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
	    "Sleeping on \"%s\"", wmesg);
	KASSERT(timo != 0 || mtx_owned(&Giant) || lock != NULL,
	    ("sleeping without a lock"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
	if (priority & PDROP)
		KASSERT(lock != NULL && lock != &Giant.lock_object,
		    ("PDROP requires a non-Giant lock"));
	if (lock != NULL)
		class = LOCK_CLASS(lock);
	else
		class = NULL;

	if (cold) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		if (lock != NULL && priority & PDROP)
			class->lc_unlock(lock);
		return (0);
	}
	catch = priority & PCATCH;
	pri = priority & PRIMASK;
	rval = 0;

	/*
	 * If we are already on a sleep queue, then remove us from that
	 * sleep queue first.  We have to do this to handle recursive
	 * sleeps.
	 */
	if (TD_ON_SLEEPQ(td))
		sleepq_remove(td, td->td_wchan);

	if (ident == &pause_wchan)
		flags = SLEEPQ_PAUSE;
	else
		flags = SLEEPQ_SLEEP;
	if (catch)
		flags |= SLEEPQ_INTERRUPTIBLE;

	sleepq_lock(ident);
	CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	DROP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object &&
	    !(class->lc_flags & LC_SLEEPABLE)) {
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
	} else
		/* GCC needs to follow the Yellow Brick Road */
		lock_state = -1;

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there,
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us.  Thus, we must be ready for sleep
	 * when cursig() is called.  If the wakeup happens while we're
	 * stopped, then td will no longer be on a sleep queue upon
	 * return from cursig().
	 */
	sleepq_add(ident, lock, wmesg, flags, 0);
	if (timo)
		sleepq_set_timeout(ident, timo);
	if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
		sleepq_release(ident);
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
		sleepq_lock(ident);
	}
	if (timo && catch)
		rval = sleepq_timedwait_sig(ident, pri);
	else if (timo)
		rval = sleepq_timedwait(ident, pri);
	else if (catch)
		rval = sleepq_wait_sig(ident, pri);
	else {
		sleepq_wait(ident, pri);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) {
		class->lc_lock(lock, lock_state);
		WITNESS_RESTORE(lock, lock_witness);
	}
	return (rval);
}
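
/*
 * Illustrative sketch of the usual consumer/producer pairing of msleep()
 * (the struct mtx front end to _sleep()) with wakeup().  The softc "sc",
 * its regular (MTX_DEF) mutex sc_mtx and its sc_ready flag are hypothetical
 * names used only for this sketch:
 *
 *	// consumer: re-sleep on timeouts, bail out if a signal arrives
 *	mtx_lock(&sc->sc_mtx);
 *	while (!sc->sc_ready) {
 *		error = msleep(&sc->sc_ready, &sc->sc_mtx, PCATCH,
 *		    "scrdy", hz);
 *		if (error != 0 && error != EWOULDBLOCK)
 *			break;		// EINTR or ERESTART from a signal
 *	}
 *	mtx_unlock(&sc->sc_mtx);
 *
 *	// producer: publish the flag, then wake any sleepers on it
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_ready = 1;
 *	wakeup(&sc->sc_ready);
 *	mtx_unlock(&sc->sc_mtx);
 */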

int
msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
{
	struct thread *td;
	struct proc *p;
	int rval;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	p = td->td_proc;
	KASSERT(mtx != NULL, ("sleeping without a mutex"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	if (cold) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		return (0);
	}

	sleepq_lock(ident);
	CTR5(KTR_PROC, "msleep_spin: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	DROP_GIANT();
	mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
	WITNESS_SAVE(&mtx->lock_object, mtx);
	mtx_unlock_spin(mtx);

	/*
	 * We put ourselves on the sleep queue and start our timeout.
	 */
	sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
	if (timo)
		sleepq_set_timeout(ident, timo);

	/*
	 * We can't call ktrace with any spin locks held, since it may need
	 * to acquire the ktrace_mtx lock, and WITNESS_WARN considers it an
	 * error to hold any spin lock while it runs.  Thus, we have to drop
	 * the sleepq spin lock while we handle those requests.  This is safe
	 * since we have placed our thread on the sleep queue already.
	 */
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW)) {
		sleepq_release(ident);
		ktrcsw(1, 0);
		sleepq_lock(ident);
	}
#endif
#ifdef WITNESS
	sleepq_release(ident);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"",
	    wmesg);
	sleepq_lock(ident);
#endif
	if (timo)
		rval = sleepq_timedwait(ident, 0);
	else {
		sleepq_wait(ident, 0);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock_spin(mtx);
	WITNESS_RESTORE(&mtx->lock_object, mtx);
	return (rval);
}

/*
 * pause() is like tsleep() except that the intention is not to be
 * explicitly woken up by another thread.  Instead, the current thread
 * simply wishes to sleep until the timeout expires.  It is
 * implemented using a dummy wait channel.
 */
int
pause(const char *wmesg, int timo)
{

	KASSERT(timo != 0, ("pause: timeout required"));
	return (tsleep(&pause_wchan, 0, wmesg, timo));
}
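
/*
 * Illustrative sketch: pause() suits short delays in code that has no
 * natural wait channel of its own, e.g. giving a hypothetical device
 * roughly 100ms to settle (the "hwset" wmesg is just an example string):
 *
 *	pause("hwset", hz / 10);
 */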

/*
 * Make all threads sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Make a thread sleeping on the specified identifier runnable.
 * May wake more than one thread if a target thread is currently
 * swapped out.
 */
void
wakeup_one(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}

static void
kdb_switch(void)
{
	thread_unlock(curthread);
	kdb_backtrace();
	kdb_reenter();
	panic("%s: did not reenter debugger", __func__);
}

/*
 * The machine independent parts of context switching.
 */
void
mi_switch(int flags, struct thread *newtd)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct proc *p;

	td = curthread;			/* XXX */
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
	p = td->td_proc;		/* XXX */
	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1 || (td->td_critnest == 2 &&
	    (td->td_owepreempt) && (flags & SW_INVOL) != 0 &&
	    newtd == NULL) || panicstr,
	    ("mi_switch: switch in a critical section"));
	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
	    ("mi_switch: switch must be voluntary or involuntary"));
	KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));

	/*
	 * Don't perform context switches from the debugger.
	 */
	if (kdb_active)
		kdb_switch();
	if (flags & SW_VOL)
		td->td_ru.ru_nvcsw++;
	else
		td->td_ru.ru_nivcsw++;
#ifdef SCHED_STATS
	SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
#endif
	/*
	 * Compute the amount of time during which the current
	 * thread was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	td->td_generation++;	/* bump preempt-detect counter */
	PCPU_INC(cnt.v_swtch);
	PCPU_SET(switchticks, ticks);
	CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td->td_sched, p->p_pid, td->td_name);
#if (KTR_COMPILE & KTR_SCHED) != 0
	if (TD_IS_IDLETHREAD(td))
		CTR3(KTR_SCHED, "mi_switch: %p(%s) prio %d idle",
		    td, td->td_name, td->td_priority);
	else if (newtd != NULL)
		CTR5(KTR_SCHED,
		    "mi_switch: %p(%s) prio %d preempted by %p(%s)",
		    td, td->td_name, td->td_priority, newtd,
		    newtd->td_name);
	else
		CTR6(KTR_SCHED,
		    "mi_switch: %p(%s) prio %d inhibit %d wmesg %s lock %s",
		    td, td->td_name, td->td_priority,
		    td->td_inhibitors, td->td_wmesg, td->td_lockname);
#endif
	sched_switch(td, newtd, flags);
	CTR3(KTR_SCHED, "mi_switch: running %p(%s) prio %d",
	    td, td->td_name, td->td_priority);

	CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td->td_sched, p->p_pid, td->td_name);

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change thread state to be runnable, placing it on the run queue if
 * it is in memory.  If it is swapped out, return true so our caller
 * will know to awaken the swapper.
 */
int
setrunnable(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(td->td_proc->p_state != PRS_ZOMBIE,
	    ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return (0);
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arrange to swap in this process.  Otherwise just
		 * return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return (0);
		/* FALLTHROUGH */
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((td->td_flags & TDF_INMEM) == 0) {
		if ((td->td_flags & TDF_SWAPINREQ) == 0) {
			td->td_flags |= TDF_SWAPINREQ;
			return (1);
		}
	} else
		sched_wakeup(td);
	return (0);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;

	nrun = sched_load();
	avg = &averunnable;

	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}
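
/*
 * For reference, each pass of loadav()'s update loop applies the standard
 * exponentially weighted moving average
 *
 *	load_new = load_old * exp(-t/T) + nrun * (1 - exp(-t/T))
 *
 * with a nominal sampling interval t of 5 seconds and time constants T of
 * 60, 300 and 900 seconds, which is where the cexp[] entries exp(-1/12),
 * exp(-1/60) and exp(-1/180) come from.  The arithmetic is FSCALE-based
 * fixed point, so both terms carry an extra factor of FSCALE that the
 * final ">> FSHIFT" removes.
 */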

/* ARGSUSED */
static void
synch_setup(void *dummy)
{
	callout_init(&loadav_callout, CALLOUT_MPSAFE);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
}

/*
 * General purpose yield system call.
 */
int
yield(struct thread *td, struct yield_args *uap)
{

	thread_lock(td);
	sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	td->td_retval[0] = 0;
	return (0);
}