/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_synch.c 165741 2007-01-03 02:38:41Z jeff $");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup, NULL)

int	hogticks;
int	lbolt;

static struct callout loadav_callout;
static struct callout lbolt_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes when sampling at
 * 5 second intervals.  Each is exp(-interval/period); e.g. the 1 minute
 * constant is exp(-5/60) = exp(-1/12).
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int      fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

static void	loadav(void *arg);
static void	lboltcb(void *arg);

void
sleepinit(void)
{

	hogticks = (hz / 10) * 2;	/* Default only. */
	init_sleepqueues();
}

/*
 * General sleep call.  Suspends the current thread until a wakeup is
 * performed on the specified identifier.  The thread will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If the priority includes the PCATCH flag,
 * signals are checked before and after sleeping; otherwise signals are
 * not checked.  Returns 0 if awakened and EWOULDBLOCK if the timeout
 * expires.  If PCATCH is set and a signal needs to be delivered,
 * ERESTART is returned if the current system call should be restarted
 * if possible, and EINTR is returned if the system call should be
 * interrupted by the signal.
 *
 * The mutex argument is unlocked before the caller is suspended, and
 * re-locked before msleep returns.  If the priority includes the PDROP
 * flag the mutex is not re-locked before returning.
 */
int
msleep(ident, mtx, priority, wmesg, timo)
	void *ident;
	struct mtx *mtx;
	int priority, timo;
	const char *wmesg;
{
	struct thread *td;
	struct proc *p;
	int catch, rval, flags, pri;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	p = td->td_proc;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, mtx == NULL ? NULL :
	    &mtx->mtx_object, "Sleeping on \"%s\"", wmesg);
	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL ||
	    ident == &lbolt, ("sleeping without a mutex"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	if (cold) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		if (mtx != NULL && priority & PDROP)
			mtx_unlock(mtx);
		return (0);
	}
	catch = priority & PCATCH;
	rval = 0;

	/*
	 * If we are already on a sleep queue, then remove us from that
	 * sleep queue first.  We have to do this to handle recursive
	 * sleeps.
	 */
	if (TD_ON_SLEEPQ(td))
		sleepq_remove(td, td->td_wchan);

	flags = SLEEPQ_MSLEEP;
	if (catch)
		flags |= SLEEPQ_INTERRUPTIBLE;

	sleepq_lock(ident);
	CTR5(KTR_PROC, "msleep: thread %p (pid %ld, %s) on %s (%p)",
	    (void *)td, (long)p->p_pid, p->p_comm, wmesg, ident);

	DROP_GIANT();
	if (mtx != NULL) {
		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
		WITNESS_SAVE(&mtx->mtx_object, mtx);
		mtx_unlock(mtx);
	}

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there,
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us.  Thus, we must be ready for sleep
	 * when cursig() is called.  If the wakeup happens while we're
	 * stopped, then td will no longer be on a sleep queue upon
	 * return from cursig().
	 */
	sleepq_add(ident, ident == &lbolt ? NULL : &mtx->mtx_object, wmesg,
	    flags, 0);
	if (timo)
		sleepq_set_timeout(ident, timo);

	/*
	 * Adjust this thread's priority, if necessary.
	 */
	pri = priority & PRIMASK;
	if (pri != 0 && pri != td->td_priority) {
		mtx_lock_spin(&sched_lock);
		sched_prio(td, pri);
		mtx_unlock_spin(&sched_lock);
	}

	if (timo && catch)
		rval = sleepq_timedwait_sig(ident);
	else if (timo)
		rval = sleepq_timedwait(ident);
	else if (catch)
		rval = sleepq_wait_sig(ident);
	else {
		sleepq_wait(ident);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	if (mtx != NULL && !(priority & PDROP)) {
		mtx_lock(mtx);
		WITNESS_RESTORE(&mtx->mtx_object, mtx);
	}
	return (rval);
}
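
/*
 * Example (an illustrative sketch, not part of this file): a typical
 * msleep() consumer re-checks its wakeup condition in a loop, because
 * the sleep can end without the condition becoming true (e.g. on a
 * broadcast wakeup shared with other waiters).  The example_softc
 * structure, its sc_mtx mutex and sc_ready flag are hypothetical names
 * invented for this sketch.
 */
#if 0
static int
example_wait(struct example_softc *sc)
{
	int error;

	error = 0;
	mtx_lock(&sc->sc_mtx);
	while (!sc->sc_ready) {
		error = msleep(&sc->sc_ready, &sc->sc_mtx,
		    PRIBIO | PCATCH, "exwait", 0);
		if (error != 0)
			break;	/* EINTR or ERESTART from a signal. */
	}
	mtx_unlock(&sc->sc_mtx);
	return (error);
}
#endif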

int
msleep_spin(ident, mtx, wmesg, timo)
	void *ident;
	struct mtx *mtx;
	const char *wmesg;
	int timo;
{
	struct thread *td;
	struct proc *p;
	int rval;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	p = td->td_proc;
	KASSERT(mtx != NULL, ("sleeping without a mutex"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	if (cold) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		return (0);
	}

	sleepq_lock(ident);
	CTR5(KTR_PROC, "msleep_spin: thread %p (pid %ld, %s) on %s (%p)",
	    (void *)td, (long)p->p_pid, p->p_comm, wmesg, ident);

	DROP_GIANT();
	mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
	WITNESS_SAVE(&mtx->mtx_object, mtx);
	mtx_unlock_spin(mtx);

	/*
	 * We put ourselves on the sleep queue and start our timeout.
	 */
	sleepq_add(ident, &mtx->mtx_object, wmesg, SLEEPQ_MSLEEP, 0);
	if (timo)
		sleepq_set_timeout(ident, timo);

	/*
	 * We can't call ktrace with any spin locks held, since it may
	 * need to acquire the ktrace_mtx lock, and WITNESS_WARN considers
	 * it an error to hold any spin lock.  Thus, we have to drop the
	 * sleepq spin lock while we handle those requests.  This is safe
	 * since we have placed our thread on the sleep queue already.
	 */
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW)) {
		sleepq_release(ident);
		ktrcsw(1, 0);
		sleepq_lock(ident);
	}
#endif
#ifdef WITNESS
	sleepq_release(ident);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"",
	    wmesg);
	sleepq_lock(ident);
#endif
	if (timo)
		rval = sleepq_timedwait(ident);
	else {
		sleepq_wait(ident);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock_spin(mtx);
	WITNESS_RESTORE(&mtx->mtx_object, mtx);
	return (rval);
}
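
/*
 * An illustrative sketch with the same hypothetical softc as above:
 * msleep_spin() follows the same pattern with a spin mutex.  There is
 * no priority or PCATCH handling, so the only non-zero return is
 * EWOULDBLOCK when a timeout was given and expired.
 */
#if 0
static int
example_wait_spin(struct example_softc *sc)
{
	int error;

	error = 0;
	mtx_lock_spin(&sc->sc_spin_mtx);
	while (!sc->sc_done) {
		error = msleep_spin(&sc->sc_done, &sc->sc_spin_mtx,
		    "exspin", hz);
		if (error == EWOULDBLOCK)
			break;	/* Gave up after roughly one second. */
	}
	mtx_unlock_spin(&sc->sc_spin_mtx);
	return (error);
}
#endif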

/*
 * Make all threads sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{

	sleepq_lock(ident);
	sleepq_broadcast(ident, SLEEPQ_MSLEEP, -1, 0);
}

/*
 * Make a thread sleeping on the specified identifier runnable.
 * May wake more than one thread if a target thread is currently
 * swapped out.
 */
void
wakeup_one(ident)
	register void *ident;
{

	sleepq_lock(ident);
	sleepq_signal(ident, SLEEPQ_MSLEEP, -1, 0);
}
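
/*
 * An illustrative sketch (same hypothetical softc as the msleep()
 * example above): the waking side sets the condition while holding the
 * mutex the sleeper passed to msleep(), then wakes on the same
 * identifier.
 */
#if 0
static void
example_notify(struct example_softc *sc)
{
	mtx_lock(&sc->sc_mtx);
	sc->sc_ready = 1;
	wakeup(&sc->sc_ready);	/* Or wakeup_one() for a single waiter. */
	mtx_unlock(&sc->sc_mtx);
}
#endif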

/*
 * The machine independent parts of context switching.
 */
void
mi_switch(int flags, struct thread *newtd)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
	td = curthread;			/* XXX */
	p = td->td_proc;		/* XXX */
	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1 || (td->td_critnest == 2 &&
	    (td->td_owepreempt) && (flags & SW_INVOL) != 0 &&
	    newtd == NULL) || panicstr,
	    ("mi_switch: switch in a critical section"));
	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
	    ("mi_switch: switch must be voluntary or involuntary"));
	KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));

	/*
	 * Don't perform context switches from the debugger.
	 */
	if (kdb_active) {
		mtx_unlock_spin(&sched_lock);
		kdb_backtrace();
		kdb_reenter();
		panic("%s: did not reenter debugger", __func__);
	}

	if (flags & SW_VOL)
		p->p_stats->p_ru.ru_nvcsw++;
	else
		p->p_stats->p_ru.ru_nivcsw++;

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	p->p_rux.rux_uticks += td->td_uticks;
	td->td_uticks = 0;
	p->p_rux.rux_iticks += td->td_iticks;
	td->td_iticks = 0;
	p->p_rux.rux_sticks += td->td_sticks;
	td->td_sticks = 0;

	td->td_generation++;	/* bump preempt-detect counter */

	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit != RLIM_INFINITY &&
	    p->p_rux.rux_runtime >= p->p_cpulimit * cpu_tickrate()) {
		p->p_sflag |= PS_XCPU;
		td->td_flags |= TDF_ASTPENDING;
	}

	/*
	 * Finish up stats for outgoing thread.
	 */
	cnt.v_swtch++;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	CTR4(KTR_PROC, "mi_switch: old thread %p (kse %p, pid %ld, %s)",
	    (void *)td, td->td_sched, (long)p->p_pid, p->p_comm);
#if (KTR_COMPILE & KTR_SCHED) != 0
	if (td == PCPU_GET(idlethread))
		CTR3(KTR_SCHED, "mi_switch: %p(%s) prio %d idle",
		    td, td->td_proc->p_comm, td->td_priority);
	else if (newtd != NULL)
		CTR5(KTR_SCHED,
		    "mi_switch: %p(%s) prio %d preempted by %p(%s)",
		    td, td->td_proc->p_comm, td->td_priority, newtd,
		    newtd->td_proc->p_comm);
	else
		CTR6(KTR_SCHED,
		    "mi_switch: %p(%s) prio %d inhibit %d wmesg %s lock %s",
		    td, td->td_proc->p_comm, td->td_priority,
		    td->td_inhibitors, td->td_wmesg, td->td_lockname);
#endif
	/*
	 * We call thread_switchout() after the KTR_SCHED prints above so
	 * that KSE selecting a new thread to run does not show up as a
	 * preemption.
	 */
#ifdef KSE
	if ((flags & SW_VOL) && (td->td_proc->p_flag & P_SA))
		newtd = thread_switchout(td, flags, newtd);
#endif
	sched_switch(td, newtd, flags);
	CTR3(KTR_SCHED, "mi_switch: running %p(%s) prio %d",
	    td, td->td_proc->p_comm, td->td_priority);

	CTR4(KTR_PROC, "mi_switch: new thread %p (kse %p, pid %ld, %s)",
	    (void *)td, td->td_sched, (long)p->p_pid, p->p_comm);

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}
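
/*
 * An illustrative fragment (not from this file): per the assertions
 * above, callers invoke mi_switch() with sched_lock held and one of
 * SW_VOL or SW_INVOL.  A sleep primitive might perform a voluntary
 * switch along these lines; the state transition shown is schematic.
 */
#if 0
	mtx_lock_spin(&sched_lock);
	TD_SET_SLEEPING(td);		/* Mark the thread inhibited. */
	mi_switch(SW_VOL, NULL);	/* Let the scheduler pick a thread. */
	/* Execution resumes here after a later setrunnable(). */
	mtx_unlock_spin(&sched_lock);
#endif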

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	mtx_assert(&sched_lock, MA_OWNED);
	switch (p->p_state) {
	case PRS_ZOMBIE:
		panic("setrunnable(1)");
	default:
		break;
	}
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return;
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arrange to swap in this process. Otherwise just return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return;
		/* XXX: intentional fall-through ? */
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((p->p_sflag & PS_INMEM) == 0) {
		if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
			p->p_sflag |= PS_SWAPINREQ;
			/*
			 * due to a LOR between sched_lock and
			 * the sleepqueue chain locks, use
			 * lower level scheduling functions.
			 */
			kick_proc0();
		}
	} else
		sched_wakeup(td);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 * XXXKSE   Needs complete rewrite when correct info is available.
 * Completely Bogus.. only works with 1:1 (but compiles ok now :-)
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;

	nrun = sched_load();
	avg = &averunnable;

	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}
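
/*
 * An illustrative fragment: the fixed-point update above is the usual
 * exponential moving average
 *
 *	avg = avg * e + n * (1 - e),	where e = exp(-t/T)
 *
 * sampled at t = 5 second intervals for T = 60, 300 and 900 seconds,
 * with everything scaled by FSCALE.  The same step in floating point
 * (a hypothetical userland cross-check, not kernel code):
 */
#if 0
	double avg = 0.0;			/* running 1 minute average */
	double e = 0.9200444146293232;		/* exp(-1/12) */
	int nrun = 3;				/* runnable threads this sample */

	avg = avg * e + (double)nrun * (1.0 - e);	/* one 5 second step */
#endif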

static void
lboltcb(void *arg)
{
	wakeup(&lbolt);
	callout_reset(&lbolt_callout, hz, lboltcb, NULL);
}

/* ARGSUSED */
static void
synch_setup(dummy)
	void *dummy;
{
	callout_init(&loadav_callout, CALLOUT_MPSAFE);
	callout_init(&lbolt_callout, CALLOUT_MPSAFE);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
	lboltcb(NULL);
}

/*
 * General purpose yield system call.
 */
int
yield(struct thread *td, struct yield_args *uap)
{
	mtx_assert(&Giant, MA_NOTOWNED);
	(void)uap;
	sched_relinquish(td);
	return (0);
}