/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: head/sys/kern/kern_synch.c 111883 2003-03-04 21:03:05Z jhb $
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;

static struct callout loadav_callout;
static struct callout lbolt_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
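
/*
 * Derivation: sampling every 5 seconds, an average over N minutes
 * must decay by exp(-5 / (60 * N)) per sample, giving exp(-1/12),
 * exp(-1/60) and exp(-1/180) for N = 1, 5 and 15.  Each update in
 * loadav() then computes avg = avg * cexp[i] + nrun * (1 - cexp[i]).
 */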

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

static void	endtsleep(void *);
static void	loadav(void *arg);
static void	lboltcb(void *arg);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
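
/*
 * For example, a wait channel address of 0x12345678 hashes to
 * ((0x12345678 >> 8) & 127) = (0x123456 & 0x7f) = 0x56, so such a
 * sleeper is queued on slpque[86].
 */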

void
sleepinit(void)
{
	int i;

	hogticks = (hz / 10) * 2;	/* Default only. */
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise they are not checked.
 * Returns 0 if awakened and EWOULDBLOCK if the timeout expires.  If
 * PCATCH is set and a signal needs to be delivered, ERESTART is
 * returned if the current system call should be restarted if possible,
 * and EINTR is returned if the system call should be interrupted by
 * the signal.
 *
 * The mutex argument is released before the caller is suspended and
 * reacquired before msleep returns.  If priority includes the PDROP
 * flag, the mutex is not reacquired before returning.
 */
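
/*
 * Typical usage, as a minimal sketch (the "foo" mutex, flag and
 * function names below are hypothetical, not part of this file):
 *
 *	static struct mtx foo_mtx;
 *	static int foo_ready;
 *
 *	static int
 *	foo_wait(void)
 *	{
 *		int error = 0;
 *
 *		mtx_lock(&foo_mtx);
 *		while (!foo_ready && error == 0)
 *			error = msleep(&foo_ready, &foo_mtx,
 *			    PWAIT | PCATCH, "foowt", 5 * hz);
 *		mtx_unlock(&foo_mtx);
 *		return (error);
 *	}
 *
 * The condition is re-tested in a loop because a wakeup() does not
 * guarantee that it still holds by the time msleep() returns; error
 * is 0, EWOULDBLOCK, EINTR or ERESTART as described above.
 */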

int
msleep(ident, mtx, priority, wmesg, timo)
	void *ident;
	struct mtx *mtx;
	int priority, timo;
	const char *wmesg;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int sig, catch = priority & PCATCH;
	int rval = 0;
	WITNESS_SAVE_DECL(mtx);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, &mtx->mtx_object,
	    "Sleeping on \"%s\"", wmesg);
	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
	    ("sleeping without a mutex"));
	/*
	 * If the process is threaded, don't bother starting an
	 * interruptible sleep at all when the process is exiting (and
	 * we are not the thread running the other threads down) or
	 * when this thread has already been marked as interrupted;
	 * fail with EINTR immediately.
	 */
	if (p->p_flag & P_THREADED) {
		if (catch &&
		    (((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) ||
		     (td->td_flags & TDF_INTERRUPT))) {
			td->td_flags &= ~TDF_INTERRUPT;
			return (EINTR);
		}
	}
	mtx_lock_spin(&sched_lock);
	if (cold) {
		/*
		 * During autoconfiguration, just give interrupts
		 * a chance, then just return.
		 * Don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		if (mtx != NULL && (priority & PDROP))
			mtx_unlock(mtx);
		mtx_unlock_spin(&sched_lock);
		return (0);
	}

	DROP_GIANT();

	if (mtx != NULL) {
		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
		WITNESS_SAVE(&mtx->mtx_object, mtx);
		mtx_unlock(mtx);
		if (priority & PDROP)
			mtx = NULL;
	}

	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
	    td, p->p_pid, p->p_comm, wmesg, ident);

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
	TD_SET_ON_SLEEPQ(td);
	if (timo)
		callout_reset(&td->td_slpcallout, timo, endtsleep, td);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us; thus we must be ready for sleep
	 * when cursig is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from cursig.
	 */
	if (catch) {
		CTR3(KTR_PROC, "msleep caught: thread %p (pid %d, %s)", td,
		    p->p_pid, p->p_comm);
		td->td_flags |= TDF_SINTR;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		sig = cursig(td);
		if (sig == 0 && thread_suspend_check(1))
			sig = SIGSTOP;
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (sig != 0) {
			if (TD_ON_SLEEPQ(td))
				unsleep(td);
		} else if (!TD_ON_SLEEPQ(td))
			catch = 0;
	} else
		sig = 0;

	/*
	 * Let the scheduler know we're about to voluntarily go to sleep.
	 */
	sched_sleep(td, priority & PRIMASK);

	if (TD_ON_SLEEPQ(td)) {
		p->p_stats->p_ru.ru_nvcsw++;
		TD_SET_SLEEPING(td);
		mi_switch();
	}
	/*
	 * We're awake from voluntary sleep.
	 */
	CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	td->td_flags &= ~TDF_SINTR;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			rval = EWOULDBLOCK;
	} else if (td->td_flags & TDF_TIMOFAIL) {
		td->td_flags &= ~TDF_TIMOFAIL;
	} else if (timo && callout_stop(&td->td_slpcallout) == 0) {
		/*
		 * This isn't supposed to be pretty.  If we are here, then
		 * the endtsleep() callout is currently executing on another
		 * CPU and is either spinning on the sched_lock or will be
		 * soon.  If we don't synchronize here, there is a chance
		 * that this process may msleep() again before the callout
		 * has a chance to run and the callout may end up waking up
		 * the wrong msleep().  Yuck.
		 */
		TD_SET_SLEEPING(td);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		td->td_flags &= ~TDF_TIMOFAIL;
	}
	if ((td->td_flags & TDF_INTERRUPT) && (priority & PCATCH) &&
	    (rval == 0)) {
		td->td_flags &= ~TDF_INTERRUPT;
		rval = EINTR;
	}
	mtx_unlock_spin(&sched_lock);

	if (rval == 0 && catch) {
		PROC_LOCK(p);
		/* XXX: shouldn't we always be calling cursig()? */
		if (sig != 0 || (sig = cursig(td))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				rval = EINTR;
			else
				rval = ERESTART;
		}
		PROC_UNLOCK(p);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	if (mtx != NULL) {
		mtx_lock(mtx);
		WITNESS_RESTORE(&mtx->mtx_object, mtx);
	}
	return (rval);
}

/*
 * Implement the timeout for msleep().
 *
 * If the process hasn't been awakened (wchan non-zero), set the
 * timeout flag and undo the sleep.  If the process is stopped, just
 * unsleep so it will remain stopped.
 * MP-safe, called without the Giant mutex.
 */
static void
endtsleep(arg)
	void *arg;
{
	register struct thread *td = arg;

	CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)",
	    td, td->td_proc->p_pid, td->td_proc->p_comm);
	mtx_lock_spin(&sched_lock);
	/*
	 * This is the other half of the synchronization with msleep()
	 * described above.  If the thread is still on the sleep queue,
	 * take it off and mark the timeout with TDF_TIMEOUT; otherwise
	 * a wakeup beat us to it, so set TDF_TIMOFAIL to let msleep()
	 * resynchronize with this callout.
	 */
	if (TD_ON_SLEEPQ(td)) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		TD_CLR_ON_SLEEPQ(td);
		td->td_flags |= TDF_TIMEOUT;
		td->td_wmesg = NULL;
	} else {
		td->td_flags |= TDF_TIMOFAIL;
	}
	TD_CLR_SLEEPING(td);
	setrunnable(td);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Abort a thread, as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 * This is about identical to cv_abort().
 * Think about merging them?
 * Also, whatever the signal code does...
 */
void
abortsleep(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * If the TDF_TIMEOUT flag is set, just leave. A
	 * timeout is scheduled anyhow.
	 */
	if ((td->td_flags & (TDF_TIMEOUT | TDF_SINTR)) == TDF_SINTR) {
		if (TD_ON_SLEEPQ(td)) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
		}
	}
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct thread *td)
{

	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		TD_CLR_ON_SLEEPQ(td);
		td->td_wmesg = NULL;
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct thread *td;
	struct thread *ntd;
	struct proc *p;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		if (td->td_wchan == ident) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
			p = td->td_proc;
			CTR3(KTR_PROC, "wakeup: thread %p (pid %d, %s)",
			    td, p->p_pid, p->p_comm);
			goto restart;
		}
	}
	mtx_unlock_spin(&sched_lock);
}


/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct thread *td;
	register struct proc *p;
	struct thread *ntd;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		if (td->td_wchan == ident) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
			p = td->td_proc;
			CTR3(KTR_PROC, "wakeup1: thread %p (pid %d, %s)",
			    td, p->p_pid, p->p_comm);
			break;
		}
	}
	mtx_unlock_spin(&sched_lock);
}
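
/*
 * The wakeup side of the msleep() sketch above (same hypothetical
 * "foo" names).  The flag is set while the mutex is held, so a thread
 * in foo_wait() cannot test the condition and go to sleep between the
 * update and the wakeup:
 *
 *	static void
 *	foo_post(void)
 *	{
 *		mtx_lock(&foo_mtx);
 *		foo_ready = 1;
 *		wakeup(&foo_ready);
 *		mtx_unlock(&foo_mtx);
 *	}
 *
 * wakeup_one() could be used instead when at most one waiter needs to
 * proceed.
 */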

/*
 * The machine independent parts of mi_switch().
 */
void
mi_switch(void)
{
	struct bintime new_switchtime;
	struct thread *td = curthread;	/* XXX */
	struct proc *p = td->td_proc;	/* XXX */
	u_int sched_nest;

	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);

	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) &&
	    !TD_ON_RUNQ(td) &&
	    !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1,
	    ("mi_switch: switch in a critical section"));

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	binuptime(&new_switchtime);
	bintime_add(&p->p_runtime, &new_switchtime);
	bintime_sub(&p->p_runtime, PCPU_PTR(switchtime));

#ifdef DDB
	/*
	 * Don't perform context switches from the debugger.
	 */
	if (db_active) {
		mtx_unlock_spin(&sched_lock);
		db_print_backtrace();
		db_error("Context switches not allowed in the debugger.");
	}
#endif

	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * over max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit != RLIM_INFINITY &&
	    p->p_runtime.sec > p->p_cpulimit) {
		p->p_sflag |= PS_XCPU;
		td->td_flags |= TDF_ASTPENDING;
	}

	/*
	 * Finish up stats for outgoing thread.
	 */
	cnt.v_swtch++;
	PCPU_SET(switchtime, new_switchtime);
	CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);

	sched_nest = sched_lock.mtx_recurse;
	sched_switchout(td);

	cpu_switch();		/* SHAZAM!! */

	sched_lock.mtx_recurse = sched_nest;
	sched_lock.mtx_lock = (uintptr_t)td;
	sched_switchin(td);

	/*
	 * Start setting up stats etc. for the incoming thread.
	 * Similar code in fork_exit() is returned to by cpu_switch()
	 * in the case of a new thread/process.
	 */
	CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	if (PCPU_GET(switchtime.sec) == 0)
		binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/*
	 * Call the switchin function while still holding the scheduler lock
	 * (used by the idlezero code and the general page-zeroing code)
	 */
	if (td->td_switchin)
		td->td_switchin();

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	switch (p->p_state) {
	case PRS_ZOMBIE:
		panic("setrunnable(1)");
	default:
		break;
	}
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return;
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arrange to swap in this process.  Otherwise just
		 * return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return;
		/* FALLTHROUGH */
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x\n", td->td_state);
		panic("setrunnable(2)");
	}
	if ((p->p_sflag & PS_INMEM) == 0) {
		if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
			p->p_sflag |= PS_SWAPINREQ;
			wakeup(&proc0);
		}
	} else
		sched_wakeup(td);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 * XXXKSE   Needs complete rewrite when correct info is available.
 * Completely Bogus.. only works with 1:1 (but compiles ok now :-)
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;
	struct thread *td;

	avg = &averunnable;
	sx_slock(&allproc_lock);
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		FOREACH_THREAD_IN_PROC(p, td) {
			switch (td->td_state) {
			case TDS_RUNQ:
			case TDS_RUNNING:
				if ((p->p_flag & P_NOLOAD) != 0)
					goto nextproc;
				nrun++; /* XXXKSE */
			default:
				break;
			}
nextproc:
			continue;
		}
	}
	sx_sunlock(&allproc_lock);
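
	/*
	 * Fixed-point decay: cexp[i] and ldavg[i] are both scaled by
	 * FSCALE, so the products below carry a factor of FSCALE^2;
	 * the final shift by FSHIFT removes one factor, leaving
	 * avg = avg * cexp[i] + nrun * (1 - cexp[i]) scaled by FSCALE.
	 */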
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

static void
lboltcb(void *arg)
{
	wakeup(&lbolt);
	callout_reset(&lbolt_callout, hz, lboltcb, NULL);
}

/* ARGSUSED */
static void
sched_setup(dummy)
	void *dummy;
{
	callout_init(&loadav_callout, 0);
	callout_init(&lbolt_callout, 1);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
	lboltcb(NULL);
}

/*
 * General purpose yield system call
 */
int
yield(struct thread *td, struct yield_args *uap)
{
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&Giant, MA_NOTOWNED);
	mtx_lock_spin(&sched_lock);
	kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
	sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	td->td_retval[0] = 0;

	return (0);
}