/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 * $FreeBSD: head/sys/kern/kern_synch.c 111585 2003-02-27 02:05:19Z julian $
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)

int	hogticks;
int	lbolt;

static struct callout loadav_callout;
static struct callout lbolt_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
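
/*
 * These decay factors follow from the 5 second sampling interval: each
 * sample scales the old average by exp(-t/T), where t = 5 seconds and T
 * is the averaging window, so exp(-5/60) = exp(-1/12) for the 1 minute
 * average, exp(-5/300) = exp(-1/60) for 5 minutes and exp(-5/900) =
 * exp(-1/180) for 15 minutes, each pre-multiplied by FSCALE for the
 * fixed point arithmetic in loadav() below.
 */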

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int      fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

static void	endtsleep(void *);
static void	loadav(void *arg);
static void	lboltcb(void *arg);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
static TAILQ_HEAD(slpquehead, thread) slpque[TABLESIZE];
#define LOOKUP(x)	(((intptr_t)(x) >> 8) & (TABLESIZE - 1))
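/*
 * For example (address purely illustrative): a wait channel at
 * 0xc0a1b2c4 hashes to ((0xc0a1b2c4 >> 8) & 127) = 0x32, i.e. sleep
 * queue slot 50.
 */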

void
sleepinit(void)
{
	int i;

	hogticks = (hz / 10) * 2;	/* Default only. */
	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The mutex argument is released before the caller is suspended, and
 * reacquired before msleep returns.  If priority includes the PDROP
 * flag the mutex is not reacquired before returning.
 */
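
/*
 * Illustrative usage sketch (not part of this file): "foo", its f_mtx
 * and f_ready fields, and the "foowt" wmesg are hypothetical names.
 * Callers normally re-check their predicate in a loop, since a wakeup
 * does not guarantee that the condition still holds:
 *
 *	mtx_lock(&foo->f_mtx);
 *	while (foo->f_ready == 0) {
 *		error = msleep(&foo->f_ready, &foo->f_mtx,
 *		    PRIBIO | PCATCH, "foowt", hz);
 *		if (error == EINTR || error == ERESTART)
 *			break;
 *	}
 *	mtx_unlock(&foo->f_mtx);
 */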

int
msleep(ident, mtx, priority, wmesg, timo)
	void *ident;
	struct mtx *mtx;
	int priority, timo;
	const char *wmesg;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int sig, catch = priority & PCATCH;
	int rval = 0;
	WITNESS_SAVE_DECL(mtx);

#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	WITNESS_SLEEP(0, &mtx->mtx_object);
	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
	    ("sleeping without a mutex"));
	/*
	 * If we are capable of async syscalls and there isn't already
	 * another one ready to return, start a new thread
	 * and queue it as ready to run. Note that there is danger here
	 * because we need to make sure that we don't sleep allocating
	 * the thread (recursion here might be bad).
	 * Hence the TDF_INMSLEEP flag.
	 */
	if (p->p_flag & P_THREADED) {
		/*
		 * Just don't bother if we are exiting
		 * and not the exiting thread or thread was marked as
		 * interrupted.
		 */
		if (catch &&
		    (((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) ||
		     (td->td_flags & TDF_INTERRUPT))) {
			td->td_flags &= ~TDF_INTERRUPT;
			return (EINTR);
		}
	}
	mtx_lock_spin(&sched_lock);
	if (cold) {
		/*
		 * During autoconfiguration, just give interrupts
		 * a chance, then just return.
		 * Don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		if (mtx != NULL && priority & PDROP)
			mtx_unlock(mtx);
		mtx_unlock_spin(&sched_lock);
		return (0);
	}

	DROP_GIANT();

	if (mtx != NULL) {
		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
		WITNESS_SAVE(&mtx->mtx_object, mtx);
		mtx_unlock(mtx);
		if (priority & PDROP)
			mtx = NULL;
	}

	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	CTR5(KTR_PROC, "msleep: thread %p (pid %d, %s) on %s (%p)",
	    td, p->p_pid, p->p_comm, wmesg, ident);

	td->td_wchan = ident;
	td->td_wmesg = wmesg;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], td, td_slpq);
	TD_SET_ON_SLEEPQ(td);
	if (timo)
		callout_reset(&td->td_slpcallout, timo, endtsleep, td);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there, and
	 * a wakeup or a SIGCONT (or both) could occur while we were stopped
	 * without resuming us, thus we must be ready for sleep
	 * when cursig is called.  If the wakeup happens while we're
	 * stopped, td->td_wchan will be 0 upon return from cursig.
	 */
	if (catch) {
		CTR3(KTR_PROC, "msleep caught: thread %p (pid %d, %s)", td,
		    p->p_pid, p->p_comm);
		td->td_flags |= TDF_SINTR;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		sig = cursig(td);
		if (sig == 0 && thread_suspend_check(1))
			sig = SIGSTOP;
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (sig != 0) {
			if (TD_ON_SLEEPQ(td))
				unsleep(td);
		} else if (!TD_ON_SLEEPQ(td))
			catch = 0;
	} else
		sig = 0;

	/*
	 * Let the scheduler know we're about to voluntarily go to sleep.
	 */
	sched_sleep(td, priority & PRIMASK);

	if (TD_ON_SLEEPQ(td)) {
		p->p_stats->p_ru.ru_nvcsw++;
		TD_SET_SLEEPING(td);
		mi_switch();
	}
	/*
	 * We're awake from voluntary sleep.
	 */
	CTR3(KTR_PROC, "msleep resume: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	td->td_flags &= ~TDF_SINTR;
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		if (sig == 0)
			rval = EWOULDBLOCK;
	} else if (td->td_flags & TDF_TIMOFAIL) {
		td->td_flags &= ~TDF_TIMOFAIL;
	} else if (timo && callout_stop(&td->td_slpcallout) == 0) {
		/*
		 * This isn't supposed to be pretty.  If we are here, then
		 * the endtsleep() callout is currently executing on another
		 * CPU and is either spinning on the sched_lock or will be
		 * soon.  If we don't synchronize here, there is a chance
		 * that this process may msleep() again before the callout
		 * has a chance to run and the callout may end up waking up
		 * the wrong msleep().  Yuck.
		 */
		TD_SET_SLEEPING(td);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		td->td_flags &= ~TDF_TIMOFAIL;
	}
	if ((td->td_flags & TDF_INTERRUPT) && (priority & PCATCH) &&
	    (rval == 0)) {
		td->td_flags &= ~TDF_INTERRUPT;
		rval = EINTR;
	}
	mtx_unlock_spin(&sched_lock);

	if (rval == 0 && catch) {
		PROC_LOCK(p);
		/* XXX: shouldn't we always be calling cursig()? */
		if (sig != 0 || (sig = cursig(td))) {
			if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
				rval = EINTR;
			else
				rval = ERESTART;
		}
		PROC_UNLOCK(p);
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	if (mtx != NULL) {
		mtx_lock(mtx);
		WITNESS_RESTORE(&mtx->mtx_object, mtx);
	}
	return (rval);
}

/*
 * Implement timeout for msleep()
 *
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 * MP-safe, called without the Giant mutex.
 */
static void
endtsleep(arg)
	void *arg;
{
	register struct thread *td = arg;

	CTR3(KTR_PROC, "endtsleep: thread %p (pid %d, %s)",
	    td, td->td_proc->p_pid, td->td_proc->p_comm);
	mtx_lock_spin(&sched_lock);
	/*
	 * This is the other half of the synchronization with msleep()
	 * described above.  If the TDF_TIMEOUT flag is set, we lost the
	 * race and just need to put the process back on the runqueue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		TD_CLR_ON_SLEEPQ(td);
		td->td_flags |= TDF_TIMEOUT;
	} else {
		td->td_flags |= TDF_TIMOFAIL;
	}
	TD_CLR_SLEEPING(td);
	setrunnable(td);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Abort a thread, as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 * This is about identical to cv_abort().
 * Think about merging them?
 * Also, whatever the signal code does...
 */
void
abortsleep(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	/*
	 * If the TDF_TIMEOUT flag is set, just leave. A
	 * timeout is scheduled anyhow.
	 */
	if ((td->td_flags & (TDF_TIMEOUT | TDF_SINTR)) == TDF_SINTR) {
		if (TD_ON_SLEEPQ(td)) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
		}
	}
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct thread *td)
{

	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		TAILQ_REMOVE(&slpque[LOOKUP(td->td_wchan)], td, td_slpq);
		TD_CLR_ON_SLEEPQ(td);
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct thread *td;
	struct thread *ntd;
	struct proc *p;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
restart:
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		if (td->td_wchan == ident) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
			p = td->td_proc;
			CTR3(KTR_PROC,"wakeup: thread %p (pid %d, %s)",
			    td, p->p_pid, p->p_comm);
			goto restart;
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * Make a process sleeping on the specified identifier runnable.
 * May wake more than one process if a target process is currently
 * swapped out.
 */
void
wakeup_one(ident)
	register void *ident;
{
	register struct slpquehead *qp;
	register struct thread *td;
	register struct proc *p;
	struct thread *ntd;

	mtx_lock_spin(&sched_lock);
	qp = &slpque[LOOKUP(ident)];
	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
		ntd = TAILQ_NEXT(td, td_slpq);
		if (td->td_wchan == ident) {
			unsleep(td);
			TD_CLR_SLEEPING(td);
			setrunnable(td);
			p = td->td_proc;
			CTR3(KTR_PROC,"wakeup1: thread %p (pid %d, %s)",
			    td, p->p_pid, p->p_comm);
			break;
		}
	}
	mtx_unlock_spin(&sched_lock);
}

/*
 * The machine independent parts of mi_switch().
 */
void
mi_switch(void)
{
	struct bintime new_switchtime;
	struct thread *td = curthread;	/* XXX */
	struct proc *p = td->td_proc;	/* XXX */
	u_int sched_nest;

	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);

	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) &&
	    !TD_ON_RUNQ(td) &&
	    !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1,
	    ("mi_switch: switch in a critical section"));

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	binuptime(&new_switchtime);
	bintime_add(&p->p_runtime, &new_switchtime);
	bintime_sub(&p->p_runtime, PCPU_PTR(switchtime));

#ifdef DDB
	/*
	 * Don't perform context switches from the debugger.
	 */
	if (db_active) {
		mtx_unlock_spin(&sched_lock);
		db_print_backtrace();
		db_error("Context switches not allowed in the debugger.");
	}
#endif

	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * over max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit != RLIM_INFINITY &&
	    p->p_runtime.sec > p->p_cpulimit) {
		p->p_sflag |= PS_XCPU;
		td->td_flags |= TDF_ASTPENDING;
	}

	/*
	 * Finish up stats for outgoing thread.
	 */
	cnt.v_swtch++;
	PCPU_SET(switchtime, new_switchtime);
	CTR3(KTR_PROC, "mi_switch: old thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);

	sched_nest = sched_lock.mtx_recurse;
	sched_switchout(td);

	cpu_switch();		/* SHAZAM!! */

	sched_lock.mtx_recurse = sched_nest;
	sched_lock.mtx_lock = (uintptr_t)td;
	sched_switchin(td);

	/*
	 * Start setting up stats etc. for the incoming thread.
	 * Similar code in fork_exit() is returned to by cpu_switch()
	 * in the case of a new thread/process.
	 */
	CTR3(KTR_PROC, "mi_switch: new thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	if (PCPU_GET(switchtime.sec) == 0)
		binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/*
	 * Call the switchin function while still holding the scheduler lock
	 * (used by the idlezero code and the general page-zeroing code)
	 */
	if (td->td_switchin)
		td->td_switchin();

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	switch (p->p_state) {
	case PRS_ZOMBIE:
		panic("setrunnable(1)");
	default:
		break;
	}
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return;
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arrange to swap in this process. Otherwise just return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return;
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((p->p_sflag & PS_INMEM) == 0) {
		if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
			p->p_sflag |= PS_SWAPINREQ;
			wakeup(&proc0);
		}
	} else
		sched_wakeup(td);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 * XXXKSE   Needs complete rewrite when correct info is available.
 * Completely Bogus.. only works with 1:1 (but compiles ok now :-)
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;
	struct proc *p;
	struct thread *td;

	avg = &averunnable;
	sx_slock(&allproc_lock);
	nrun = 0;
	FOREACH_PROC_IN_SYSTEM(p) {
		FOREACH_THREAD_IN_PROC(p, td) {
			switch (td->td_state) {
			case TDS_RUNQ:
			case TDS_RUNNING:
				if ((p->p_flag & P_NOLOAD) != 0)
					goto nextproc;
				nrun++; /* XXXKSE */
			default:
				break;
			}
nextproc:
			continue;
		}
	}
	sx_sunlock(&allproc_lock);
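	/*
	 * Update each average as an exponentially weighted moving average,
	 * ldavg = w * ldavg + (1 - w) * nrun, in fixed point: cexp[i] holds
	 * w scaled by FSCALE, nrun is scaled up by FSCALE to match ldavg,
	 * and the final >> FSHIFT removes the extra factor of FSCALE.
	 */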
	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
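	/*
	 * The delay below is hz * 4 ticks plus a uniformly distributed
	 * 0..2 * hz ticks, i.e. between 4 and 6 seconds with a 5 second
	 * mean.
	 */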
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

static void
lboltcb(void *arg)
{
	wakeup(&lbolt);
	callout_reset(&lbolt_callout, hz, lboltcb, NULL);
}

/* ARGSUSED */
static void
sched_setup(dummy)
	void *dummy;
{
	callout_init(&loadav_callout, 0);
	callout_init(&lbolt_callout, 1);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
	lboltcb(NULL);
}

/*
 * General purpose yield system call
 */
int
yield(struct thread *td, struct yield_args *uap)
{
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&Giant, MA_NOTOWNED);
	mtx_lock_spin(&sched_lock);
	kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
	sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch();
	mtx_unlock_spin(&sched_lock);
	td->td_retval[0] = 0;

	return (0);
}