/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_synch.c 155534 2006-02-11 09:33:07Z phk $");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup, NULL)

int	hogticks;
int	lbolt;

static struct callout loadav_callout;
static struct callout lbolt_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int      fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
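/*
 * Illustrative sketch only (not part of the kernel, error handling
 * omitted): a userland program should not assume the compile-time
 * FSCALE but read kern.fscale and use it to scale the raw fixed-point
 * values exported via vm.loadavg, roughly like this:
 *
 *	struct loadavg la;
 *	int fs;
 *	size_t len;
 *
 *	len = sizeof(la);
 *	sysctlbyname("vm.loadavg", &la, &len, NULL, 0);
 *	len = sizeof(fs);
 *	sysctlbyname("kern.fscale", &fs, &len, NULL, 0);
 *	printf("1 min load: %.2f\n", (double)la.ldavg[0] / fs);
 *
 * Most programs simply call getloadavg(3), which does this for them.
 */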

static void	loadav(void *arg);
static void	lboltcb(void *arg);

void
sleepinit(void)
{

	hogticks = (hz / 10) * 2;	/* Default only. */
	init_sleepqueues();
}

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The mutex argument is exited before the caller is suspended, and
 * entered before msleep returns.  If priority includes the PDROP
 * flag the mutex is not entered before returning.
 */
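/*
 * Illustrative sketch only (not part of this file): a typical consumer
 * blocks on a condition under a regular mutex and a producer wakes it.
 * The names sc, sc_mtx, sc_ready and error below are hypothetical.
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (!sc->sc_ready) {
 *		error = msleep(sc, &sc->sc_mtx, PRIBIO | PCATCH,
 *		    "mywait", hz);
 *		if (error == EINTR || error == ERESTART)
 *			break;
 *	}
 *	mtx_unlock(&sc->sc_mtx);
 *
 * and on the producer side:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_ready = 1;
 *	wakeup(sc);
 *	mtx_unlock(&sc->sc_mtx);
 */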
int
msleep(ident, mtx, priority, wmesg, timo)
	void *ident;
	struct mtx *mtx;
	int priority, timo;
	const char *wmesg;
{
	struct thread *td;
	struct proc *p;
	int catch, rval, sig, flags;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	p = td->td_proc;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, mtx == NULL ? NULL :
	    &mtx->mtx_object, "Sleeping on \"%s\"", wmesg);
	KASSERT(timo != 0 || mtx_owned(&Giant) || mtx != NULL,
	    ("sleeping without a mutex"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	if (cold) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		if (mtx != NULL && priority & PDROP)
			mtx_unlock(mtx);
		return (0);
	}
	catch = priority & PCATCH;
	rval = 0;

	/*
	 * If we are already on a sleep queue, then remove us from that
	 * sleep queue first.  We have to do this to handle recursive
	 * sleeps.
	 */
	if (TD_ON_SLEEPQ(td))
		sleepq_remove(td, td->td_wchan);

	sleepq_lock(ident);
	if (catch) {
		/*
		 * Don't bother sleeping if we are exiting and not the exiting
		 * thread or if our thread is marked as interrupted.
		 */
		mtx_lock_spin(&sched_lock);
		rval = thread_sleep_check(td);
		mtx_unlock_spin(&sched_lock);
		if (rval != 0) {
			sleepq_release(ident);
			if (mtx != NULL && priority & PDROP)
				mtx_unlock(mtx);
			return (rval);
		}
	}
	CTR5(KTR_PROC, "msleep: thread %p (pid %ld, %s) on %s (%p)",
	    (void *)td, (long)p->p_pid, p->p_comm, wmesg, ident);

	DROP_GIANT();
	if (mtx != NULL) {
		mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
		WITNESS_SAVE(&mtx->mtx_object, mtx);
		mtx_unlock(mtx);
	}

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there,
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us.  Thus, we must be ready for sleep
	 * when cursig() is called.  If the wakeup happens while we're
	 * stopped, then td will no longer be on a sleep queue upon
	 * return from cursig().
	 */
	flags = SLEEPQ_MSLEEP;
	if (catch)
		flags |= SLEEPQ_INTERRUPTIBLE;
	sleepq_add(ident, mtx, wmesg, flags);
	if (timo)
		sleepq_set_timeout(ident, timo);
	if (catch) {
		sig = sleepq_catch_signals(ident);
	} else
		sig = 0;

	/*
	 * Adjust this thread's priority.
	 */
	mtx_lock_spin(&sched_lock);
	sched_prio(td, priority & PRIMASK);
	mtx_unlock_spin(&sched_lock);

	if (timo && catch)
		rval = sleepq_timedwait_sig(ident, sig != 0);
	else if (timo)
		rval = sleepq_timedwait(ident);
	else if (catch)
		rval = sleepq_wait_sig(ident);
	else {
		sleepq_wait(ident);
		rval = 0;
	}
	if (rval == 0 && catch)
		rval = sleepq_calc_signal_retval(sig);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	if (mtx != NULL && !(priority & PDROP)) {
		mtx_lock(mtx);
		WITNESS_RESTORE(&mtx->mtx_object, mtx);
	}
	return (rval);
}

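/*
 * msleep_spin() is a variant of msleep() for use with a spin mutex
 * instead of a regular (sleep) mutex.  The spin mutex is dropped while
 * the thread sleeps and reacquired before returning.  There is no
 * priority argument, so the sleep is not interruptible by signals and
 * the mutex cannot be left dropped on return (no PCATCH/PDROP).
 * Returns 0 if awakened, or EWOULDBLOCK if the timeout expires.
 */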
int
msleep_spin(ident, mtx, wmesg, timo)
	void *ident;
	struct mtx *mtx;
	const char *wmesg;
	int timo;
{
	struct thread *td;
	struct proc *p;
	int rval;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	p = td->td_proc;
	KASSERT(mtx != NULL, ("sleeping without a mutex"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	if (cold) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		return (0);
	}

	sleepq_lock(ident);
	CTR5(KTR_PROC, "msleep_spin: thread %p (pid %ld, %s) on %s (%p)",
	    (void *)td, (long)p->p_pid, p->p_comm, wmesg, ident);

	DROP_GIANT();
	mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
	WITNESS_SAVE(&mtx->mtx_object, mtx);
	mtx_unlock_spin(mtx);

	/*
	 * We put ourselves on the sleep queue and start our timeout.
	 */
	sleepq_add(ident, mtx, wmesg, SLEEPQ_MSLEEP);
	if (timo)
		sleepq_set_timeout(ident, timo);

	/*
	 * Can't call ktrace with any spin locks held so it can lock the
	 * ktrace_mtx lock, and WITNESS_WARN considers it an error to hold
	 * any spin lock.  Thus, we have to drop the sleepq spin lock while
	 * we handle those requests.  This is safe since we have placed our
	 * thread on the sleep queue already.
	 */
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW)) {
		sleepq_release(ident);
		ktrcsw(1, 0);
		sleepq_lock(ident);
	}
#endif
#ifdef WITNESS
	sleepq_release(ident);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"",
	    wmesg);
	sleepq_lock(ident);
#endif
	if (timo)
		rval = sleepq_timedwait(ident);
	else {
		sleepq_wait(ident);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock_spin(mtx);
	WITNESS_RESTORE(&mtx->mtx_object, mtx);
	return (rval);
}

/*
 * Make all threads sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{

	sleepq_lock(ident);
	sleepq_broadcast(ident, SLEEPQ_MSLEEP, -1);
}

/*
 * Make a thread sleeping on the specified identifier runnable.
 * May wake more than one thread if a target thread is currently
 * swapped out.
 */
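/*
 * Usage note (general guidance, not specific to this revision):
 * wakeup_one() is only appropriate when every thread sleeping on the
 * identifier waits for the same condition and any one of them can
 * service it; otherwise the single wakeup may be consumed by a thread
 * that cannot make progress, and wakeup() should be used instead.
 */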
void
wakeup_one(ident)
	register void *ident;
{

	sleepq_lock(ident);
	sleepq_signal(ident, SLEEPQ_MSLEEP, -1);
}

/*
 * The machine independent parts of context switching.
 */
void
mi_switch(int flags, struct thread *newtd)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
	td = curthread;			/* XXX */
	p = td->td_proc;		/* XXX */
	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1 || (td->td_critnest == 2 &&
	    (td->td_owepreempt) && (flags & SW_INVOL) != 0 &&
	    newtd == NULL) || panicstr,
	    ("mi_switch: switch in a critical section"));
	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
	    ("mi_switch: switch must be voluntary or involuntary"));
	KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));

	if (flags & SW_VOL)
		p->p_stats->p_ru.ru_nvcsw++;
	else
		p->p_stats->p_ru.ru_nivcsw++;

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	p->p_rux.rux_uticks += td->td_uticks;
	td->td_uticks = 0;
	p->p_rux.rux_iticks += td->td_iticks;
	td->td_iticks = 0;
	p->p_rux.rux_sticks += td->td_sticks;
	td->td_sticks = 0;

	td->td_generation++;	/* bump preempt-detect counter */

	/*
	 * Don't perform context switches from the debugger.
	 */
	if (kdb_active) {
		mtx_unlock_spin(&sched_lock);
		kdb_backtrace();
		kdb_reenter();
		panic("%s: did not reenter debugger", __func__);
	}

	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit != RLIM_INFINITY &&
	    p->p_rux.rux_runtime >= p->p_cpulimit * cpu_tickrate()) {
		p->p_sflag |= PS_XCPU;
		td->td_flags |= TDF_ASTPENDING;
	}

	/*
	 * Finish up stats for outgoing thread.
	 */
	cnt.v_swtch++;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	CTR4(KTR_PROC, "mi_switch: old thread %p (kse %p, pid %ld, %s)",
	    (void *)td, td->td_sched, (long)p->p_pid, p->p_comm);
	if ((flags & SW_VOL) && (td->td_proc->p_flag & P_SA))
		newtd = thread_switchout(td, flags, newtd);
#if (KTR_COMPILE & KTR_SCHED) != 0
	if (td == PCPU_GET(idlethread))
		CTR3(KTR_SCHED, "mi_switch: %p(%s) prio %d idle",
		    td, td->td_proc->p_comm, td->td_priority);
	else if (newtd != NULL)
		CTR5(KTR_SCHED,
		    "mi_switch: %p(%s) prio %d preempted by %p(%s)",
		    td, td->td_proc->p_comm, td->td_priority, newtd,
		    newtd->td_proc->p_comm);
	else
		CTR6(KTR_SCHED,
		    "mi_switch: %p(%s) prio %d inhibit %d wmesg %s lock %s",
		    td, td->td_proc->p_comm, td->td_priority,
		    td->td_inhibitors, td->td_wmesg, td->td_lockname);
#endif
	sched_switch(td, newtd, flags);
	CTR3(KTR_SCHED, "mi_switch: running %p(%s) prio %d",
	    td, td->td_proc->p_comm, td->td_priority);

	CTR4(KTR_PROC, "mi_switch: new thread %p (kse %p, pid %ld, %s)",
	    (void *)td, td->td_sched, (long)p->p_pid, p->p_comm);

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	mtx_assert(&sched_lock, MA_OWNED);
	switch (p->p_state) {
	case PRS_ZOMBIE:
		panic("setrunnable(1)");
	default:
		break;
	}
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return;
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arrange to swap in this process. Otherwise just return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return;
		/* XXX: intentional fall-through ? */
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((p->p_sflag & PS_INMEM) == 0) {
		if ((p->p_sflag & PS_SWAPPINGIN) == 0) {
			p->p_sflag |= PS_SWAPINREQ;
			/*
			 * due to a LOR between sched_lock and
			 * the sleepqueue chain locks, use
			 * lower level scheduling functions.
			 */
			kick_proc0();
		}
	} else
		sched_wakeup(td);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 * XXXKSE   Needs complete rewrite when correct info is available.
 * Completely Bogus.. only works with 1:1 (but compiles ok now :-)
 */
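/*
 * For reference, the update below is a fixed-point form of the usual
 * exponentially decayed average:
 *
 *	avg = avg * exp(-t/T) + nrun * (1 - exp(-t/T))
 *
 * with a sampling interval of t = 5 seconds and T = 60, 300 and 900
 * seconds, which is where the exp(-1/12), exp(-1/60) and exp(-1/180)
 * constants in cexp[] above come from.  The arithmetic is scaled up by
 * FSCALE and shifted back down by FSHIFT.
 */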
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;

	nrun = sched_load();
	avg = &averunnable;

	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}

static void
lboltcb(void *arg)
{
	wakeup(&lbolt);
	callout_reset(&lbolt_callout, hz, lboltcb, NULL);
}

/* ARGSUSED */
static void
synch_setup(dummy)
	void *dummy;
{
	callout_init(&loadav_callout, CALLOUT_MPSAFE);
	callout_init(&lbolt_callout, CALLOUT_MPSAFE);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
	lboltcb(NULL);
}

/*
 * General purpose yield system call
 */
int
yield(struct thread *td, struct yield_args *uap)
{
	struct ksegrp *kg;

	kg = td->td_ksegrp;
	mtx_assert(&Giant, MA_NOTOWNED);
	mtx_lock_spin(&sched_lock);
	sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL, NULL);
	mtx_unlock_spin(&sched_lock);
	td->td_retval[0] = 0;
	return (0);
}