/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_synch.c 277528 2015-01-22 11:12:42Z hselasky $");

#include "opt_ktrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#ifdef XEN
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

#define	KTDSTATE(td)							\
	(((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep"  :		\
	((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" :	\
	((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" :		\
	((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" :		\
	((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")

static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
    NULL);

int	hogticks;
static uint8_t pause_wchan[MAXCPU];

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, FSCALE, "");

static void	loadav(void *arg);

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE(sched, , , preempt);

/*
 * These probes reference Solaris features that are not implemented in FreeBSD.
 * Create the probes anyway for compatibility with existing D scripts; they'll
 * just never fire.
 */
SDT_PROBE_DEFINE(sched, , , cpucaps__sleep);
SDT_PROBE_DEFINE(sched, , , cpucaps__wakeup);
SDT_PROBE_DEFINE(sched, , , schedctl__nopreempt);
SDT_PROBE_DEFINE(sched, , , schedctl__preempt);
SDT_PROBE_DEFINE(sched, , , schedctl__yield);

static void
sleepinit(void *unused)
{

	hogticks = (hz / 10) * 2;	/* Default only. */
	init_sleepqueues();
}

/*
 * vmem tries to lock the sleepq mutexes when freeing kva, so make sure
 * it is available.
 */
SYSINIT(sleepinit, SI_SUB_KMEM, SI_ORDER_ANY, sleepinit, 0);

/*
 * General sleep call.  Suspends the current thread until a wakeup is
 * performed on the specified identifier.  The thread will then be made
 * runnable with the specified priority.  Sleeps at most sbt units of time
 * (0 means no timeout).  If pri includes the PCATCH flag, let signals
 * interrupt the sleep, otherwise ignore them while sleeping.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal becomes pending, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the
 * system call should be interrupted by the signal.
 *
 * The lock argument is unlocked before the caller is suspended, and
 * re-locked before _sleep() returns.  If priority includes the PDROP
 * flag the lock is not re-locked before returning.
 */
int
_sleep(void *ident, struct lock_object *lock, int priority,
    const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	struct proc *p;
	struct lock_class *class;
	uintptr_t lock_state;
	int catch, pri, rval, sleepq_flags;
	WITNESS_SAVE_DECL(lock_witness);

	td = curthread;
	p = td->td_proc;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0, wmesg);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
	    "Sleeping on \"%s\"", wmesg);
	KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL,
	    ("sleeping without a lock"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
	if (priority & PDROP)
		KASSERT(lock != NULL && lock != &Giant.lock_object,
		    ("PDROP requires a non-Giant lock"));
	if (lock != NULL)
		class = LOCK_CLASS(lock);
	else
		class = NULL;

	if (cold || SCHEDULER_STOPPED()) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		if (lock != NULL && priority & PDROP)
			class->lc_unlock(lock);
		return (0);
	}
	catch = priority & PCATCH;
	pri = priority & PRIMASK;

	/*
	 * If we are already on a sleep queue, then remove us from that
	 * sleep queue first.  We have to do this to handle recursive
	 * sleeps.
	 */
	if (TD_ON_SLEEPQ(td))
		sleepq_remove(td, td->td_wchan);

	if ((uint8_t *)ident >= &pause_wchan[0] &&
	    (uint8_t *)ident <= &pause_wchan[MAXCPU - 1])
		sleepq_flags = SLEEPQ_PAUSE;
	else
		sleepq_flags = SLEEPQ_SLEEP;
	if (catch)
		sleepq_flags |= SLEEPQ_INTERRUPTIBLE;

	sleepq_lock(ident);
	CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	if (lock == &Giant.lock_object)
		mtx_assert(&Giant, MA_OWNED);
	DROP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object &&
	    !(class->lc_flags & LC_SLEEPABLE)) {
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
	} else
		/* GCC needs to follow the Yellow Brick Road */
		lock_state = -1;

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there,
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us.  Thus, we must be ready for sleep
	 * when cursig() is called.  If the wakeup happens while we're
	 * stopped, then td will no longer be on a sleep queue upon
	 * return from cursig().
	 */
	sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
	if (sbt != 0)
		sleepq_set_timeout_sbt(ident, sbt, pr, flags);
	if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
		sleepq_release(ident);
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
		sleepq_lock(ident);
	}
	if (sbt != 0 && catch)
		rval = sleepq_timedwait_sig(ident, pri);
	else if (sbt != 0)
		rval = sleepq_timedwait(ident, pri);
	else if (catch)
		rval = sleepq_wait_sig(ident, pri);
	else {
		sleepq_wait(ident, pri);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0, wmesg);
#endif
	PICKUP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) {
		class->lc_lock(lock, lock_state);
		WITNESS_RESTORE(lock, lock_witness);
	}
	return (rval);
}
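
/*
 * Illustrative sketch (not part of this file): callers normally reach
 * _sleep() through the msleep()/mtx_sleep() wrappers, pairing a condition
 * flag with a wakeup() on the same wait channel.  The softc layout, the
 * field names, and the "foordy" wmesg below are hypothetical.
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;		// MTX_DEF mutex guarding sc_ready
 *		int		sc_ready;	// condition being waited on
 *	};
 *
 *	static int
 *	foo_wait_ready(struct foo_softc *sc)
 *	{
 *		int error = 0;
 *
 *		mtx_lock(&sc->sc_mtx);
 *		// Re-test the condition in a loop: a wakeup does not
 *		// guarantee the condition still holds when we run again.
 *		while (sc->sc_ready == 0 && error == 0)
 *			error = msleep(&sc->sc_ready, &sc->sc_mtx,
 *			    PCATCH, "foordy", hz);	// ~1 s timeout, catch signals
 *		mtx_unlock(&sc->sc_mtx);
 *		return (error);	// 0, EWOULDBLOCK, EINTR, or ERESTART
 *	}
 */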

int
msleep_spin_sbt(void *ident, struct mtx *mtx, const char *wmesg,
    sbintime_t sbt, sbintime_t pr, int flags)
{
	struct thread *td;
	struct proc *p;
	int rval;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	p = td->td_proc;
	KASSERT(mtx != NULL, ("sleeping without a mutex"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	if (cold || SCHEDULER_STOPPED()) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		return (0);
	}

	sleepq_lock(ident);
	CTR5(KTR_PROC, "msleep_spin: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	DROP_GIANT();
	mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
	WITNESS_SAVE(&mtx->lock_object, mtx);
	mtx_unlock_spin(mtx);

	/*
	 * We put ourselves on the sleep queue and start our timeout.
	 */
	sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
	if (sbt != 0)
		sleepq_set_timeout_sbt(ident, sbt, pr, flags);

	/*
	 * We can't call ktrace with any spin locks held, since it may need
	 * to acquire the ktrace_mtx lock, and WITNESS_WARN considers it an
	 * error to hold any spin lock.  Thus, we have to drop the sleepq
	 * spin lock while we handle those requests.  This is safe since we
	 * have placed our thread on the sleep queue already.
	 */
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW)) {
		sleepq_release(ident);
		ktrcsw(1, 0, wmesg);
		sleepq_lock(ident);
	}
#endif
#ifdef WITNESS
	sleepq_release(ident);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"",
	    wmesg);
	sleepq_lock(ident);
#endif
	if (sbt != 0)
		rval = sleepq_timedwait(ident, 0);
	else {
		sleepq_wait(ident, 0);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0, wmesg);
#endif
	PICKUP_GIANT();
	mtx_lock_spin(mtx);
	WITNESS_RESTORE(&mtx->lock_object, mtx);
	return (rval);
}
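
/*
 * Illustrative sketch (not part of this file): msleep_spin() is the spin
 * mutex analogue of msleep().  The names below are hypothetical; note that
 * the mutex must have been initialized with MTX_SPIN, and that the priority
 * and PCATCH arguments accepted by msleep() are not available here.
 *
 *	static struct mtx bar_mtx;	// mtx_init(..., MTX_SPIN)
 *	static int bar_done;
 *
 *	static int
 *	bar_wait(void)
 *	{
 *		int error = 0;
 *
 *		mtx_lock_spin(&bar_mtx);
 *		while (bar_done == 0 && error == 0)
 *			error = msleep_spin(&bar_done, &bar_mtx,
 *			    "barwt", hz);	// returns 0 or EWOULDBLOCK
 *		mtx_unlock_spin(&bar_mtx);
 *		return (error);
 *	}
 */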

/*
 * pause() delays the calling thread by the given number of system ticks.
 * During cold bootup, pause() uses the DELAY() function instead of
 * the tsleep() function to do the waiting. The "timo" argument must be
 * greater than or equal to zero. A "timo" value of zero is equivalent
 * to a "timo" value of one.
 */
int
pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
	KASSERT(sbt >= 0, ("pause: timeout must be >= 0"));

	/* silently convert invalid timeouts */
	if (sbt == 0)
		sbt = tick_sbt;

	if (cold || kdb_active) {
		/*
		 * We delay one second at a time to avoid overflowing the
		 * system specific DELAY() function(s):
		 */
		while (sbt >= SBT_1S) {
			DELAY(1000000);
			sbt -= SBT_1S;
		}
		/* Do the delay remainder, if any */
		sbt = (sbt + SBT_1US - 1) / SBT_1US;
		if (sbt > 0)
			DELAY(sbt);
		return (0);
	}
	return (_sleep(&pause_wchan[curcpu], NULL, 0, wmesg, sbt, pr, flags));
}
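
/*
 * Illustrative sketch (not part of this file): pause() suits delays that
 * have no wakeup channel, e.g. polling hardware that lacks an interrupt.
 * The loop and the baz_hw_ready() helper below are hypothetical.
 *
 *	// Poll a status condition every ~10 ms, for up to 100 tries.
 *	for (i = 0; i < 100; i++) {
 *		if (baz_hw_ready(sc))
 *			break;
 *		pause("bazhw", hz / 100);	// sleep ~10 ms per iteration
 *	}
 */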

/*
 * Make all threads sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper) {
		KASSERT(ident != &proc0,
		    ("wakeup and wakeup_swapper and proc0"));
		kick_proc0();
	}
}

/*
 * Make a thread sleeping on the specified identifier runnable.
 * May wake more than one thread if a target thread is currently
 * swapped out.
 */
void
wakeup_one(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}
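
/*
 * Illustrative sketch (not part of this file): the producer side matching
 * the hypothetical msleep() consumer sketched after _sleep() above.  The
 * condition must be updated under the same lock the sleeper passes to
 * msleep(), and the wait channel (&sc->sc_ready here) must be the same
 * pointer on both sides.
 *
 *	static void
 *	foo_mark_ready(struct foo_softc *sc)
 *	{
 *		mtx_lock(&sc->sc_mtx);
 *		sc->sc_ready = 1;
 *		wakeup_one(&sc->sc_ready);	// or wakeup() for all waiters
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 */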

static void
kdb_switch(void)
{
	thread_unlock(curthread);
	kdb_backtrace();
	kdb_reenter();
	panic("%s: did not reenter debugger", __func__);
}

/*
 * The machine independent parts of context switching.
 */
void
mi_switch(int flags, struct thread *newtd)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct proc *p;

	td = curthread;			/* XXX */
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
	p = td->td_proc;		/* XXX */
	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1 || panicstr,
	    ("mi_switch: switch in a critical section"));
	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
	    ("mi_switch: switch must be voluntary or involuntary"));
	KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));

	/*
	 * Don't perform context switches from the debugger.
	 */
	if (kdb_active)
		kdb_switch();
	if (SCHEDULER_STOPPED())
		return;
	if (flags & SW_VOL) {
		td->td_ru.ru_nvcsw++;
		td->td_swvoltick = ticks;
	} else
		td->td_ru.ru_nivcsw++;
#ifdef SCHED_STATS
	SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
#endif
	/*
	 * Compute the amount of time during which the current
	 * thread was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	td->td_generation++;	/* bump preempt-detect counter */
	PCPU_INC(cnt.v_swtch);
	PCPU_SET(switchticks, ticks);
	CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td->td_sched, p->p_pid, td->td_name);
#if (KTR_COMPILE & KTR_SCHED) != 0
	if (TD_IS_IDLETHREAD(td))
		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
		    "prio:%d", td->td_priority);
	else
		KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
		    "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
		    "lockname:\"%s\"", td->td_lockname);
#endif
	SDT_PROBE0(sched, , , preempt);
#ifdef XEN
	PT_UPDATES_FLUSH();
#endif
	sched_switch(td, newtd, flags);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
	    "prio:%d", td->td_priority);

	CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td->td_sched, p->p_pid, td->td_name);

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change thread state to be runnable, placing it on the run queue if
 * it is in memory.  If it is swapped out, return true so our caller
 * will know to awaken the swapper.
 */
int
setrunnable(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(td->td_proc->p_state != PRS_ZOMBIE,
	    ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return (0);
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out,
		 * then arrange to swap in this process.  Otherwise just
		 * return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return (0);
		/* FALLTHROUGH */
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((td->td_flags & TDF_INMEM) == 0) {
		if ((td->td_flags & TDF_SWAPINREQ) == 0) {
			td->td_flags |= TDF_SWAPINREQ;
			return (1);
		}
	} else
		sched_wakeup(td);
	return (0);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;

	nrun = sched_load();
	avg = &averunnable;

	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset_sbt(&loadav_callout,
	    SBT_1US * (4000000 + (int)(random() % 2000001)), SBT_1US,
	    loadav, NULL, C_DIRECT_EXEC | C_PREL(32));
}
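
/*
 * Worked example (illustrative only): each 5-second sample applies the
 * exponential decay
 *
 *	load = load * exp(-t/T) + n * (1 - exp(-t/T))
 *
 * where t = 5 s and T is 60, 300, or 900 s, so exp(-t/T) is exp(-1/12),
 * exp(-1/60), or exp(-1/180), matching cexp[] above.  In the fixed-point
 * arithmetic used here (typically FSHIFT = 11, FSCALE = 2048), cexp[0]
 * is about 1884, so a system sitting at load 1.0 (ldavg = 2048) with one
 * runnable thread (n = 1) updates its 1-minute average as
 *
 *	(1884 * 2048 + 1 * 2048 * (2048 - 1884)) >> 11 = 2048
 *
 * i.e. a steady n = 1 holds the average at 1.0, while n = 0 would decay
 * it toward zero by a factor of roughly 0.92 per sample.
 */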

/* ARGSUSED */
static void
synch_setup(void *dummy)
{
	callout_init(&loadav_callout, CALLOUT_MPSAFE);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
}

int
should_yield(void)
{

	return ((u_int)ticks - (u_int)curthread->td_swvoltick >= hogticks);
}

void
maybe_yield(void)
{

	if (should_yield())
		kern_yield(PRI_USER);
}
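
/*
 * Illustrative sketch (not part of this file): long-running kernel loops
 * call maybe_yield() so that a thread which has run for more than
 * hogticks without a voluntary context switch gives up the CPU.  The
 * loop and its names below are hypothetical.
 *
 *	for (i = 0; i < huge_count; i++) {
 *		process_one_item(&items[i]);
 *		maybe_yield();	// cheap check; switches only when hogging
 *	}
 */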

void
kern_yield(int prio)
{
	struct thread *td;

	td = curthread;
	DROP_GIANT();
	thread_lock(td);
	if (prio == PRI_USER)
		prio = td->td_user_pri;
	if (prio >= 0)
		sched_prio(td, prio);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
}

/*
 * General purpose yield system call.
 */
int
sys_yield(struct thread *td, struct yield_args *uap)
{

	thread_lock(td);
	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	td->td_retval[0] = 0;
	return (0);
}