1/*- 2 * Copyright (c) 1982, 1986, 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95 35 */ 36 37#include <sys/cdefs.h> 38__FBSDID("$FreeBSD: stable/11/sys/kern/kern_synch.c 359652 2020-04-06 07:16:31Z hselasky $"); 39 40#include "opt_ktrace.h" 41#include "opt_sched.h" 42 43#include <sys/param.h> 44#include <sys/systm.h> 45#include <sys/condvar.h> 46#include <sys/kdb.h> 47#include <sys/kernel.h> 48#include <sys/ktr.h> 49#include <sys/lock.h> 50#include <sys/mutex.h> 51#include <sys/proc.h> 52#include <sys/resourcevar.h> 53#include <sys/sched.h> 54#include <sys/sdt.h> 55#include <sys/signalvar.h> 56#include <sys/sleepqueue.h> 57#include <sys/smp.h> 58#include <sys/sx.h> 59#include <sys/sysctl.h> 60#include <sys/sysproto.h> 61#include <sys/vmmeter.h> 62#ifdef KTRACE 63#include <sys/uio.h> 64#include <sys/ktrace.h> 65#endif 66 67#include <machine/cpu.h> 68 69static void synch_setup(void *dummy); 70SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup, 71 NULL); 72 73int hogticks; 74static uint8_t pause_wchan[MAXCPU]; 75 76static struct callout loadav_callout; 77 78struct loadavg averunnable = 79 { {0, 0, 0}, FSCALE }; /* load average, of runnable procs */ 80/* 81 * Constants for averages over 1, 5, and 15 minutes 82 * when sampling at 5 second intervals. 83 */ 84static fixpt_t cexp[3] = { 85 0.9200444146293232 * FSCALE, /* exp(-1/12) */ 86 0.9834714538216174 * FSCALE, /* exp(-1/60) */ 87 0.9944598480048967 * FSCALE, /* exp(-1/180) */ 88}; 89 90/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */ 91SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, FSCALE, ""); 92 93static void loadav(void *arg); 94 95SDT_PROVIDER_DECLARE(sched); 96SDT_PROBE_DEFINE(sched, , , preempt); 97 98static void 99sleepinit(void *unused) 100{ 101 102 hogticks = (hz / 10) * 2; /* Default only. */ 103 init_sleepqueues(); 104} 105 106/* 107 * vmem tries to lock the sleepq mutexes when free'ing kva, so make sure 108 * it is available. 109 */ 110SYSINIT(sleepinit, SI_SUB_KMEM, SI_ORDER_ANY, sleepinit, NULL); 111 112/* 113 * General sleep call. Suspends the current thread until a wakeup is 114 * performed on the specified identifier. The thread will then be made 115 * runnable with the specified priority. Sleeps at most sbt units of time 116 * (0 means no timeout). If pri includes the PCATCH flag, let signals 117 * interrupt the sleep, otherwise ignore them while sleeping. Returns 0 if 118 * awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a 119 * signal becomes pending, ERESTART is returned if the current system 120 * call should be restarted if possible, and EINTR is returned if the system 121 * call should be interrupted by the signal (return EINTR). 122 * 123 * The lock argument is unlocked before the caller is suspended, and 124 * re-locked before _sleep() returns. If priority includes the PDROP 125 * flag the lock is not re-locked before returning. 126 */ 127int 128_sleep(void *ident, struct lock_object *lock, int priority, 129 const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags) 130{ 131 struct thread *td; 132 struct proc *p; 133 struct lock_class *class; 134 uintptr_t lock_state; 135 int catch, pri, rval, sleepq_flags; 136 WITNESS_SAVE_DECL(lock_witness); 137 138 td = curthread; 139 p = td->td_proc; 140#ifdef KTRACE 141 if (KTRPOINT(td, KTR_CSW)) 142 ktrcsw(1, 0, wmesg); 143#endif 144 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock, 145 "Sleeping on \"%s\"", wmesg); 146 KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL, 147 ("sleeping without a lock")); 148 KASSERT(p != NULL, ("msleep1")); 149 KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep")); 150 if (priority & PDROP) 151 KASSERT(lock != NULL && lock != &Giant.lock_object, 152 ("PDROP requires a non-Giant lock")); 153 if (lock != NULL) 154 class = LOCK_CLASS(lock); 155 else 156 class = NULL; 157 158 if (SCHEDULER_STOPPED_TD(td)) { 159 if (lock != NULL && priority & PDROP) 160 class->lc_unlock(lock); 161 return (0); 162 } 163 catch = priority & PCATCH; 164 pri = priority & PRIMASK; 165 166 KASSERT(!TD_ON_SLEEPQ(td), ("recursive sleep")); 167 168 if ((uint8_t *)ident >= &pause_wchan[0] && 169 (uint8_t *)ident <= &pause_wchan[MAXCPU - 1]) 170 sleepq_flags = SLEEPQ_PAUSE; 171 else 172 sleepq_flags = SLEEPQ_SLEEP; 173 if (catch) 174 sleepq_flags |= SLEEPQ_INTERRUPTIBLE; 175 176 sleepq_lock(ident); 177 CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)", 178 td->td_tid, p->p_pid, td->td_name, wmesg, ident); 179 180 if (lock == &Giant.lock_object) 181 mtx_assert(&Giant, MA_OWNED); 182 DROP_GIANT(); 183 if (lock != NULL && lock != &Giant.lock_object && 184 !(class->lc_flags & LC_SLEEPABLE)) { 185 WITNESS_SAVE(lock, lock_witness); 186 lock_state = class->lc_unlock(lock); 187 } else 188 /* GCC needs to follow the Yellow Brick Road */ 189 lock_state = -1; 190 191 /* 192 * We put ourselves on the sleep queue and start our timeout 193 * before calling thread_suspend_check, as we could stop there, 194 * and a wakeup or a SIGCONT (or both) could occur while we were 195 * stopped without resuming us. Thus, we must be ready for sleep 196 * when cursig() is called. If the wakeup happens while we're 197 * stopped, then td will no longer be on a sleep queue upon 198 * return from cursig(). 199 */ 200 sleepq_add(ident, lock, wmesg, sleepq_flags, 0); 201 if (sbt != 0) 202 sleepq_set_timeout_sbt(ident, sbt, pr, flags); 203 if (lock != NULL && class->lc_flags & LC_SLEEPABLE) { 204 sleepq_release(ident); 205 WITNESS_SAVE(lock, lock_witness); 206 lock_state = class->lc_unlock(lock); 207 sleepq_lock(ident); 208 } 209 if (sbt != 0 && catch) 210 rval = sleepq_timedwait_sig(ident, pri); 211 else if (sbt != 0) 212 rval = sleepq_timedwait(ident, pri); 213 else if (catch) 214 rval = sleepq_wait_sig(ident, pri); 215 else { 216 sleepq_wait(ident, pri); 217 rval = 0; 218 } 219#ifdef KTRACE 220 if (KTRPOINT(td, KTR_CSW)) 221 ktrcsw(0, 0, wmesg); 222#endif 223 PICKUP_GIANT(); 224 if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) { 225 class->lc_lock(lock, lock_state); 226 WITNESS_RESTORE(lock, lock_witness); 227 } 228 return (rval); 229} 230 231int 232msleep_spin_sbt(void *ident, struct mtx *mtx, const char *wmesg, 233 sbintime_t sbt, sbintime_t pr, int flags) 234{ 235 struct thread *td; 236 struct proc *p; 237 int rval; 238 WITNESS_SAVE_DECL(mtx); 239 240 td = curthread; 241 p = td->td_proc; 242 KASSERT(mtx != NULL, ("sleeping without a mutex")); 243 KASSERT(p != NULL, ("msleep1")); 244 KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep")); 245 246 if (SCHEDULER_STOPPED_TD(td)) 247 return (0); 248 249 sleepq_lock(ident); 250 CTR5(KTR_PROC, "msleep_spin: thread %ld (pid %ld, %s) on %s (%p)", 251 td->td_tid, p->p_pid, td->td_name, wmesg, ident); 252 253 DROP_GIANT(); 254 mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED); 255 WITNESS_SAVE(&mtx->lock_object, mtx); 256 mtx_unlock_spin(mtx); 257 258 /* 259 * We put ourselves on the sleep queue and start our timeout. 260 */ 261 sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0); 262 if (sbt != 0) 263 sleepq_set_timeout_sbt(ident, sbt, pr, flags); 264 265 /* 266 * Can't call ktrace with any spin locks held so it can lock the 267 * ktrace_mtx lock, and WITNESS_WARN considers it an error to hold 268 * any spin lock. Thus, we have to drop the sleepq spin lock while 269 * we handle those requests. This is safe since we have placed our 270 * thread on the sleep queue already. 271 */ 272#ifdef KTRACE 273 if (KTRPOINT(td, KTR_CSW)) { 274 sleepq_release(ident); 275 ktrcsw(1, 0, wmesg); 276 sleepq_lock(ident); 277 } 278#endif 279#ifdef WITNESS 280 sleepq_release(ident); 281 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"", 282 wmesg); 283 sleepq_lock(ident); 284#endif 285 if (sbt != 0) 286 rval = sleepq_timedwait(ident, 0); 287 else { 288 sleepq_wait(ident, 0); 289 rval = 0; 290 } 291#ifdef KTRACE 292 if (KTRPOINT(td, KTR_CSW)) 293 ktrcsw(0, 0, wmesg); 294#endif 295 PICKUP_GIANT(); 296 mtx_lock_spin(mtx); 297 WITNESS_RESTORE(&mtx->lock_object, mtx); 298 return (rval); 299} 300 301/* 302 * pause_sbt() delays the calling thread by the given signed binary 303 * time. During cold bootup, pause_sbt() uses the DELAY() function 304 * instead of the _sleep() function to do the waiting. The "sbt" 305 * argument must be greater than or equal to zero. A "sbt" value of 306 * zero is equivalent to a "sbt" value of one tick. 307 */ 308int 309pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags) 310{ 311 KASSERT(sbt >= 0, ("pause_sbt: timeout must be >= 0")); 312 313 /* silently convert invalid timeouts */ 314 if (sbt == 0) 315 sbt = tick_sbt; 316 317 if ((cold && curthread == &thread0) || kdb_active || 318 SCHEDULER_STOPPED()) { 319 /* 320 * We delay one second at a time to avoid overflowing the 321 * system specific DELAY() function(s): 322 */ 323 while (sbt >= SBT_1S) { 324 DELAY(1000000); 325 sbt -= SBT_1S; 326 } 327 /* Do the delay remainder, if any */ 328 sbt = howmany(sbt, SBT_1US); 329 if (sbt > 0) 330 DELAY(sbt); 331 return (EWOULDBLOCK); 332 } 333 return (_sleep(&pause_wchan[curcpu], NULL, 334 (flags & C_CATCH) ? PCATCH : 0, wmesg, sbt, pr, flags)); 335} 336 337/* 338 * Make all threads sleeping on the specified identifier runnable. 339 */ 340void 341wakeup(void *ident) 342{ 343 int wakeup_swapper; 344 345 sleepq_lock(ident); 346 wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0); 347 sleepq_release(ident); 348 if (wakeup_swapper) { 349 KASSERT(ident != &proc0, 350 ("wakeup and wakeup_swapper and proc0")); 351 kick_proc0(); 352 } 353} 354 355/* 356 * Make a thread sleeping on the specified identifier runnable. 357 * May wake more than one thread if a target thread is currently 358 * swapped out. 359 */ 360void 361wakeup_one(void *ident) 362{ 363 int wakeup_swapper; 364 365 sleepq_lock(ident); 366 wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0); 367 sleepq_release(ident); 368 if (wakeup_swapper) 369 kick_proc0(); 370} 371 372void 373wakeup_any(void *ident) 374{ 375 int wakeup_swapper; 376 377 sleepq_lock(ident); 378 wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_UNFAIR, 379 0, 0); 380 sleepq_release(ident); 381 if (wakeup_swapper) 382 kick_proc0(); 383} 384 385static void 386kdb_switch(void) 387{ 388 thread_unlock(curthread); 389 kdb_backtrace(); 390 kdb_reenter(); 391 panic("%s: did not reenter debugger", __func__); 392} 393 394/* 395 * The machine independent parts of context switching. 396 */ 397void 398mi_switch(int flags, struct thread *newtd) 399{ 400 uint64_t runtime, new_switchtime; 401 struct thread *td; 402 403 td = curthread; /* XXX */ 404 THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED); 405 KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code")); 406#ifdef INVARIANTS 407 if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td)) 408 mtx_assert(&Giant, MA_NOTOWNED); 409#endif 410 KASSERT(td->td_critnest == 1 || panicstr, 411 ("mi_switch: switch in a critical section")); 412 KASSERT((flags & (SW_INVOL | SW_VOL)) != 0, 413 ("mi_switch: switch must be voluntary or involuntary")); 414 KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself")); 415 416 /* 417 * Don't perform context switches from the debugger. 418 */ 419 if (kdb_active) 420 kdb_switch(); 421 if (SCHEDULER_STOPPED_TD(td)) 422 return; 423 if (flags & SW_VOL) { 424 td->td_ru.ru_nvcsw++; 425 td->td_swvoltick = ticks; 426 } else { 427 td->td_ru.ru_nivcsw++; 428 td->td_swinvoltick = ticks; 429 } 430#ifdef SCHED_STATS 431 SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]); 432#endif 433 /* 434 * Compute the amount of time during which the current 435 * thread was running, and add that to its total so far. 436 */ 437 new_switchtime = cpu_ticks(); 438 runtime = new_switchtime - PCPU_GET(switchtime); 439 td->td_runtime += runtime; 440 td->td_incruntime += runtime; 441 PCPU_SET(switchtime, new_switchtime); 442 td->td_generation++; /* bump preempt-detect counter */ 443 PCPU_INC(cnt.v_swtch); 444 PCPU_SET(switchticks, ticks); 445 CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)", 446 td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name); 447#ifdef KDTRACE_HOOKS 448 if ((flags & SW_PREEMPT) != 0 || ((flags & SW_INVOL) != 0 && 449 (flags & SW_TYPE_MASK) == SWT_NEEDRESCHED)) 450 SDT_PROBE0(sched, , , preempt); 451#endif 452 sched_switch(td, newtd, flags); 453 CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)", 454 td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name); 455 456 /* 457 * If the last thread was exiting, finish cleaning it up. 458 */ 459 if ((td = PCPU_GET(deadthread))) { 460 PCPU_SET(deadthread, NULL); 461 thread_stash(td); 462 } 463} 464 465/* 466 * Change thread state to be runnable, placing it on the run queue if 467 * it is in memory. If it is swapped out, return true so our caller 468 * will know to awaken the swapper. 469 */ 470int 471setrunnable(struct thread *td) 472{ 473 474 THREAD_LOCK_ASSERT(td, MA_OWNED); 475 KASSERT(td->td_proc->p_state != PRS_ZOMBIE, 476 ("setrunnable: pid %d is a zombie", td->td_proc->p_pid)); 477 switch (td->td_state) { 478 case TDS_RUNNING: 479 case TDS_RUNQ: 480 return (0); 481 case TDS_INHIBITED: 482 /* 483 * If we are only inhibited because we are swapped out 484 * then arange to swap in this process. Otherwise just return. 485 */ 486 if (td->td_inhibitors != TDI_SWAPPED) 487 return (0); 488 /* FALLTHROUGH */ 489 case TDS_CAN_RUN: 490 break; 491 default: 492 printf("state is 0x%x", td->td_state); 493 panic("setrunnable(2)"); 494 } 495 if ((td->td_flags & TDF_INMEM) == 0) { 496 if ((td->td_flags & TDF_SWAPINREQ) == 0) { 497 td->td_flags |= TDF_SWAPINREQ; 498 return (1); 499 } 500 } else 501 sched_wakeup(td); 502 return (0); 503} 504 505/* 506 * Compute a tenex style load average of a quantity on 507 * 1, 5 and 15 minute intervals. 508 */ 509static void 510loadav(void *arg) 511{ 512 int i, nrun; 513 struct loadavg *avg; 514 515 nrun = sched_load(); 516 avg = &averunnable; 517 518 for (i = 0; i < 3; i++) 519 avg->ldavg[i] = (cexp[i] * avg->ldavg[i] + 520 nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT; 521 522 /* 523 * Schedule the next update to occur after 5 seconds, but add a 524 * random variation to avoid synchronisation with processes that 525 * run at regular intervals. 526 */ 527 callout_reset_sbt(&loadav_callout, 528 SBT_1US * (4000000 + (int)(random() % 2000001)), SBT_1US, 529 loadav, NULL, C_DIRECT_EXEC | C_PREL(32)); 530} 531 532/* ARGSUSED */ 533static void 534synch_setup(void *dummy) 535{ 536 callout_init(&loadav_callout, 1); 537 538 /* Kick off timeout driven events by calling first time. */ 539 loadav(NULL); 540} 541 542int 543should_yield(void) 544{ 545 546 return ((u_int)ticks - (u_int)curthread->td_swvoltick >= hogticks); 547} 548 549void 550maybe_yield(void) 551{ 552 553 if (should_yield()) 554 kern_yield(PRI_USER); 555} 556 557void 558kern_yield(int prio) 559{ 560 struct thread *td; 561 562 td = curthread; 563 DROP_GIANT(); 564 thread_lock(td); 565 if (prio == PRI_USER) 566 prio = td->td_user_pri; 567 if (prio >= 0) 568 sched_prio(td, prio); 569 mi_switch(SW_VOL | SWT_RELINQUISH, NULL); 570 thread_unlock(td); 571 PICKUP_GIANT(); 572} 573 574/* 575 * General purpose yield system call. 576 */ 577int 578sys_yield(struct thread *td, struct yield_args *uap) 579{ 580 581 thread_lock(td); 582 if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) 583 sched_prio(td, PRI_MAX_TIMESHARE); 584 mi_switch(SW_VOL | SWT_RELINQUISH, NULL); 585 thread_unlock(td); 586 td->td_retval[0] = 0; 587 return (0); 588} 589