/*	$OpenBSD: kern_synch.c,v 1.186 2022/04/30 14:44:04 visa Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/refcnt.h>
#include <sys/atomic.h>
#include <sys/witness.h>
#include <sys/tracepoint.h>

#include <ddb/db_output.h>

#include <machine/spinlock.h>

#ifdef DIAGNOSTIC
#include <sys/syslog.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

int	sleep_signal_check(void);
int	thrsleep(struct proc *, struct sys___thrsleep_args *);
int	thrsleep_unlock(void *);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
TAILQ_HEAD(slpque,proc) slpque[TABLESIZE];

void
sleep_queue_init(void)
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}
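
/*
 * Worked example of the hash above (illustrative, not from the
 * original file): with TABLESIZE = 128, a hypothetical wait channel
 * at address 0xffffff0012345a00 lands in bucket
 *
 *	LOOKUP(x) = (0xffffff0012345a00 >> 8) & 127
 *	          = 0xffffff0012345a & 0x7f
 *	          = 0x5a = 90
 *
 * so all sleepers on that channel queue up on slpque[90].  Distinct
 * channels may collide in the same bucket; wakeup_n() below copes by
 * matching p_wchan exactly before waking a thread.
 */
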
/*
 * Global sleep channel for threads that do not want to
 * receive wakeup(9) broadcasts.
 */
int nowake;

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
extern int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
{
	struct sleep_state sls;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);

#ifdef MULTIPROCESSOR
	KASSERT(timo || _kernel_lock_held());
#endif

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		int s;
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		splx(s);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg, timo);
	return sleep_finish(&sls, 1);
}

int
tsleep_nsec(const volatile void *ident, int priority, const char *wmesg,
    uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return tsleep(ident, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	/*
	 * We want to sleep at least nsecs nanoseconds worth of ticks.
	 *
	 *  - Clamp nsecs to prevent arithmetic overflow.
	 *
	 *  - Round nsecs up to account for any nanoseconds that do not
	 *    divide evenly into tick_nsec, otherwise we'll lose them to
	 *    integer division in the next step.  We add (tick_nsec - 1)
	 *    to keep from introducing a spurious tick if there are no
	 *    such nanoseconds, i.e. nsecs % tick_nsec == 0.
	 *
	 *  - Divide the rounded value to a count of ticks.  We divide
	 *    by (tick_nsec + 1) to discard the extra tick introduced if,
	 *    before rounding, nsecs % tick_nsec == 1.
	 *
	 *  - Finally, add a tick to the result.  We need to wait out
	 *    the current tick before we can begin counting our interval,
	 *    as we do not know how much time has elapsed since the
	 *    current tick began.
	 */
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return tsleep(ident, priority, wmesg, (int)to_ticks);
}
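
/*
 * Worked example of the conversion above (illustrative, not from the
 * original file): assuming hz = 100, so tick_nsec = 10000000, a request
 * for nsecs = 25000000 (25 ms) yields
 *
 *	to_ticks = (25000000 + 9999999) / 10000001 + 1
 *	         = 34999999 / 10000001 + 1
 *	         = 3 + 1 = 4
 *
 * i.e. three full ticks covering the requested 25 ms, plus one tick to
 * wait out the partially elapsed current tick.  msleep_nsec() and
 * rwsleep_nsec() below perform the identical conversion.
 */
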
/*
 * Same as tsleep, but if we have a mutex provided, then once we've
 * entered the sleep queue we drop the mutex.  After sleeping we re-lock.
 */
int
msleep(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, spl;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	KASSERT(mtx != NULL);

	if (priority & PCATCH)
		KERNEL_ASSERT_LOCKED();

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		spl = MUTEX_OLDIPL(mtx);
		MUTEX_OLDIPL(mtx) = safepri;
		mtx_leave(mtx);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		if ((priority & PNORELOCK) == 0) {
			mtx_enter(mtx);
			MUTEX_OLDIPL(mtx) = spl;
		} else
			splx(spl);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg, timo);

	/* XXX - We need to make sure that the mutex doesn't
	 * unblock splsched.  This can be made a bit more
	 * correct when the sched_lock is a mutex.
	 */
	spl = MUTEX_OLDIPL(mtx);
	MUTEX_OLDIPL(mtx) = splsched();
	mtx_leave(mtx);
	/* signal may stop the process, release mutex before that */
	error = sleep_finish(&sls, 1);

	if ((priority & PNORELOCK) == 0) {
		mtx_enter(mtx);
		MUTEX_OLDIPL(mtx) = spl;	/* put the ipl back */
	} else
		splx(spl);

	return error;
}

int
msleep_nsec(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return msleep(ident, mtx, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return msleep(ident, mtx, priority, wmesg, (int)to_ticks);
}
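
/*
 * Typical consumer-side usage of msleep_nsec() (illustrative sketch,
 * not from the original file; "sc", "frobq" and "fq_mtx" are
 * hypothetical):
 *
 *	mtx_enter(&sc->fq_mtx);
 *	while (TAILQ_EMPTY(&sc->frobq)) {
 *		error = msleep_nsec(&sc->frobq, &sc->fq_mtx, PWAIT,
 *		    "frobq", SEC_TO_NSEC(1));
 *		if (error == EWOULDBLOCK)
 *			break;		(timed out after one second)
 *	}
 *	...consume the queue...
 *	mtx_leave(&sc->fq_mtx);
 *
 * The mutex is dropped only after the thread is on the sleep queue, so
 * a wakeup(9) issued by a producer holding fq_mtx cannot be lost.  The
 * condition must still be re-checked in a loop because wakeups can be
 * broadcast to multiple sleepers.
 */
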
/*
 * Same as tsleep, but if we have a rwlock provided, then once we've
 * entered the sleep queue we drop it.  After sleeping we re-lock.
 */
int
rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, status;

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	rw_assert_anylock(rwl);
	status = rw_status(rwl);

	sleep_setup(&sls, ident, priority, wmesg, timo);

	rw_exit(rwl);
	/* signal may stop the process, release rwlock before that */
	error = sleep_finish(&sls, 1);

	if ((priority & PNORELOCK) == 0)
		rw_enter(rwl, status);

	return error;
}

int
rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return rwsleep(ident, rwl, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return rwsleep(ident, rwl, priority, wmesg, (int)to_ticks);
}

void
sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
    const char *wmesg, int timo)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (p->p_flag & P_CANTSLEEP)
		panic("sleep: %s failed insomnia", p->p_p->ps_comm);
	if (ident == NULL)
		panic("tsleep: no ident");
	if (p->p_stat != SONPROC)
		panic("tsleep: not SONPROC");
#endif

	sls->sls_catch = prio & PCATCH;
	sls->sls_locked = 0;
	sls->sls_timeout = 0;

	/*
	 * The kernel has to be locked for signal processing.
	 * This is done here and not in sleep_finish() because
	 * KERNEL_LOCK() has to be taken before SCHED_LOCK().
	 */
	if (sls->sls_catch != 0) {
		KERNEL_LOCK();
		sls->sls_locked = 1;
	}

	SCHED_LOCK(sls->sls_s);

	TRACEPOINT(sched, sleep, NULL);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_slppri = prio & PRIMASK;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);

	KASSERT((p->p_flag & P_TIMEOUT) == 0);
	if (timo) {
		sls->sls_timeout = 1;
		timeout_add(&p->p_sleep_to, timo);
	}
}
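
/*
 * sleep_setup() and sleep_finish() always run as a pair.  Between the
 * two calls the thread is already on the sleep queue and holds the
 * scheduler lock, so a caller can re-check its wakeup condition and
 * pass the result as sleep_finish()'s do_sleep argument without risk
 * of losing a wakeup that fires in between; refcnt_finalize() and
 * cond_wait() below rely on exactly this.  Illustrative sketch (not
 * from the original file; "nwaiting" is hypothetical):
 *
 *	sleep_setup(&sls, &nwaiting, PWAIT, "nwait", 0);
 *	v = atomic_load_int(&nwaiting);
 *	error = sleep_finish(&sls, v != 0);	(only sleep if still set)
 */
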
int
sleep_finish(struct sleep_state *sls, int do_sleep)
{
	struct proc *p = curproc;
	int error = 0, error1 = 0;

	if (sls->sls_catch != 0) {
		/* sleep_setup() has locked the kernel. */
		KERNEL_ASSERT_LOCKED();

		/*
		 * We put ourselves on the sleep queue and start our
		 * timeout before calling sleep_signal_check(), as we could
		 * stop there, and a wakeup or a SIGCONT (or both) could
		 * occur while we were stopped.  A SIGCONT would cause
		 * us to be marked as SSLEEP without resuming us, thus
		 * we must be ready for sleep when sleep_signal_check() is
		 * called.
		 * If the wakeup happens while we're stopped, p->p_wchan
		 * will be NULL upon return from sleep_signal_check().  In
		 * that case we need to unwind immediately.
		 */
		atomic_setbits_int(&p->p_flag, P_SINTR);
		if ((error = sleep_signal_check()) != 0) {
			p->p_stat = SONPROC;
			sls->sls_catch = 0;
			do_sleep = 0;
		} else if (p->p_wchan == NULL) {
			sls->sls_catch = 0;
			do_sleep = 0;
		}
	}

	if (do_sleep) {
		p->p_stat = SSLEEP;
		p->p_ru.ru_nvcsw++;
		SCHED_ASSERT_LOCKED();
		mi_switch();
	} else {
		unsleep(p);
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("sleep_finish !SONPROC");
#endif

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	SCHED_UNLOCK(sls->sls_s);

	/*
	 * Even though this belongs to the signal handling part of sleep,
	 * we need to clear it before the ktrace.
	 */
	atomic_clearbits_int(&p->p_flag, P_SINTR);

	if (sls->sls_timeout) {
		if (p->p_flag & P_TIMEOUT) {
			atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
			error1 = EWOULDBLOCK;
		} else {
			/* This must not sleep. */
			timeout_del_barrier(&p->p_sleep_to);
			KASSERT((p->p_flag & P_TIMEOUT) == 0);
		}
	}

	/* Check if the thread was woken up because of an unwind or signal. */
	if (sls->sls_catch != 0)
		error = sleep_signal_check();

	if (sls->sls_locked)
		KERNEL_UNLOCK();

	/* Signal errors are higher priority than timeouts. */
	if (error == 0 && error1 != 0)
		error = error1;

	return error;
}

/*
 * Check and handle signals and suspensions around a sleep cycle.
 */
int
sleep_signal_check(void)
{
	struct proc *p = curproc;
	struct sigctx ctx;
	int err, sig;

	if ((err = single_thread_check(p, 1)) != 0)
		return err;
	if ((sig = cursig(p, &ctx)) != 0) {
		if (ctx.sig_intr)
			return EINTR;
		else
			return ERESTART;
	}
	return 0;
}

int
wakeup_proc(struct proc *p, const volatile void *chan)
{
	int s, awakened = 0;

	SCHED_LOCK(s);
	if (p->p_wchan != NULL &&
	    ((chan == NULL) || (p->p_wchan == chan))) {
		awakened = 1;
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
	}
	SCHED_UNLOCK(s);

	return awakened;
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p = arg;
	int s;

	SCHED_LOCK(s);
	if (wakeup_proc(p, NULL))
		atomic_setbits_int(&p->p_flag, P_TIMEOUT);
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq);
		p->p_wchan = NULL;
		TRACEPOINT(sched, wakeup, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid);
	}
}

/*
 * Make a number of processes sleeping on the specified identifier runnable.
 */
void
wakeup_n(const volatile void *ident, int n)
{
	struct slpque *qp;
	struct proc *p;
	struct proc *pnext;
	int s;

	SCHED_LOCK(s);
	qp = &slpque[LOOKUP(ident)];
	for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) {
		pnext = TAILQ_NEXT(p, p_runq);
		/*
		 * This can happen if the current thread enqueued
		 * itself on a sleep queue before calling wakeup(9)
		 * and both `ident' values collide in the same bucket.
		 * Skip it.
		 */
		if (p == curproc)
			continue;
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup: p_stat is %d", (int)p->p_stat);
#endif
		if (wakeup_proc(p, ident))
			--n;
	}
	SCHED_UNLOCK(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(const volatile void *chan)
{
	wakeup_n(chan, -1);
}
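
/*
 * Producer-side counterpart to the msleep_nsec() sketch earlier in
 * this file (illustrative, not from the original file; "sc", "frobq",
 * "fq_mtx" and "f" are hypothetical):
 *
 *	mtx_enter(&sc->fq_mtx);
 *	TAILQ_INSERT_TAIL(&sc->frobq, f, f_entry);
 *	wakeup(&sc->frobq);
 *	mtx_leave(&sc->fq_mtx);
 *
 * Because the waiter goes onto the sleep queue before releasing
 * fq_mtx, a wakeup(9) issued while holding fq_mtx cannot fall into the
 * gap between the waiter's condition check and its sleep.
 */
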
int
sys_sched_yield(struct proc *p, void *v, register_t *retval)
{
	struct proc *q;
	uint8_t newprio;
	int s;

	SCHED_LOCK(s);
	/*
	 * If one of the threads of a multi-threaded process called
	 * sched_yield(2), drop its priority to ensure its siblings
	 * can make some progress.
	 */
	newprio = p->p_usrpri;
	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
		newprio = max(newprio, q->p_runpri);
	setrunqueue(p->p_cpu, p, newprio);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);

	return (0);
}

int
thrsleep_unlock(void *lock)
{
	static _atomic_lock_t unlocked = _ATOMIC_LOCK_UNLOCKED;
	_atomic_lock_t *atomiclock = lock;

	if (!lock)
		return 0;

	return copyout(&unlocked, atomiclock, sizeof(unlocked));
}

struct tslpentry {
	TAILQ_ENTRY(tslpentry) tslp_link;
	long tslp_ident;
};

/* thrsleep queue shared between processes */
static struct tslpqueue thrsleep_queue = TAILQ_HEAD_INITIALIZER(thrsleep_queue);
static struct rwlock thrsleep_lock = RWLOCK_INITIALIZER("thrsleeplk");

int
thrsleep(struct proc *p, struct sys___thrsleep_args *v)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	struct tslpentry entry;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	struct timespec *tsp = (struct timespec *)SCARG(uap, tp);
	void *lock = SCARG(uap, lock);
	uint64_t nsecs = INFSLP;
	int abort = 0, error;
	clockid_t clock_id = SCARG(uap, clock_id);

	if (ident == 0)
		return (EINVAL);
	if (tsp != NULL) {
		struct timespec now;

		if ((error = clock_gettime(p, clock_id, &now)))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimespec(p, tsp);
#endif

		if (timespeccmp(tsp, &now, <=)) {
			/* already passed: still do the unlock */
			if ((error = thrsleep_unlock(lock)))
				return (error);
			return (EWOULDBLOCK);
		}

		timespecsub(tsp, &now, tsp);
		nsecs = MIN(TIMESPEC_TO_NSEC(tsp), MAXTSLP);
	}

	if (ident == -1) {
		queue = &thrsleep_queue;
		qlock = &thrsleep_lock;
	} else {
		queue = &p->p_p->ps_tslpqueue;
		qlock = &p->p_p->ps_lock;
	}

	/* Interlock with wakeup. */
	entry.tslp_ident = ident;
	rw_enter_write(qlock);
	TAILQ_INSERT_TAIL(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	error = thrsleep_unlock(lock);

	if (error == 0 && SCARG(uap, abort) != NULL)
		error = copyin(SCARG(uap, abort), &abort, sizeof(abort));

	rw_enter_write(qlock);
	if (error != 0)
		goto out;
	if (abort != 0) {
		error = EINTR;
		goto out;
	}
	if (entry.tslp_ident != 0) {
		error = rwsleep_nsec(&entry, qlock, PWAIT|PCATCH, "thrsleep",
		    nsecs);
	}

out:
	if (entry.tslp_ident != 0)
		TAILQ_REMOVE(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	if (error == ERESTART)
		error = ECANCELED;

	return (error);
}
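
/*
 * Userland view of the protocol (illustrative, not from the original
 * file; "woken" and "lock" are hypothetical names in the style of
 * librthread):
 *
 *	_spinlock(&lock);
 *	while (!woken) {
 *		__thrsleep(&woken, CLOCK_MONOTONIC, NULL, &lock, NULL);
 *		_spinlock(&lock);	(the kernel released it for us)
 *	}
 *	_spinunlock(&lock);
 *
 * The kernel releases `lock' (thrsleep_unlock() above) only after the
 * entry is linked on the queue, so a __thrwakeup(&woken, 1) performed
 * by a thread that sets `woken' while holding the spinlock cannot slip
 * between the check of `woken' and the sleep.
 */
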
int
sys___thrsleep(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	struct timespec ts;
	int error;

	if (SCARG(uap, tp) != NULL) {
		if ((error = copyin(SCARG(uap, tp), &ts, sizeof(ts)))) {
			*retval = error;
			return 0;
		}
		if (!timespecisvalid(&ts)) {
			*retval = EINVAL;
			return 0;
		}
		SCARG(uap, tp) = &ts;
	}

	*retval = thrsleep(p, uap);
	return 0;
}

int
sys___thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrwakeup_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(int) n;
	} */ *uap = v;
	struct tslpentry *entry, *tmp;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	int found = 0;

	if (ident == 0)
		*retval = EINVAL;
	else {
		if (ident == -1) {
			queue = &thrsleep_queue;
			qlock = &thrsleep_lock;
			/*
			 * Wake up all waiters with ident -1.  This is needed
			 * because ident -1 can be shared by multiple userspace
			 * lock state machines concurrently.  The implementation
			 * has no way to direct the wakeup to a particular
			 * state machine.
			 */
			n = 0;
		} else {
			queue = &p->p_p->ps_tslpqueue;
			qlock = &p->p_p->ps_lock;
		}

		rw_enter_write(qlock);
		TAILQ_FOREACH_SAFE(entry, queue, tslp_link, tmp) {
			if (entry->tslp_ident == ident) {
				TAILQ_REMOVE(queue, entry, tslp_link);
				entry->tslp_ident = 0;
				wakeup_one(entry);
				if (++found == n)
					break;
			}
		}
		rw_exit_write(qlock);

		if (ident == -1)
			*retval = 0;
		else
			*retval = found ? 0 : ESRCH;
	}

	return (0);
}
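
/*
 * Usage sketch for the reference-count helpers below (illustrative,
 * not from the original file; "sc" and its softc type are
 * hypothetical):
 *
 *	refcnt_init(&sc->sc_refcnt);		(initial reference)
 *
 *	refcnt_take(&sc->sc_refcnt);		(per additional user)
 *	...
 *	refcnt_rele_wake(&sc->sc_refcnt);	(drop, wake finalizer)
 *
 *	refcnt_finalize(&sc->sc_refcnt, "scfin");
 *	free(sc, M_DEVBUF, sizeof(*sc));
 *
 * refcnt_finalize() drops the caller's own reference and sleeps until
 * every other reference has been released, after which the object can
 * be freed safely.
 */
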
void
refcnt_init(struct refcnt *r)
{
	atomic_store_int(&r->r_refs, 1);
}

void
refcnt_take(struct refcnt *r)
{
	u_int refs;

	refs = atomic_inc_int_nv(&r->r_refs);
	KASSERT(refs != 0);
	(void)refs;
}

int
refcnt_rele(struct refcnt *r)
{
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	if (refs == 0) {
		membar_enter_after_atomic();
		return (1);
	}
	return (0);
}

void
refcnt_rele_wake(struct refcnt *r)
{
	if (refcnt_rele(r))
		wakeup_one(r);
}

void
refcnt_finalize(struct refcnt *r, const char *wmesg)
{
	struct sleep_state sls;
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	while (refs) {
		sleep_setup(&sls, r, PWAIT, wmesg, 0);
		refs = atomic_load_int(&r->r_refs);
		sleep_finish(&sls, refs);
	}
	/* Order subsequent loads and stores after refs == 0 load. */
	membar_sync();
}

int
refcnt_shared(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	return (refs > 1);
}

unsigned int
refcnt_read(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	return (refs);
}

void
cond_init(struct cond *c)
{
	atomic_store_int(&c->c_wait, 1);
}

void
cond_signal(struct cond *c)
{
	atomic_store_int(&c->c_wait, 0);

	wakeup_one(c);
}

void
cond_wait(struct cond *c, const char *wmesg)
{
	struct sleep_state sls;
	unsigned int wait;

	wait = atomic_load_int(&c->c_wait);
	while (wait) {
		sleep_setup(&sls, c, PWAIT, wmesg, 0);
		wait = atomic_load_int(&c->c_wait);
		sleep_finish(&sls, wait);
	}
}
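
/*
 * Usage sketch for the one-shot cond API above (illustrative, not from
 * the original file): one thread hands work to another and waits for
 * completion.
 *
 *	struct cond c;
 *
 *	cond_init(&c);			(caller: arm the condition)
 *	...hand &c to the worker...
 *	cond_wait(&c, "condex");	(caller: sleep until signalled)
 *
 *	cond_signal(&c);		(worker: mark done, wake caller)
 *
 * cond_wait() re-checks c_wait between sleep_setup() and
 * sleep_finish(), so a cond_signal() arriving at any point cannot be
 * lost.
 */
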