kern_synch.c revision 1.175
/*	$OpenBSD: kern_synch.c,v 1.175 2021/02/08 08:18:45 mpi Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>
#include <sys/refcnt.h>
#include <sys/atomic.h>
#include <sys/smr.h>
#include <sys/witness.h>
#include <sys/tracepoint.h>

#include <ddb/db_output.h>

#include <machine/spinlock.h>

#ifdef DIAGNOSTIC
#include <sys/syslog.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

int	sleep_signal_check(void);
int	thrsleep(struct proc *, struct sys___thrsleep_args *);
int	thrsleep_unlock(void *);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
TAILQ_HEAD(slpque,proc) slpque[TABLESIZE];
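/*
 * For example, a hypothetical wait channel at address 0x8000a5c4
 * hashes to ((0x8000a5c4 >> 8) & 127) == 37, so sleepers on that
 * ident are queued on slpque[37].
 */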
void
sleep_queue_init(void)
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * Global sleep channel for threads that do not want to
 * receive wakeup(9) broadcasts.
 */
int nowake;

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
extern int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise they are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
{
	struct sleep_state sls;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);

#ifdef MULTIPROCESSOR
	KASSERT(timo || _kernel_lock_held());
#endif

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		int s;
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		splx(s);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg, timo);
	return sleep_finish(&sls, 1);
}
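/*
 * A minimal caller sketch (assumed code, not part of this file;
 * "sc_ready" is illustrative): because wakeup(9) only means the
 * channel was signalled, callers re-check their condition in a loop:
 *
 *	while (!sc->sc_ready) {
 *		error = tsleep(&sc->sc_ready, PWAIT | PCATCH, "ready", 0);
 *		if (error)
 *			return (error);
 *	}
 */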
int
tsleep_nsec(const volatile void *ident, int priority, const char *wmesg,
    uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return tsleep(ident, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	/*
	 * We want to sleep at least nsecs nanoseconds worth of ticks.
	 *
	 *  - Clamp nsecs to prevent arithmetic overflow.
	 *
	 *  - Round nsecs up to account for any nanoseconds that do not
	 *    divide evenly into tick_nsec, otherwise we'll lose them to
	 *    integer division in the next step.  We add (tick_nsec - 1)
	 *    to keep from introducing a spurious tick if there are no
	 *    such nanoseconds, i.e. nsecs % tick_nsec == 0.
	 *
	 *  - Divide the rounded value to a count of ticks.  We divide
	 *    by (tick_nsec + 1) to discard the extra tick introduced if,
	 *    before rounding, nsecs % tick_nsec == 1.
	 *
	 *  - Finally, add a tick to the result.  We need to wait out
	 *    the current tick before we can begin counting our interval,
	 *    as we do not know how much time has elapsed since the
	 *    current tick began.
	 */
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return tsleep(ident, priority, wmesg, (int)to_ticks);
}

/*
 * Same as tsleep, but if we have a mutex provided, then once we've
 * entered the sleep queue we drop the mutex.  After sleeping we re-lock.
 */
int
msleep(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, spl;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	KASSERT(mtx != NULL);

	if (priority & PCATCH)
		KERNEL_ASSERT_LOCKED();

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		spl = MUTEX_OLDIPL(mtx);
		MUTEX_OLDIPL(mtx) = safepri;
		mtx_leave(mtx);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		if ((priority & PNORELOCK) == 0) {
			mtx_enter(mtx);
			MUTEX_OLDIPL(mtx) = spl;
		} else
			splx(spl);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg, timo);

	/* XXX - We need to make sure that the mutex doesn't
	 * unblock splsched.  This can be made a bit more
	 * correct when the sched_lock is a mutex.
	 */
	spl = MUTEX_OLDIPL(mtx);
	MUTEX_OLDIPL(mtx) = splsched();
	mtx_leave(mtx);
	/* signal may stop the process, release mutex before that */
	error = sleep_finish(&sls, 1);

	if ((priority & PNORELOCK) == 0) {
		mtx_enter(mtx);
		MUTEX_OLDIPL(mtx) = spl;	/* put the ipl back */
	} else
		splx(spl);

	return error;
}

int
msleep_nsec(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return msleep(ident, mtx, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return msleep(ident, mtx, priority, wmesg, (int)to_ticks);
}
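/*
 * Worked example of the tick conversion used by tsleep_nsec() and
 * msleep_nsec() above (and rwsleep_nsec() below), assuming hz = 100,
 * i.e. tick_nsec = 10000000: a request for nsecs = 25000000 (25ms)
 * yields to_ticks = (25000000 + 9999999) / 10000001 + 1 = 3 + 1 = 4,
 * so the caller sleeps for at least the requested 25ms and at most
 * about 40ms.
 */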
/*
 * Same as tsleep, but if we have an rwlock provided, then once we've
 * entered the sleep queue we drop it.  After sleeping we re-lock.
 */
int
rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, status;

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	rw_assert_anylock(rwl);
	status = rw_status(rwl);

	sleep_setup(&sls, ident, priority, wmesg, timo);

	rw_exit(rwl);
	/* signal may stop the process, release rwlock before that */
	error = sleep_finish(&sls, 1);

	if ((priority & PNORELOCK) == 0)
		rw_enter(rwl, status);

	return error;
}

int
rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return rwsleep(ident, rwl, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return rwsleep(ident, rwl, priority, wmesg, (int)to_ticks);
}

void
sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
    const char *wmesg, int timo)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (p->p_flag & P_CANTSLEEP)
		panic("sleep: %s failed insomnia", p->p_p->ps_comm);
	if (ident == NULL)
		panic("tsleep: no ident");
	if (p->p_stat != SONPROC)
		panic("tsleep: not SONPROC");
#endif

	sls->sls_catch = prio & PCATCH;
	sls->sls_locked = 0;
	sls->sls_timeout = 0;

	/*
	 * The kernel has to be locked for signal processing.
	 * This is done here and not in sleep_finish() because
	 * KERNEL_LOCK() has to be taken before SCHED_LOCK().
	 */
	if (sls->sls_catch != 0) {
		KERNEL_LOCK();
		sls->sls_locked = 1;
	}

	SCHED_LOCK(sls->sls_s);

	TRACEPOINT(sched, sleep, NULL);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_slppri = prio & PRIMASK;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);

	KASSERT((p->p_flag & P_TIMEOUT) == 0);
	if (timo) {
		sls->sls_timeout = 1;
		timeout_add(&p->p_sleep_to, timo);
	}
}
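/*
 * The two-step interface lets a caller re-test its condition after
 * going onto the sleep queue but before actually sleeping; a zero
 * do_sleep aborts the sleep.  A sketch of the pattern, as used by
 * refcnt_finalize() and cond_wait() below (names illustrative):
 *
 *	sleep_setup(&sls, chan, PWAIT, "wait", 0);
 *	sleep_finish(&sls, condition_still_false);
 */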
int
sleep_finish(struct sleep_state *sls, int do_sleep)
{
	struct proc *p = curproc;
	int error = 0, error1 = 0;

	if (sls->sls_catch != 0) {
		/* sleep_setup() has locked the kernel. */
		KERNEL_ASSERT_LOCKED();

		/*
		 * We put ourselves on the sleep queue and start our
		 * timeout before calling sleep_signal_check(), as we could
		 * stop there, and a wakeup or a SIGCONT (or both) could
		 * occur while we were stopped.  A SIGCONT would cause
		 * us to be marked as SSLEEP without resuming us, thus
		 * we must be ready for sleep when sleep_signal_check() is
		 * called.
		 * If the wakeup happens while we're stopped, p->p_wchan
		 * will be NULL upon return from sleep_signal_check().  In
		 * that case we need to unwind immediately.
		 */
		atomic_setbits_int(&p->p_flag, P_SINTR);
		if ((error = sleep_signal_check()) != 0) {
			p->p_stat = SONPROC;
			sls->sls_catch = 0;
			do_sleep = 0;
		} else if (p->p_wchan == NULL) {
			sls->sls_catch = 0;
			do_sleep = 0;
		}
	}

	if (do_sleep) {
		p->p_stat = SSLEEP;
		p->p_ru.ru_nvcsw++;
		SCHED_ASSERT_LOCKED();
		mi_switch();
	} else {
		unsleep(p);
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("sleep_finish !SONPROC");
#endif

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	SCHED_UNLOCK(sls->sls_s);

	/*
	 * Even though this belongs to the signal handling part of sleep,
	 * we need to clear it before the ktrace.
	 */
	atomic_clearbits_int(&p->p_flag, P_SINTR);

	if (sls->sls_timeout) {
		if (p->p_flag & P_TIMEOUT) {
			atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
			error1 = EWOULDBLOCK;
		} else {
			/* This must not sleep. */
			timeout_del_barrier(&p->p_sleep_to);
			KASSERT((p->p_flag & P_TIMEOUT) == 0);
		}
	}

	/* Check if the thread was woken up because of an unwind or signal. */
	if (sls->sls_catch != 0)
		error = sleep_signal_check();

	if (sls->sls_locked)
		KERNEL_UNLOCK();

	/* Signal errors are higher priority than timeouts. */
	if (error == 0 && error1 != 0)
		error = error1;

	return error;
}

/*
 * Check and handle signals and suspensions around a sleep cycle.
 */
int
sleep_signal_check(void)
{
	struct proc *p = curproc;
	int err, sig;

	if ((err = single_thread_check(p, 1)) != 0)
		return err;
	if ((sig = CURSIG(p)) != 0) {
		if (p->p_p->ps_sigacts->ps_sigintr & sigmask(sig))
			return EINTR;
		else
			return ERESTART;
	}
	return 0;
}

int
wakeup_proc(struct proc *p, const volatile void *chan)
{
	int s, awakened = 0;

	SCHED_LOCK(s);
	if (p->p_wchan != NULL &&
	    ((chan == NULL) || (p->p_wchan == chan))) {
		awakened = 1;
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
	}
	SCHED_UNLOCK(s);

	return awakened;
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p = arg;
	int s;

	SCHED_LOCK(s);
	if (wakeup_proc(p, NULL))
		atomic_setbits_int(&p->p_flag, P_TIMEOUT);
	SCHED_UNLOCK(s);
}
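/*
 * p_sleep_to is armed by sleep_setup() above; binding it to
 * endtsleep() happens when the thread is created, along the lines
 * of (assumed, done in the fork path rather than in this file):
 *
 *	timeout_set(&p->p_sleep_to, endtsleep, p);
 */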
/*
 * Remove a process from its wait queue.
 */
void
unsleep(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq);
		p->p_wchan = NULL;
		TRACEPOINT(sched, wakeup, p->p_tid, p->p_p->ps_pid);
	}
}

/*
 * Make a number of processes sleeping on the specified identifier runnable.
 */
void
wakeup_n(const volatile void *ident, int n)
{
	struct slpque *qp;
	struct proc *p;
	struct proc *pnext;
	int s;

	SCHED_LOCK(s);
	qp = &slpque[LOOKUP(ident)];
	for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) {
		pnext = TAILQ_NEXT(p, p_runq);
#ifdef DIAGNOSTIC
		/*
		 * If the rwlock passed to rwsleep() is contended, the
		 * CPU will end up calling wakeup() between sleep_setup()
		 * and sleep_finish().
		 */
		if (p == curproc) {
			KASSERT(p->p_stat == SONPROC);
			continue;
		}
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup: p_stat is %d", (int)p->p_stat);
#endif
		if (wakeup_proc(p, ident))
			--n;
	}
	SCHED_UNLOCK(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(const volatile void *chan)
{
	wakeup_n(chan, -1);
}
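/*
 * wakeup_one(9), used by the code below, is the single-waiter case;
 * <sys/systm.h> defines it in terms of wakeup_n() with n == 1, while
 * wakeup() above is the broadcast case.
 */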
int
sys_sched_yield(struct proc *p, void *v, register_t *retval)
{
	struct proc *q;
	uint8_t newprio;
	int s;

	SCHED_LOCK(s);
	/*
	 * If one of the threads of a multi-threaded process called
	 * sched_yield(2), drop its priority to ensure its siblings
	 * can make some progress.
	 */
	newprio = p->p_usrpri;
	SMR_TAILQ_FOREACH_LOCKED(q, &p->p_p->ps_threads, p_thr_link)
		newprio = max(newprio, q->p_runpri);
	setrunqueue(p->p_cpu, p, newprio);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);

	return (0);
}

int
thrsleep_unlock(void *lock)
{
	static _atomic_lock_t unlocked = _ATOMIC_LOCK_UNLOCKED;
	_atomic_lock_t *atomiclock = lock;

	if (!lock)
		return 0;

	return copyout(&unlocked, atomiclock, sizeof(unlocked));
}

struct tslpentry {
	TAILQ_ENTRY(tslpentry) tslp_link;
	long tslp_ident;
};

/* thrsleep queue shared between processes */
static struct tslpqueue thrsleep_queue = TAILQ_HEAD_INITIALIZER(thrsleep_queue);
static struct rwlock thrsleep_lock = RWLOCK_INITIALIZER("thrsleeplk");

int
thrsleep(struct proc *p, struct sys___thrsleep_args *v)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	struct tslpentry entry;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	struct timespec *tsp = (struct timespec *)SCARG(uap, tp);
	void *lock = SCARG(uap, lock);
	uint64_t nsecs = INFSLP;
	int abort = 0, error;
	clockid_t clock_id = SCARG(uap, clock_id);

	if (ident == 0)
		return (EINVAL);
	if (tsp != NULL) {
		struct timespec now;

		if ((error = clock_gettime(p, clock_id, &now)))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimespec(p, tsp);
#endif

		if (timespeccmp(tsp, &now, <=)) {
			/* already passed: still do the unlock */
			if ((error = thrsleep_unlock(lock)))
				return (error);
			return (EWOULDBLOCK);
		}

		timespecsub(tsp, &now, tsp);
		nsecs = MIN(TIMESPEC_TO_NSEC(tsp), MAXTSLP);
	}

	if (ident == -1) {
		queue = &thrsleep_queue;
		qlock = &thrsleep_lock;
	} else {
		queue = &p->p_p->ps_tslpqueue;
		qlock = &p->p_p->ps_lock;
	}

	/* Interlock with wakeup. */
	entry.tslp_ident = ident;
	rw_enter_write(qlock);
	TAILQ_INSERT_TAIL(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	error = thrsleep_unlock(lock);

	if (error == 0 && SCARG(uap, abort) != NULL)
		error = copyin(SCARG(uap, abort), &abort, sizeof(abort));

	rw_enter_write(qlock);
	if (error != 0)
		goto out;
	if (abort != 0) {
		error = EINTR;
		goto out;
	}
	if (entry.tslp_ident != 0) {
		error = rwsleep_nsec(&entry, qlock, PWAIT|PCATCH, "thrsleep",
		    nsecs);
	}

out:
	if (entry.tslp_ident != 0)
		TAILQ_REMOVE(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	if (error == ERESTART)
		error = ECANCELED;

	return (error);
}

int
sys___thrsleep(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	struct timespec ts;
	int error;

	if (SCARG(uap, tp) != NULL) {
		if ((error = copyin(SCARG(uap, tp), &ts, sizeof(ts)))) {
			*retval = error;
			return 0;
		}
		if (!timespecisvalid(&ts)) {
			*retval = EINVAL;
			return 0;
		}
		SCARG(uap, tp) = &ts;
	}

	*retval = thrsleep(p, uap);
	return 0;
}

int
sys___thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrwakeup_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(int) n;
	} */ *uap = v;
	struct tslpentry *entry, *tmp;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	int found = 0;

	if (ident == 0)
		*retval = EINVAL;
	else {
		if (ident == -1) {
			queue = &thrsleep_queue;
			qlock = &thrsleep_lock;
			/*
			 * Wake up all waiters with ident -1.  This is needed
			 * because ident -1 can be shared by multiple userspace
			 * lock state machines concurrently.  The implementation
			 * has no way to direct the wakeup to a particular
			 * state machine.
			 */
			n = 0;
		} else {
			queue = &p->p_p->ps_tslpqueue;
			qlock = &p->p_p->ps_lock;
		}

		rw_enter_write(qlock);
		TAILQ_FOREACH_SAFE(entry, queue, tslp_link, tmp) {
			if (entry->tslp_ident == ident) {
				TAILQ_REMOVE(queue, entry, tslp_link);
				entry->tslp_ident = 0;
				wakeup_one(entry);
				if (++found == n)
					break;
			}
		}
		rw_exit_write(qlock);

		if (ident == -1)
			*retval = 0;
		else
			*retval = found ? 0 : ESRCH;
	}

	return (0);
}
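/*
 * A userland sketch (assumed, librthread-style; names illustrative)
 * of how the pair builds a sleeping lock, with the ident simply being
 * the address of the user lock word:
 *
 *	while (try_lock(l) != 0)
 *		__thrsleep(l, CLOCK_MONOTONIC, NULL, &l->spinlock, NULL);
 *	...
 *	unlock(l);
 *	__thrwakeup(l, 1);
 */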
void
refcnt_init(struct refcnt *r)
{
	r->refs = 1;
}

void
refcnt_take(struct refcnt *r)
{
#ifdef DIAGNOSTIC
	u_int refcnt;

	refcnt = atomic_inc_int_nv(&r->refs);
	KASSERT(refcnt != 0);
#else
	atomic_inc_int(&r->refs);
#endif
}

int
refcnt_rele(struct refcnt *r)
{
	u_int refcnt;

	refcnt = atomic_dec_int_nv(&r->refs);
	KASSERT(refcnt != ~0);

	return (refcnt == 0);
}

void
refcnt_rele_wake(struct refcnt *r)
{
	if (refcnt_rele(r))
		wakeup_one(r);
}

void
refcnt_finalize(struct refcnt *r, const char *wmesg)
{
	struct sleep_state sls;
	u_int refcnt;

	refcnt = atomic_dec_int_nv(&r->refs);
	while (refcnt) {
		sleep_setup(&sls, r, PWAIT, wmesg, 0);
		refcnt = r->refs;
		sleep_finish(&sls, refcnt);
	}
}

void
cond_init(struct cond *c)
{
	c->c_wait = 1;
}

void
cond_signal(struct cond *c)
{
	c->c_wait = 0;

	wakeup_one(c);
}

void
cond_wait(struct cond *c, const char *wmesg)
{
	struct sleep_state sls;
	int wait;

	wait = c->c_wait;
	while (wait) {
		sleep_setup(&sls, c, PWAIT, wmesg, 0);
		wait = c->c_wait;
		sleep_finish(&sls, wait);
	}
}
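/*
 * A minimal refcnt usage sketch (assumed caller; "sc" illustrative):
 *
 *	refcnt_init(&sc->sc_refcnt);		initial reference
 *	refcnt_take(&sc->sc_refcnt);		per additional user
 *	refcnt_rele_wake(&sc->sc_refcnt);	per user, when done
 *	refcnt_finalize(&sc->sc_refcnt, "scfin");
 *
 * refcnt_finalize() drops the initial reference and sleeps until all
 * remaining references have been released.
 */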