/*	$OpenBSD: kern_synch.c,v 1.201 2024/03/30 13:33:20 mpi Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/refcnt.h>
#include <sys/atomic.h>
#include <sys/tracepoint.h>

#include <ddb/db_output.h>

#include <machine/spinlock.h>

#ifdef DIAGNOSTIC
#include <sys/syslog.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

int	sleep_signal_check(void);
int	thrsleep(struct proc *, struct sys___thrsleep_args *);
int	thrsleep_unlock(void *);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
TAILQ_HEAD(slpque,proc) slpque[TABLESIZE];

void
sleep_queue_init(void)
{
	int i;

	for (i = 0; i < TABLESIZE; i++)
		TAILQ_INIT(&slpque[i]);
}

/*
 * Global sleep channel for threads that do not want to
 * receive wakeup(9) broadcasts.
 */
int nowake;
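
/*
 * Illustrative note (not part of the original file): LOOKUP() keeps
 * only bits 8..14 of the channel address, so channels whose addresses
 * differ only below bit 8 land in the same bucket, and the wakeup path
 * must still compare p_wchan to tell them apart.  With a hypothetical
 * softc holding two adjacent wait channels:
 *
 *	struct softc { int sc_rx; int sc_tx; } sc;
 *
 *	LOOKUP(&sc.sc_rx) == LOOKUP(&sc.sc_tx)
 *
 * (equal whenever the two fields do not straddle a 256-byte boundary).
 */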

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
extern int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
{
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);

#ifdef MULTIPROCESSOR
	KASSERT(ident == &nowake || timo || _kernel_lock_held());
#endif

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		int s;
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		splx(s);
		return (0);
	}

	sleep_setup(ident, priority, wmesg);
	return sleep_finish(timo, 1);
}

int
tsleep_nsec(const volatile void *ident, int priority, const char *wmesg,
    uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return tsleep(ident, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	/*
	 * We want to sleep at least nsecs nanoseconds worth of ticks.
	 *
	 *  - Clamp nsecs to prevent arithmetic overflow.
	 *
	 *  - Round nsecs up to account for any nanoseconds that do not
	 *    divide evenly into tick_nsec, otherwise we'll lose them to
	 *    integer division in the next step.  We add (tick_nsec - 1)
	 *    to keep from introducing a spurious tick if there are no
	 *    such nanoseconds, i.e. nsecs % tick_nsec == 0.
	 *
	 *  - Divide the rounded value into a count of ticks.  We divide
	 *    by (tick_nsec + 1) to discard the extra tick introduced if,
	 *    before rounding, nsecs % tick_nsec == 1.
	 *
	 *  - Finally, add a tick to the result.  We need to wait out
	 *    the current tick before we can begin counting our interval,
	 *    as we do not know how much time has elapsed since the
	 *    current tick began.
	 */
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return tsleep(ident, priority, wmesg, (int)to_ticks);
}
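
/*
 * Worked example (illustrative; assumes hz = 100, i.e.
 * tick_nsec = 10000000):
 *
 *	nsecs = 25000000 (25ms):
 *	to_ticks = (25000000 + 9999999) / 10000001 + 1 = 3 + 1 = 4
 *	4 ticks = 40ms >= 25ms
 *
 * The (tick_nsec + 1) divisor matters exactly when
 * nsecs % tick_nsec == 1, e.g. nsecs = 10000001:
 *
 *	(10000001 + 9999999) / 10000001 + 1 = 1 + 1 = 2 ticks
 *
 * whereas dividing by tick_nsec would give 2 + 1 = 3 ticks, one
 * spurious tick more than the rounding step intends.
 */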

/*
 * Same as tsleep, but if we have a mutex provided, then once we've
 * entered the sleep queue we drop the mutex.  After sleeping we re-lock.
 */
int
msleep(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, int timo)
{
	int error, spl;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	KASSERT(mtx != NULL);

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		spl = MUTEX_OLDIPL(mtx);
		MUTEX_OLDIPL(mtx) = safepri;
		mtx_leave(mtx);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		if ((priority & PNORELOCK) == 0) {
			mtx_enter(mtx);
			MUTEX_OLDIPL(mtx) = spl;
		} else
			splx(spl);
		return (0);
	}

	sleep_setup(ident, priority, wmesg);

	mtx_leave(mtx);
	/* signal may stop the process, release mutex before that */
	error = sleep_finish(timo, 1);

	if ((priority & PNORELOCK) == 0)
		mtx_enter(mtx);

	return error;
}

int
msleep_nsec(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return msleep(ident, mtx, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return msleep(ident, mtx, priority, wmesg, (int)to_ticks);
}
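
/*
 * Usage sketch (illustrative; "sc", "sc_mtx", "sc_flags" and SCF_READY
 * are hypothetical): the canonical pattern re-checks its condition in
 * a loop, because a wakeup does not say why we woke and the hash
 * bucket may be shared.
 *
 *	mtx_enter(&sc->sc_mtx);
 *	while ((sc->sc_flags & SCF_READY) == 0) {
 *		error = msleep_nsec(&sc->sc_flags, &sc->sc_mtx,
 *		    PWAIT | PCATCH, "scrdy", SEC_TO_NSEC(1));
 *		if (error != 0 && error != EWOULDBLOCK)
 *			break;
 *	}
 *	mtx_leave(&sc->sc_mtx);
 */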

/*
 * Same as tsleep, but if we have an rwlock provided, then once we've
 * entered the sleep queue we drop it.  After sleeping we re-lock.
 */
int
rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, int timo)
{
	int error, status;

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	KASSERT(ident != rwl);
	rw_assert_anylock(rwl);
	status = rw_status(rwl);

	sleep_setup(ident, priority, wmesg);

	rw_exit(rwl);
	/* signal may stop the process, release rwlock before that */
	error = sleep_finish(timo, 1);

	if ((priority & PNORELOCK) == 0)
		rw_enter(rwl, status);

	return error;
}

int
rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return rwsleep(ident, rwl, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return rwsleep(ident, rwl, priority, wmesg, (int)to_ticks);
}
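
/*
 * Sketch (illustrative; "sc" is hypothetical): PNORELOCK returns with
 * the lock still released, which suits callers that will not touch the
 * locked object again, e.g. on a teardown path:
 *
 *	rw_enter_write(&sc->sc_lock);
 *	error = rwsleep_nsec(&sc->sc_dying, &sc->sc_lock,
 *	    PWAIT | PCATCH | PNORELOCK, "scdie", INFSLP);
 *	(sc->sc_lock is no longer held here)
 */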

void
sleep_setup(const volatile void *ident, int prio, const char *wmesg)
{
	struct proc *p = curproc;
	int s;

#ifdef DIAGNOSTIC
	if (p->p_flag & P_CANTSLEEP)
		panic("sleep: %s failed insomnia", p->p_p->ps_comm);
	if (ident == NULL)
		panic("tsleep: no ident");
	if (p->p_stat != SONPROC)
		panic("tsleep: not SONPROC");
#endif

	SCHED_LOCK(s);

	TRACEPOINT(sched, sleep, NULL);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_slppri = prio & PRIMASK;
	atomic_setbits_int(&p->p_flag, P_WSLEEP);
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);
	if (prio & PCATCH)
		atomic_setbits_int(&p->p_flag, P_SINTR);
	p->p_stat = SSLEEP;

	SCHED_UNLOCK(s);
}

int
sleep_finish(int timo, int do_sleep)
{
	struct proc *p = curproc;
	int s, catch, error = 0, error1 = 0;

	catch = p->p_flag & P_SINTR;

	if (timo != 0) {
		KASSERT((p->p_flag & P_TIMEOUT) == 0);
		timeout_add(&p->p_sleep_to, timo);
	}

	if (catch != 0) {
		/*
		 * We put ourselves on the sleep queue and start our
		 * timeout before calling sleep_signal_check(), as we could
		 * stop there, and a wakeup or a SIGCONT (or both) could
		 * occur while we were stopped.  A SIGCONT would cause
		 * us to be marked as SSLEEP without resuming us, thus
		 * we must be ready for sleep when sleep_signal_check() is
		 * called.
		 */
		if ((error = sleep_signal_check()) != 0) {
			catch = 0;
			do_sleep = 0;
		}
	}

	SCHED_LOCK(s);
	/*
	 * If the wakeup happens while going to sleep, p->p_wchan
	 * will be NULL.  In that case unwind immediately but still
	 * check for possible signals and timeouts.
	 */
	if (p->p_wchan == NULL)
		do_sleep = 0;
	atomic_clearbits_int(&p->p_flag, P_WSLEEP);

	if (do_sleep) {
		KASSERT(p->p_stat == SSLEEP || p->p_stat == SSTOP);
		p->p_ru.ru_nvcsw++;
		mi_switch();
	} else {
		KASSERT(p->p_stat == SONPROC || p->p_stat == SSLEEP ||
		    p->p_stat == SSTOP);
		unsleep(p);
		p->p_stat = SONPROC;
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("sleep_finish !SONPROC");
#endif

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	SCHED_UNLOCK(s);

	/*
	 * Even though this belongs to the signal handling part of sleep,
	 * we need to clear it before the ktrace.
	 */
	atomic_clearbits_int(&p->p_flag, P_SINTR);

	if (timo != 0) {
		if (p->p_flag & P_TIMEOUT) {
			error1 = EWOULDBLOCK;
		} else {
			/* This can sleep.  It must not use timeouts. */
			timeout_del_barrier(&p->p_sleep_to);
		}
		atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
	}

	/* Check if thread was woken up because of an unwind or signal */
	if (catch != 0)
		error = sleep_signal_check();

	/* Signal errors are higher priority than timeouts. */
	if (error == 0 && error1 != 0)
		error = error1;

	return error;
}
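
/*
 * The sleep_setup()/sleep_finish() split lets a caller enqueue itself,
 * re-test its wakeup condition, and only then decide whether to block,
 * closing the race between the test and the sleep.  refcnt_finalize()
 * and cond_wait() below follow this shape; schematically (illustrative,
 * "done" is hypothetical):
 *
 *	while (!done(obj)) {
 *		sleep_setup(obj, PWAIT, "wait");
 *		sleep_finish(0, !done(obj));
 *	}
 */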

/*
 * Check and handle signals and suspensions around a sleep cycle.
 */
int
sleep_signal_check(void)
{
	struct proc *p = curproc;
	struct sigctx ctx;
	int err, sig;

	if ((err = single_thread_check(p, 1)) != 0)
		return err;
	if ((sig = cursig(p, &ctx)) != 0) {
		if (ctx.sig_intr)
			return EINTR;
		else
			return ERESTART;
	}
	return 0;
}

int
wakeup_proc(struct proc *p, int flags)
{
	int awakened = 0;

	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		awakened = 1;
		if (flags)
			atomic_setbits_int(&p->p_flag, flags);
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("thread %d p_stat is %d", p->p_tid, p->p_stat);
#endif
		unsleep(p);
		if (p->p_stat == SSLEEP)
			setrunnable(p);
	}

	return awakened;
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p = arg;
	int s;

	SCHED_LOCK(s);
	wakeup_proc(p, P_TIMEOUT);
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq);
		p->p_wchan = NULL;
		TRACEPOINT(sched, unsleep, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid);
	}
}

/*
 * Make a number of processes sleeping on the specified identifier runnable.
 */
void
wakeup_n(const volatile void *ident, int n)
{
	struct slpque *qp, wakeq;
	struct proc *p;
	struct proc *pnext;
	int s;

	TAILQ_INIT(&wakeq);

	SCHED_LOCK(s);
	qp = &slpque[LOOKUP(ident)];
	for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) {
		pnext = TAILQ_NEXT(p, p_runq);
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("thread %d p_stat is %d", p->p_tid, p->p_stat);
#endif
		KASSERT(p->p_wchan != NULL);
		if (p->p_wchan == ident) {
			TAILQ_REMOVE(qp, p, p_runq);
			p->p_wchan = NULL;
			TAILQ_INSERT_TAIL(&wakeq, p, p_runq);
			--n;
		}
	}
	while ((p = TAILQ_FIRST(&wakeq))) {
		TAILQ_REMOVE(&wakeq, p, p_runq);
		TRACEPOINT(sched, unsleep, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid);
		if (p->p_stat == SSLEEP)
			setrunnable(p);
	}
	SCHED_UNLOCK(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(const volatile void *chan)
{
	wakeup_n(chan, -1);
}
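
/*
 * Pairing sketch (illustrative; "sc_avail" is hypothetical, and both
 * sides are serialized by the kernel lock, which tsleep(9) asserts for
 * untimed sleeps): the waker updates the condition before wakeup(),
 * and the sleeper re-tests it on each iteration.
 *
 *	sleeper:
 *		while (sc->sc_avail == 0)
 *			tsleep_nsec(&sc->sc_avail, PWAIT, "avail", INFSLP);
 *
 *	waker:
 *		sc->sc_avail++;
 *		wakeup(&sc->sc_avail);
 */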

int
sys_sched_yield(struct proc *p, void *v, register_t *retval)
{
	struct proc *q;
	uint8_t newprio;
	int s;

	SCHED_LOCK(s);
	/*
	 * If one of the threads of a multi-threaded process called
	 * sched_yield(2), drop its priority to ensure its siblings
	 * can make some progress.
	 */
	newprio = p->p_usrpri;
	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
		newprio = max(newprio, q->p_runpri);
	setrunqueue(p->p_cpu, p, newprio);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);

	return (0);
}

int
thrsleep_unlock(void *lock)
{
	static _atomic_lock_t unlocked = _ATOMIC_LOCK_UNLOCKED;
	_atomic_lock_t *atomiclock = lock;

	if (!lock)
		return 0;

	return copyout(&unlocked, atomiclock, sizeof(unlocked));
}

struct tslpentry {
	TAILQ_ENTRY(tslpentry) tslp_link;
	long tslp_ident;
};

/* thrsleep queue shared between processes */
static struct tslpqueue thrsleep_queue = TAILQ_HEAD_INITIALIZER(thrsleep_queue);
static struct rwlock thrsleep_lock = RWLOCK_INITIALIZER("thrsleeplk");

int
thrsleep(struct proc *p, struct sys___thrsleep_args *v)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	struct tslpentry entry;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	struct timespec *tsp = (struct timespec *)SCARG(uap, tp);
	void *lock = SCARG(uap, lock);
	uint64_t nsecs = INFSLP;
	int abort = 0, error;
	clockid_t clock_id = SCARG(uap, clock_id);

	if (ident == 0)
		return (EINVAL);
	if (tsp != NULL) {
		struct timespec now;

		if ((error = clock_gettime(p, clock_id, &now)))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimespec(p, tsp);
#endif

		if (timespeccmp(tsp, &now, <=)) {
			/* already passed: still do the unlock */
			if ((error = thrsleep_unlock(lock)))
				return (error);
			return (EWOULDBLOCK);
		}

		timespecsub(tsp, &now, tsp);
		nsecs = MIN(TIMESPEC_TO_NSEC(tsp), MAXTSLP);
	}

	if (ident == -1) {
		queue = &thrsleep_queue;
		qlock = &thrsleep_lock;
	} else {
		queue = &p->p_p->ps_tslpqueue;
		qlock = &p->p_p->ps_lock;
	}

	/* Interlock with wakeup. */
	entry.tslp_ident = ident;
	rw_enter_write(qlock);
	TAILQ_INSERT_TAIL(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	error = thrsleep_unlock(lock);

	if (error == 0 && SCARG(uap, abort) != NULL)
		error = copyin(SCARG(uap, abort), &abort, sizeof(abort));

	rw_enter_write(qlock);
	if (error != 0)
		goto out;
	if (abort != 0) {
		error = EINTR;
		goto out;
	}
	if (entry.tslp_ident != 0) {
		error = rwsleep_nsec(&entry, qlock, PWAIT|PCATCH, "thrsleep",
		    nsecs);
	}

out:
	if (entry.tslp_ident != 0)
		TAILQ_REMOVE(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	if (error == ERESTART)
		error = ECANCELED;

	return (error);
}

int
sys___thrsleep(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	struct timespec ts;
	int error;

	if (SCARG(uap, tp) != NULL) {
		if ((error = copyin(SCARG(uap, tp), &ts, sizeof(ts)))) {
			*retval = error;
			return 0;
		}
		if (!timespecisvalid(&ts)) {
			*retval = EINVAL;
			return 0;
		}
		SCARG(uap, tp) = &ts;
	}

	*retval = thrsleep(p, uap);
	return 0;
}
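
/*
 * Userland sketch (illustrative; these syscalls exist to back
 * librthread/libc synchronization, not direct application use).  A
 * waiter publishes itself and has the kernel release its userland
 * spinlock; "spin" and "ident" are hypothetical:
 *
 *	_atomic_lock_t spin;	(held by the waiter at this point)
 *	volatile int ident;
 *
 *	waiter:	__thrsleep(&ident, CLOCK_MONOTONIC, NULL, &spin, NULL);
 *	waker:	__thrwakeup(&ident, 1);
 *
 * The kernel queues the waiter's tslpentry under qlock before clearing
 * "spin" via thrsleep_unlock(), so a wakeup issued between the unlock
 * and the sleep zeroes tslp_ident and is not lost.
 */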

int
sys___thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrwakeup_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(int) n;
	} */ *uap = v;
	struct tslpentry *entry, *tmp;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	int found = 0;

	if (ident == 0)
		*retval = EINVAL;
	else {
		if (ident == -1) {
			queue = &thrsleep_queue;
			qlock = &thrsleep_lock;
			/*
			 * Wake up all waiters with ident -1.  This is needed
			 * because ident -1 can be shared by multiple userspace
			 * lock state machines concurrently.  The implementation
			 * has no way to direct the wakeup to a particular
			 * state machine.
			 */
			n = 0;
		} else {
			queue = &p->p_p->ps_tslpqueue;
			qlock = &p->p_p->ps_lock;
		}

		rw_enter_write(qlock);
		TAILQ_FOREACH_SAFE(entry, queue, tslp_link, tmp) {
			if (entry->tslp_ident == ident) {
				TAILQ_REMOVE(queue, entry, tslp_link);
				entry->tslp_ident = 0;
				wakeup_one(entry);
				if (++found == n)
					break;
			}
		}
		rw_exit_write(qlock);

		if (ident == -1)
			*retval = 0;
		else
			*retval = found ? 0 : ESRCH;
	}

	return (0);
}

void
refcnt_init(struct refcnt *r)
{
	refcnt_init_trace(r, 0);
}

void
refcnt_init_trace(struct refcnt *r, int idx)
{
	r->r_traceidx = idx;
	atomic_store_int(&r->r_refs, 1);
	TRACEINDEX(refcnt, r->r_traceidx, r, 0, +1);
}

void
refcnt_take(struct refcnt *r)
{
	u_int refs;

	refs = atomic_inc_int_nv(&r->r_refs);
	KASSERT(refs != 0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs - 1, +1);
	(void)refs;
}

int
refcnt_rele(struct refcnt *r)
{
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
	if (refs == 0) {
		membar_enter_after_atomic();
		return (1);
	}
	return (0);
}

void
refcnt_rele_wake(struct refcnt *r)
{
	if (refcnt_rele(r))
		wakeup_one(r);
}

void
refcnt_finalize(struct refcnt *r, const char *wmesg)
{
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
	while (refs) {
		sleep_setup(r, PWAIT, wmesg);
		refs = atomic_load_int(&r->r_refs);
		sleep_finish(0, refs);
	}
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	/* Order subsequent loads and stores after refs == 0 load. */
	membar_sync();
}

int
refcnt_shared(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	return (refs > 1);
}

unsigned int
refcnt_read(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	return (refs);
}

void
cond_init(struct cond *c)
{
	atomic_store_int(&c->c_wait, 1);
}

void
cond_signal(struct cond *c)
{
	atomic_store_int(&c->c_wait, 0);

	wakeup_one(c);
}

void
cond_wait(struct cond *c, const char *wmesg)
{
	unsigned int wait;

	wait = atomic_load_int(&c->c_wait);
	while (wait) {
		sleep_setup(c, PWAIT, wmesg);
		wait = atomic_load_int(&c->c_wait);
		sleep_finish(0, wait);
	}
}
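
/*
 * Lifecycle sketch (illustrative; "obj" and "o_refs" are hypothetical):
 * the creator starts with one reference, users take and release their
 * own, and the destructor blocks in refcnt_finalize() until all of
 * them are gone.
 *
 *	refcnt_init(&obj->o_refs);		creation, count = 1
 *	refcnt_take(&obj->o_refs);		per additional user
 *	refcnt_rele_wake(&obj->o_refs);		user is done
 *	refcnt_finalize(&obj->o_refs, "objfin");	owner: wait, then free
 */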