/* kern_umtx.c revision 139257 */
1/* 2 * Copyright (c) 2004, David Xu <davidxu@freebsd.org> 3 * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice unmodified, this list of conditions, and the following 11 * disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: head/sys/kern/kern_umtx.c 139257 2004-12-24 11:30:55Z davidxu $"); 30 31#include <sys/param.h> 32#include <sys/kernel.h> 33#include <sys/limits.h> 34#include <sys/lock.h> 35#include <sys/malloc.h> 36#include <sys/mutex.h> 37#include <sys/proc.h> 38#include <sys/sysent.h> 39#include <sys/systm.h> 40#include <sys/sysproto.h> 41#include <sys/eventhandler.h> 42#include <sys/thr.h> 43#include <sys/umtx.h> 44 45#include <vm/vm.h> 46#include <vm/vm_param.h> 47#include <vm/pmap.h> 48#include <vm/vm_map.h> 49#include <vm/vm_object.h> 50 51#define UMTX_PRIVATE 0 52#define UMTX_SHARED 1 53 54#define UMTX_STATIC_SHARED 55 56struct umtx_key { 57 int type; 58 union { 59 struct { 60 vm_object_t object; 61 long offset; 62 } shared; 63 struct { 64 struct umtx *umtx; 65 long pid; 66 } private; 67 struct { 68 void *ptr; 69 long word; 70 } both; 71 } info; 72}; 73 74struct umtx_q { 75 LIST_ENTRY(umtx_q) uq_next; /* Linked list for the hash. */ 76 struct umtx_key uq_key; /* Umtx key. */ 77 struct thread *uq_thread; /* The thread waits on. */ 78 LIST_ENTRY(umtx_q) uq_rqnext; /* Linked list for requeuing. */ 79 vm_offset_t uq_addr; /* Umtx's virtual address. */ 80}; 81 82LIST_HEAD(umtx_head, umtx_q); 83struct umtxq_chain { 84 struct mtx uc_lock; /* Lock for this chain. */ 85 struct umtx_head uc_queue; /* List of sleep queues. 
*/ 86#define UCF_BUSY 0x01 87#define UCF_WANT 0x02 88 int uc_flags; 89}; 90 91#define GOLDEN_RATIO_PRIME 2654404609U 92#define UMTX_CHAINS 128 93#define UMTX_SHIFTS (__WORD_BIT - 7) 94 95static struct umtxq_chain umtxq_chains[UMTX_CHAINS]; 96static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory"); 97 98#define UMTX_CONTESTED LONG_MIN 99 100static void umtxq_init_chains(void *); 101static int umtxq_hash(struct umtx_key *key); 102static struct mtx *umtxq_mtx(int chain); 103static void umtxq_lock(struct umtx_key *key); 104static void umtxq_unlock(struct umtx_key *key); 105static void umtxq_busy(struct umtx_key *key); 106static void umtxq_unbusy(struct umtx_key *key); 107static void umtxq_insert(struct umtx_q *uq); 108static void umtxq_remove(struct umtx_q *uq); 109static int umtxq_sleep(struct thread *td, struct umtx_key *key, 110 int prio, const char *wmesg, int timo); 111static int umtxq_count(struct umtx_key *key); 112static int umtxq_signal(struct umtx_key *key, int nr_wakeup); 113#ifdef UMTX_DYNAMIC_SHARED 114static void fork_handler(void *arg, struct proc *p1, struct proc *p2, 115 int flags); 116#endif 117static int umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2); 118static int umtx_key_get(struct thread *td, struct umtx *umtx, 119 struct umtx_key *key); 120static void umtx_key_release(struct umtx_key *key); 121 122SYSINIT(umtx, SI_SUB_EVENTHANDLER+1, SI_ORDER_MIDDLE, umtxq_init_chains, NULL); 123 124static void 125umtxq_init_chains(void *arg __unused) 126{ 127 int i; 128 129 for (i = 0; i < UMTX_CHAINS; ++i) { 130 mtx_init(&umtxq_chains[i].uc_lock, "umtxq_lock", NULL, 131 MTX_DEF | MTX_DUPOK); 132 LIST_INIT(&umtxq_chains[i].uc_queue); 133 umtxq_chains[i].uc_flags = 0; 134 } 135#ifdef UMTX_DYNAMIC_SHARED 136 EVENTHANDLER_REGISTER(process_fork, fork_handler, 0, 10000); 137#endif 138} 139 140static inline int 141umtxq_hash(struct umtx_key *key) 142{ 143 unsigned n = (uintptr_t)key->info.both.ptr + key->info.both.word; 144 return (((n * 
GOLDEN_RATIO_PRIME) >> UMTX_SHIFTS) % UMTX_CHAINS); 145} 146 147static inline int 148umtx_key_match(const struct umtx_key *k1, const struct umtx_key *k2) 149{ 150 return (k1->type == k2->type && 151 k1->info.both.ptr == k2->info.both.ptr && 152 k1->info.both.word == k2->info.both.word); 153} 154 155static inline struct mtx * 156umtxq_mtx(int chain) 157{ 158 return (&umtxq_chains[chain].uc_lock); 159} 160 161static inline void 162umtxq_busy(struct umtx_key *key) 163{ 164 int chain = umtxq_hash(key); 165 166 mtx_assert(umtxq_mtx(chain), MA_OWNED); 167 while (umtxq_chains[chain].uc_flags & UCF_BUSY) { 168 umtxq_chains[chain].uc_flags |= UCF_WANT; 169 msleep(&umtxq_chains[chain], umtxq_mtx(chain), 170 curthread->td_priority, "umtxq_busy", 0); 171 } 172 umtxq_chains[chain].uc_flags |= UCF_BUSY; 173} 174 175static inline void 176umtxq_unbusy(struct umtx_key *key) 177{ 178 int chain = umtxq_hash(key); 179 180 mtx_assert(umtxq_mtx(chain), MA_OWNED); 181 KASSERT(umtxq_chains[chain].uc_flags & UCF_BUSY, "not busy"); 182 umtxq_chains[chain].uc_flags &= ~UCF_BUSY; 183 if (umtxq_chains[chain].uc_flags & UCF_WANT) { 184 umtxq_chains[chain].uc_flags &= ~UCF_WANT; 185 wakeup(&umtxq_chains[chain]); 186 } 187} 188 189static inline void 190umtxq_lock(struct umtx_key *key) 191{ 192 int chain = umtxq_hash(key); 193 mtx_lock(umtxq_mtx(chain)); 194} 195 196static inline void 197umtxq_unlock(struct umtx_key *key) 198{ 199 int chain = umtxq_hash(key); 200 mtx_unlock(umtxq_mtx(chain)); 201} 202 203/* 204 * Insert a thread onto the umtx queue. 
205 */ 206static inline void 207umtxq_insert(struct umtx_q *uq) 208{ 209 struct umtx_head *head; 210 int chain = umtxq_hash(&uq->uq_key); 211 212 mtx_assert(umtxq_mtx(chain), MA_OWNED); 213 head = &umtxq_chains[chain].uc_queue; 214 LIST_INSERT_HEAD(head, uq, uq_next); 215 uq->uq_thread->td_umtxq = uq; 216 mtx_lock_spin(&sched_lock); 217 uq->uq_thread->td_flags |= TDF_UMTXQ; 218 mtx_unlock_spin(&sched_lock); 219} 220 221/* 222 * Remove thread from the umtx queue. 223 */ 224static inline void 225umtxq_remove(struct umtx_q *uq) 226{ 227 mtx_assert(umtxq_mtx(umtxq_hash(&uq->uq_key)), MA_OWNED); 228 if (uq->uq_thread->td_flags & TDF_UMTXQ) { 229 LIST_REMOVE(uq, uq_next); 230 uq->uq_thread->td_umtxq = NULL; 231 /* turning off TDF_UMTXQ should be the last thing. */ 232 mtx_lock_spin(&sched_lock); 233 uq->uq_thread->td_flags &= ~TDF_UMTXQ; 234 mtx_unlock_spin(&sched_lock); 235 } 236} 237 238static int 239umtxq_count(struct umtx_key *key) 240{ 241 struct umtx_q *uq; 242 struct umtx_head *head; 243 int chain, count = 0; 244 245 chain = umtxq_hash(key); 246 mtx_assert(umtxq_mtx(chain), MA_OWNED); 247 head = &umtxq_chains[chain].uc_queue; 248 LIST_FOREACH(uq, head, uq_next) { 249 if (umtx_key_match(&uq->uq_key, key)) { 250 if (++count > 1) 251 break; 252 } 253 } 254 return (count); 255} 256 257static int 258umtxq_signal(struct umtx_key *key, int n_wake) 259{ 260 struct umtx_q *uq, *next; 261 struct umtx_head *head; 262 struct thread *blocked = NULL; 263 int chain, ret; 264 265 ret = 0; 266 chain = umtxq_hash(key); 267 mtx_assert(umtxq_mtx(chain), MA_OWNED); 268 head = &umtxq_chains[chain].uc_queue; 269 for (uq = LIST_FIRST(head); uq; uq = next) { 270 next = LIST_NEXT(uq, uq_next); 271 if (umtx_key_match(&uq->uq_key, key)) { 272 blocked = uq->uq_thread; 273 umtxq_remove(uq); 274 wakeup(blocked); 275 if (++ret >= n_wake) 276 break; 277 } 278 } 279 return (ret); 280} 281 282static inline int 283umtxq_sleep(struct thread *td, struct umtx_key *key, int priority, 284 const char 
*wmesg, int timo) 285{ 286 int chain = umtxq_hash(key); 287 288 return (msleep(td, umtxq_mtx(chain), priority, wmesg, timo)); 289} 290 291static int 292umtx_key_get(struct thread *td, struct umtx *umtx, struct umtx_key *key) 293{ 294#if defined(UMTX_DYNAMIC_SHARED) || defined(UMTX_STATIC_SHARED) 295 vm_map_t map; 296 vm_map_entry_t entry; 297 vm_pindex_t pindex; 298 vm_prot_t prot; 299 boolean_t wired; 300 301 map = &td->td_proc->p_vmspace->vm_map; 302 if (vm_map_lookup(&map, (vm_offset_t)umtx, VM_PROT_WRITE, 303 &entry, &key->info.shared.object, &pindex, &prot, 304 &wired) != KERN_SUCCESS) { 305 return EFAULT; 306 } 307#endif 308 309#if defined(UMTX_DYNAMIC_SHARED) 310 key->type = UMTX_SHARED; 311 key->info.shared.offset = entry->offset + entry->start - 312 (vm_offset_t)umtx; 313 /* 314 * Add object reference, if we don't do this, a buggy application 315 * deallocates the object, the object will be reused by other 316 * applications, then unlock will wake wrong thread. 317 */ 318 vm_object_reference(key->info.shared.object); 319 vm_map_lookup_done(map, entry); 320#elif defined(UMTX_STATIC_SHARED) 321 if (VM_INHERIT_SHARE == entry->inheritance) { 322 key->type = UMTX_SHARED; 323 key->info.shared.offset = entry->offset + entry->start - 324 (vm_offset_t)umtx; 325 vm_object_reference(key->info.shared.object); 326 } else { 327 key->type = UMTX_PRIVATE; 328 key->info.private.umtx = umtx; 329 key->info.private.pid = td->td_proc->p_pid; 330 } 331 vm_map_lookup_done(map, entry); 332#else 333 key->type = UMTX_PRIVATE; 334 key->info.private.umtx = umtx; 335 key->info.private.pid = td->td_proc->p_pid; 336#endif 337 return (0); 338} 339 340static inline void 341umtx_key_release(struct umtx_key *key) 342{ 343 if (key->type == UMTX_SHARED) 344 vm_object_deallocate(key->info.shared.object); 345} 346 347static inline int 348umtxq_queue_me(struct thread *td, struct umtx *umtx, struct umtx_q *uq) 349{ 350 int error; 351 352 if ((error = umtx_key_get(td, umtx, &uq->uq_key)) != 0) 353 
return (error); 354 355 uq->uq_addr = (vm_offset_t)umtx; 356 uq->uq_thread = td; 357 umtxq_lock(&uq->uq_key); 358 /* hmm, for condition variable, we don't need busy flag. */ 359 umtxq_busy(&uq->uq_key); 360 umtxq_insert(uq); 361 umtxq_unbusy(&uq->uq_key); 362 umtxq_unlock(&uq->uq_key); 363 return (0); 364} 365 366#if defined(UMTX_DYNAMIC_SHARED) 367static void 368fork_handler(void *arg, struct proc *p1, struct proc *p2, int flags) 369{ 370 vm_map_t map; 371 vm_map_entry_t entry; 372 vm_object_t object; 373 vm_pindex_t pindex; 374 vm_prot_t prot; 375 boolean_t wired; 376 struct umtx_key key; 377 LIST_HEAD(, umtx_q) workq; 378 struct umtx_q *uq; 379 struct thread *td; 380 int onq; 381 382 LIST_INIT(&workq); 383 384 /* Collect threads waiting on umtxq */ 385 PROC_LOCK(p1); 386 FOREACH_THREAD_IN_PROC(p1, td) { 387 if (td->td_flags & TDF_UMTXQ) { 388 uq = td->td_umtxq; 389 if (uq) 390 LIST_INSERT_HEAD(&workq, uq, uq_rqnext); 391 } 392 } 393 PROC_UNLOCK(p1); 394 395 LIST_FOREACH(uq, &workq, uq_rqnext) { 396 map = &p1->p_vmspace->vm_map; 397 if (vm_map_lookup(&map, uq->uq_addr, VM_PROT_WRITE, 398 &entry, &object, &pindex, &prot, &wired) != KERN_SUCCESS) { 399 continue; 400 } 401 key.type = UMTX_SHARED; 402 key.info.shared.object = object; 403 key.info.shared.offset = entry->offset + entry->start - 404 uq->uq_addr; 405 if (umtx_key_match(&key, &uq->uq_key)) { 406 vm_map_lookup_done(map, entry); 407 continue; 408 } 409 410 umtxq_lock(&uq->uq_key); 411 umtxq_busy(&uq->uq_key); 412 if (uq->uq_thread->td_flags & TDF_UMTXQ) { 413 umtxq_remove(uq); 414 onq = 1; 415 } else 416 onq = 0; 417 umtxq_unbusy(&uq->uq_key); 418 umtxq_unlock(&uq->uq_key); 419 if (onq) { 420 vm_object_deallocate(uq->uq_key.info.shared.object); 421 uq->uq_key = key; 422 umtxq_lock(&uq->uq_key); 423 umtxq_busy(&uq->uq_key); 424 umtxq_insert(uq); 425 umtxq_unbusy(&uq->uq_key); 426 umtxq_unlock(&uq->uq_key); 427 vm_object_reference(uq->uq_key.info.shared.object); 428 } 429 vm_map_lookup_done(map, entry); 430 } 
431} 432#endif 433 434static int 435_do_lock(struct thread *td, struct umtx *umtx, long id, int timo) 436{ 437 struct umtx_q uq; 438 intptr_t owner; 439 intptr_t old; 440 int error = 0; 441 442 /* 443 * Care must be exercised when dealing with umtx structure. It 444 * can fault on any access. 445 */ 446 447 for (;;) { 448 /* 449 * Try the uncontested case. This should be done in userland. 450 */ 451 owner = casuptr((intptr_t *)&umtx->u_owner, 452 UMTX_UNOWNED, id); 453 454 /* The acquire succeeded. */ 455 if (owner == UMTX_UNOWNED) 456 return (0); 457 458 /* The address was invalid. */ 459 if (owner == -1) 460 return (EFAULT); 461 462 /* If no one owns it but it is contested try to acquire it. */ 463 if (owner == UMTX_CONTESTED) { 464 owner = casuptr((intptr_t *)&umtx->u_owner, 465 UMTX_CONTESTED, id | UMTX_CONTESTED); 466 467 if (owner == UMTX_CONTESTED) 468 return (0); 469 470 /* The address was invalid. */ 471 if (owner == -1) 472 return (EFAULT); 473 474 /* If this failed the lock has changed, restart. */ 475 continue; 476 } 477 478 /* 479 * If we caught a signal, we have retried and now 480 * exit immediately. 481 */ 482 if (error || (error = umtxq_queue_me(td, umtx, &uq)) != 0) 483 return (error); 484 485 /* 486 * Set the contested bit so that a release in user space 487 * knows to use the system call for unlock. If this fails 488 * either some one else has acquired the lock or it has been 489 * released. 490 */ 491 old = casuptr((intptr_t *)&umtx->u_owner, owner, 492 owner | UMTX_CONTESTED); 493 494 /* The address was invalid. */ 495 if (old == -1) { 496 umtxq_lock(&uq.uq_key); 497 umtxq_busy(&uq.uq_key); 498 umtxq_remove(&uq); 499 umtxq_unbusy(&uq.uq_key); 500 umtxq_unlock(&uq.uq_key); 501 umtx_key_release(&uq.uq_key); 502 return (EFAULT); 503 } 504 505 /* 506 * We set the contested bit, sleep. Otherwise the lock changed 507 * and we need to retry or we lost a race to the thread 508 * unlocking the umtx. 
509 */ 510 umtxq_lock(&uq.uq_key); 511 if (old == owner && (td->td_flags & TDF_UMTXQ)) { 512 error = umtxq_sleep(td, &uq.uq_key, 513 td->td_priority | PCATCH, 514 "umtx", timo); 515 } 516 umtxq_busy(&uq.uq_key); 517 umtxq_remove(&uq); 518 umtxq_unbusy(&uq.uq_key); 519 umtxq_unlock(&uq.uq_key); 520 umtx_key_release(&uq.uq_key); 521 } 522 523 return (0); 524} 525 526static int 527do_lock(struct thread *td, struct umtx *umtx, long id, 528 struct timespec *abstime) 529{ 530 struct timespec ts1, ts2; 531 struct timeval tv; 532 int timo, error; 533 534 if (abstime == NULL) { 535 error = _do_lock(td, umtx, id, 0); 536 } else { 537 for (;;) { 538 ts1 = *abstime; 539 getnanotime(&ts2); 540 timespecsub(&ts1, &ts2); 541 TIMESPEC_TO_TIMEVAL(&tv, &ts1); 542 if (tv.tv_sec < 0) { 543 error = EWOULDBLOCK; 544 break; 545 } 546 timo = tvtohz(&tv); 547 error = _do_lock(td, umtx, id, timo); 548 if (error != EWOULDBLOCK) { 549 if (error == ERESTART) 550 error = EINTR; 551 break; 552 } 553 } 554 } 555 return (error); 556} 557 558static int 559do_unlock(struct thread *td, struct umtx *umtx, long id) 560{ 561 struct umtx_key key; 562 intptr_t owner; 563 intptr_t old; 564 int error; 565 int count; 566 567 /* 568 * Make sure we own this mtx. 569 * 570 * XXX Need a {fu,su}ptr this is not correct on arch where 571 * sizeof(intptr_t) != sizeof(long). 572 */ 573 if ((owner = fuword(&umtx->u_owner)) == -1) 574 return (EFAULT); 575 576 if ((owner & ~UMTX_CONTESTED) != id) 577 return (EPERM); 578 579 /* We should only ever be in here for contested locks */ 580 if ((owner & UMTX_CONTESTED) == 0) 581 return (EINVAL); 582 583 if ((error = umtx_key_get(td, umtx, &key)) != 0) 584 return (error); 585 586 umtxq_lock(&key); 587 umtxq_busy(&key); 588 count = umtxq_count(&key); 589 umtxq_unlock(&key); 590 591 /* 592 * When unlocking the umtx, it must be marked as unowned if 593 * there is zero or one thread only waiting for it. 594 * Otherwise, it must be marked as contested. 
595 */ 596 old = casuptr((intptr_t *)&umtx->u_owner, owner, 597 count <= 1 ? UMTX_UNOWNED : UMTX_CONTESTED); 598 umtxq_lock(&key); 599 umtxq_signal(&key, 0); 600 umtxq_unbusy(&key); 601 umtxq_unlock(&key); 602 umtx_key_release(&key); 603 if (old == -1) 604 return (EFAULT); 605 if (old != owner) 606 return (EINVAL); 607 return (0); 608} 609 610static int 611do_unlock_and_wait(struct thread *td, struct umtx *umtx, long id, void *uaddr, 612 struct timespec *abstime) 613{ 614 struct umtx_q uq; 615 intptr_t owner; 616 intptr_t old; 617 struct timespec ts1, ts2; 618 struct timeval tv; 619 int timo, error = 0; 620 621 if (umtx == uaddr) 622 return (EINVAL); 623 624 /* 625 * Make sure we own this mtx. 626 * 627 * XXX Need a {fu,su}ptr this is not correct on arch where 628 * sizeof(intptr_t) != sizeof(long). 629 */ 630 if ((owner = fuword(&umtx->u_owner)) == -1) 631 return (EFAULT); 632 633 if ((owner & ~UMTX_CONTESTED) != id) 634 return (EPERM); 635 636 if ((error = umtxq_queue_me(td, uaddr, &uq)) != 0) 637 return (error); 638 639 old = casuptr((intptr_t *)&umtx->u_owner, id, UMTX_UNOWNED); 640 if (old == -1) { 641 umtxq_lock(&uq.uq_key); 642 umtxq_remove(&uq); 643 umtxq_unlock(&uq.uq_key); 644 umtx_key_release(&uq.uq_key); 645 return (EFAULT); 646 } 647 if (old != id) { 648 error = do_unlock(td, umtx, id); 649 if (error) { 650 umtxq_lock(&uq.uq_key); 651 umtxq_remove(&uq); 652 umtxq_unlock(&uq.uq_key); 653 umtx_key_release(&uq.uq_key); 654 return (error); 655 } 656 } 657 if (abstime == NULL) { 658 umtxq_lock(&uq.uq_key); 659 if (td->td_flags & TDF_UMTXQ) 660 error = umtxq_sleep(td, &uq.uq_key, 661 td->td_priority | PCATCH, "ucond", 0); 662 if (!(td->td_flags & TDF_UMTXQ)) 663 error = 0; 664 else 665 umtxq_remove(&uq); 666 umtxq_unlock(&uq.uq_key); 667 } else { 668 for (;;) { 669 ts1 = *abstime; 670 getnanotime(&ts2); 671 timespecsub(&ts1, &ts2); 672 TIMESPEC_TO_TIMEVAL(&tv, &ts1); 673 umtxq_lock(&uq.uq_key); 674 if (tv.tv_sec < 0) { 675 error = EWOULDBLOCK; 676 break; 677 
} 678 timo = tvtohz(&tv); 679 if (td->td_flags & TDF_UMTXQ) 680 error = umtxq_sleep(td, &uq.uq_key, 681 td->td_priority | PCATCH, 682 "ucond", timo); 683 if (!td->td_flags & TDF_UMTXQ) 684 break; 685 umtxq_unlock(&uq.uq_key); 686 } 687 if (!(td->td_flags & TDF_UMTXQ)) 688 error = 0; 689 else 690 umtxq_remove(&uq); 691 umtxq_unlock(&uq.uq_key); 692 } 693 umtx_key_release(&uq.uq_key); 694 if (error == ERESTART) 695 error = EINTR; 696 return (error); 697} 698 699static int 700do_wake(struct thread *td, void *uaddr, int n_wake) 701{ 702 struct umtx_key key; 703 int ret; 704 705 if ((ret = umtx_key_get(td, uaddr, &key)) != 0) 706 return (ret); 707 ret = umtxq_signal(&key, n_wake); 708 umtx_key_release(&key); 709 td->td_retval[0] = ret; 710 return (0); 711} 712 713int 714_umtx_lock(struct thread *td, struct _umtx_lock_args *uap) 715 /* struct umtx *umtx */ 716{ 717 return _do_lock(td, uap->umtx, td->td_tid, 0); 718} 719 720int 721_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap) 722 /* struct umtx *umtx */ 723{ 724 return do_unlock(td, uap->umtx, td->td_tid); 725} 726 727int 728_umtx_op(struct thread *td, struct _umtx_op_args *uap) 729{ 730 struct timespec abstime; 731 struct timespec *ts; 732 int error; 733 734 switch(uap->op) { 735 case UMTX_OP_LOCK: 736 /* Allow a null timespec (wait forever). */ 737 if (uap->abstime == NULL) 738 ts = NULL; 739 else { 740 error = copyin(uap->abstime, &abstime, sizeof(abstime)); 741 if (error != 0) 742 return (error); 743 if (abstime.tv_nsec >= 1000000000 || 744 abstime.tv_nsec < 0) 745 return (EINVAL); 746 ts = &abstime; 747 } 748 return do_lock(td, uap->umtx, uap->id, ts); 749 case UMTX_OP_UNLOCK: 750 return do_unlock(td, uap->umtx, uap->id); 751 case UMTX_OP_UNLOCK_AND_WAIT: 752 /* Allow a null timespec (wait forever). 
*/ 753 if (uap->abstime == NULL) 754 ts = NULL; 755 else { 756 error = copyin(uap->abstime, &abstime, sizeof(abstime)); 757 if (error != 0) 758 return (error); 759 if (abstime.tv_nsec >= 1000000000 || 760 abstime.tv_nsec < 0) 761 return (EINVAL); 762 ts = &abstime; 763 } 764 return do_unlock_and_wait(td, uap->umtx, uap->id, 765 uap->uaddr, ts); 766 case UMTX_OP_WAKE: 767 return do_wake(td, uap->uaddr, uap->id); 768 default: 769 return (EINVAL); 770 } 771} 772