thr_mutex.c revision 172491
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libkse/thread/thr_mutex.c 165967 2007-01-12 07:26:21Z imp $
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0);
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define	MUTEX_DESTROY(m) do {		\
	_lock_destroy(&(m)->m_lock);	\
	free(m);			\
} while (0)


/*
 * Prototypes
 */
static struct kse_mailbox *mutex_handoff(struct pthread *,
	    struct pthread_mutex *);
static inline int	mutex_self_trylock(struct pthread *, pthread_mutex_t);
static inline int	mutex_self_lock(struct pthread *, pthread_mutex_t);
static int		mutex_unlock_common(pthread_mutex_t *, int);
static void		mutex_priority_adjust(struct pthread *, pthread_mutex_t);
static void		mutex_rescan_owned (struct pthread *, struct pthread *,
	    struct pthread_mutex *);
static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);
static void		mutex_lock_backout(void *arg);

static struct pthread_mutex_attr	static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t		static_mattr = &static_mutex_attr;

LT10_COMPAT_PRIVATE(__pthread_mutex_init);
LT10_COMPAT_PRIVATE(_pthread_mutex_init);
LT10_COMPAT_DEFAULT(pthread_mutex_init);
LT10_COMPAT_PRIVATE(__pthread_mutex_lock);
LT10_COMPAT_PRIVATE(_pthread_mutex_lock);
LT10_COMPAT_DEFAULT(pthread_mutex_lock);
LT10_COMPAT_PRIVATE(__pthread_mutex_timedlock);
LT10_COMPAT_PRIVATE(_pthread_mutex_timedlock);
LT10_COMPAT_DEFAULT(pthread_mutex_timedlock);
LT10_COMPAT_PRIVATE(__pthread_mutex_trylock);
LT10_COMPAT_PRIVATE(_pthread_mutex_trylock);
LT10_COMPAT_DEFAULT(pthread_mutex_trylock);
LT10_COMPAT_PRIVATE(_pthread_mutex_destroy);
LT10_COMPAT_DEFAULT(pthread_mutex_destroy);
LT10_COMPAT_PRIVATE(_pthread_mutex_unlock);
LT10_COMPAT_DEFAULT(pthread_mutex_unlock);

/* Single underscore versions provided for libc internal usage: */
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);



int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	struct pthread_mutex *pmutex;
	enum pthread_mutextype type;
	int protocol;
	int ceiling;
	int flags;
	int ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/* Check if default mutex attributes: */
	else if (mutex_attr == NULL || *mutex_attr == NULL) {
		/* Default to a (error checking) POSIX mutex: */
		type = PTHREAD_MUTEX_ERRORCHECK;
		protocol = PTHREAD_PRIO_NONE;
		ceiling = THR_MAX_PRIORITY;
		flags = 0;
	}

	/* Check mutex type: */
	else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) ||
	    ((*mutex_attr)->m_type >= PTHREAD_MUTEX_TYPE_MAX))
		/* Return an invalid argument error: */
		ret = EINVAL;

	/* Check mutex protocol: */
	else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) ||
	    ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE))
		/* Return an invalid argument error: */
		ret = EINVAL;

	else {
		/* Use the requested mutex type and protocol: */
		type = (*mutex_attr)->m_type;
		protocol = (*mutex_attr)->m_protocol;
		ceiling = (*mutex_attr)->m_ceiling;
		flags = (*mutex_attr)->m_flags;
	}

	/* Check no errors so far: */
	if (ret == 0) {
		if ((pmutex = (pthread_mutex_t)
		    malloc(sizeof(struct pthread_mutex))) == NULL)
			ret = ENOMEM;
		else if (_lock_init(&pmutex->m_lock, LCK_ADAPTIVE,
		    _thr_lock_wait, _thr_lock_wakeup) != 0) {
			free(pmutex);
			*mutex = NULL;
			ret = ENOMEM;
		} else {
			/* Set the mutex flags: */
			pmutex->m_flags = flags;

			/* Process according to mutex type: */
			switch (type) {
			/* case PTHREAD_MUTEX_DEFAULT: */
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_NORMAL:
				/* Nothing to do here. */
				break;

			/* Single UNIX Spec 2 recursive mutex: */
			case PTHREAD_MUTEX_RECURSIVE:
				/* Reset the mutex count: */
				pmutex->m_count = 0;
				break;

			/* Trap invalid mutex types: */
			default:
				/* Return an invalid argument error: */
				ret = EINVAL;
				break;
			}
			if (ret == 0) {
				/* Initialise the rest of the mutex: */
				TAILQ_INIT(&pmutex->m_queue);
				pmutex->m_flags |= MUTEX_FLAGS_INITED;
				pmutex->m_owner = NULL;
				pmutex->m_type = type;
				pmutex->m_protocol = protocol;
				pmutex->m_refcount = 0;
				if (protocol == PTHREAD_PRIO_PROTECT)
					pmutex->m_prio = ceiling;
				else
					pmutex->m_prio = -1;
				pmutex->m_saved_prio = 0;
				MUTEX_INIT_LINK(pmutex);
				*mutex = pmutex;
			} else {
				/* Free the mutex lock structure: */
				MUTEX_DESTROY(pmutex);
				*mutex = NULL;
			}
		}
	}
	/* Return the completion status: */
	return (ret);
}

int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	struct pthread_mutex_attr mattr, *mattrp;

	if ((mutex_attr == NULL) || (*mutex_attr == NULL))
		return (__pthread_mutex_init(mutex, &static_mattr));
	else {
		mattr = **mutex_attr;
		mattr.m_flags |= MUTEX_FLAGS_PRIVATE;
		mattrp = &mattr;
		return (__pthread_mutex_init(mutex, &mattrp));
	}
}

void
_thr_mutex_reinit(pthread_mutex_t *mutex)
{
	_lock_reinit(&(*mutex)->m_lock, LCK_ADAPTIVE,
	    _thr_lock_wait, _thr_lock_wakeup);
	TAILQ_INIT(&(*mutex)->m_queue);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL || *mutex == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);

		/*
		 * Check to see if this mutex is in use:
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (!TAILQ_EMPTY(&(*mutex)->m_queue)) ||
		    ((*mutex)->m_refcount != 0)) {
			ret = EBUSY;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);
		} else {
			/*
			 * Save a pointer to the mutex so it can be free'd
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = pthread_mutex_init(mutex, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = pthread_mutex_init(mutex, &static_mattr);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	int private;
	int ret = 0;

	THR_ASSERT((mutex != NULL) && (*mutex != NULL),
	    "Uninitialized mutex in pthread_mutex_trylock_basic");

	/* Lock the mutex structure: */
	THR_LOCK_ACQUIRE(curthread, &(*mutex)->m_lock);
	private = (*mutex)->m_flags & MUTEX_FLAGS_PRIVATE;

	/*
	 * If the mutex was statically allocated, properly
	 * initialize the tail queue.
	 */
	if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) {
		TAILQ_INIT(&(*mutex)->m_queue);
		MUTEX_INIT_LINK(*mutex);
		(*mutex)->m_flags |= MUTEX_FLAGS_INITED;
	}

	/* Process according to mutex type: */
	switch ((*mutex)->m_protocol) {
	/* Default POSIX mutex: */
	case PTHREAD_PRIO_NONE:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority inheritence mutex: */
	case PTHREAD_PRIO_INHERIT:
		/* Check if this mutex is not locked: */
		if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The mutex takes on the attributes of the
			 * running thread when there are no waiters.
			 */
			(*mutex)->m_prio = curthread->active_priority;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority = (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);

			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* POSIX priority protection mutex: */
	case PTHREAD_PRIO_PROTECT:
		/* Check for a priority ceiling violation: */
		if (curthread->active_priority > (*mutex)->m_prio)
			ret = EINVAL;

		/* Check if this mutex is not locked: */
		else if ((*mutex)->m_owner == NULL) {
			/* Lock the mutex for the running thread: */
			(*mutex)->m_owner = curthread;

			THR_SCHED_LOCK(curthread, curthread);
			/* Track number of priority mutexes owned: */
			curthread->priority_mutex_count++;

			/*
			 * The running thread inherits the ceiling
			 * priority of the mutex and executes at that
			 * priority.
			 */
			curthread->active_priority = (*mutex)->m_prio;
			(*mutex)->m_saved_prio =
			    curthread->inherited_priority;
			curthread->inherited_priority =
			    (*mutex)->m_prio;
			THR_SCHED_UNLOCK(curthread, curthread);
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(*mutex);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    (*mutex), m_qe);
		} else if ((*mutex)->m_owner == curthread)
			ret = mutex_self_trylock(curthread, *mutex);
		else
			/* Return a busy error: */
			ret = EBUSY;
		break;

	/* Trap invalid mutex types: */
	default:
		/* Return an invalid argument error: */
		ret = EINVAL;
		break;
	}

	if (ret == 0 && private)
		THR_CRITICAL_ENTER(curthread);

	/* Unlock the mutex structure: */
	THR_LOCK_RELEASE(curthread, &(*mutex)->m_lock);

	/* Return the completion status: */
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*mutex != NULL) ||
	    ((ret = init_static(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

int
_pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	int ret = 0;

	if (mutex == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	else if ((*mutex != NULL) ||
	    ((ret = init_static_private(curthread, mutex)) == 0))
		ret = mutex_trylock_common(curthread, mutex);

	return (ret);
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *m,
	const struct timespec * abstime)
{
	int private;
	int ret = 0;

	THR_ASSERT((m != NULL) && (*m != NULL),
	    "Uninitialized mutex in pthread_mutex_trylock_basic");

	if (abstime != NULL && (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000))
		return (EINVAL);

	/* Reset the interrupted flag: */
	curthread->interrupted = 0;
	curthread->timeout = 0;
	curthread->wakeup_time.tv_sec = -1;

	private = (*m)->m_flags & MUTEX_FLAGS_PRIVATE;

	/*
	 * Enter a loop waiting to become the mutex owner.  We need a
	 * loop in case the waiting thread is interrupted by a signal
	 * to execute a signal handler.  It is not (currently) possible
	 * to remain in the waiting queue while running a handler.
	 * Instead, the thread is interrupted and backed out of the
	 * waiting queue prior to executing the signal handler.
	 */
	do {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/*
		 * If the mutex was statically allocated, properly
		 * initialize the tail queue.
		 */
		if (((*m)->m_flags & MUTEX_FLAGS_INITED) == 0) {
			TAILQ_INIT(&(*m)->m_queue);
			(*m)->m_flags |= MUTEX_FLAGS_INITED;
			MUTEX_INIT_LINK(*m);
		}

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);
				if (private)
					THR_CRITICAL_ENTER(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;
				curthread->sigbackout = mutex_lock_backout;
				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */
				THR_SCHED_LOCK(curthread, curthread);
				/* Set the wakeup time: */
				if (abstime) {
					curthread->wakeup_time.tv_sec =
					    abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
					    abstime->tv_nsec;
				}

				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
				/*
				 * Only clear these after assuring the
				 * thread is dequeued.
				 */
				curthread->data.mutex = NULL;
				curthread->sigbackout = NULL;
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/* Check if this mutex is not locked: */
			if ((*m)->m_owner == NULL) {
				/* Lock the mutex for this thread: */
				(*m)->m_owner = curthread;

				THR_SCHED_LOCK(curthread, curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The mutex takes on attributes of the
				 * running thread when there are no waiters.
				 * Make sure the thread's scheduling lock is
				 * held while priorities are adjusted.
				 */
				(*m)->m_prio = curthread->active_priority;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);
				if (private)
					THR_CRITICAL_ENTER(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;
				curthread->sigbackout = mutex_lock_backout;

				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */
				if (curthread->active_priority > (*m)->m_prio)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, *m);

				THR_SCHED_LOCK(curthread, curthread);
				/* Set the wakeup time: */
				if (abstime) {
					curthread->wakeup_time.tv_sec =
					    abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
					    abstime->tv_nsec;
				}
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
				/*
				 * Only clear these after assuring the
				 * thread is dequeued.
				 */
				curthread->data.mutex = NULL;
				curthread->sigbackout = NULL;
			}
			break;

		/* POSIX priority protection mutex: */
		case PTHREAD_PRIO_PROTECT:
			/* Check for a priority ceiling violation: */
			if (curthread->active_priority > (*m)->m_prio) {
				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				ret = EINVAL;
			}
			/* Check if this mutex is not locked: */
			else if ((*m)->m_owner == NULL) {
				/*
				 * Lock the mutex for the running
				 * thread:
				 */
				(*m)->m_owner = curthread;

				THR_SCHED_LOCK(curthread, curthread);
				/* Track number of priority mutexes owned: */
				curthread->priority_mutex_count++;

				/*
				 * The running thread inherits the ceiling
				 * priority of the mutex and executes at that
				 * priority.  Make sure the thread's
				 * scheduling lock is held while priorities
				 * are adjusted.
				 */
				curthread->active_priority = (*m)->m_prio;
				(*m)->m_saved_prio =
				    curthread->inherited_priority;
				curthread->inherited_priority = (*m)->m_prio;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Add to the list of owned mutexes: */
				MUTEX_ASSERT_NOT_OWNED(*m);
				TAILQ_INSERT_TAIL(&curthread->mutexq,
				    (*m), m_qe);
				if (private)
					THR_CRITICAL_ENTER(curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else if ((*m)->m_owner == curthread) {
				ret = mutex_self_lock(curthread, *m);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
			} else {
				/*
				 * Join the queue of threads waiting to lock
				 * the mutex and save a pointer to the mutex.
				 */
				mutex_queue_enq(*m, curthread);
				curthread->data.mutex = *m;
				curthread->sigbackout = mutex_lock_backout;

				/* Clear any previous error: */
				curthread->error = 0;

				/*
				 * This thread is active and is in a critical
				 * region (holding the mutex lock); we should
				 * be able to safely set the state.
				 */

				THR_SCHED_LOCK(curthread, curthread);
				/* Set the wakeup time: */
				if (abstime) {
					curthread->wakeup_time.tv_sec =
					    abstime->tv_sec;
					curthread->wakeup_time.tv_nsec =
					    abstime->tv_nsec;
				}
				THR_SET_STATE(curthread, PS_MUTEX_WAIT);
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Unlock the mutex structure: */
				THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

				/* Schedule the next thread: */
				_thr_sched_switch(curthread);

				if (THR_IN_MUTEXQ(curthread)) {
					THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
					mutex_queue_remove(*m, curthread);
					THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
				}
				/*
				 * Only clear these after assuring the
				 * thread is dequeued.
				 */
				curthread->data.mutex = NULL;
				curthread->sigbackout = NULL;

				/*
				 * The threads priority may have changed while
				 * waiting for the mutex causing a ceiling
				 * violation.
				 */
				ret = curthread->error;
				curthread->error = 0;
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

	} while (((*m)->m_owner != curthread) && (ret == 0) &&
	    (curthread->interrupted == 0) && (curthread->timeout == 0));

	if (ret == 0 && (*m)->m_owner != curthread && curthread->timeout)
		ret = ETIMEDOUT;

	/*
	 * Check to see if this thread was interrupted and
	 * is still in the mutex queue of waiting threads:
	 */
	if (curthread->interrupted != 0) {
		/* Remove this thread from the mutex queue. */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
		if (THR_IN_SYNCQ(curthread))
			mutex_queue_remove(*m, curthread);
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

		/* Check for asynchronous cancellation. */
		if (curthread->continuation != NULL)
			curthread->continuation((void *) curthread);
	}

	/* Return the completion status: */
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);

	curthread = _get_curthread();
	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

__strong_reference(__pthread_mutex_lock, _thr_mutex_lock);

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);
	curthread = _get_curthread();

	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	else if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, NULL);

	return (ret);
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);

	curthread = _get_curthread();
	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	else if ((*m != NULL) || ((ret = init_static(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int ret = 0;

	if (_thr_initial == NULL)
		_libpthread_init(NULL);
	curthread = _get_curthread();

	if (m == NULL)
		ret = EINVAL;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	else if ((*m != NULL) ||
	    ((ret = init_static_private(curthread, m)) == 0))
		ret = mutex_lock_common(curthread, m, abs_timeout);

	return (ret);
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 0));
}

__strong_reference(_pthread_mutex_unlock, _thr_mutex_unlock);

int
_mutex_cv_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m, /* add reference */ 1));
}

int
_mutex_cv_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int ret;

	curthread = _get_curthread();
	if ((ret = _pthread_mutex_lock(m)) == 0) {
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);
		(*m)->m_refcount--;
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);
	}
	return (ret);
}

static inline int
mutex_self_trylock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret = 0;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return (ret);
}

static inline int
mutex_self_lock(struct pthread *curthread, pthread_mutex_t m)
{
	int ret = 0;

	/*
	 * Don't allow evil recursive mutexes for private use
	 * in libc and libpthread.
	 */
	if (m->m_flags & MUTEX_FLAGS_PRIVATE)
		PANIC("Recurse on a private mutex.");

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		/*
		 * POSIX specifies that mutexes should return EDEADLK if a
		 * recursive lock is detected.
		 */
		ret = EDEADLK;
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 define as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */

		THR_SCHED_LOCK(curthread, curthread);
		THR_SET_STATE(curthread, PS_DEADLOCK);
		THR_SCHED_UNLOCK(curthread, curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Schedule the next thread: */
		_thr_sched_switch(curthread);
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		m->m_count++;
		break;

	default:
		/* Trap invalid mutex types; */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *m, int add_reference)
{
	struct pthread *curthread = _get_curthread();
	struct kse_mailbox *kmbx = NULL;
	int ret = 0;

	if (m == NULL || *m == NULL)
		ret = EINVAL;
	else {
		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &(*m)->m_lock);

		/* Process according to mutex type: */
		switch ((*m)->m_protocol) {
		/* Default POSIX mutex: */
		case PTHREAD_PRIO_NONE:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority inheritence mutex: */
		case PTHREAD_PRIO_INHERIT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
				    (*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* POSIX priority ceiling mutex: */
		case PTHREAD_PRIO_PROTECT:
			/*
			 * Check if the running thread is not the owner of the
			 * mutex:
			 */
			if ((*m)->m_owner != curthread)
				ret = EPERM;
			else if (((*m)->m_type == PTHREAD_MUTEX_RECURSIVE) &&
			    ((*m)->m_count > 0))
				/* Decrement the count: */
				(*m)->m_count--;
			else {
				/*
				 * Clear the count in case this is a recursive
				 * mutex.
				 */
				(*m)->m_count = 0;

				/*
				 * Restore the threads inherited priority and
				 * recompute the active priority (being careful
				 * not to override changes in the threads base
				 * priority subsequent to locking the mutex).
				 */
				THR_SCHED_LOCK(curthread, curthread);
				curthread->inherited_priority =
				    (*m)->m_saved_prio;
				curthread->active_priority =
				    MAX(curthread->inherited_priority,
				    curthread->base_priority);

				/*
				 * This thread now owns one less priority mutex.
				 */
				curthread->priority_mutex_count--;
				THR_SCHED_UNLOCK(curthread, curthread);

				/* Remove the mutex from the threads queue. */
				MUTEX_ASSERT_IS_OWNED(*m);
				TAILQ_REMOVE(&(*m)->m_owner->mutexq,
				    (*m), m_qe);
				MUTEX_INIT_LINK(*m);

				/*
				 * Hand off the mutex to the next waiting
				 * thread:
				 */
				kmbx = mutex_handoff(curthread, *m);
			}
			break;

		/* Trap invalid mutex types: */
		default:
			/* Return an invalid argument error: */
			ret = EINVAL;
			break;
		}

		if ((ret == 0) && (add_reference != 0))
			/* Increment the reference count: */
			(*m)->m_refcount++;

		/* Leave the critical region if this is a private mutex. */
		if ((ret == 0) && ((*m)->m_flags & MUTEX_FLAGS_PRIVATE))
			THR_CRITICAL_LEAVE(curthread);

		/* Unlock the mutex structure: */
		THR_LOCK_RELEASE(curthread, &(*m)->m_lock);

		if (kmbx != NULL)
			kse_wakeup(kmbx);
	}

	/* Return the completion status: */
	return (ret);
}


/*
 * This function is called when a change in base priority occurs for
 * a thread that is holding or waiting for a priority protection or
 * inheritence mutex.  A change in a threads base priority can effect
 * changes to active priorities of other threads and to the ordering
 * of mutex locking by waiting threads.
 *
 * This must be called without the target thread's scheduling lock held.
 */
void
_mutex_notify_priochange(struct pthread *curthread, struct pthread *pthread,
    int propagate_prio)
{
	struct pthread_mutex *m;

	/* Adjust the priorites of any owned priority mutexes: */
	if (pthread->priority_mutex_count > 0) {
		/*
		 * Rescan the mutexes owned by this thread and correct
		 * their priorities to account for this threads change
		 * in priority.  This has the side effect of changing
		 * the threads active priority.
		 *
		 * Be sure to lock the first mutex in the list of owned
		 * mutexes.  This acts as a barrier against another
		 * simultaneous call to change the threads priority
		 * and from the owning thread releasing the mutex.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);
		if (m != NULL) {
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);
			/*
			 * Make sure the thread still owns the lock.
			 */
			if (m == TAILQ_FIRST(&pthread->mutexq))
				mutex_rescan_owned(curthread, pthread,
				    /* rescan all owned */ NULL);
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}

	/*
	 * If this thread is waiting on a priority inheritence mutex,
	 * check for priority adjustments.  A change in priority can
	 * also cause a ceiling violation(*) for a thread waiting on
	 * a priority protection mutex; we don't perform the check here
	 * as it is done in pthread_mutex_unlock.
	 *
	 * (*) It should be noted that a priority change to a thread
	 *     _after_ taking and owning a priority ceiling mutex
	 *     does not affect ownership of that mutex; the ceiling
	 *     priority is only checked before mutex ownership occurs.
	 */
	if (propagate_prio != 0) {
		/*
		 * Lock the thread's scheduling queue.  This is a bit
		 * convoluted; the "in synchronization queue flag" can
		 * only be cleared with both the thread's scheduling and
		 * mutex locks held.  The thread's pointer to the wanted
		 * mutex is guaranteed to be valid during this time.
		 */
		THR_SCHED_LOCK(curthread, pthread);

		if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) == 0) ||
		    ((m = pthread->data.mutex) == NULL))
			THR_SCHED_UNLOCK(curthread, pthread);
		else {
			/*
			 * This thread is currently waiting on a mutex; unlock
			 * the scheduling queue lock and lock the mutex.  We
			 * can't hold both at the same time because the locking
			 * order could cause a deadlock.
			 */
			THR_SCHED_UNLOCK(curthread, pthread);
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Check to make sure this thread is still in the
			 * same state (the lock above can yield the CPU to
			 * another thread or the thread may be running on
			 * another CPU).
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (pthread->data.mutex == m)) {
				/*
				 * Remove and reinsert this thread into
				 * the list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				if (m->m_protocol == PTHREAD_PRIO_INHERIT)
					/* Adjust priorities: */
					mutex_priority_adjust(curthread, m);
			}

			/* Unlock the mutex structure: */
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
}

/*
 * Called when a new thread is added to the mutex waiting queue or
 * when a threads priority changes that is already in the mutex
 * waiting queue.
 *
 * This must be called with the mutex locked by the current thread.
 */
static void
mutex_priority_adjust(struct pthread *curthread, pthread_mutex_t mutex)
{
	pthread_mutex_t	m = mutex;
	struct pthread	*pthread_next, *pthread = mutex->m_owner;
	int		done, temp_prio;

	/*
	 * Calculate the mutex priority as the maximum of the highest
	 * active priority of any waiting threads and the owning threads
	 * active priority(*).
	 *
	 * (*) Because the owning threads current active priority may
	 *     reflect priority inherited from this mutex (and the mutex
	 *     priority may have changed) we must recalculate the active
	 *     priority based on the threads saved inherited priority
	 *     and its base priority.
	 */
	pthread_next = TAILQ_FIRST(&m->m_queue);	/* should never be NULL */
	temp_prio = MAX(pthread_next->active_priority,
	    MAX(m->m_saved_prio, pthread->base_priority));

	/* See if this mutex really needs adjusting: */
	if (temp_prio == m->m_prio)
		/* No need to propagate the priority: */
		return;

	/* Set new priority of the mutex: */
	m->m_prio = temp_prio;

	/*
	 * Don't unlock the mutex passed in as an argument.  It is
	 * expected to be locked and unlocked by the caller.
	 */
	done = 1;
	do {
		/*
		 * Save the threads priority before rescanning the
		 * owned mutexes:
		 */
		temp_prio = pthread->active_priority;

		/*
		 * Fix the priorities for all mutexes held by the owning
		 * thread since taking this mutex.  This also has a
		 * potential side-effect of changing the threads priority.
		 *
		 * At this point the mutex is locked by the current thread.
		 * The owning thread can't release the mutex until it is
		 * unlocked, so we should be able to safely walk its list
		 * of owned mutexes.
		 */
		mutex_rescan_owned(curthread, pthread, m);

		/*
		 * If this isn't the first time through the loop,
		 * the current mutex needs to be unlocked.
		 */
		if (done == 0)
			THR_LOCK_RELEASE(curthread, &m->m_lock);

		/* Assume we're done unless told otherwise: */
		done = 1;

		/*
		 * If the thread is currently waiting on a mutex, check
		 * to see if the threads new priority has affected the
		 * priority of the mutex.
		 */
		if ((temp_prio != pthread->active_priority) &&
		    ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
		    ((m = pthread->data.mutex) != NULL) &&
		    (m->m_protocol == PTHREAD_PRIO_INHERIT)) {
			/* Lock the mutex structure: */
			THR_LOCK_ACQUIRE(curthread, &m->m_lock);

			/*
			 * Make sure the thread is still waiting on the
			 * mutex:
			 */
			if (((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) &&
			    (m == pthread->data.mutex)) {
				/*
				 * The priority for this thread has changed.
				 * Remove and reinsert this thread into the
				 * list of waiting threads to preserve
				 * decreasing priority order.
				 */
				mutex_queue_remove(m, pthread);
				mutex_queue_enq(m, pthread);

				/*
				 * Grab the waiting thread with highest
				 * priority:
				 */
				pthread_next = TAILQ_FIRST(&m->m_queue);

				/*
				 * Calculate the mutex priority as the maximum
				 * of the highest active priority of any
				 * waiting threads and the owning threads
				 * active priority.
				 */
				temp_prio = MAX(pthread_next->active_priority,
				    MAX(m->m_saved_prio,
				    m->m_owner->base_priority));

				if (temp_prio != m->m_prio) {
					/*
					 * The priority needs to be propagated
					 * to the mutex this thread is waiting
					 * on and up to the owner of that mutex.
					 */
					m->m_prio = temp_prio;
					pthread = m->m_owner;

					/* We're not done yet: */
					done = 0;
				}
			}
			/* Only release the mutex if we're done: */
			if (done != 0)
				THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	} while (done == 0);
}

static void
mutex_rescan_owned(struct pthread *curthread, struct pthread *pthread,
    struct pthread_mutex *mutex)
{
	struct pthread_mutex	*m;
	struct pthread		*pthread_next;
	int			active_prio, inherited_prio;

	/*
	 * Start walking the mutexes the thread has taken since
	 * taking this mutex.
	 */
	if (mutex == NULL) {
		/*
		 * A null mutex means start at the beginning of the owned
		 * mutex list.
		 */
		m = TAILQ_FIRST(&pthread->mutexq);

		/* There is no inherited priority yet. */
		inherited_prio = 0;
	} else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritence
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	for (; m != NULL; m = TAILQ_NEXT(m, m_qe)) {
		/*
		 * We only want to deal with priority inheritence
		 * mutexes.  This might be optimized by only placing
		 * priority inheritence mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owners saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				    pthread_next->active_priority);
			else
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}
	}

	/*
	 * Fix the threads inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/* Lock the thread's scheduling queue: */
		THR_SCHED_LOCK(curthread, pthread);

		if ((pthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
			/*
			 * This thread is not in a run queue.  Just set
			 * its active priority.
			 */
			pthread->active_priority = active_prio;
		}
		else {
			/*
			 * This thread is in a run queue.  Remove it from
			 * the queue before changing its priority:
			 */
			THR_RUNQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritence mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_HEAD(pthread);
			} else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				THR_RUNQ_INSERT_TAIL(pthread);
			}
		}
		THR_SCHED_UNLOCK(curthread, pthread);
	}
}

void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			pthread_mutex_unlock(&m);
	}
}

/*
 * This is called by the current thread when it wants to back out of a
 * mutex_lock in order to run a signal handler.
 */
static void
mutex_lock_backout(void *arg)
{
	struct pthread *curthread = (struct pthread *)arg;
	struct pthread_mutex *m;

	if ((curthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		/*
		 * Any other thread may clear the "in sync queue flag",
		 * but only the current thread can clear the pointer
		 * to the mutex.  So if the flag is set, we can
		 * guarantee that the pointer to the mutex is valid.
		 * The only problem may be if the mutex is destroyed
		 * out from under us, but that should be considered
		 * an application bug.
		 */
		m = curthread->data.mutex;

		/* Lock the mutex structure: */
		THR_LOCK_ACQUIRE(curthread, &m->m_lock);


		/*
		 * Check to make sure this thread doesn't already own
		 * the mutex.  Since mutexes are unlocked with direct
		 * handoffs, it is possible the previous owner gave it
		 * to us after we checked the sync queue flag and before
		 * we locked the mutex structure.
		 */
		if (m->m_owner == curthread) {
			THR_LOCK_RELEASE(curthread, &m->m_lock);
			mutex_unlock_common(&m, /* add_reference */ 0);
		} else {
			/*
			 * Remove ourselves from the mutex queue and
			 * clear the pointer to the mutex.  We may no
			 * longer be in the mutex queue, but the removal
			 * function will DTRT.
			 */
			mutex_queue_remove(m, curthread);
			curthread->data.mutex = NULL;
			THR_LOCK_RELEASE(curthread, &m->m_lock);
		}
	}
	/* No need to call this again. */
	curthread->sigbackout = NULL;
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * In order to properly dequeue a thread from the mutex queue and
 * make it runnable without the possibility of errant wakeups, it
 * is necessary to lock the thread's scheduling queue while also
 * holding the mutex lock.
 */
static struct kse_mailbox *
mutex_handoff(struct pthread *curthread, struct pthread_mutex *mutex)
{
	struct kse_mailbox *kmbx = NULL;
	struct pthread *pthread;

	/* Keep dequeueing until we find a valid thread: */
	mutex->m_owner = NULL;
	pthread = TAILQ_FIRST(&mutex->m_queue);
	while (pthread != NULL) {
		/* Take the thread's scheduling lock: */
		THR_SCHED_LOCK(curthread, pthread);

		/* Remove the thread from the mutex queue: */
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		switch (mutex->m_protocol) {
		case PTHREAD_PRIO_NONE:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);
			break;

		case PTHREAD_PRIO_INHERIT:
			/*
			 * Assign the new owner and add the mutex to the
			 * thread's list of owned mutexes.
			 */
			mutex->m_owner = pthread;
			TAILQ_INSERT_TAIL(&pthread->mutexq, mutex, m_qe);

			/* Track number of priority mutexes owned: */
			pthread->priority_mutex_count++;

			/*
			 * Set the priority of the mutex.  Since our waiting
			 * threads are in descending priority order, the
			 * priority of the mutex becomes the active priority
			 * of the thread we just dequeued.
			 */
			mutex->m_prio = pthread->active_priority;

			/* Save the owning threads inherited priority: */
			mutex->m_saved_prio = pthread->inherited_priority;

			/*
			 * The owning threads inherited priority now becomes
			 * his active priority (the priority of the mutex).
			 */
			pthread->inherited_priority = mutex->m_prio;
			break;

		case PTHREAD_PRIO_PROTECT:
			if (pthread->active_priority > mutex->m_prio) {
				/*
				 * Either the mutex ceiling priority has
				 * been lowered and/or this threads priority
				 * has been raised subsequent to the thread
				 * being queued on the waiting list.
				 */
				pthread->error = EINVAL;
			}
			else {
				/*
				 * Assign the new owner and add the mutex
				 * to the thread's list of owned mutexes.
				 */
				mutex->m_owner = pthread;
				TAILQ_INSERT_TAIL(&pthread->mutexq,
				    mutex, m_qe);

				/* Track number of priority mutexes owned: */
				pthread->priority_mutex_count++;

				/*
				 * Save the owning threads inherited
				 * priority:
				 */
				mutex->m_saved_prio =
				    pthread->inherited_priority;

				/*
				 * The owning thread inherits the ceiling
				 * priority of the mutex and executes at
				 * that priority:
				 */
				pthread->inherited_priority = mutex->m_prio;
				pthread->active_priority = mutex->m_prio;

			}
			break;
		}

		/* Make the thread runnable and unlock the scheduling queue: */
		kmbx = _thr_setrunnable_unlocked(pthread);

		/* Add a preemption point. */
		if ((curthread->kseg == pthread->kseg) &&
		    (pthread->active_priority > curthread->active_priority))
			curthread->critical_yield = 1;

		if (mutex->m_owner == pthread) {
			/* We're done; a valid owner was found. */
			if (mutex->m_flags & MUTEX_FLAGS_PRIVATE)
				THR_CRITICAL_ENTER(pthread);
			THR_SCHED_UNLOCK(curthread, pthread);
			break;
		}
		THR_SCHED_UNLOCK(curthread, pthread);
		/* Get the next thread from the waiting queue: */
		pthread = TAILQ_NEXT(pthread, sqe);
	}

	if ((pthread == NULL) && (mutex->m_protocol == PTHREAD_PRIO_INHERIT))
		/* This mutex has no priority: */
		mutex->m_prio = 0;
	return (kmbx);
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 */
static inline pthread_t
mutex_queue_deq(struct pthread_mutex *mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		if (pthread->interrupted == 0)
			break;
	}

	return (pthread);
}

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
 */
static inline void
mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread)
{
	if ((pthread->sflags & THR_FLAGS_IN_SYNCQ) != 0) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, sqe);
		pthread->sflags &= ~THR_FLAGS_IN_SYNCQ;
	}
}

/*
 * Enqueue a waiting thread to a queue in descending priority order.
 */
static inline void
mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head);

	THR_ASSERT_NOT_IN_SYNCQ(pthread);
	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&mutex->m_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	pthread->sflags |= THR_FLAGS_IN_SYNCQ;
}
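
Below is a minimal, hypothetical usage sketch, separate from thr_mutex.c itself. It exercises only the public pthread_mutex_* entry points implemented above through their standard POSIX signatures; the recursive mutex type and the function name example_mutex_usage are chosen purely for illustration and are not part of this revision.

#include <pthread.h>
#include <stdio.h>

int
example_mutex_usage(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t lock;		/* dynamically initialized below */
	int error;

	/* Ask for a recursive mutex instead of the default error-checking type. */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);

	if ((error = pthread_mutex_init(&lock, &attr)) != 0) {
		fprintf(stderr, "pthread_mutex_init: %d\n", error);
		return (error);
	}
	pthread_mutexattr_destroy(&attr);

	/* Recursive locking is legal for this type; each lock needs an unlock. */
	pthread_mutex_lock(&lock);
	pthread_mutex_lock(&lock);
	pthread_mutex_unlock(&lock);
	pthread_mutex_unlock(&lock);

	/* Destroy succeeds only once the mutex is unowned and has no waiters. */
	return (pthread_mutex_destroy(&lock));
}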