thr_mutex.c revision 56277
/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libkse/thread/thr_mutex.c 56277 2000-01-19 07:04:50Z jasone $
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#ifdef _THREAD_SAFE
#include <pthread.h>
#include "pthread_private.h"

/*
 * When built with _PTHREADS_INVARIANTS, a mutex's TAILQ link field is kept
 * NULL whenever the mutex is not on any thread's owned-mutex list.  That
 * lets the assertion macros below cheaply detect list-membership bugs
 * (double-insert, remove-while-not-owned) and PANIC instead of silently
 * corrupting the queues.  Without invariants they compile to nothing.
 */
#if defined(_PTHREADS_INVARIANTS)
#define _MUTEX_INIT_LINK(m) do {			\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define _MUTEX_ASSERT_IS_OWNED(m) do {			\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define _MUTEX_ASSERT_NOT_OWNED(m) do {			\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#else
#define _MUTEX_INIT_LINK(m)
#define _MUTEX_ASSERT_IS_OWNED(m)
#define _MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * Prototypes
 */
static inline int	mutex_self_trylock(pthread_mutex_t);
static inline int	mutex_self_lock(pthread_mutex_t);
static inline int	mutex_unlock_common(pthread_mutex_t *, int);
static void		mutex_priority_adjust(pthread_mutex_t);
static void		mutex_rescan_owned (pthread_t, pthread_mutex_t);
static inline pthread_t	mutex_queue_deq(pthread_mutex_t);
static inline void	mutex_queue_remove(pthread_mutex_t, pthread_t);
static inline void	mutex_queue_enq(pthread_mutex_t, pthread_t);

/* Serializes first-use initialization of statically allocated mutexes. */
static spinlock_t static_init_lock = _SPINLOCK_INITIALIZER;

/* Reinitialize a mutex to defaults.
*/ 79int 80_mutex_reinit(pthread_mutex_t * mutex) 81{ 82 int ret = 0; 83 84 if (mutex == NULL) 85 ret = EINVAL; 86 else if (*mutex == NULL) 87 ret = pthread_mutex_init(mutex, NULL); 88 else { 89 /* 90 * Initialize the mutex structure: 91 */ 92 (*mutex)->m_type = PTHREAD_MUTEX_DEFAULT; 93 (*mutex)->m_protocol = PTHREAD_PRIO_NONE; 94 TAILQ_INIT(&(*mutex)->m_queue); 95 (*mutex)->m_owner = NULL; 96 (*mutex)->m_data.m_count = 0; 97 (*mutex)->m_flags &= MUTEX_FLAGS_PRIVATE; 98 (*mutex)->m_flags |= MUTEX_FLAGS_INITED; 99 (*mutex)->m_refcount = 0; 100 (*mutex)->m_prio = 0; 101 (*mutex)->m_saved_prio = 0; 102 _MUTEX_INIT_LINK(*mutex); 103 memset(&(*mutex)->lock, 0, sizeof((*mutex)->lock)); 104 } 105 return (ret); 106} 107 108int 109pthread_mutex_init(pthread_mutex_t * mutex, 110 const pthread_mutexattr_t * mutex_attr) 111{ 112 enum pthread_mutextype type; 113 int protocol; 114 int ceiling; 115 pthread_mutex_t pmutex; 116 int ret = 0; 117 118 if (mutex == NULL) 119 ret = EINVAL; 120 121 /* Check if default mutex attributes: */ 122 else if (mutex_attr == NULL || *mutex_attr == NULL) { 123 /* Default to a (error checking) POSIX mutex: */ 124 type = PTHREAD_MUTEX_ERRORCHECK; 125 protocol = PTHREAD_PRIO_NONE; 126 ceiling = PTHREAD_MAX_PRIORITY; 127 } 128 129 /* Check mutex type: */ 130 else if (((*mutex_attr)->m_type < PTHREAD_MUTEX_ERRORCHECK) || 131 ((*mutex_attr)->m_type >= MUTEX_TYPE_MAX)) 132 /* Return an invalid argument error: */ 133 ret = EINVAL; 134 135 /* Check mutex protocol: */ 136 else if (((*mutex_attr)->m_protocol < PTHREAD_PRIO_NONE) || 137 ((*mutex_attr)->m_protocol > PTHREAD_MUTEX_RECURSIVE)) 138 /* Return an invalid argument error: */ 139 ret = EINVAL; 140 141 else { 142 /* Use the requested mutex type and protocol: */ 143 type = (*mutex_attr)->m_type; 144 protocol = (*mutex_attr)->m_protocol; 145 ceiling = (*mutex_attr)->m_ceiling; 146 } 147 148 /* Check no errors so far: */ 149 if (ret == 0) { 150 if ((pmutex = (pthread_mutex_t) 151 malloc(sizeof(struct 
pthread_mutex))) == NULL) 152 ret = ENOMEM; 153 else { 154 /* Reset the mutex flags: */ 155 pmutex->m_flags = 0; 156 157 /* Process according to mutex type: */ 158 switch (type) { 159 /* case PTHREAD_MUTEX_DEFAULT: */ 160 case PTHREAD_MUTEX_ERRORCHECK: 161 case PTHREAD_MUTEX_NORMAL: 162 /* Nothing to do here. */ 163 break; 164 165 /* Single UNIX Spec 2 recursive mutex: */ 166 case PTHREAD_MUTEX_RECURSIVE: 167 /* Reset the mutex count: */ 168 pmutex->m_data.m_count = 0; 169 break; 170 171 /* Trap invalid mutex types: */ 172 default: 173 /* Return an invalid argument error: */ 174 ret = EINVAL; 175 break; 176 } 177 if (ret == 0) { 178 /* Initialise the rest of the mutex: */ 179 TAILQ_INIT(&pmutex->m_queue); 180 pmutex->m_flags |= MUTEX_FLAGS_INITED; 181 pmutex->m_owner = NULL; 182 pmutex->m_type = type; 183 pmutex->m_protocol = protocol; 184 pmutex->m_refcount = 0; 185 if (protocol == PTHREAD_PRIO_PROTECT) 186 pmutex->m_prio = ceiling; 187 else 188 pmutex->m_prio = 0; 189 pmutex->m_saved_prio = 0; 190 _MUTEX_INIT_LINK(pmutex); 191 memset(&pmutex->lock, 0, sizeof(pmutex->lock)); 192 *mutex = pmutex; 193 } else { 194 free(pmutex); 195 *mutex = NULL; 196 } 197 } 198 } 199 /* Return the completion status: */ 200 return(ret); 201} 202 203int 204pthread_mutex_destroy(pthread_mutex_t * mutex) 205{ 206 int ret = 0; 207 208 if (mutex == NULL || *mutex == NULL) 209 ret = EINVAL; 210 else { 211 /* Lock the mutex structure: */ 212 _SPINLOCK(&(*mutex)->lock); 213 214 /* 215 * Check to see if this mutex is in use: 216 */ 217 if (((*mutex)->m_owner != NULL) || 218 (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) || 219 ((*mutex)->m_refcount != 0)) { 220 ret = EBUSY; 221 222 /* Unlock the mutex structure: */ 223 _SPINUNLOCK(&(*mutex)->lock); 224 } 225 else { 226 /* 227 * Free the memory allocated for the mutex 228 * structure: 229 */ 230 _MUTEX_ASSERT_NOT_OWNED(*mutex); 231 free(*mutex); 232 233 /* 234 * Leave the caller's pointer NULL now that 235 * the mutex has been destroyed: 236 */ 
237 *mutex = NULL; 238 } 239 } 240 241 /* Return the completion status: */ 242 return (ret); 243} 244 245static int 246init_static(pthread_mutex_t *mutex) 247{ 248 int ret; 249 250 _SPINLOCK(&static_init_lock); 251 252 if (*mutex == NULL) 253 ret = pthread_mutex_init(mutex, NULL); 254 else 255 ret = 0; 256 257 _SPINUNLOCK(&static_init_lock); 258 259 return(ret); 260} 261 262int 263pthread_mutex_trylock(pthread_mutex_t * mutex) 264{ 265 int ret = 0; 266 267 if (mutex == NULL) 268 ret = EINVAL; 269 270 /* 271 * If the mutex is statically initialized, perform the dynamic 272 * initialization: 273 */ 274 else if (*mutex != NULL || (ret = init_static(mutex)) == 0) { 275 /* 276 * Defer signals to protect the scheduling queues from 277 * access by the signal handler: 278 */ 279 _thread_kern_sig_defer(); 280 281 /* Lock the mutex structure: */ 282 _SPINLOCK(&(*mutex)->lock); 283 284 /* 285 * If the mutex was statically allocated, properly 286 * initialize the tail queue. 287 */ 288 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { 289 TAILQ_INIT(&(*mutex)->m_queue); 290 _MUTEX_INIT_LINK(*mutex); 291 (*mutex)->m_flags |= MUTEX_FLAGS_INITED; 292 } 293 294 /* Process according to mutex type: */ 295 switch ((*mutex)->m_protocol) { 296 /* Default POSIX mutex: */ 297 case PTHREAD_PRIO_NONE: 298 /* Check if this mutex is not locked: */ 299 if ((*mutex)->m_owner == NULL) { 300 /* Lock the mutex for the running thread: */ 301 (*mutex)->m_owner = _thread_run; 302 303 /* Add to the list of owned mutexes: */ 304 _MUTEX_ASSERT_NOT_OWNED(*mutex); 305 TAILQ_INSERT_TAIL(&_thread_run->mutexq, 306 (*mutex), m_qe); 307 } else if ((*mutex)->m_owner == _thread_run) 308 ret = mutex_self_trylock(*mutex); 309 else 310 /* Return a busy error: */ 311 ret = EBUSY; 312 break; 313 314 /* POSIX priority inheritence mutex: */ 315 case PTHREAD_PRIO_INHERIT: 316 /* Check if this mutex is not locked: */ 317 if ((*mutex)->m_owner == NULL) { 318 /* Lock the mutex for the running thread: */ 319 
(*mutex)->m_owner = _thread_run; 320 321 /* Track number of priority mutexes owned: */ 322 _thread_run->priority_mutex_count++; 323 324 /* 325 * The mutex takes on the attributes of the 326 * running thread when there are no waiters. 327 */ 328 (*mutex)->m_prio = _thread_run->active_priority; 329 (*mutex)->m_saved_prio = 330 _thread_run->inherited_priority; 331 332 /* Add to the list of owned mutexes: */ 333 _MUTEX_ASSERT_NOT_OWNED(*mutex); 334 TAILQ_INSERT_TAIL(&_thread_run->mutexq, 335 (*mutex), m_qe); 336 } else if ((*mutex)->m_owner == _thread_run) 337 ret = mutex_self_trylock(*mutex); 338 else 339 /* Return a busy error: */ 340 ret = EBUSY; 341 break; 342 343 /* POSIX priority protection mutex: */ 344 case PTHREAD_PRIO_PROTECT: 345 /* Check for a priority ceiling violation: */ 346 if (_thread_run->active_priority > (*mutex)->m_prio) 347 ret = EINVAL; 348 349 /* Check if this mutex is not locked: */ 350 else if ((*mutex)->m_owner == NULL) { 351 /* Lock the mutex for the running thread: */ 352 (*mutex)->m_owner = _thread_run; 353 354 /* Track number of priority mutexes owned: */ 355 _thread_run->priority_mutex_count++; 356 357 /* 358 * The running thread inherits the ceiling 359 * priority of the mutex and executes at that 360 * priority. 
361 */ 362 _thread_run->active_priority = (*mutex)->m_prio; 363 (*mutex)->m_saved_prio = 364 _thread_run->inherited_priority; 365 _thread_run->inherited_priority = 366 (*mutex)->m_prio; 367 368 /* Add to the list of owned mutexes: */ 369 _MUTEX_ASSERT_NOT_OWNED(*mutex); 370 TAILQ_INSERT_TAIL(&_thread_run->mutexq, 371 (*mutex), m_qe); 372 } else if ((*mutex)->m_owner == _thread_run) 373 ret = mutex_self_trylock(*mutex); 374 else 375 /* Return a busy error: */ 376 ret = EBUSY; 377 break; 378 379 /* Trap invalid mutex types: */ 380 default: 381 /* Return an invalid argument error: */ 382 ret = EINVAL; 383 break; 384 } 385 386 /* Unlock the mutex structure: */ 387 _SPINUNLOCK(&(*mutex)->lock); 388 389 /* 390 * Undefer and handle pending signals, yielding if 391 * necessary: 392 */ 393 _thread_kern_sig_undefer(); 394 } 395 396 /* Return the completion status: */ 397 return (ret); 398} 399 400int 401pthread_mutex_lock(pthread_mutex_t * mutex) 402{ 403 int ret = 0; 404 405 if (mutex == NULL) 406 ret = EINVAL; 407 408 /* 409 * If the mutex is statically initialized, perform the dynamic 410 * initialization: 411 */ 412 else if (*mutex != NULL || (ret = init_static(mutex)) == 0) { 413 /* 414 * Defer signals to protect the scheduling queues from 415 * access by the signal handler: 416 */ 417 _thread_kern_sig_defer(); 418 419 /* Lock the mutex structure: */ 420 _SPINLOCK(&(*mutex)->lock); 421 422 /* 423 * If the mutex was statically allocated, properly 424 * initialize the tail queue. 
425 */ 426 if (((*mutex)->m_flags & MUTEX_FLAGS_INITED) == 0) { 427 TAILQ_INIT(&(*mutex)->m_queue); 428 (*mutex)->m_flags |= MUTEX_FLAGS_INITED; 429 _MUTEX_INIT_LINK(*mutex); 430 } 431 432 /* Reset the interrupted flag: */ 433 _thread_run->interrupted = 0; 434 435 /* Process according to mutex type: */ 436 switch ((*mutex)->m_protocol) { 437 /* Default POSIX mutex: */ 438 case PTHREAD_PRIO_NONE: 439 if ((*mutex)->m_owner == NULL) { 440 /* Lock the mutex for this thread: */ 441 (*mutex)->m_owner = _thread_run; 442 443 /* Add to the list of owned mutexes: */ 444 _MUTEX_ASSERT_NOT_OWNED(*mutex); 445 TAILQ_INSERT_TAIL(&_thread_run->mutexq, 446 (*mutex), m_qe); 447 448 } else if ((*mutex)->m_owner == _thread_run) 449 ret = mutex_self_lock(*mutex); 450 else { 451 /* 452 * Join the queue of threads waiting to lock 453 * the mutex: 454 */ 455 mutex_queue_enq(*mutex, _thread_run); 456 457 /* 458 * Keep a pointer to the mutex this thread 459 * is waiting on: 460 */ 461 _thread_run->data.mutex = *mutex; 462 463 /* 464 * Unlock the mutex structure and schedule the 465 * next thread: 466 */ 467 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, 468 &(*mutex)->lock, __FILE__, __LINE__); 469 470 /* Lock the mutex structure again: */ 471 _SPINLOCK(&(*mutex)->lock); 472 } 473 break; 474 475 /* POSIX priority inheritence mutex: */ 476 case PTHREAD_PRIO_INHERIT: 477 /* Check if this mutex is not locked: */ 478 if ((*mutex)->m_owner == NULL) { 479 /* Lock the mutex for this thread: */ 480 (*mutex)->m_owner = _thread_run; 481 482 /* Track number of priority mutexes owned: */ 483 _thread_run->priority_mutex_count++; 484 485 /* 486 * The mutex takes on attributes of the 487 * running thread when there are no waiters. 
488 */ 489 (*mutex)->m_prio = _thread_run->active_priority; 490 (*mutex)->m_saved_prio = 491 _thread_run->inherited_priority; 492 _thread_run->inherited_priority = 493 (*mutex)->m_prio; 494 495 /* Add to the list of owned mutexes: */ 496 _MUTEX_ASSERT_NOT_OWNED(*mutex); 497 TAILQ_INSERT_TAIL(&_thread_run->mutexq, 498 (*mutex), m_qe); 499 500 } else if ((*mutex)->m_owner == _thread_run) 501 ret = mutex_self_lock(*mutex); 502 else { 503 /* 504 * Join the queue of threads waiting to lock 505 * the mutex: 506 */ 507 mutex_queue_enq(*mutex, _thread_run); 508 509 /* 510 * Keep a pointer to the mutex this thread 511 * is waiting on: 512 */ 513 _thread_run->data.mutex = *mutex; 514 515 if (_thread_run->active_priority > 516 (*mutex)->m_prio) 517 /* Adjust priorities: */ 518 mutex_priority_adjust(*mutex); 519 520 /* 521 * Unlock the mutex structure and schedule the 522 * next thread: 523 */ 524 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, 525 &(*mutex)->lock, __FILE__, __LINE__); 526 527 /* Lock the mutex structure again: */ 528 _SPINLOCK(&(*mutex)->lock); 529 } 530 break; 531 532 /* POSIX priority protection mutex: */ 533 case PTHREAD_PRIO_PROTECT: 534 /* Check for a priority ceiling violation: */ 535 if (_thread_run->active_priority > (*mutex)->m_prio) 536 ret = EINVAL; 537 538 /* Check if this mutex is not locked: */ 539 else if ((*mutex)->m_owner == NULL) { 540 /* 541 * Lock the mutex for the running 542 * thread: 543 */ 544 (*mutex)->m_owner = _thread_run; 545 546 /* Track number of priority mutexes owned: */ 547 _thread_run->priority_mutex_count++; 548 549 /* 550 * The running thread inherits the ceiling 551 * priority of the mutex and executes at that 552 * priority: 553 */ 554 _thread_run->active_priority = (*mutex)->m_prio; 555 (*mutex)->m_saved_prio = 556 _thread_run->inherited_priority; 557 _thread_run->inherited_priority = 558 (*mutex)->m_prio; 559 560 /* Add to the list of owned mutexes: */ 561 _MUTEX_ASSERT_NOT_OWNED(*mutex); 562 
TAILQ_INSERT_TAIL(&_thread_run->mutexq, 563 (*mutex), m_qe); 564 } else if ((*mutex)->m_owner == _thread_run) 565 ret = mutex_self_lock(*mutex); 566 else { 567 /* 568 * Join the queue of threads waiting to lock 569 * the mutex: 570 */ 571 mutex_queue_enq(*mutex, _thread_run); 572 573 /* 574 * Keep a pointer to the mutex this thread 575 * is waiting on: 576 */ 577 _thread_run->data.mutex = *mutex; 578 579 /* Clear any previous error: */ 580 _thread_run->error = 0; 581 582 /* 583 * Unlock the mutex structure and schedule the 584 * next thread: 585 */ 586 _thread_kern_sched_state_unlock(PS_MUTEX_WAIT, 587 &(*mutex)->lock, __FILE__, __LINE__); 588 589 /* Lock the mutex structure again: */ 590 _SPINLOCK(&(*mutex)->lock); 591 592 /* 593 * The threads priority may have changed while 594 * waiting for the mutex causing a ceiling 595 * violation. 596 */ 597 ret = _thread_run->error; 598 _thread_run->error = 0; 599 } 600 break; 601 602 /* Trap invalid mutex types: */ 603 default: 604 /* Return an invalid argument error: */ 605 ret = EINVAL; 606 break; 607 } 608 609 /* 610 * Check to see if this thread was interrupted and 611 * is still in the mutex queue of waiting threads: 612 */ 613 if (_thread_run->interrupted != 0) 614 mutex_queue_remove(*mutex, _thread_run); 615 616 /* Unlock the mutex structure: */ 617 _SPINUNLOCK(&(*mutex)->lock); 618 619 /* 620 * Undefer and handle pending signals, yielding if 621 * necessary: 622 */ 623 _thread_kern_sig_undefer(); 624 625 if (_thread_run->interrupted != 0 && 626 _thread_run->continuation != NULL) 627 _thread_run->continuation((void *) _thread_run); 628 } 629 630 /* Return the completion status: */ 631 return (ret); 632} 633 634int 635pthread_mutex_unlock(pthread_mutex_t * mutex) 636{ 637 return (mutex_unlock_common(mutex, /* add reference */ 0)); 638} 639 640int 641_mutex_cv_unlock(pthread_mutex_t * mutex) 642{ 643 return (mutex_unlock_common(mutex, /* add reference */ 1)); 644} 645 646int 647_mutex_cv_lock(pthread_mutex_t * mutex) 
648{ 649 int ret; 650 if ((ret = pthread_mutex_lock(mutex)) == 0) 651 (*mutex)->m_refcount--; 652 return (ret); 653} 654 655static inline int 656mutex_self_trylock(pthread_mutex_t mutex) 657{ 658 int ret = 0; 659 660 switch (mutex->m_type) { 661 662 /* case PTHREAD_MUTEX_DEFAULT: */ 663 case PTHREAD_MUTEX_ERRORCHECK: 664 case PTHREAD_MUTEX_NORMAL: 665 /* 666 * POSIX specifies that mutexes should return EDEADLK if a 667 * recursive lock is detected. 668 */ 669 ret = EBUSY; 670 break; 671 672 case PTHREAD_MUTEX_RECURSIVE: 673 /* Increment the lock count: */ 674 mutex->m_data.m_count++; 675 break; 676 677 default: 678 /* Trap invalid mutex types; */ 679 ret = EINVAL; 680 } 681 682 return(ret); 683} 684 685static inline int 686mutex_self_lock(pthread_mutex_t mutex) 687{ 688 int ret = 0; 689 690 switch (mutex->m_type) { 691 /* case PTHREAD_MUTEX_DEFAULT: */ 692 case PTHREAD_MUTEX_ERRORCHECK: 693 /* 694 * POSIX specifies that mutexes should return EDEADLK if a 695 * recursive lock is detected. 696 */ 697 ret = EDEADLK; 698 break; 699 700 case PTHREAD_MUTEX_NORMAL: 701 /* 702 * What SS2 define as a 'normal' mutex. Intentionally 703 * deadlock on attempts to get a lock you already own. 
704 */ 705 _thread_kern_sched_state_unlock(PS_DEADLOCK, 706 &mutex->lock, __FILE__, __LINE__); 707 break; 708 709 case PTHREAD_MUTEX_RECURSIVE: 710 /* Increment the lock count: */ 711 mutex->m_data.m_count++; 712 break; 713 714 default: 715 /* Trap invalid mutex types; */ 716 ret = EINVAL; 717 } 718 719 return(ret); 720} 721 722static inline int 723mutex_unlock_common(pthread_mutex_t * mutex, int add_reference) 724{ 725 int ret = 0; 726 727 if (mutex == NULL || *mutex == NULL) { 728 ret = EINVAL; 729 } else { 730 /* 731 * Defer signals to protect the scheduling queues from 732 * access by the signal handler: 733 */ 734 _thread_kern_sig_defer(); 735 736 /* Lock the mutex structure: */ 737 _SPINLOCK(&(*mutex)->lock); 738 739 /* Process according to mutex type: */ 740 switch ((*mutex)->m_protocol) { 741 /* Default POSIX mutex: */ 742 case PTHREAD_PRIO_NONE: 743 /* 744 * Check if the running thread is not the owner of the 745 * mutex: 746 */ 747 if ((*mutex)->m_owner != _thread_run) { 748 /* 749 * Return an invalid argument error for no 750 * owner and a permission error otherwise: 751 */ 752 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; 753 } 754 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && 755 ((*mutex)->m_data.m_count > 1)) { 756 /* Decrement the count: */ 757 (*mutex)->m_data.m_count--; 758 } else { 759 /* 760 * Clear the count in case this is recursive 761 * mutex. 762 */ 763 (*mutex)->m_data.m_count = 0; 764 765 /* Remove the mutex from the threads queue. 
*/ 766 _MUTEX_ASSERT_IS_OWNED(*mutex); 767 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, 768 (*mutex), m_qe); 769 _MUTEX_INIT_LINK(*mutex); 770 771 /* 772 * Get the next thread from the queue of 773 * threads waiting on the mutex: 774 */ 775 if (((*mutex)->m_owner = 776 mutex_queue_deq(*mutex)) != NULL) { 777 /* 778 * Allow the new owner of the mutex to 779 * run: 780 */ 781 PTHREAD_NEW_STATE((*mutex)->m_owner, 782 PS_RUNNING); 783 784 /* 785 * Add the mutex to the threads list of 786 * owned mutexes: 787 */ 788 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, 789 (*mutex), m_qe); 790 791 /* 792 * The owner is no longer waiting for 793 * this mutex: 794 */ 795 (*mutex)->m_owner->data.mutex = NULL; 796 } 797 } 798 break; 799 800 /* POSIX priority inheritence mutex: */ 801 case PTHREAD_PRIO_INHERIT: 802 /* 803 * Check if the running thread is not the owner of the 804 * mutex: 805 */ 806 if ((*mutex)->m_owner != _thread_run) { 807 /* 808 * Return an invalid argument error for no 809 * owner and a permission error otherwise: 810 */ 811 ret = (*mutex)->m_owner == NULL ? EINVAL : EPERM; 812 } 813 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && 814 ((*mutex)->m_data.m_count > 1)) { 815 /* Decrement the count: */ 816 (*mutex)->m_data.m_count--; 817 } else { 818 /* 819 * Clear the count in case this is recursive 820 * mutex. 821 */ 822 (*mutex)->m_data.m_count = 0; 823 824 /* 825 * Restore the threads inherited priority and 826 * recompute the active priority (being careful 827 * not to override changes in the threads base 828 * priority subsequent to locking the mutex). 829 */ 830 _thread_run->inherited_priority = 831 (*mutex)->m_saved_prio; 832 _thread_run->active_priority = 833 MAX(_thread_run->inherited_priority, 834 _thread_run->base_priority); 835 836 /* 837 * This thread now owns one less priority mutex. 838 */ 839 _thread_run->priority_mutex_count--; 840 841 /* Remove the mutex from the threads queue. 
*/ 842 _MUTEX_ASSERT_IS_OWNED(*mutex); 843 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, 844 (*mutex), m_qe); 845 _MUTEX_INIT_LINK(*mutex); 846 847 /* 848 * Get the next thread from the queue of threads 849 * waiting on the mutex: 850 */ 851 if (((*mutex)->m_owner = 852 mutex_queue_deq(*mutex)) == NULL) 853 /* This mutex has no priority. */ 854 (*mutex)->m_prio = 0; 855 else { 856 /* 857 * Track number of priority mutexes owned: 858 */ 859 (*mutex)->m_owner->priority_mutex_count++; 860 861 /* 862 * Add the mutex to the threads list 863 * of owned mutexes: 864 */ 865 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, 866 (*mutex), m_qe); 867 868 /* 869 * The owner is no longer waiting for 870 * this mutex: 871 */ 872 (*mutex)->m_owner->data.mutex = NULL; 873 874 /* 875 * Set the priority of the mutex. Since 876 * our waiting threads are in descending 877 * priority order, the priority of the 878 * mutex becomes the active priority of 879 * the thread we just dequeued. 880 */ 881 (*mutex)->m_prio = 882 (*mutex)->m_owner->active_priority; 883 884 /* 885 * Save the owning threads inherited 886 * priority: 887 */ 888 (*mutex)->m_saved_prio = 889 (*mutex)->m_owner->inherited_priority; 890 891 /* 892 * The owning threads inherited priority 893 * now becomes his active priority (the 894 * priority of the mutex). 895 */ 896 (*mutex)->m_owner->inherited_priority = 897 (*mutex)->m_prio; 898 899 /* 900 * Allow the new owner of the mutex to 901 * run: 902 */ 903 PTHREAD_NEW_STATE((*mutex)->m_owner, 904 PS_RUNNING); 905 } 906 } 907 break; 908 909 /* POSIX priority ceiling mutex: */ 910 case PTHREAD_PRIO_PROTECT: 911 /* 912 * Check if the running thread is not the owner of the 913 * mutex: 914 */ 915 if ((*mutex)->m_owner != _thread_run) { 916 /* 917 * Return an invalid argument error for no 918 * owner and a permission error otherwise: 919 */ 920 ret = (*mutex)->m_owner == NULL ? 
EINVAL : EPERM; 921 } 922 else if (((*mutex)->m_type == PTHREAD_MUTEX_RECURSIVE) && 923 ((*mutex)->m_data.m_count > 1)) { 924 /* Decrement the count: */ 925 (*mutex)->m_data.m_count--; 926 } else { 927 /* 928 * Clear the count in case this is recursive 929 * mutex. 930 */ 931 (*mutex)->m_data.m_count = 0; 932 933 /* 934 * Restore the threads inherited priority and 935 * recompute the active priority (being careful 936 * not to override changes in the threads base 937 * priority subsequent to locking the mutex). 938 */ 939 _thread_run->inherited_priority = 940 (*mutex)->m_saved_prio; 941 _thread_run->active_priority = 942 MAX(_thread_run->inherited_priority, 943 _thread_run->base_priority); 944 945 /* 946 * This thread now owns one less priority mutex. 947 */ 948 _thread_run->priority_mutex_count--; 949 950 /* Remove the mutex from the threads queue. */ 951 _MUTEX_ASSERT_IS_OWNED(*mutex); 952 TAILQ_REMOVE(&(*mutex)->m_owner->mutexq, 953 (*mutex), m_qe); 954 _MUTEX_INIT_LINK(*mutex); 955 956 /* 957 * Enter a loop to find a waiting thread whose 958 * active priority will not cause a ceiling 959 * violation: 960 */ 961 while ((((*mutex)->m_owner = 962 mutex_queue_deq(*mutex)) != NULL) && 963 ((*mutex)->m_owner->active_priority > 964 (*mutex)->m_prio)) { 965 /* 966 * Either the mutex ceiling priority 967 * been lowered and/or this threads 968 * priority has been raised subsequent 969 * to this thread being queued on the 970 * waiting list. 
971 */ 972 (*mutex)->m_owner->error = EINVAL; 973 PTHREAD_NEW_STATE((*mutex)->m_owner, 974 PS_RUNNING); 975 /* 976 * The thread is no longer waiting for 977 * this mutex: 978 */ 979 (*mutex)->m_owner->data.mutex = NULL; 980 } 981 982 /* Check for a new owner: */ 983 if ((*mutex)->m_owner != NULL) { 984 /* 985 * Track number of priority mutexes owned: 986 */ 987 (*mutex)->m_owner->priority_mutex_count++; 988 989 /* 990 * Add the mutex to the threads list 991 * of owned mutexes: 992 */ 993 TAILQ_INSERT_TAIL(&(*mutex)->m_owner->mutexq, 994 (*mutex), m_qe); 995 996 /* 997 * The owner is no longer waiting for 998 * this mutex: 999 */ 1000 (*mutex)->m_owner->data.mutex = NULL; 1001 1002 /* 1003 * Save the owning threads inherited 1004 * priority: 1005 */ 1006 (*mutex)->m_saved_prio = 1007 (*mutex)->m_owner->inherited_priority; 1008 1009 /* 1010 * The owning thread inherits the 1011 * ceiling priority of the mutex and 1012 * executes at that priority: 1013 */ 1014 (*mutex)->m_owner->inherited_priority = 1015 (*mutex)->m_prio; 1016 (*mutex)->m_owner->active_priority = 1017 (*mutex)->m_prio; 1018 1019 /* 1020 * Allow the new owner of the mutex to 1021 * run: 1022 */ 1023 PTHREAD_NEW_STATE((*mutex)->m_owner, 1024 PS_RUNNING); 1025 } 1026 } 1027 break; 1028 1029 /* Trap invalid mutex types: */ 1030 default: 1031 /* Return an invalid argument error: */ 1032 ret = EINVAL; 1033 break; 1034 } 1035 1036 if ((ret == 0) && (add_reference != 0)) { 1037 /* Increment the reference count: */ 1038 (*mutex)->m_refcount++; 1039 } 1040 1041 /* Unlock the mutex structure: */ 1042 _SPINUNLOCK(&(*mutex)->lock); 1043 1044 /* 1045 * Undefer and handle pending signals, yielding if 1046 * necessary: 1047 */ 1048 _thread_kern_sig_undefer(); 1049 } 1050 1051 /* Return the completion status: */ 1052 return (ret); 1053} 1054 1055 1056/* 1057 * This function is called when a change in base priority occurs for 1058 * a thread that is holding or waiting for a priority protection or 1059 * inheritence 
mutex. A change in a threads base priority can effect 1060 * changes to active priorities of other threads and to the ordering 1061 * of mutex locking by waiting threads. 1062 * 1063 * This must be called while thread scheduling is deferred. 1064 */ 1065void 1066_mutex_notify_priochange(pthread_t pthread) 1067{ 1068 /* Adjust the priorites of any owned priority mutexes: */ 1069 if (pthread->priority_mutex_count > 0) { 1070 /* 1071 * Rescan the mutexes owned by this thread and correct 1072 * their priorities to account for this threads change 1073 * in priority. This has the side effect of changing 1074 * the threads active priority. 1075 */ 1076 mutex_rescan_owned(pthread, /* rescan all owned */ NULL); 1077 } 1078 1079 /* 1080 * If this thread is waiting on a priority inheritence mutex, 1081 * check for priority adjustments. A change in priority can 1082 * also effect a ceiling violation(*) for a thread waiting on 1083 * a priority protection mutex; we don't perform the check here 1084 * as it is done in pthread_mutex_unlock. 1085 * 1086 * (*) It should be noted that a priority change to a thread 1087 * _after_ taking and owning a priority ceiling mutex 1088 * does not affect ownership of that mutex; the ceiling 1089 * priority is only checked before mutex ownership occurs. 1090 */ 1091 if (pthread->state == PS_MUTEX_WAIT) { 1092 /* Lock the mutex structure: */ 1093 _SPINLOCK(&pthread->data.mutex->lock); 1094 1095 /* 1096 * Check to make sure this thread is still in the same state 1097 * (the spinlock above can yield the CPU to another thread): 1098 */ 1099 if (pthread->state == PS_MUTEX_WAIT) { 1100 /* 1101 * Remove and reinsert this thread into the list of 1102 * waiting threads to preserve decreasing priority 1103 * order. 
1104 */ 1105 mutex_queue_remove(pthread->data.mutex, pthread); 1106 mutex_queue_enq(pthread->data.mutex, pthread); 1107 1108 if (pthread->data.mutex->m_protocol == 1109 PTHREAD_PRIO_INHERIT) { 1110 /* Adjust priorities: */ 1111 mutex_priority_adjust(pthread->data.mutex); 1112 } 1113 } 1114 1115 /* Unlock the mutex structure: */ 1116 _SPINUNLOCK(&pthread->data.mutex->lock); 1117 } 1118} 1119 1120/* 1121 * Called when a new thread is added to the mutex waiting queue or 1122 * when a threads priority changes that is already in the mutex 1123 * waiting queue. 1124 */ 1125static void 1126mutex_priority_adjust(pthread_mutex_t mutex) 1127{ 1128 pthread_t pthread_next, pthread = mutex->m_owner; 1129 int temp_prio; 1130 pthread_mutex_t m = mutex; 1131 1132 /* 1133 * Calculate the mutex priority as the maximum of the highest 1134 * active priority of any waiting threads and the owning threads 1135 * active priority(*). 1136 * 1137 * (*) Because the owning threads current active priority may 1138 * reflect priority inherited from this mutex (and the mutex 1139 * priority may have changed) we must recalculate the active 1140 * priority based on the threads saved inherited priority 1141 * and its base priority. 1142 */ 1143 pthread_next = TAILQ_FIRST(&m->m_queue); /* should never be NULL */ 1144 temp_prio = MAX(pthread_next->active_priority, 1145 MAX(m->m_saved_prio, pthread->base_priority)); 1146 1147 /* See if this mutex really needs adjusting: */ 1148 if (temp_prio == m->m_prio) 1149 /* No need to propagate the priority: */ 1150 return; 1151 1152 /* Set new priority of the mutex: */ 1153 m->m_prio = temp_prio; 1154 1155 while (m != NULL) { 1156 /* 1157 * Save the threads priority before rescanning the 1158 * owned mutexes: 1159 */ 1160 temp_prio = pthread->active_priority; 1161 1162 /* 1163 * Fix the priorities for all the mutexes this thread has 1164 * locked since taking this mutex. This also has a 1165 * potential side-effect of changing the threads priority. 
1166 */ 1167 mutex_rescan_owned(pthread, m); 1168 1169 /* 1170 * If the thread is currently waiting on a mutex, check 1171 * to see if the threads new priority has affected the 1172 * priority of the mutex. 1173 */ 1174 if ((temp_prio != pthread->active_priority) && 1175 (pthread->state == PS_MUTEX_WAIT) && 1176 (pthread->data.mutex->m_protocol == PTHREAD_PRIO_INHERIT)) { 1177 /* Grab the mutex this thread is waiting on: */ 1178 m = pthread->data.mutex; 1179 1180 /* 1181 * The priority for this thread has changed. Remove 1182 * and reinsert this thread into the list of waiting 1183 * threads to preserve decreasing priority order. 1184 */ 1185 mutex_queue_remove(m, pthread); 1186 mutex_queue_enq(m, pthread); 1187 1188 /* Grab the waiting thread with highest priority: */ 1189 pthread_next = TAILQ_FIRST(&m->m_queue); 1190 1191 /* 1192 * Calculate the mutex priority as the maximum of the 1193 * highest active priority of any waiting threads and 1194 * the owning threads active priority. 1195 */ 1196 temp_prio = MAX(pthread_next->active_priority, 1197 MAX(m->m_saved_prio, m->m_owner->base_priority)); 1198 1199 if (temp_prio != m->m_prio) { 1200 /* 1201 * The priority needs to be propagated to the 1202 * mutex this thread is waiting on and up to 1203 * the owner of that mutex. 1204 */ 1205 m->m_prio = temp_prio; 1206 pthread = m->m_owner; 1207 } 1208 else 1209 /* We're done: */ 1210 m = NULL; 1211 1212 } 1213 else 1214 /* We're done: */ 1215 m = NULL; 1216 } 1217} 1218 1219static void 1220mutex_rescan_owned(pthread_t pthread, pthread_mutex_t mutex) 1221{ 1222 int active_prio, inherited_prio; 1223 pthread_mutex_t m; 1224 pthread_t pthread_next; 1225 1226 /* 1227 * Start walking the mutexes the thread has taken since 1228 * taking this mutex. 1229 */ 1230 if (mutex == NULL) { 1231 /* 1232 * A null mutex means start at the beginning of the owned 1233 * mutex list. 1234 */ 1235 m = TAILQ_FIRST(&pthread->mutexq); 1236 1237 /* There is no inherited priority yet. 
 */
		inherited_prio = 0;
	}
	else {
		/*
		 * The caller wants to start after a specific mutex.  It
		 * is assumed that this mutex is a priority inheritence
		 * mutex and that its priority has been correctly
		 * calculated.
		 */
		m = TAILQ_NEXT(mutex, m_qe);

		/* Start inheriting priority from the specified mutex. */
		inherited_prio = mutex->m_prio;
	}
	active_prio = MAX(inherited_prio, pthread->base_priority);

	while (m != NULL) {
		/*
		 * We only want to deal with priority inheritence
		 * mutexes.  This might be optimized by only placing
		 * priority inheritence mutexes into the owned mutex
		 * list, but it may prove to be useful having all
		 * owned mutexes in this list.  Consider a thread
		 * exiting while holding mutexes...
		 */
		if (m->m_protocol == PTHREAD_PRIO_INHERIT) {
			/*
			 * Fix the owners saved (inherited) priority to
			 * reflect the priority of the previous mutex.
			 */
			m->m_saved_prio = inherited_prio;

			if ((pthread_next = TAILQ_FIRST(&m->m_queue)) != NULL)
				/* Recalculate the priority of the mutex: */
				m->m_prio = MAX(active_prio,
				    pthread_next->active_priority);
			else
				/* No waiters; the mutex carries only the
				   priority inherited so far. */
				m->m_prio = active_prio;

			/* Recalculate new inherited and active priorities: */
			inherited_prio = m->m_prio;
			active_prio = MAX(m->m_prio, pthread->base_priority);
		}

		/* Advance to the next mutex owned by this thread: */
		m = TAILQ_NEXT(m, m_qe);
	}

	/*
	 * Fix the threads inherited priority and recalculate its
	 * active priority.
	 */
	pthread->inherited_priority = inherited_prio;
	active_prio = MAX(inherited_prio, pthread->base_priority);

	if (active_prio != pthread->active_priority) {
		/*
		 * If this thread is in the priority queue, it must be
		 * removed and reinserted for its new priority.
		 */
		if (pthread->flags & PTHREAD_FLAGS_IN_PRIOQ) {
			/*
			 * Remove the thread from the priority queue
			 * before changing its priority:
			 */
			PTHREAD_PRIOQ_REMOVE(pthread);

			/*
			 * POSIX states that if the priority is being
			 * lowered, the thread must be inserted at the
			 * head of the queue for its priority if it owns
			 * any priority protection or inheritence mutexes.
			 */
			if ((active_prio < pthread->active_priority) &&
			    (pthread->priority_mutex_count > 0)) {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_HEAD(pthread);
			}
			else {
				/* Set the new active priority. */
				pthread->active_priority = active_prio;

				PTHREAD_PRIOQ_INSERT_TAIL(pthread);
			}
		}
		else {
			/* Set the new active priority. */
			pthread->active_priority = active_prio;
		}
	}
}

/*
 * Unlock every mutex owned by the thread that is flagged as private
 * (MUTEX_FLAGS_PRIVATE).  The next pointer is captured before unlocking
 * because unlocking can unlink "m" from the thread's owned-mutex queue.
 *
 * NOTE: pthread_mutex_t is a pointer type in this implementation, so
 * passing &m (address of the local pointer) matches the
 * pthread_mutex_unlock(pthread_mutex_t *) interface.
 */
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			pthread_mutex_unlock(&m);
	}
}

/*
 * Dequeue a waiting thread from the head of a mutex queue in descending
 * priority order.
 *
 * Threads whose "interrupted" flag is set (cancelled while waiting) are
 * unlinked from the queue but skipped, not returned.  Returns NULL when
 * the queue is exhausted.
 */
static inline pthread_t
mutex_queue_deq(pthread_mutex_t mutex)
{
	pthread_t pthread;

	while ((pthread = TAILQ_FIRST(&mutex->m_queue)) != NULL) {
		TAILQ_REMOVE(&mutex->m_queue, pthread, qe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ;

		/*
		 * Only exit the loop if the thread hasn't been
		 * cancelled.
		 */
		if (pthread->interrupted == 0)
			break;
	}

	return(pthread);
}

/*
 * Remove a waiting thread from a mutex queue in descending priority order.
1370 */ 1371static inline void 1372mutex_queue_remove(pthread_mutex_t mutex, pthread_t pthread) 1373{ 1374 if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) { 1375 TAILQ_REMOVE(&mutex->m_queue, pthread, qe); 1376 pthread->flags &= ~PTHREAD_FLAGS_IN_MUTEXQ; 1377 } 1378} 1379 1380/* 1381 * Enqueue a waiting thread to a queue in descending priority order. 1382 */ 1383static inline void 1384mutex_queue_enq(pthread_mutex_t mutex, pthread_t pthread) 1385{ 1386 pthread_t tid = TAILQ_LAST(&mutex->m_queue, mutex_head); 1387 1388 /* 1389 * For the common case of all threads having equal priority, 1390 * we perform a quick check against the priority of the thread 1391 * at the tail of the queue. 1392 */ 1393 if ((tid == NULL) || (pthread->active_priority <= tid->active_priority)) 1394 TAILQ_INSERT_TAIL(&mutex->m_queue, pthread, qe); 1395 else { 1396 tid = TAILQ_FIRST(&mutex->m_queue); 1397 while (pthread->active_priority <= tid->active_priority) 1398 tid = TAILQ_NEXT(tid, qe); 1399 TAILQ_INSERT_BEFORE(tid, pthread, qe); 1400 } 1401 pthread->flags |= PTHREAD_FLAGS_IN_MUTEXQ; 1402} 1403 1404#endif 1405