/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
--- 27 unchanged lines hidden ---
 * so should not be relied upon in combination with sx locks.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/kern_sx.c 327413 2017-12-31 05:06:35Z mjg $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
--- 30 unchanged lines hidden ---
/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define	GIANT_DECLARE							\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant)					\

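/*
 * Illustrative sketch (not part of the build) of the pattern the hard-case
 * functions below follow: GIANT_SAVE() releases Giant before spinning or
 * sleeping and GIANT_RESTORE() reacquires it on the way out, using the
 * bookkeeping set up by GIANT_DECLARE:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE(extra_work);
 *	sleepq_wait(&sx->lock_object, 0);
 *	...
 *	GIANT_RESTORE();
 */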
#define	GIANT_SAVE(work) do {						\
	if (mtx_owned(&Giant)) {					\
		work++;							\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_giantcnt++;					\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

--- 150 unchanged lines hidden ---

	KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
	KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
	sx->sx_lock = SX_LOCK_DESTROYED;
	lock_destroy(&sx->lock_object);
}

int
sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("sx_try_slock() by idle thread %p on sx %s @ %s:%d",
--- 15 unchanged lines hidden ---
		}
	}

	LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
	return (0);
}

int
sx_try_slock_(struct sx *sx, const char *file, int line)
{

	return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG));
}

int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
	uintptr_t tid, x;
	int error = 0;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	x = SX_LOCK_UNLOCKED;
	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
		error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    0, 0, file, line, LOCKSTAT_WRITER);
	if (!error) {
		LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
		    file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
		TD_LOCKS_INC(curthread);
	}

	return (error);
}

int
sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	struct thread *td;
	uintptr_t tid, x;
	int rval;
	bool recursed;

	td = curthread;
	tid = (uintptr_t)td;
--- 31 unchanged lines hidden ---
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire,
			    sx, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}

	return (rval);
}

int
sx_try_xlock_(struct sx *sx, const char *file, int line)
{

	return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG));
}

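/*
 * Illustrative sketch (not part of this file) of how a caller might use the
 * public sx_try_xlock() macro from sys/sx.h, which resolves to the functions
 * above.  The "example_softc" structure and its members are hypothetical
 * names for the example only.  The try variant returns 0 instead of sleeping
 * when the lock cannot be acquired immediately:
 *
 *	struct example_softc {
 *		struct sx	data_lock;
 *		int		dirty;
 *	};
 *
 *	static int
 *	example_flush_if_idle(struct example_softc *sc)
 *	{
 *
 *		if (!sx_try_xlock(&sc->data_lock))
 *			return (EBUSY);
 *		sc->dirty = 0;
 *		sx_xunlock(&sc->data_lock);
 *		return (0);
 *	}
 */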
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_XLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
--- 8 unchanged lines hidden ---
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeds, 0 otherwise.
 */
int
sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
--- 12 unchanged lines hidden ---
	if (success) {
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(sx__upgrade, sx);
	}
	return (success);
}

int
sx_try_upgrade_(struct sx *sx, const char *file, int line)
{

	return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
}

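/*
 * Illustrative sketch (not part of this file): a common use of
 * sx_try_upgrade() is a lookup performed under the shared lock that
 * occasionally needs to modify the structure.  The registry names below are
 * hypothetical.  If the upgrade fails, the shared lock is still held, so the
 * caller drops it, takes the exclusive lock, and re-validates:
 *
 *	sx_slock(&registry_lock);
 *	obj = registry_lookup(key);
 *	if (obj == NULL) {
 *		if (!sx_try_upgrade(&registry_lock)) {
 *			sx_sunlock(&registry_lock);
 *			sx_xlock(&registry_lock);
 *			obj = registry_lookup(key);
 *		}
 *		if (obj == NULL)
 *			obj = registry_insert(key);
 *		sx_xunlock(&registry_lock);
 *	} else
 *		sx_sunlock(&registry_lock);
 */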
/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	int wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
--- 45 unchanged lines hidden ---
	if (wakeup_swapper)
		kick_proc0();

out:
	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(sx__downgrade, sx);
}

void
sx_downgrade_(struct sx *sx, const char *file, int line)
{

	sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
}

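/*
 * Illustrative sketch (not part of this file): sx_downgrade() lets a writer
 * finish publishing its changes and then keep reading without a window in
 * which the lock is released.  The "cache_lock" and helper names are
 * hypothetical:
 *
 *	sx_init(&cache_lock, "example cache");
 *	...
 *	sx_xlock(&cache_lock);
 *	cache_populate();
 *	sx_downgrade(&cache_lock);
 *	cache_read_entries();
 *	sx_sunlock(&cache_lock);
 *	...
 *	sx_destroy(&cache_lock);
 */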
/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
	uintptr_t tid;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state;
#endif
	int extra_work = 0;

	tid = (uintptr_t)curthread;
	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
--- 12 unchanged lines hidden ---
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
	    &waittime);

#ifdef LOCK_PROFILING
	extra_work = 1;
	state = x;
#elif defined(KDTRACE_HOOKS)
	extra_work = lockstat_enabled;
	if (__predict_false(extra_work)) {
		all_time -= lockstat_nsecs(&sx->lock_object);
		state = x;
	}
#endif

	for (;;) {
		if (x == SX_LOCK_UNLOCKED) {
			if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			if ((x & SX_LOCK_SHARED) == 0) {
				owner = lv_sx_owner(x);
				if (TD_IS_RUNNING(owner)) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR3(KTR_LOCK,
						    "%s: spinning on %p held by %p",
						    __func__, sx, owner);
					KTR_STATE1(KTR_SCHED, "thread",
					    sched_tdname(curthread), "spinning",
					    "lockname:\"%s\"",
					    sx->lock_object.lo_name);
					GIANT_SAVE(extra_work);
					do {
						lock_delay(&lda);
						x = SX_READ_VALUE(sx);
						owner = lv_sx_owner(x);
					} while (owner != NULL &&
					    TD_IS_RUNNING(owner));
					KTR_STATE0(KTR_SCHED, "thread",
					    sched_tdname(curthread), "running");
					continue;
				}
			} else if (SX_SHARERS(x) && spintries < asx_retries) {
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE(extra_work);
				spintries++;
				for (i = 0; i < asx_loops; i++) {
					if (LOCK_LOG_TEST(&sx->lock_object, 0))
						CTR4(KTR_LOCK,
						    "%s: shared spinning on %p with %u and %u",
						    __func__, sx, spintries, i);
					cpu_spinwait();
					x = SX_READ_VALUE(sx);
					if ((x & SX_LOCK_SHARED) == 0 ||
					    SX_SHARERS(x) == 0)
						break;
				}
#ifdef KDTRACE_HOOKS
				lda.spin_cnt += i;
#endif
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				if (i != asx_loops)
					continue;
			}
		}
#endif

		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:

		/*
		 * If the lock was released while spinning on the
		 * sleep queue chain lock, try again.
		 */
		if (x == SX_LOCK_UNLOCKED) {
			sleepq_release(&sx->lock_object);
			continue;
--- 23 unchanged lines hidden ---
		 * woken up and acquired the lock yet, sx_lock will be
		 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
		 * If we see that value, try to acquire it once.  Note
		 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
		 * as there are other exclusive waiters still.  If we
		 * fail, restart the loop.
		 */
		if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
			    tid | SX_LOCK_EXCLUSIVE_WAITERS))
				goto retry_sleepq;
			sleepq_release(&sx->lock_object);
			CTR2(KTR_LOCK, "%s: %p claimed by new writer",
			    __func__, sx);
			break;
		}

		/*
		 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS.  If we fail,
		 * then loop back and retry.
		 */
		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
			if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
			    x | SX_LOCK_EXCLUSIVE_WAITERS)) {
				goto retry_sleepq;
			}
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the exclusive
		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE(extra_work);
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
--- 7 unchanged lines hidden ---
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!extra_work))
		return (error);
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
--- 9 unchanged lines hidden ---

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t tid, setx;
	int queue, wakeup_swapper;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;

	if (__predict_false(x == tid))
		x = SX_READ_VALUE(sx);

	MPASS(!(x & SX_LOCK_SHARED));

	if (__predict_false(x & SX_LOCK_RECURSED)) {
		/* The lock is recursed, unrecurse one level. */
		if ((--sx->sx_recurse) == 0)
			atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
		return;
	}

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
	if (x == tid &&
	    atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
		return;

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));

	/*
	 * The wake-up algorithm here is quite simple and probably not
	 * ideal.  It gives precedence to shared waiters if they are
	 * present.  For this condition, we have to preserve the
	 * state of the exclusive waiters flag.
	 * If interruptible sleeps left the shared queue empty, avoid
	 * starving the threads sleeping on the exclusive queue by giving
	 * them precedence and clearing the shared waiters bit anyway.
	 */
	setx = SX_LOCK_UNLOCKED;
	queue = SQ_EXCLUSIVE_QUEUE;
	if ((x & SX_LOCK_SHARED_WAITERS) != 0 &&
	    sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
		queue = SQ_SHARED_QUEUE;
		setx |= (x & SX_LOCK_EXCLUSIVE_WAITERS);
	}
	atomic_store_rel_ptr(&sx->sx_lock, setx);

	/* Wake up all the waiters for the specific queue. */
	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
		    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");

	wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
	    queue);
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
}

static bool __always_inline
__sx_slock_try(struct sx *sx, uintptr_t *xp LOCK_FILE_LINE_ARG_DEF)
{

	/*
	 * If no other thread has an exclusive lock then try to bump up
	 * the count of sharers.  Since we have to preserve the state
	 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
	 * shared lock loop back and retry.
	 */
--- 7 unchanged lines hidden ---
				    (void *)(*xp + SX_ONE_SHARER));
			return (true);
		}
	}
	return (false);
}

static int __noinline
_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	uintptr_t state;
#endif
	int extra_work = 0;

	if (SCHEDULER_STOPPED())
		return (0);

#if defined(ADAPTIVE_SX)
	lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif

#ifdef HWPMC_HOOKS
	PMC_SOFT_CALL( , , lock, failed);
#endif
	lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
	    &waittime);

#ifdef LOCK_PROFILING
	extra_work = 1;
	state = x;
#elif defined(KDTRACE_HOOKS)
	extra_work = lockstat_enabled;
	if (__predict_false(extra_work)) {
		all_time -= lockstat_nsecs(&sx->lock_object);
		state = x;
	}
#endif

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {
		if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif

#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
			owner = lv_sx_owner(x);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", sx->lock_object.lo_name);
				GIANT_SAVE(extra_work);
				do {
					lock_delay(&lda);
					x = SX_READ_VALUE(sx);
					owner = lv_sx_owner(x);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		}
#endif

		/*
		 * Some other thread already has an exclusive lock, so
		 * start the process of blocking.
		 */
		sleepq_lock(&sx->lock_object);
		x = SX_READ_VALUE(sx);
retry_sleepq:
		/*
		 * The lock could have been released while we spun.
		 * In this case, loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			sleepq_release(&sx->lock_object);
			continue;
		}
--- 16 unchanged lines hidden ---
#endif

		/*
		 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
		 * fail to set it, drop the sleep queue lock and loop
		 * back.
		 */
		if (!(x & SX_LOCK_SHARED_WAITERS)) {
			if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
			    x | SX_LOCK_SHARED_WAITERS))
				goto retry_sleepq;
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
				    __func__, sx);
		}

		/*
		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
		GIANT_SAVE(extra_work);
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object, 0);
		else
			error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
--- 7 unchanged lines hidden ---
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
		x = SX_READ_VALUE(sx);
	}
#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
	if (__predict_true(!extra_work))
		return (error);
#endif
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&sx->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
		    LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
		    (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
--- 4 unchanged lines hidden ---
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
		    contested, waittime, file, line, LOCKSTAT_READER);
	}
	GIANT_RESTORE();
	return (error);
}

int
_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;
	int error;

	KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
	    !TD_IS_IDLETHREAD(curthread),
	    ("sx_slock() by idle thread %p on sx %s @ %s:%d",
	    curthread, sx->lock_object.lo_name, file, line));
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

	error = 0;
	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__acquire) ||
	    !__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG)))
		error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
	if (error == 0) {
		LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
		WITNESS_LOCK(&sx->lock_object, 0, file, line);
		TD_LOCKS_INC(curthread);
	}
	return (error);
}

int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{

	return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
}

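/*
 * Illustrative sketch (not part of this file): the SX_INTERRUPTIBLE path
 * above is normally reached through the sx_slock_sig()/sx_xlock_sig() macros
 * from sys/sx.h, which return an errno value if the sleep is interrupted by
 * a signal instead of blocking uninterruptibly.  The surrounding structure
 * and buffer names are hypothetical:
 *
 *	static int
 *	example_read(struct example_softc *sc, struct uio *uio)
 *	{
 *		int error;
 *
 *		error = sx_slock_sig(&sc->data_lock);
 *		if (error != 0)
 *			return (error);
 *		error = uiomove(sc->buf, sc->buflen, uio);
 *		sx_sunlock(&sc->data_lock);
 *		return (error);
 *	}
 */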
static bool __always_inline
_sx_sunlock_try(struct sx *sx, uintptr_t *xp)
{

	for (;;) {
		/*
		 * We should never have waiting sharers while at least one
		 * thread holds a shared lock.
--- 35 unchanged lines hidden ---
			continue;
		}
		break;
	}
	return (false);
}

static void __noinline
_sx_sunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
	int wakeup_swapper;
	uintptr_t setx;

	if (SCHEDULER_STOPPED())
		return;

	if (_sx_sunlock_try(sx, &x))
		goto out_lockstat;

	/*
	 * At this point, there should just be one sharer with
	 * exclusive waiters.
	 */
	MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

	sleepq_lock(&sx->lock_object);
	x = SX_READ_VALUE(sx);
	for (;;) {
		MPASS(x & SX_LOCK_EXCLUSIVE_WAITERS);
		MPASS(!(x & SX_LOCK_SHARED_WAITERS));
		/*
		 * The wake-up semantics here are quite simple: just wake
		 * up all the exclusive waiters.  Note that the state of
		 * the lock could have changed, so if the cmpset fails,
		 * loop back and retry.
		 */
		setx = x - SX_ONE_SHARER;
		setx &= ~SX_LOCK_EXCLUSIVE_WAITERS;
		if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
			continue;
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p waking up all threads on "
			    "exclusive queue", __func__, sx);
		wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
		    0, SQ_EXCLUSIVE_QUEUE);
		break;
	}
	sleepq_release(&sx->lock_object);
	if (wakeup_swapper)
		kick_proc0();
out_lockstat:
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
}

void
_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
	uintptr_t x;

	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

	x = SX_READ_VALUE(sx);
	if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__release) ||
	    !_sx_sunlock_try(sx, &x)))
		_sx_sunlock_hard(sx, x LOCK_FILE_LINE_ARG);

	TD_LOCKS_DEC(curthread);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	_sx_sunlock_int(sx LOCK_FILE_LINE_ARG);
}

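/*
 * Illustrative sketch (not part of this file): sx_assert() below is what
 * lets internal helpers document and enforce their locking contract.  A
 * hypothetical helper that must be entered with a hypothetical
 * "example_lock" held exclusively would look like:
 *
 *	static void
 *	example_update_locked(struct example_softc *sc)
 *	{
 *
 *		sx_assert(&sc->example_lock, SA_XLOCKED);
 *		sc->generation++;
 *	}
 *
 * SA_SLOCKED, SA_LOCKED and SA_UNLOCKED provide the corresponding checks
 * for shared, either, or no ownership.
 */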
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
--- 157 unchanged lines hidden ---