kern_sx.c, FreeBSD stable/11: r327409 -> r327413 (mjg, 2017-12-31). Unified view: lines marked "-" exist only in r327409, lines marked "+" only in r327413, unmarked lines are common to both; "--- N unchanged lines hidden ---" markers are kept from the original listing.
/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
--- 27 unchanged lines hidden ---
 * so should not be relied upon in combination with sx locks.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_sx.h"

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/11/sys/kern/kern_sx.c 327409 2017-12-31 03:35:34Z mjg $");
+__FBSDID("$FreeBSD: stable/11/sys/kern/kern_sx.c 327413 2017-12-31 05:06:35Z mjg $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
--- 30 unchanged lines hidden ---
/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define GIANT_DECLARE \
    int _giantcnt = 0; \
    WITNESS_SAVE_DECL(Giant) \

-#define GIANT_SAVE() do { \
+#define GIANT_SAVE(work) do { \
    if (mtx_owned(&Giant)) { \
+       work++; \
        WITNESS_SAVE(&Giant.lock_object, Giant); \
        while (mtx_owned(&Giant)) { \
            _giantcnt++; \
            mtx_unlock(&Giant); \
        } \
    } \
} while (0)

--- 150 unchanged lines hidden ---

    KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
    KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
    sx->sx_lock = SX_LOCK_DESTROYED;
    lock_destroy(&sx->lock_object);
}

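Besides the $FreeBSD$ tag, the only change in the hunk above is GIANT_SAVE growing a `work` argument: the macro now bumps a caller-named counter whenever it actually drops Giant, and later hunks pass their `extra_work` flag here so the restore/accounting epilogue cannot be skipped when Giant bookkeeping is pending. A minimal userspace sketch of the pattern (all names here are illustrative, not the kernel's):

#include <stdio.h>

static int giant_depth = 2;             /* pretend Giant is held twice */
static int giant_owned(void) { return (giant_depth > 0); }
static void giant_drop(void) { giant_depth--; }

/* do { } while (0) keeps the macro usable as a single statement. */
#define GIANT_SAVE(work) do { \
    if (giant_owned()) { \
        (work)++;               /* record that cleanup is owed */ \
        while (giant_owned()) \
            giant_drop(); \
    } \
} while (0)

int
main(void)
{
    int extra_work = 0;

    GIANT_SAVE(extra_work);
    /* extra_work != 0 now implies the epilogue must not be skipped. */
    printf("extra_work = %d\n", extra_work);
    return (0);
}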
259sx_try_slock_(struct sx *sx, const char *file, int line) | 260sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) |
260{ 261 uintptr_t x; 262 263 if (SCHEDULER_STOPPED()) 264 return (1); 265 266 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 267 ("sx_try_slock() by idle thread %p on sx %s @ %s:%d", --- 15 unchanged lines hidden (view full) --- 283 } 284 } 285 286 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line); 287 return (0); 288} 289 290int | 261{ 262 uintptr_t x; 263 264 if (SCHEDULER_STOPPED()) 265 return (1); 266 267 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 268 ("sx_try_slock() by idle thread %p on sx %s @ %s:%d", --- 15 unchanged lines hidden (view full) --- 284 } 285 } 286 287 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line); 288 return (0); 289} 290 291int |
292sx_try_slock_(struct sx *sx, const char *file, int line) 293{ 294 295 return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG)); 296} 297 298int |
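This is the first instance of the revision's central refactoring: each public locking function is split into an `_int` worker taking LOCK_FILE_LINE_ARG_DEF, plus a thin underscore-suffixed wrapper that preserves the existing KPI. The macros presumably come from sys/lock.h and expand along these lines (a simplified sketch, not the verbatim header):

/*
 * With lock debugging enabled, file/line are real parameters and get
 * threaded through to the _int workers; otherwise both macros expand
 * to nothing and the workers carry no debug arguments at all.  Note
 * the leading comma is part of the macro, so call sites read
 * f(sx LOCK_FILE_LINE_ARG) with no comma of their own.
 */
#if LOCK_DEBUG > 0
#define LOCK_FILE_LINE_ARG_DEF  , const char *file, int line
#define LOCK_FILE_LINE_ARG      , file, line
#else
#define LOCK_FILE_LINE_ARG_DEF
#define LOCK_FILE_LINE_ARG
#endif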
|
int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
    uintptr_t tid, x;
    int error = 0;

    KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
        !TD_IS_IDLETHREAD(curthread),
        ("sx_xlock() by idle thread %p on sx %s @ %s:%d",
        curthread, sx->lock_object.lo_name, file, line));
    KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
        ("sx_xlock() of destroyed sx @ %s:%d", file, line));
    WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
        line, NULL);
    tid = (uintptr_t)curthread;
    x = SX_LOCK_UNLOCKED;
    if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
-       error = _sx_xlock_hard(sx, x, tid, opts, file, line);
+       error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
    else
        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
            0, 0, file, line, LOCKSTAT_WRITER);
    if (!error) {
        LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
            file, line);
        WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
        TD_LOCKS_INC(curthread);
    }

    return (error);
}

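The fast path above already uses atomic_fcmpset_acq_ptr, and the hard-path call now forwards the `x` it observed while _sx_xlock_hard derives `tid` itself instead of taking it as a parameter. The "f" variant differs from plain cmpset in that a failed operation writes the value it found back through the pointer, so retry loops need no separate reload. C11's compare-exchange has the same contract; a userspace sketch (names illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool
xlock_fast_path(_Atomic(uintptr_t) *lockp, uintptr_t tid, uintptr_t *xp)
{
    *xp = 0;    /* expected: unlocked (0 stands in for SX_LOCK_UNLOCKED) */
    if (atomic_compare_exchange_strong_explicit(lockp, xp, tid,
        memory_order_acquire, memory_order_relaxed))
        return (true);      /* uncontended acquire */
    /* *xp now holds the observed lock word; hand it to the hard path. */
    return (false);
}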
322sx_try_xlock_(struct sx *sx, const char *file, int line) | 330sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) |
323{ 324 struct thread *td; 325 uintptr_t tid, x; 326 int rval; 327 bool recursed; 328 329 td = curthread; 330 tid = (uintptr_t)td; --- 31 unchanged lines hidden (view full) --- 362 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, 363 sx, 0, 0, file, line, LOCKSTAT_WRITER); 364 TD_LOCKS_INC(curthread); 365 } 366 367 return (rval); 368} 369 | 331{ 332 struct thread *td; 333 uintptr_t tid, x; 334 int rval; 335 bool recursed; 336 337 td = curthread; 338 tid = (uintptr_t)td; --- 31 unchanged lines hidden (view full) --- 370 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, 371 sx, 0, 0, file, line, LOCKSTAT_WRITER); 372 TD_LOCKS_INC(curthread); 373 } 374 375 return (rval); 376} 377 |
378int 379sx_try_xlock_(struct sx *sx, const char *file, int line) 380{ 381 382 return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG)); 383} 384 |
|
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

    KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
        ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
    _sx_assert(sx, SA_XLOCKED, file, line);
    WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
--- 8 unchanged lines hidden ---
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if if the upgrade succeed, 0 otherwise.
 */
int
-sx_try_upgrade_(struct sx *sx, const char *file, int line)
+sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
    uintptr_t x;
    int success;

    if (SCHEDULER_STOPPED())
        return (1);

    KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
--- 12 unchanged lines hidden ---
    if (success) {
        WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
            file, line);
        LOCKSTAT_RECORD0(sx__upgrade, sx);
    }
    return (success);
}

+int
+sx_try_upgrade_(struct sx *sx, const char *file, int line)
+{
+
+   return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG));
+}
+
|
/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
-sx_downgrade_(struct sx *sx, const char *file, int line)
+sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
{
    uintptr_t x;
    int wakeup_swapper;

    if (SCHEDULER_STOPPED())
        return;

    KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
--- 45 unchanged lines hidden ---
    if (wakeup_swapper)
        kick_proc0();

out:
    LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
    LOCKSTAT_RECORD0(sx__downgrade, sx);
}

+void
+sx_downgrade_(struct sx *sx, const char *file, int line)
+{
+
+   sx_downgrade_int(sx LOCK_FILE_LINE_ARG);
+}
+
|
/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
int
-_sx_xlock_hard(struct sx *sx, uintptr_t x, uintptr_t tid, int opts,
-    const char *file, int line)
+_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
{
    GIANT_DECLARE;
+   uintptr_t tid;
#ifdef ADAPTIVE_SX
    volatile struct thread *owner;
    u_int i, spintries = 0;
#endif
#ifdef LOCK_PROFILING
    uint64_t waittime = 0;
    int contested = 0;
#endif
    int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
    struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
-   uintptr_t state;
    u_int sleep_cnt = 0;
    int64_t sleep_time = 0;
    int64_t all_time = 0;
#endif
+#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
+   uintptr_t state;
+#endif
+   int extra_work = 0;

+   tid = (uintptr_t)curthread;
|
    if (SCHEDULER_STOPPED())
        return (0);

#if defined(ADAPTIVE_SX)
    lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
    lock_delay_arg_init(&lda, NULL);
#endif
--- 12 unchanged lines hidden ---
        CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
        return (0);
    }

    if (LOCK_LOG_TEST(&sx->lock_object, 0))
        CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
            sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

-#ifdef KDTRACE_HOOKS
-   all_time -= lockstat_nsecs(&sx->lock_object);
+#ifdef HWPMC_HOOKS
+   PMC_SOFT_CALL( , , lock, failed);
+#endif
+   lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
+       &waittime);
+
+#ifdef LOCK_PROFILING
+   extra_work = 1;
    state = x;
+#elif defined(KDTRACE_HOOKS)
+   extra_work = lockstat_enabled;
+   if (__predict_false(extra_work)) {
+       all_time -= lockstat_nsecs(&sx->lock_object);
+       state = x;
+   }
#endif
+
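The hoisted block above is the heart of the change: the lock_profile/PMC failure notification moves out of the retry loop (it used to fire once per loop iteration), and the lockstat timestamping is now gated on `extra_work`. With LOCK_PROFILING compiled in it is unconditionally 1; in a KDTRACE_HOOKS-only kernel it mirrors the global `lockstat_enabled`, so an idle probe costs no clock reads at all. A userspace sketch of the gating (the flag name is borrowed from the kernel; everything else is illustrative):

#include <stdint.h>
#include <time.h>

static int lockstat_enabled;            /* flipped when a probe is armed */

static int64_t
nsecs(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ((int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec);
}

int
lock_hard_path(void)
{
    int64_t all_time = 0;
    int error = 0, extra_work;

    extra_work = lockstat_enabled;
    if (__builtin_expect(extra_work, 0))
        all_time -= nsecs();            /* start the clock only if needed */

    /* ... the contended acquire loop would run here ... */

    if (__builtin_expect(!extra_work, 1))
        return (error);                 /* common case: no accounting */
    all_time += nsecs();
    /* ... feed all_time to the probe machinery ... */
    return (error);
}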
|
    for (;;) {
        if (x == SX_LOCK_UNLOCKED) {
            if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
                break;
            continue;
        }
#ifdef KDTRACE_HOOKS
        lda.spin_cnt++;
#endif
-#ifdef HWPMC_HOOKS
-       PMC_SOFT_CALL( , , lock, failed);
-#endif
-       lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
-           &waittime);
#ifdef ADAPTIVE_SX
        /*
         * If the lock is write locked and the owner is
         * running on another CPU, spin until the owner stops
         * running or the state of the lock changes.
         */
        if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
            if ((x & SX_LOCK_SHARED) == 0) {
                owner = lv_sx_owner(x);
                if (TD_IS_RUNNING(owner)) {
                    if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR3(KTR_LOCK,
                            "%s: spinning on %p held by %p",
                            __func__, sx, owner);
                    KTR_STATE1(KTR_SCHED, "thread",
                        sched_tdname(curthread), "spinning",
                        "lockname:\"%s\"",
                        sx->lock_object.lo_name);
-                   GIANT_SAVE();
+                   GIANT_SAVE(extra_work);
                    do {
                        lock_delay(&lda);
                        x = SX_READ_VALUE(sx);
                        owner = lv_sx_owner(x);
                    } while (owner != NULL &&
                        TD_IS_RUNNING(owner));
                    KTR_STATE0(KTR_SCHED, "thread",
                        sched_tdname(curthread), "running");
                    continue;
                }
            } else if (SX_SHARERS(x) && spintries < asx_retries) {
                KTR_STATE1(KTR_SCHED, "thread",
                    sched_tdname(curthread), "spinning",
                    "lockname:\"%s\"", sx->lock_object.lo_name);
-               GIANT_SAVE();
+               GIANT_SAVE(extra_work);
                spintries++;
                for (i = 0; i < asx_loops; i++) {
                    if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR4(KTR_LOCK,
                            "%s: shared spinning on %p with %u and %u",
                            __func__, sx, spintries, i);
-                   x = sx->sx_lock;
+                   cpu_spinwait();
+                   x = SX_READ_VALUE(sx);
                    if ((x & SX_LOCK_SHARED) == 0 ||
                        SX_SHARERS(x) == 0)
                        break;
-                   cpu_spinwait();
+               }
#ifdef KDTRACE_HOOKS
-                   lda.spin_cnt++;
+               lda.spin_cnt += i;
#endif
-               }
                KTR_STATE0(KTR_SCHED, "thread",
                    sched_tdname(curthread), "running");
-               x = SX_READ_VALUE(sx);
                if (i != asx_loops)
                    continue;
            }
        }
#endif

        sleepq_lock(&sx->lock_object);
        x = SX_READ_VALUE(sx);
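Two fixes hide in the reshuffled reader-spin loop: the plain `x = sx->sx_lock` load becomes an explicit SX_READ_VALUE() placed after cpu_spinwait(), so the lock word is re-read after the pause rather than before it, and the per-iteration `lda.spin_cnt++` collapses into a single `+= i` after the loop. The shape of the new loop, as a userspace sketch with an invented flag layout:

#include <stdatomic.h>
#include <stdint.h>

#define LOCK_SHARED 0x01UL              /* invented layout for the sketch */
#define SHARERS(x)  ((x) >> 4)
#define SPIN_LOOPS  2048                /* stands in for asx_loops */

static uintptr_t
spin_on_readers(_Atomic(uintptr_t) *lockp, uint64_t *spin_cnt)
{
    uintptr_t x = 0;
    unsigned int i;

    for (i = 0; i < SPIN_LOOPS; i++) {
        __builtin_ia32_pause();         /* cpu_spinwait() on x86 (gcc/clang
                                           builtin; x86-only in this sketch) */
        x = atomic_load_explicit(lockp, memory_order_relaxed);
        if ((x & LOCK_SHARED) == 0 || SHARERS(x) == 0)
            break;                      /* readers drained or writer took it */
    }
    *spin_cnt += i;                     /* one add, not one per iteration */
    return (x);
}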
+retry_sleepq:

        /*
         * If the lock was released while spinning on the
         * sleep queue chain lock, try again.
         */
        if (x == SX_LOCK_UNLOCKED) {
            sleepq_release(&sx->lock_object);
            continue;
--- 23 unchanged lines hidden ---
         * woken up and acquired the lock yet, sx_lock will be
         * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
         * If we see that value, try to acquire it once.  Note
         * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
         * as there are other exclusive waiters still.  If we
         * fail, restart the loop.
         */
        if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
-           if (atomic_cmpset_acq_ptr(&sx->sx_lock,
-               SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
-               tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
-               sleepq_release(&sx->lock_object);
-               CTR2(KTR_LOCK, "%s: %p claimed by new writer",
-                   __func__, sx);
-               break;
-           }
+           if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x,
+               tid | SX_LOCK_EXCLUSIVE_WAITERS))
+               goto retry_sleepq;
            sleepq_release(&sx->lock_object);
-           x = SX_READ_VALUE(sx);
-           continue;
+           CTR2(KTR_LOCK, "%s: %p claimed by new writer",
+               __func__, sx);
+           break;
        }

        /*
         * Try to set the SX_LOCK_EXCLUSIVE_WAITERS.  If we fail,
         * than loop back and retry.
         */
        if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
-           if (!atomic_cmpset_ptr(&sx->sx_lock, x,
+           if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
                x | SX_LOCK_EXCLUSIVE_WAITERS)) {
-               sleepq_release(&sx->lock_object);
-               x = SX_READ_VALUE(sx);
-               continue;
+               goto retry_sleepq;
            }
            if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
                    __func__, sx);
        }

        /*
         * Since we have been unable to acquire the exclusive
         * lock and the exclusive waiters flag is set, we have
         * to sleep.
         */
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
            CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                __func__, sx);

#ifdef KDTRACE_HOOKS
        sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
-       GIANT_SAVE();
+       GIANT_SAVE(extra_work);
        sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
            SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
            SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
        if (!(opts & SX_INTERRUPTIBLE))
            sleepq_wait(&sx->lock_object, 0);
        else
            error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
--- 7 unchanged lines hidden ---
                __func__, sx);
            break;
        }
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
            CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                __func__, sx);
        x = SX_READ_VALUE(sx);
    }
+#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
+   if (__predict_true(!extra_work))
+       return (error);
+#endif
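The new retry_sleepq label exploits fcmpset: when the CAS fails, `x` has already been refreshed and the sleepqueue chain lock is still held, so the code jumps back to re-examine the state instead of releasing the chain lock, re-reading, and re-locking as the old `continue` path did. The skeleton of the idea (chain-lock handling elided, bit value invented):

#include <stdatomic.h>
#include <stdint.h>

#define EXCL_WAITERS 0x04UL             /* invented bit for the sketch */

/* Called with the sleepqueue chain lock held; *xp is the last-seen word. */
static void
set_excl_waiters(_Atomic(uintptr_t) *lockp, uintptr_t *xp)
{
retry_sleepq:
    if ((*xp & EXCL_WAITERS) != 0)
        return;                         /* bit already set: nothing to do */
    if (!atomic_compare_exchange_weak(lockp, xp, *xp | EXCL_WAITERS))
        goto retry_sleepq;              /* *xp was refreshed by the failure */
}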
|
#ifdef KDTRACE_HOOKS
    all_time += lockstat_nsecs(&sx->lock_object);
    if (sleep_time)
        LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
            LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0,
            (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
    if (lda.spin_cnt > sleep_cnt)
        LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
--- 9 unchanged lines hidden ---

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
-_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
+_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
-   uintptr_t x;
+   uintptr_t tid, setx;
    int queue, wakeup_swapper;

    if (SCHEDULER_STOPPED())
        return;

-   MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
+   tid = (uintptr_t)curthread;

-   x = SX_READ_VALUE(sx);
-   if (x & SX_LOCK_RECURSED) {
+   if (__predict_false(x == tid))
+       x = SX_READ_VALUE(sx);
+
+   MPASS(!(x & SX_LOCK_SHARED));
+
+   if (__predict_false(x & SX_LOCK_RECURSED)) {
        /* The lock is recursed, unrecurse one level. */
        if ((--sx->sx_recurse) == 0)
            atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
            CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
        return;
    }

    LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER);
    if (x == tid &&
        atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
        return;

-   MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
-       SX_LOCK_EXCLUSIVE_WAITERS));
    if (LOCK_LOG_TEST(&sx->lock_object, 0))
        CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

    sleepq_lock(&sx->lock_object);
-   x = SX_LOCK_UNLOCKED;
+   x = SX_READ_VALUE(sx);
+   MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));

    /*
     * The wake up algorithm here is quite simple and probably not
     * ideal.  It gives precedence to shared waiters if they are
     * present.  For this condition, we have to preserve the
     * state of the exclusive waiters flag.
     * If interruptible sleeps left the shared queue empty avoid a
     * starvation for the threads sleeping on the exclusive queue by giving
     * them precedence and cleaning up the shared waiters bit anyway.
     */
-   if ((sx->sx_lock & SX_LOCK_SHARED_WAITERS) != 0 &&
+   setx = SX_LOCK_UNLOCKED;
+   queue = SQ_EXCLUSIVE_QUEUE;
+   if ((x & SX_LOCK_SHARED_WAITERS) != 0 &&
        sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) {
        queue = SQ_SHARED_QUEUE;
-       x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
-   } else
-       queue = SQ_EXCLUSIVE_QUEUE;
+       setx |= (x & SX_LOCK_EXCLUSIVE_WAITERS);
+   }
+   atomic_store_rel_ptr(&sx->sx_lock, setx);

    /* Wake up all the waiters for the specific queue. */
    if (LOCK_LOG_TEST(&sx->lock_object, 0))
        CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
            __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
            "exclusive");
-   atomic_store_rel_ptr(&sx->sx_lock, x);
+
    wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0,
        queue);
    sleepq_release(&sx->lock_object);
    if (wakeup_swapper)
        kick_proc0();
}

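_sx_xunlock_hard changes calling convention (it now receives the lock word rather than the tid), re-reads the word under the sleepqueue lock before asserting that waiters exist, and folds the old two-step "pick queue, then store" sequence into computing `setx` up front and publishing it with a single release store before the broadcast. The queue choice, reduced to a sketch (bit values invented):

#include <stdatomic.h>
#include <stdint.h>

#define UNLOCKED        0x00UL          /* invented layout for the sketch */
#define SHARED_WAITERS  0x02UL
#define EXCL_WAITERS    0x04UL

enum wake_queue { SHARED_QUEUE, EXCLUSIVE_QUEUE };

static enum wake_queue
xunlock_publish(_Atomic(uintptr_t) *lockp, uintptr_t x, int shared_sleepers)
{
    uintptr_t setx = UNLOCKED;
    enum wake_queue queue = EXCLUSIVE_QUEUE;

    /* Prefer shared waiters, but keep the writers' bit so they are not
       forgotten; an empty shared queue falls through to the writers. */
    if ((x & SHARED_WAITERS) != 0 && shared_sleepers != 0) {
        queue = SHARED_QUEUE;
        setx |= x & EXCL_WAITERS;
    }
    atomic_store_explicit(lockp, setx, memory_order_release);
    return (queue);                     /* caller broadcasts this queue */
}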
static bool __always_inline
-__sx_slock_try(struct sx *sx, uintptr_t *xp, const char *file, int line)
+__sx_slock_try(struct sx *sx, uintptr_t *xp LOCK_FILE_LINE_ARG_DEF)
{

    /*
     * If no other thread has an exclusive lock then try to bump up
     * the count of sharers.  Since we have to preserve the state
     * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
     * shared lock loop back and retry.
     */
--- 7 unchanged lines hidden ---
                (void *)(*xp + SX_ONE_SHARER));
            return (true);
        }
    }
    return (false);
}

static int __noinline
-_sx_slock_hard(struct sx *sx, int opts, const char *file, int line, uintptr_t x)
+_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
    GIANT_DECLARE;
#ifdef ADAPTIVE_SX
    volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING
    uint64_t waittime = 0;
    int contested = 0;
#endif
    int error = 0;
#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS)
    struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
-   uintptr_t state;
    u_int sleep_cnt = 0;
    int64_t sleep_time = 0;
    int64_t all_time = 0;
#endif
+#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
+   uintptr_t state;
+#endif
+   int extra_work = 0;
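__sx_slock_try at the top of this hunk keeps its logic; only the debug-argument plumbing changes. For readers, the lock word doubles as a sharer count: as long as SX_LOCK_SHARED is set, acquiring means adding SX_ONE_SHARER with a CAS that preserves the waiter bits. A compact userspace rendering of that idea (layout invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define LOCK_SHARED 0x01UL              /* invented layout for the sketch */
#define ONE_SHARER  (1UL << 4)          /* reader count lives in high bits */

static bool
slock_try(_Atomic(uintptr_t) *lockp, uintptr_t *xp)
{
    while ((*xp & LOCK_SHARED) != 0) {
        if (atomic_compare_exchange_weak_explicit(lockp, xp,
            *xp + ONE_SHARER, memory_order_acquire, memory_order_relaxed))
            return (true);              /* one more sharer registered */
        /* failure refreshed *xp; loop and re-test the shared flag */
    }
    return (false);                     /* writer owns it: take hard path */
}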
|
    if (SCHEDULER_STOPPED())
        return (0);

#if defined(ADAPTIVE_SX)
    lock_delay_arg_init(&lda, &sx_delay);
#elif defined(KDTRACE_HOOKS)
    lock_delay_arg_init(&lda, NULL);
#endif
-#ifdef KDTRACE_HOOKS
-   all_time -= lockstat_nsecs(&sx->lock_object);
+
+#ifdef HWPMC_HOOKS
+   PMC_SOFT_CALL( , , lock, failed);
+#endif
+   lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
+       &waittime);
+
+#ifdef LOCK_PROFILING
+   extra_work = 1;
    state = x;
+#elif defined(KDTRACE_HOOKS)
+   extra_work = lockstat_enabled;
+   if (__predict_false(extra_work)) {
+       all_time -= lockstat_nsecs(&sx->lock_object);
+       state = x;
+   }
#endif

    /*
     * As with rwlocks, we don't make any attempt to try to block
     * shared locks once there is an exclusive waiter.
     */
    for (;;) {
-       if (__sx_slock_try(sx, &x, file, line))
+       if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
            break;
#ifdef KDTRACE_HOOKS
        lda.spin_cnt++;
#endif

-#ifdef HWPMC_HOOKS
-       PMC_SOFT_CALL( , , lock, failed);
-#endif
-       lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
-           &waittime);
-
#ifdef ADAPTIVE_SX
        /*
         * If the owner is running on another CPU, spin until
         * the owner stops running or the state of the lock
         * changes.
         */
        if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
            owner = lv_sx_owner(x);
            if (TD_IS_RUNNING(owner)) {
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                    CTR3(KTR_LOCK,
                        "%s: spinning on %p held by %p",
                        __func__, sx, owner);
                KTR_STATE1(KTR_SCHED, "thread",
                    sched_tdname(curthread), "spinning",
                    "lockname:\"%s\"", sx->lock_object.lo_name);
-               GIANT_SAVE();
+               GIANT_SAVE(extra_work);
                do {
                    lock_delay(&lda);
                    x = SX_READ_VALUE(sx);
                    owner = lv_sx_owner(x);
                } while (owner != NULL && TD_IS_RUNNING(owner));
                KTR_STATE0(KTR_SCHED, "thread",
                    sched_tdname(curthread), "running");
                continue;
            }
        }
#endif

        /*
         * Some other thread already has an exclusive lock, so
         * start the process of blocking.
         */
        sleepq_lock(&sx->lock_object);
        x = SX_READ_VALUE(sx);
-
+retry_sleepq:
        /*
         * The lock could have been released while we spun.
         * In this case loop back and retry.
         */
        if (x & SX_LOCK_SHARED) {
            sleepq_release(&sx->lock_object);
            continue;
        }
--- 16 unchanged lines hidden ---
#endif

        /*
         * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
         * fail to set it drop the sleep queue lock and loop
         * back.
         */
        if (!(x & SX_LOCK_SHARED_WAITERS)) {
-           if (!atomic_cmpset_ptr(&sx->sx_lock, x,
-               x | SX_LOCK_SHARED_WAITERS)) {
-               sleepq_release(&sx->lock_object);
-               x = SX_READ_VALUE(sx);
-               continue;
-           }
+           if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
+               x | SX_LOCK_SHARED_WAITERS))
+               goto retry_sleepq;
            if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
                    __func__, sx);
        }

        /*
         * Since we have been unable to acquire the shared lock,
         * we have to sleep.
         */
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
            CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                __func__, sx);

#ifdef KDTRACE_HOOKS
        sleep_time -= lockstat_nsecs(&sx->lock_object);
#endif
-       GIANT_SAVE();
+       GIANT_SAVE(extra_work);
        sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
            SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
            SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
        if (!(opts & SX_INTERRUPTIBLE))
            sleepq_wait(&sx->lock_object, 0);
        else
            error = sleepq_wait_sig(&sx->lock_object, 0);
#ifdef KDTRACE_HOOKS
--- 7 unchanged lines hidden ---
                __func__, sx);
            break;
        }
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
            CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                __func__, sx);
        x = SX_READ_VALUE(sx);
    }
+#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING)
+   if (__predict_true(!extra_work))
+       return (error);
+#endif
|
#ifdef KDTRACE_HOOKS
    all_time += lockstat_nsecs(&sx->lock_object);
    if (sleep_time)
        LOCKSTAT_RECORD4(sx__block, sx, sleep_time,
            LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0,
            (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state));
    if (lda.spin_cnt > sleep_cnt)
        LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time,
--- 4 unchanged lines hidden ---
        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx,
            contested, waittime, file, line, LOCKSTAT_READER);
    }
    GIANT_RESTORE();
    return (error);
}

int
-_sx_slock(struct sx *sx, int opts, const char *file, int line)
+_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
{
    uintptr_t x;
    int error;

    KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() ||
        !TD_IS_IDLETHREAD(curthread),
        ("sx_slock() by idle thread %p on sx %s @ %s:%d",
        curthread, sx->lock_object.lo_name, file, line));
    KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
        ("sx_slock() of destroyed sx @ %s:%d", file, line));
    WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL);

    error = 0;
    x = SX_READ_VALUE(sx);
    if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__acquire) ||
-       !__sx_slock_try(sx, &x, file, line)))
-       error = _sx_slock_hard(sx, opts, file, line, x);
+       !__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG)))
+       error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
    if (error == 0) {
        LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&sx->lock_object, 0, file, line);
        TD_LOCKS_INC(curthread);
    }
    return (error);
}

+int
+_sx_slock(struct sx *sx, int opts, const char *file, int line)
+{
+
+   return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG));
+}
+
|
static bool __always_inline
_sx_sunlock_try(struct sx *sx, uintptr_t *xp)
{

    for (;;) {
        /*
         * We should never have sharers while at least one thread
         * holds a shared lock.
--- 35 unchanged lines hidden ---
            continue;
        }
        break;
    }
    return (false);
}

static void __noinline
-_sx_sunlock_hard(struct sx *sx, uintptr_t x, const char *file, int line)
+_sx_sunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
    int wakeup_swapper;
+   uintptr_t setx;

    if (SCHEDULER_STOPPED())
        return;

-   for (;;) {
-       if (_sx_sunlock_try(sx, &x))
-           break;
+   if (_sx_sunlock_try(sx, &x))
+       goto out_lockstat;

-       /*
-        * At this point, there should just be one sharer with
-        * exclusive waiters.
-        */
-       MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
+   /*
+    * At this point, there should just be one sharer with
+    * exclusive waiters.
+    */
+   MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

-       sleepq_lock(&sx->lock_object);
-
+   sleepq_lock(&sx->lock_object);
+   x = SX_READ_VALUE(sx);
+   for (;;) {
+       MPASS(x & SX_LOCK_EXCLUSIVE_WAITERS);
+       MPASS(!(x & SX_LOCK_SHARED_WAITERS));
        /*
         * Wake up semantic here is quite simple:
         * Just wake up all the exclusive waiters.
         * Note that the state of the lock could have changed,
         * so if it fails loop back and retry.
         */
-       if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
-           SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
-           SX_LOCK_UNLOCKED)) {
-           sleepq_release(&sx->lock_object);
-           x = SX_READ_VALUE(sx);
+       setx = x - SX_ONE_SHARER;
+       setx &= ~SX_LOCK_EXCLUSIVE_WAITERS;
+       if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
            continue;
-       }
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
            CTR2(KTR_LOCK, "%s: %p waking up all thread on"
                "exclusive queue", __func__, sx);
        wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX,
            0, SQ_EXCLUSIVE_QUEUE);
-       sleepq_release(&sx->lock_object);
-       if (wakeup_swapper)
-           kick_proc0();
        break;
    }
+   sleepq_release(&sx->lock_object);
+   if (wakeup_swapper)
+       kick_proc0();
+out_lockstat:
|
    LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER);
}

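The reworked _sx_sunlock_hard stops looping around _sx_sunlock_try: a successful fast try jumps straight to the lockstat epilogue, and the remaining last-sharer-with-waiters case computes the target word arithmetically (drop one sharer, clear the writers' bit) and retries with fcmpset under the sleepqueue lock; the wake-up and chain-lock release now sit after the loop. The release step as a sketch (layout invented):

#include <stdatomic.h>
#include <stdint.h>

#define EXCL_WAITERS 0x04UL             /* invented layout for the sketch */
#define ONE_SHARER   (1UL << 4)

static void
sunlock_last_sharer(_Atomic(uintptr_t) *lockp, uintptr_t x)
{
    uintptr_t setx;

    for (;;) {
        setx = x - ONE_SHARER;          /* drop our reader */
        setx &= ~EXCL_WAITERS;          /* we are about to wake them */
        if (atomic_compare_exchange_weak_explicit(lockp, &x, setx,
            memory_order_release, memory_order_relaxed))
            break;
        /* x was refreshed by the failed exchange; recompute setx. */
    }
    /* broadcast the exclusive queue here, then drop the chain lock */
}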
1149_sx_sunlock(struct sx *sx, const char *file, int line) | 1212_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF) |
1150{ 1151 uintptr_t x; 1152 1153 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 1154 ("sx_sunlock() of destroyed sx @ %s:%d", file, line)); 1155 _sx_assert(sx, SA_SLOCKED, file, line); 1156 WITNESS_UNLOCK(&sx->lock_object, 0, file, line); 1157 LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line); 1158 1159 x = SX_READ_VALUE(sx); 1160 if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__release) || 1161 !_sx_sunlock_try(sx, &x))) | 1213{ 1214 uintptr_t x; 1215 1216 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 1217 ("sx_sunlock() of destroyed sx @ %s:%d", file, line)); 1218 _sx_assert(sx, SA_SLOCKED, file, line); 1219 WITNESS_UNLOCK(&sx->lock_object, 0, file, line); 1220 LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line); 1221 1222 x = SX_READ_VALUE(sx); 1223 if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__release) || 1224 !_sx_sunlock_try(sx, &x))) |
1162 _sx_sunlock_hard(sx, x, file, line); | 1225 _sx_sunlock_hard(sx, x LOCK_FILE_LINE_ARG); |
1163 1164 TD_LOCKS_DEC(curthread); 1165} 1166 | 1226 1227 TD_LOCKS_DEC(curthread); 1228} 1229 |
1230void 1231_sx_sunlock(struct sx *sx, const char *file, int line) 1232{ 1233 1234 _sx_sunlock_int(sx LOCK_FILE_LINE_ARG); 1235} 1236 |
|
1167#ifdef INVARIANT_SUPPORT 1168#ifndef INVARIANTS 1169#undef _sx_assert 1170#endif 1171 1172/* 1173 * In the non-WITNESS case, sx_assert() can only detect that at least 1174 * *some* thread owns an slock, but it cannot guarantee that *this* --- 157 unchanged lines hidden --- | 1237#ifdef INVARIANT_SUPPORT 1238#ifndef INVARIANTS 1239#undef _sx_assert 1240#endif 1241 1242/* 1243 * In the non-WITNESS case, sx_assert() can only detect that at least 1244 * *some* thread owns an slock, but it cannot guarantee that *this* --- 157 unchanged lines hidden --- |