1/*- 2 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org> 3 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice(s), this list of conditions and the following disclaimer as 11 * the first lines of this file unmodified other than the possible 12 * addition of one or more copyright notices. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice(s), this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 18 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 21 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 24 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 27 * DAMAGE. 28 */ 29 30/* 31 * Shared/exclusive locks. This implementation attempts to ensure 32 * deterministic lock granting behavior, so that slocks and xlocks are 33 * interleaved. 34 * 35 * Priority propagation will not generally raise the priority of lock holders, 36 * so should not be relied upon in combination with sx locks. 37 */ 38 39#include "opt_ddb.h" 40#include "opt_hwpmc_hooks.h" 41#include "opt_no_adaptive_sx.h" 42 43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD: stable/11/sys/kern/kern_sx.c 327413 2017-12-31 05:06:35Z mjg $");
45 46#include <sys/param.h> 47#include <sys/systm.h> 48#include <sys/kdb.h> 49#include <sys/kernel.h> 50#include <sys/ktr.h> 51#include <sys/lock.h> 52#include <sys/mutex.h> 53#include <sys/proc.h> 54#include <sys/sched.h> 55#include <sys/sleepqueue.h> 56#include <sys/sx.h> 57#include <sys/smp.h> 58#include <sys/sysctl.h> 59 60#if defined(SMP) && !defined(NO_ADAPTIVE_SX) 61#include <machine/cpu.h> 62#endif 63 64#ifdef DDB 65#include <ddb/ddb.h> 66#endif 67 68#if defined(SMP) && !defined(NO_ADAPTIVE_SX) 69#define ADAPTIVE_SX 70#endif 71 72CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE); 73 74#ifdef HWPMC_HOOKS 75#include <sys/pmckern.h> 76PMC_SOFT_DECLARE( , , lock, failed); 77#endif 78 79/* Handy macros for sleep queues. */ 80#define SQ_EXCLUSIVE_QUEUE 0 81#define SQ_SHARED_QUEUE 1 82 83/* 84 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file. We 85 * drop Giant anytime we have to sleep or if we adaptively spin. 86 */ 87#define GIANT_DECLARE \ 88 int _giantcnt = 0; \ 89 WITNESS_SAVE_DECL(Giant) \ 90
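/*
 * GIANT_SAVE() increments the caller-supplied counter only when Giant is
 * actually dropped, so the hard lock paths can tell whether the slow
 * epilogue (GIANT_RESTORE() and the lockstat/profiling bookkeeping) has to
 * run on the way out.
 */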
91#define GIANT_SAVE(work) do { \
92 if (mtx_owned(&Giant)) { \
93 work++; \
94 WITNESS_SAVE(&Giant.lock_object, Giant); \ 95 while (mtx_owned(&Giant)) { \ 96 _giantcnt++; \ 97 mtx_unlock(&Giant); \ 98 } \ 99 } \ 100} while (0) 101 102#define GIANT_RESTORE() do { \ 103 if (_giantcnt > 0) { \ 104 mtx_assert(&Giant, MA_NOTOWNED); \ 105 while (_giantcnt--) \ 106 mtx_lock(&Giant); \ 107 WITNESS_RESTORE(&Giant.lock_object, Giant); \ 108 } \ 109} while (0) 110 111/* 112 * Returns true if an exclusive lock is recursed. It assumes 113 * curthread currently has an exclusive lock. 114 */ 115#define sx_recursed(sx) ((sx)->sx_recurse != 0) 116 117static void assert_sx(const struct lock_object *lock, int what); 118#ifdef DDB 119static void db_show_sx(const struct lock_object *lock); 120#endif 121static void lock_sx(struct lock_object *lock, uintptr_t how); 122#ifdef KDTRACE_HOOKS 123static int owner_sx(const struct lock_object *lock, struct thread **owner); 124#endif 125static uintptr_t unlock_sx(struct lock_object *lock); 126 127struct lock_class lock_class_sx = { 128 .lc_name = "sx", 129 .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE, 130 .lc_assert = assert_sx, 131#ifdef DDB 132 .lc_ddb_show = db_show_sx, 133#endif 134 .lc_lock = lock_sx, 135 .lc_unlock = unlock_sx, 136#ifdef KDTRACE_HOOKS 137 .lc_owner = owner_sx, 138#endif 139}; 140 141#ifndef INVARIANTS 142#define _sx_assert(sx, what, file, line) 143#endif 144 145#ifdef ADAPTIVE_SX 146static __read_frequently u_int asx_retries = 10; 147static __read_frequently u_int asx_loops = 10000; 148static SYSCTL_NODE(_debug, OID_AUTO, sx, CTLFLAG_RD, NULL, "sxlock debugging"); 149SYSCTL_UINT(_debug_sx, OID_AUTO, retries, CTLFLAG_RW, &asx_retries, 0, ""); 150SYSCTL_UINT(_debug_sx, OID_AUTO, loops, CTLFLAG_RW, &asx_loops, 0, ""); 151 152static struct lock_delay_config __read_frequently sx_delay; 153 154SYSCTL_INT(_debug_sx, OID_AUTO, delay_base, CTLFLAG_RW, &sx_delay.base, 155 0, ""); 156SYSCTL_INT(_debug_sx, OID_AUTO, delay_max, CTLFLAG_RW, &sx_delay.max, 157 0, ""); 158 159LOCK_DELAY_SYSINIT_DEFAULT(sx_delay); 160#endif 161 162void 163assert_sx(const struct lock_object *lock, int what) 164{ 165 166 sx_assert((const struct sx *)lock, what); 167} 168 169void 170lock_sx(struct lock_object *lock, uintptr_t how) 171{ 172 struct sx *sx; 173 174 sx = (struct sx *)lock; 175 if (how) 176 sx_slock(sx); 177 else 178 sx_xlock(sx); 179} 180 181uintptr_t 182unlock_sx(struct lock_object *lock) 183{ 184 struct sx *sx; 185 186 sx = (struct sx *)lock; 187 sx_assert(sx, SA_LOCKED | SA_NOTRECURSED); 188 if (sx_xlocked(sx)) { 189 sx_xunlock(sx); 190 return (0); 191 } else { 192 sx_sunlock(sx); 193 return (1); 194 } 195} 196 197#ifdef KDTRACE_HOOKS 198int 199owner_sx(const struct lock_object *lock, struct thread **owner) 200{ 201 const struct sx *sx = (const struct sx *)lock; 202 uintptr_t x = sx->sx_lock; 203 204 *owner = (struct thread *)SX_OWNER(x); 205 return ((x & SX_LOCK_SHARED) != 0 ? 
(SX_SHARERS(x) != 0) : 206 (*owner != NULL)); 207} 208#endif 209 210void 211sx_sysinit(void *arg) 212{ 213 struct sx_args *sargs = arg; 214 215 sx_init_flags(sargs->sa_sx, sargs->sa_desc, sargs->sa_flags); 216} 217 218void 219sx_init_flags(struct sx *sx, const char *description, int opts) 220{ 221 int flags; 222 223 MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK | 224 SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0); 225 ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock, 226 ("%s: sx_lock not aligned for %s: %p", __func__, description, 227 &sx->sx_lock)); 228 229 flags = LO_SLEEPABLE | LO_UPGRADABLE; 230 if (opts & SX_DUPOK) 231 flags |= LO_DUPOK; 232 if (opts & SX_NOPROFILE) 233 flags |= LO_NOPROFILE; 234 if (!(opts & SX_NOWITNESS)) 235 flags |= LO_WITNESS; 236 if (opts & SX_RECURSE) 237 flags |= LO_RECURSABLE; 238 if (opts & SX_QUIET) 239 flags |= LO_QUIET; 240 if (opts & SX_NEW) 241 flags |= LO_NEW; 242 243 flags |= opts & SX_NOADAPTIVE; 244 lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags); 245 sx->sx_lock = SX_LOCK_UNLOCKED; 246 sx->sx_recurse = 0; 247} 248 249void 250sx_destroy(struct sx *sx) 251{ 252 253 KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held")); 254 KASSERT(sx->sx_recurse == 0, ("sx lock still recursed")); 255 sx->sx_lock = SX_LOCK_DESTROYED; 256 lock_destroy(&sx->lock_object); 257} 258 259int
260sx_try_slock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
261{ 262 uintptr_t x; 263 264 if (SCHEDULER_STOPPED()) 265 return (1); 266 267 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread), 268 ("sx_try_slock() by idle thread %p on sx %s @ %s:%d", 269 curthread, sx->lock_object.lo_name, file, line)); 270 271 x = sx->sx_lock; 272 for (;;) { 273 KASSERT(x != SX_LOCK_DESTROYED, 274 ("sx_try_slock() of destroyed sx @ %s:%d", file, line)); 275 if (!(x & SX_LOCK_SHARED)) 276 break; 277 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, x + SX_ONE_SHARER)) { 278 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line); 279 WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line); 280 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, 281 sx, 0, 0, file, line, LOCKSTAT_READER); 282 TD_LOCKS_INC(curthread); 283 return (1); 284 } 285 } 286 287 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line); 288 return (0); 289} 290 291int
292sx_try_slock_(struct sx *sx, const char *file, int line) 293 { 294 295 return (sx_try_slock_int(sx LOCK_FILE_LINE_ARG)); 296 } 297 298 int
299_sx_xlock(struct sx *sx, int opts, const char *file, int line) 300{ 301 uintptr_t tid, x; 302 int error = 0; 303 304 KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() || 305 !TD_IS_IDLETHREAD(curthread), 306 ("sx_xlock() by idle thread %p on sx %s @ %s:%d", 307 curthread, sx->lock_object.lo_name, file, line)); 308 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 309 ("sx_xlock() of destroyed sx @ %s:%d", file, line)); 310 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, 311 line, NULL); 312 tid = (uintptr_t)curthread; 313 x = SX_LOCK_UNLOCKED; 314 if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
315 error = _sx_xlock_hard(sx, x, opts LOCK_FILE_LINE_ARG);
316 else 317 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 318 0, 0, file, line, LOCKSTAT_WRITER); 319 if (!error) { 320 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse, 321 file, line); 322 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); 323 TD_LOCKS_INC(curthread); 324 } 325 326 return (error); 327} 328 329int
330sx_try_xlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
331{ 332 struct thread *td; 333 uintptr_t tid, x; 334 int rval; 335 bool recursed; 336 337 td = curthread; 338 tid = (uintptr_t)td; 339 if (SCHEDULER_STOPPED_TD(td)) 340 return (1); 341 342 KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(td), 343 ("sx_try_xlock() by idle thread %p on sx %s @ %s:%d", 344 curthread, sx->lock_object.lo_name, file, line)); 345 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 346 ("sx_try_xlock() of destroyed sx @ %s:%d", file, line)); 347 348 rval = 1; 349 recursed = false; 350 x = SX_LOCK_UNLOCKED; 351 for (;;) { 352 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) 353 break; 354 if (x == SX_LOCK_UNLOCKED) 355 continue; 356 if (x == tid && (sx->lock_object.lo_flags & LO_RECURSABLE)) { 357 sx->sx_recurse++; 358 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); 359 break; 360 } 361 rval = 0; 362 break; 363 } 364 365 LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line); 366 if (rval) { 367 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, 368 file, line); 369 if (!recursed) 370 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, 371 sx, 0, 0, file, line, LOCKSTAT_WRITER); 372 TD_LOCKS_INC(curthread); 373 } 374 375 return (rval); 376} 377
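/*
 * The *_int variants receive file/line information only through
 * LOCK_FILE_LINE_ARG_DEF (defined in sys/lock.h), while the
 * underscore-suffixed wrappers below retain the older interface that always
 * takes file and line explicitly.
 */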
378int 379sx_try_xlock_(struct sx *sx, const char *file, int line) 380 { 381 382 return (sx_try_xlock_int(sx LOCK_FILE_LINE_ARG)); 383 } 384
385void 386_sx_xunlock(struct sx *sx, const char *file, int line) 387 { 388 389 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 390 ("sx_xunlock() of destroyed sx @ %s:%d", file, line)); 391 _sx_assert(sx, SA_XLOCKED, file, line); 392 WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line); 393 LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file, 394 line); 395#if LOCK_DEBUG > 0 396 _sx_xunlock_hard(sx, (uintptr_t)curthread, file, line); 397#else 398 __sx_xunlock(sx, curthread, file, line); 399#endif 400 TD_LOCKS_DEC(curthread); 401} 402 403/* 404 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock. 405 * This will only succeed if this thread holds a single shared lock. 406 * Return 1 if the upgrade succeeded, 0 otherwise. 407 */ 408 int
409sx_try_upgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
410{ 411 uintptr_t x; 412 int success; 413 414 if (SCHEDULER_STOPPED()) 415 return (1); 416 417 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 418 ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line)); 419 _sx_assert(sx, SA_SLOCKED, file, line); 420 421 /* 422 * Try to switch from one shared lock to an exclusive lock. We need 423 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that 424 * we will wake up the exclusive waiters when we drop the lock. 425 */ 426 x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS; 427 success = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x, 428 (uintptr_t)curthread | x); 429 LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line); 430 if (success) { 431 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, 432 file, line); 433 LOCKSTAT_RECORD0(sx__upgrade, sx); 434 } 435 return (success); 436} 437
438int 439sx_try_upgrade_(struct sx *sx, const char *file, int line) 440 { 441 442 return (sx_try_upgrade_int(sx LOCK_FILE_LINE_ARG)); 443 } 444
445/* 446 * Downgrade an unrecursed exclusive lock into a single shared lock. 447 */ 448void
449sx_downgrade_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
450{ 451 uintptr_t x; 452 int wakeup_swapper; 453 454 if (SCHEDULER_STOPPED()) 455 return; 456 457 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 458 ("sx_downgrade() of destroyed sx @ %s:%d", file, line)); 459 _sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line); 460#ifndef INVARIANTS 461 if (sx_recursed(sx)) 462 panic("downgrade of a recursed lock"); 463#endif 464 465 WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line); 466 467 /* 468 * Try to switch from an exclusive lock with no shared waiters 469 * to one sharer with no shared waiters. If there are 470 * exclusive waiters, we don't need to lock the sleep queue so 471 * long as we preserve the flag. We do one quick try and if 472 * that fails we grab the sleepq lock to keep the flags from 473 * changing and do it the slow way. 474 * 475 * We have to lock the sleep queue if there are shared waiters 476 * so we can wake them up. 477 */ 478 x = sx->sx_lock; 479 if (!(x & SX_LOCK_SHARED_WAITERS) && 480 atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) | 481 (x & SX_LOCK_EXCLUSIVE_WAITERS))) 482 goto out; 483 484 /* 485 * Lock the sleep queue so we can read the waiters bits 486 * without any races and wakeup any shared waiters. 487 */ 488 sleepq_lock(&sx->lock_object); 489 490 /* 491 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single 492 * shared lock. If there are any shared waiters, wake them up. 493 */ 494 wakeup_swapper = 0; 495 x = sx->sx_lock; 496 atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | 497 (x & SX_LOCK_EXCLUSIVE_WAITERS)); 498 if (x & SX_LOCK_SHARED_WAITERS) 499 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 500 0, SQ_SHARED_QUEUE); 501 sleepq_release(&sx->lock_object); 502 503 if (wakeup_swapper) 504 kick_proc0(); 505 506out: 507 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line); 508 LOCKSTAT_RECORD0(sx__downgrade, sx); 509} 510
511void 512sx_downgrade_(struct sx *sx, const char *file, int line) 513 { 514 515 sx_downgrade_int(sx LOCK_FILE_LINE_ARG); 516 } 517
518/* 519 * This function represents the so-called 'hard case' for sx_xlock 520 * operation. All 'easy case' failures are redirected to this. Note 521 * that ideally this would be a static function, but it needs to be 522 * accessible from at least sx.h. 523 */ 524int
525_sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
526{ 527 GIANT_DECLARE;
528 uintptr_t tid;
529#ifdef ADAPTIVE_SX 530 volatile struct thread *owner; 531 u_int i, spintries = 0; 532#endif 533#ifdef LOCK_PROFILING 534 uint64_t waittime = 0; 535 int contested = 0; 536#endif 537 int error = 0; 538#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS) 539 struct lock_delay_arg lda; 540#endif 541#ifdef KDTRACE_HOOKS
542 u_int sleep_cnt = 0; 543 int64_t sleep_time = 0; 544 int64_t all_time = 0; 545#endif
546#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) 547 uintptr_t state; 548#endif 549 int extra_work = 0;
550
551 tid = (uintptr_t)curthread;
552 if (SCHEDULER_STOPPED()) 553 return (0); 554 555#if defined(ADAPTIVE_SX) 556 lock_delay_arg_init(&lda, &sx_delay); 557#elif defined(KDTRACE_HOOKS) 558 lock_delay_arg_init(&lda, NULL); 559#endif 560 561 if (__predict_false(x == SX_LOCK_UNLOCKED)) 562 x = SX_READ_VALUE(sx); 563 564 /* If we already hold an exclusive lock, then recurse. */ 565 if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) { 566 KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0, 567 ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n", 568 sx->lock_object.lo_name, file, line)); 569 sx->sx_recurse++; 570 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED); 571 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 572 CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx); 573 return (0); 574 } 575 576 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 577 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__, 578 sx->lock_object.lo_name, (void *)sx->sx_lock, file, line); 579
580#ifdef HWPMC_HOOKS 581 PMC_SOFT_CALL( , , lock, failed); 582#endif 583 lock_profile_obtain_lock_failed(&sx->lock_object, &contested, 584 &waittime); 585 586#ifdef LOCK_PROFILING 587 extra_work = 1;
588 state = x;
589#elif defined(KDTRACE_HOOKS) 590 extra_work = lockstat_enabled; 591 if (__predict_false(extra_work)) { 592 all_time -= lockstat_nsecs(&sx->lock_object); 593 state = x; 594 }
595#endif
596
597 for (;;) { 598 if (x == SX_LOCK_UNLOCKED) { 599 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) 600 break; 601 continue; 602 } 603#ifdef KDTRACE_HOOKS 604 lda.spin_cnt++; 605#endif
606#ifdef ADAPTIVE_SX 607 /* 608 * If the lock is write locked and the owner is 609 * running on another CPU, spin until the owner stops 610 * running or the state of the lock changes. 611 */ 612 if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) { 613 if ((x & SX_LOCK_SHARED) == 0) { 614 owner = lv_sx_owner(x); 615 if (TD_IS_RUNNING(owner)) { 616 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 617 CTR3(KTR_LOCK, 618 "%s: spinning on %p held by %p", 619 __func__, sx, owner); 620 KTR_STATE1(KTR_SCHED, "thread", 621 sched_tdname(curthread), "spinning", 622 "lockname:\"%s\"", 623 sx->lock_object.lo_name);
624 GIANT_SAVE(extra_work);
625 do { 626 lock_delay(&lda); 627 x = SX_READ_VALUE(sx); 628 owner = lv_sx_owner(x); 629 } while (owner != NULL && 630 TD_IS_RUNNING(owner)); 631 KTR_STATE0(KTR_SCHED, "thread", 632 sched_tdname(curthread), "running"); 633 continue; 634 } 635 } else if (SX_SHARERS(x) && spintries < asx_retries) { 636 KTR_STATE1(KTR_SCHED, "thread", 637 sched_tdname(curthread), "spinning", 638 "lockname:\"%s\"", sx->lock_object.lo_name);
639 GIANT_SAVE(extra_work);
640 spintries++; 641 for (i = 0; i < asx_loops; i++) { 642 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 643 CTR4(KTR_LOCK, 644 "%s: shared spinning on %p with %u and %u", 645 __func__, sx, spintries, i);
646 cpu_spinwait(); 647 x = SX_READ_VALUE(sx);
648 if ((x & SX_LOCK_SHARED) == 0 || 649 SX_SHARERS(x) == 0) 650 break;
651 }
652#ifdef KDTRACE_HOOKS
653 lda.spin_cnt += i;
654#endif
655 KTR_STATE0(KTR_SCHED, "thread", 656 sched_tdname(curthread), "running");
657 if (i != asx_loops) 658 continue; 659 } 660 } 661#endif 662 663 sleepq_lock(&sx->lock_object); 664 x = SX_READ_VALUE(sx);
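		/*
		 * Failed fcmpset attempts below jump back to retry_sleepq so
		 * that the sleepqueue chain lock is held across the retry
		 * instead of being dropped and reacquired.
		 */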
665retry_sleepq:
666 667 /* 668 * If the lock was released while spinning on the 669 * sleep queue chain lock, try again. 670 */ 671 if (x == SX_LOCK_UNLOCKED) { 672 sleepq_release(&sx->lock_object); 673 continue; 674 } 675 676#ifdef ADAPTIVE_SX 677 /* 678 * The current lock owner might have started executing 679 * on another CPU (or the lock could have changed 680 * owners) while we were waiting on the sleep queue 681 * chain lock. If so, drop the sleep queue lock and try 682 * again. 683 */ 684 if (!(x & SX_LOCK_SHARED) && 685 (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) { 686 owner = (struct thread *)SX_OWNER(x); 687 if (TD_IS_RUNNING(owner)) { 688 sleepq_release(&sx->lock_object); 689 continue; 690 } 691 } 692#endif 693 694 /* 695 * If an exclusive lock was released with both shared 696 * and exclusive waiters and a shared waiter hasn't 697 * woken up and acquired the lock yet, sx_lock will be 698 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS. 699 * If we see that value, try to acquire it once. Note 700 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS 701 * as there are other exclusive waiters still. If we 702 * fail, restart the loop. 703 */ 704 if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
705 if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, 706 tid | SX_LOCK_EXCLUSIVE_WAITERS)) 707 goto retry_sleepq;
708 sleepq_release(&sx->lock_object);
709 CTR2(KTR_LOCK, "%s: %p claimed by new writer", 710 __func__, sx); 711 break;
712 } 713 714 /* 715 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag. If we fail, 716 * then loop back and retry. 717 */ 718 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
719 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x,
720 x | SX_LOCK_EXCLUSIVE_WAITERS)) {
721 goto retry_sleepq;
722 } 723 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 724 CTR2(KTR_LOCK, "%s: %p set excl waiters flag", 725 __func__, sx); 726 } 727 728 /* 729 * Since we have been unable to acquire the exclusive 730 * lock and the exclusive waiters flag is set, we have 731 * to sleep. 732 */ 733 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 734 CTR2(KTR_LOCK, "%s: %p blocking on sleep queue", 735 __func__, sx); 736 737#ifdef KDTRACE_HOOKS 738 sleep_time -= lockstat_nsecs(&sx->lock_object); 739#endif
740 GIANT_SAVE(extra_work);
741 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, 742 SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ? 743 SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE); 744 if (!(opts & SX_INTERRUPTIBLE)) 745 sleepq_wait(&sx->lock_object, 0); 746 else 747 error = sleepq_wait_sig(&sx->lock_object, 0); 748#ifdef KDTRACE_HOOKS 749 sleep_time += lockstat_nsecs(&sx->lock_object); 750 sleep_cnt++; 751#endif 752 if (error) { 753 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 754 CTR2(KTR_LOCK, 755 "%s: interruptible sleep by %p suspended by signal", 756 __func__, sx); 757 break; 758 } 759 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 760 CTR2(KTR_LOCK, "%s: %p resuming from sleep queue", 761 __func__, sx); 762 x = SX_READ_VALUE(sx); 763 }
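	/*
	 * extra_work is only non-zero when lock profiling or lockstat is
	 * enabled, or when GIANT_SAVE() actually dropped Giant; otherwise the
	 * accounting and GIANT_RESTORE() below can be skipped entirely.
	 */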
764#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) 765 if (__predict_true(!extra_work)) 766 return (error); 767#endif
768#ifdef KDTRACE_HOOKS 769 all_time += lockstat_nsecs(&sx->lock_object); 770 if (sleep_time) 771 LOCKSTAT_RECORD4(sx__block, sx, sleep_time, 772 LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0, 773 (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); 774 if (lda.spin_cnt > sleep_cnt) 775 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time, 776 LOCKSTAT_WRITER, (state & SX_LOCK_SHARED) == 0, 777 (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); 778#endif 779 if (!error) 780 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 781 contested, waittime, file, line, LOCKSTAT_WRITER); 782 GIANT_RESTORE(); 783 return (error); 784} 785 786/* 787 * This function represents the so-called 'hard case' for sx_xunlock 788 * operation. All 'easy case' failures are redirected to this. Note 789 * that ideally this would be a static function, but it needs to be 790 * accessible from at least sx.h. 791 */ 792void
793_sx_xunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
794{
795 uintptr_t tid, setx;
796 int queue, wakeup_swapper; 797 798 if (SCHEDULER_STOPPED()) 799 return; 800
801 tid = (uintptr_t)curthread;
802
803 if (__predict_false(x == tid)) 804 x = SX_READ_VALUE(sx); 805 806 MPASS(!(x & SX_LOCK_SHARED)); 807 808 if (__predict_false(x & SX_LOCK_RECURSED)) {
809 /* The lock is recursed, unrecurse one level. */ 810 if ((--sx->sx_recurse) == 0) 811 atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED); 812 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 813 CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx); 814 return; 815 } 816 817 LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_WRITER); 818 if (x == tid && 819 atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED)) 820 return; 821
822 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 823 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx); 824 825 sleepq_lock(&sx->lock_object);
826 x = SX_READ_VALUE(sx); 827 MPASS(x & (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS));
828 829 /* 830 * The wake up algorithm here is quite simple and probably not 831 * ideal. It gives precedence to shared waiters if they are 832 * present. For this condition, we have to preserve the 833 * state of the exclusive waiters flag. 834 * If interruptible sleeps left the shared queue empty avoid a 835 * starvation for the threads sleeping on the exclusive queue by giving 836 * them precedence and cleaning up the shared waiters bit anyway. 837 */
838 setx = SX_LOCK_UNLOCKED; 839 queue = SQ_EXCLUSIVE_QUEUE; 840 if ((x & SX_LOCK_SHARED_WAITERS) != 0 &&
841 sleepq_sleepcnt(&sx->lock_object, SQ_SHARED_QUEUE) != 0) { 842 queue = SQ_SHARED_QUEUE;
843 setx |= (x & SX_LOCK_EXCLUSIVE_WAITERS); 844 } 845 atomic_store_rel_ptr(&sx->sx_lock, setx);
846 847 /* Wake up all the waiters for the specific queue. */ 848 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 849 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue", 850 __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" : 851 "exclusive");
852
853 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, 854 queue); 855 sleepq_release(&sx->lock_object); 856 if (wakeup_swapper) 857 kick_proc0(); 858} 859 860static bool __always_inline
861__sx_slock_try(struct sx *sx, uintptr_t *xp LOCK_FILE_LINE_ARG_DEF)
862{ 863 864 /* 865 * If no other thread has an exclusive lock then try to bump up 866 * the count of sharers. Since we have to preserve the state 867 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the 868 * shared lock loop back and retry. 869 */ 870 while (*xp & SX_LOCK_SHARED) { 871 MPASS(!(*xp & SX_LOCK_SHARED_WAITERS)); 872 if (atomic_fcmpset_acq_ptr(&sx->sx_lock, xp, 873 *xp + SX_ONE_SHARER)) { 874 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 875 CTR4(KTR_LOCK, "%s: %p succeed %p -> %p", 876 __func__, sx, (void *)*xp, 877 (void *)(*xp + SX_ONE_SHARER)); 878 return (true); 879 } 880 } 881 return (false); 882} 883 884static int __noinline
885_sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
886{ 887 GIANT_DECLARE; 888#ifdef ADAPTIVE_SX 889 volatile struct thread *owner; 890#endif 891#ifdef LOCK_PROFILING 892 uint64_t waittime = 0; 893 int contested = 0; 894#endif 895 int error = 0; 896#if defined(ADAPTIVE_SX) || defined(KDTRACE_HOOKS) 897 struct lock_delay_arg lda; 898#endif 899#ifdef KDTRACE_HOOKS
900 u_int sleep_cnt = 0; 901 int64_t sleep_time = 0; 902 int64_t all_time = 0; 903#endif
904#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) 905 uintptr_t state; 906#endif 907 int extra_work = 0;
908 909 if (SCHEDULER_STOPPED()) 910 return (0); 911 912#if defined(ADAPTIVE_SX) 913 lock_delay_arg_init(&lda, &sx_delay); 914#elif defined(KDTRACE_HOOKS) 915 lock_delay_arg_init(&lda, NULL); 916#endif
917 918#ifdef HWPMC_HOOKS 919 PMC_SOFT_CALL( , , lock, failed); 920#endif 921 lock_profile_obtain_lock_failed(&sx->lock_object, &contested, 922 &waittime); 923 924#ifdef LOCK_PROFILING 925 extra_work = 1;
926 state = x;
927#elif defined(KDTRACE_HOOKS) 928 extra_work = lockstat_enabled; 929 if (__predict_false(extra_work)) { 930 all_time -= lockstat_nsecs(&sx->lock_object); 931 state = x; 932 }
933#endif 934 935 /* 936 * As with rwlocks, we don't make any attempt to try to block 937 * shared locks once there is an exclusive waiter. 938 */ 939 for (;;) {
940 if (__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))
941 break; 942#ifdef KDTRACE_HOOKS 943 lda.spin_cnt++; 944#endif 945
946#ifdef ADAPTIVE_SX 947 /* 948 * If the owner is running on another CPU, spin until 949 * the owner stops running or the state of the lock 950 * changes. 951 */ 952 if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) { 953 owner = lv_sx_owner(x); 954 if (TD_IS_RUNNING(owner)) { 955 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 956 CTR3(KTR_LOCK, 957 "%s: spinning on %p held by %p", 958 __func__, sx, owner); 959 KTR_STATE1(KTR_SCHED, "thread", 960 sched_tdname(curthread), "spinning", 961 "lockname:\"%s\"", sx->lock_object.lo_name);
962 GIANT_SAVE(extra_work);
963 do { 964 lock_delay(&lda); 965 x = SX_READ_VALUE(sx); 966 owner = lv_sx_owner(x); 967 } while (owner != NULL && TD_IS_RUNNING(owner)); 968 KTR_STATE0(KTR_SCHED, "thread", 969 sched_tdname(curthread), "running"); 970 continue; 971 } 972 } 973#endif 974 975 /* 976 * Some other thread already has an exclusive lock, so 977 * start the process of blocking. 978 */ 979 sleepq_lock(&sx->lock_object); 980 x = SX_READ_VALUE(sx);
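		/*
		 * As in the exclusive-lock path, a failed fcmpset below jumps
		 * back to retry_sleepq with the sleepqueue chain lock still
		 * held.
		 */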
981retry_sleepq:
982 /* 983 * The lock could have been released while we spun. 984 * In this case loop back and retry. 985 */ 986 if (x & SX_LOCK_SHARED) { 987 sleepq_release(&sx->lock_object); 988 continue; 989 } 990 991#ifdef ADAPTIVE_SX 992 /* 993 * If the owner is running on another CPU, spin until 994 * the owner stops running or the state of the lock 995 * changes. 996 */ 997 if (!(x & SX_LOCK_SHARED) && 998 (sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) { 999 owner = (struct thread *)SX_OWNER(x); 1000 if (TD_IS_RUNNING(owner)) { 1001 sleepq_release(&sx->lock_object); 1002 x = SX_READ_VALUE(sx); 1003 continue; 1004 } 1005 } 1006#endif 1007 1008 /* 1009 * Try to set the SX_LOCK_SHARED_WAITERS flag. If we 1010 * fail to set it drop the sleep queue lock and loop 1011 * back. 1012 */ 1013 if (!(x & SX_LOCK_SHARED_WAITERS)) {
1014 if (!atomic_fcmpset_ptr(&sx->sx_lock, &x, 1015 x | SX_LOCK_SHARED_WAITERS)) 1016 goto retry_sleepq;
1017 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 1018 CTR2(KTR_LOCK, "%s: %p set shared waiters flag", 1019 __func__, sx); 1020 } 1021 1022 /* 1023 * Since we have been unable to acquire the shared lock, 1024 * we have to sleep. 1025 */ 1026 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 1027 CTR2(KTR_LOCK, "%s: %p blocking on sleep queue", 1028 __func__, sx); 1029 1030#ifdef KDTRACE_HOOKS 1031 sleep_time -= lockstat_nsecs(&sx->lock_object); 1032#endif
1033 GIANT_SAVE(extra_work);
1034 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name, 1035 SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ? 1036 SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE); 1037 if (!(opts & SX_INTERRUPTIBLE)) 1038 sleepq_wait(&sx->lock_object, 0); 1039 else 1040 error = sleepq_wait_sig(&sx->lock_object, 0); 1041#ifdef KDTRACE_HOOKS 1042 sleep_time += lockstat_nsecs(&sx->lock_object); 1043 sleep_cnt++; 1044#endif 1045 if (error) { 1046 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 1047 CTR2(KTR_LOCK, 1048 "%s: interruptible sleep by %p suspended by signal", 1049 __func__, sx); 1050 break; 1051 } 1052 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 1053 CTR2(KTR_LOCK, "%s: %p resuming from sleep queue", 1054 __func__, sx); 1055 x = SX_READ_VALUE(sx); 1056 }
1057#if defined(KDTRACE_HOOKS) || defined(LOCK_PROFILING) 1058 if (__predict_true(!extra_work)) 1059 return (error); 1060#endif
1061#ifdef KDTRACE_HOOKS 1062 all_time += lockstat_nsecs(&sx->lock_object); 1063 if (sleep_time) 1064 LOCKSTAT_RECORD4(sx__block, sx, sleep_time, 1065 LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0, 1066 (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); 1067 if (lda.spin_cnt > sleep_cnt) 1068 LOCKSTAT_RECORD4(sx__spin, sx, all_time - sleep_time, 1069 LOCKSTAT_READER, (state & SX_LOCK_SHARED) == 0, 1070 (state & SX_LOCK_SHARED) == 0 ? 0 : SX_SHARERS(state)); 1071#endif 1072 if (error == 0) { 1073 LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(sx__acquire, sx, 1074 contested, waittime, file, line, LOCKSTAT_READER); 1075 } 1076 GIANT_RESTORE(); 1077 return (error); 1078} 1079 1080int
1081_sx_slock_int(struct sx *sx, int opts LOCK_FILE_LINE_ARG_DEF)
1082{ 1083 uintptr_t x; 1084 int error; 1085 1086 KASSERT(kdb_active != 0 || SCHEDULER_STOPPED() || 1087 !TD_IS_IDLETHREAD(curthread), 1088 ("sx_slock() by idle thread %p on sx %s @ %s:%d", 1089 curthread, sx->lock_object.lo_name, file, line)); 1090 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 1091 ("sx_slock() of destroyed sx @ %s:%d", file, line)); 1092 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line, NULL); 1093 1094 error = 0; 1095 x = SX_READ_VALUE(sx); 1096 if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__acquire) ||
1097 !__sx_slock_try(sx, &x LOCK_FILE_LINE_ARG))) 1098 error = _sx_slock_hard(sx, opts, x LOCK_FILE_LINE_ARG);
1099 if (error == 0) { 1100 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line); 1101 WITNESS_LOCK(&sx->lock_object, 0, file, line); 1102 TD_LOCKS_INC(curthread); 1103 } 1104 return (error); 1105} 1106
1107int 1108_sx_slock(struct sx *sx, int opts, const char *file, int line) 1109 { 1110 1111 return (_sx_slock_int(sx, opts LOCK_FILE_LINE_ARG)); 1112 } 1113
1114static bool __always_inline 1115_sx_sunlock_try(struct sx *sx, uintptr_t *xp) 1116{ 1117 1118 for (;;) { 1119 /* 1120 * We should never have sharers while at least one thread 1121 * holds a shared lock. 1122 */ 1123 KASSERT(!(*xp & SX_LOCK_SHARED_WAITERS), 1124 ("%s: waiting sharers", __func__)); 1125 1126 /* 1127 * See if there is more than one shared lock held. If 1128 * so, just drop one and return. 1129 */ 1130 if (SX_SHARERS(*xp) > 1) { 1131 if (atomic_fcmpset_rel_ptr(&sx->sx_lock, xp, 1132 *xp - SX_ONE_SHARER)) { 1133 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 1134 CTR4(KTR_LOCK, 1135 "%s: %p succeeded %p -> %p", 1136 __func__, sx, (void *)*xp, 1137 (void *)(*xp - SX_ONE_SHARER)); 1138 return (true); 1139 } 1140 continue; 1141 } 1142 1143 /* 1144 * If there aren't any waiters for an exclusive lock, 1145 * then try to drop it quickly. 1146 */ 1147 if (!(*xp & SX_LOCK_EXCLUSIVE_WAITERS)) { 1148 MPASS(*xp == SX_SHARERS_LOCK(1)); 1149 *xp = SX_SHARERS_LOCK(1); 1150 if (atomic_fcmpset_rel_ptr(&sx->sx_lock, 1151 xp, SX_LOCK_UNLOCKED)) { 1152 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 1153 CTR2(KTR_LOCK, "%s: %p last succeeded", 1154 __func__, sx); 1155 return (true); 1156 } 1157 continue; 1158 } 1159 break; 1160 } 1161 return (false); 1162} 1163 1164static void __noinline
1165_sx_sunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
1166{ 1167 int wakeup_swapper;
1168 uintptr_t setx;
1169 1170 if (SCHEDULER_STOPPED()) 1171 return; 1172
1173 if (_sx_sunlock_try(sx, &x)) 1174 goto out_lockstat;
1175
1176 /* 1177 * At this point, there should just be one sharer with 1178 * exclusive waiters. 1179 */ 1180 MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
1181
1182 sleepq_lock(&sx->lock_object); 1183 x = SX_READ_VALUE(sx); 1184 for (;;) { 1185 MPASS(x & SX_LOCK_EXCLUSIVE_WAITERS); 1186 MPASS(!(x & SX_LOCK_SHARED_WAITERS));
1187 /* 1188 * Wake up semantic here is quite simple: 1189 * Just wake up all the exclusive waiters. 1190 * Note that the state of the lock could have changed, 1191 * so if it fails loop back and retry. 1192 */
1193 setx = x - SX_ONE_SHARER; 1194 setx &= ~SX_LOCK_EXCLUSIVE_WAITERS; 1195 if (!atomic_fcmpset_rel_ptr(&sx->sx_lock, &x, setx))
1196 continue;
1197 if (LOCK_LOG_TEST(&sx->lock_object, 0)) 1198 CTR2(KTR_LOCK, "%s: %p waking up all threads on " 1199 "exclusive queue", __func__, sx); 1200 wakeup_swapper = sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 1201 0, SQ_EXCLUSIVE_QUEUE);
1202 break; 1203 }
1204 sleepq_release(&sx->lock_object); 1205 if (wakeup_swapper) 1206 kick_proc0(); 1207out_lockstat:
1208 LOCKSTAT_PROFILE_RELEASE_RWLOCK(sx__release, sx, LOCKSTAT_READER); 1209} 1210 1211void
1212_sx_sunlock_int(struct sx *sx LOCK_FILE_LINE_ARG_DEF)
1213{ 1214 uintptr_t x; 1215 1216 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED, 1217 ("sx_sunlock() of destroyed sx @ %s:%d", file, line)); 1218 _sx_assert(sx, SA_SLOCKED, file, line); 1219 WITNESS_UNLOCK(&sx->lock_object, 0, file, line); 1220 LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line); 1221 1222 x = SX_READ_VALUE(sx); 1223 if (__predict_false(LOCKSTAT_OOL_PROFILE_ENABLED(sx__release) || 1224 !_sx_sunlock_try(sx, &x)))
1225 _sx_sunlock_hard(sx, x LOCK_FILE_LINE_ARG);
1226 1227 TD_LOCKS_DEC(curthread); 1228} 1229
1230void 1231_sx_sunlock(struct sx *sx, const char *file, int line) 1232 { 1233 1234 _sx_sunlock_int(sx LOCK_FILE_LINE_ARG); 1235 } 1236
1237#ifdef INVARIANT_SUPPORT 1238#ifndef INVARIANTS 1239#undef _sx_assert 1240#endif 1241 1242/* 1243 * In the non-WITNESS case, sx_assert() can only detect that at least 1244 * *some* thread owns an slock, but it cannot guarantee that *this* 1245 * thread owns an slock. 1246 */ 1247void 1248_sx_assert(const struct sx *sx, int what, const char *file, int line) 1249{ 1250#ifndef WITNESS 1251 int slocked = 0; 1252#endif 1253 1254 if (panicstr != NULL) 1255 return; 1256 switch (what) { 1257 case SA_SLOCKED: 1258 case SA_SLOCKED | SA_NOTRECURSED: 1259 case SA_SLOCKED | SA_RECURSED: 1260#ifndef WITNESS 1261 slocked = 1; 1262 /* FALLTHROUGH */ 1263#endif 1264 case SA_LOCKED: 1265 case SA_LOCKED | SA_NOTRECURSED: 1266 case SA_LOCKED | SA_RECURSED: 1267#ifdef WITNESS 1268 witness_assert(&sx->lock_object, what, file, line); 1269#else 1270 /* 1271 * If some other thread has an exclusive lock or we 1272 * have one and are asserting a shared lock, fail. 1273 * Also, if no one has a lock at all, fail. 1274 */ 1275 if (sx->sx_lock == SX_LOCK_UNLOCKED || 1276 (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked || 1277 sx_xholder(sx) != curthread))) 1278 panic("Lock %s not %slocked @ %s:%d\n", 1279 sx->lock_object.lo_name, slocked ? "share " : "", 1280 file, line); 1281 1282 if (!(sx->sx_lock & SX_LOCK_SHARED)) { 1283 if (sx_recursed(sx)) { 1284 if (what & SA_NOTRECURSED) 1285 panic("Lock %s recursed @ %s:%d\n", 1286 sx->lock_object.lo_name, file, 1287 line); 1288 } else if (what & SA_RECURSED) 1289 panic("Lock %s not recursed @ %s:%d\n", 1290 sx->lock_object.lo_name, file, line); 1291 } 1292#endif 1293 break; 1294 case SA_XLOCKED: 1295 case SA_XLOCKED | SA_NOTRECURSED: 1296 case SA_XLOCKED | SA_RECURSED: 1297 if (sx_xholder(sx) != curthread) 1298 panic("Lock %s not exclusively locked @ %s:%d\n", 1299 sx->lock_object.lo_name, file, line); 1300 if (sx_recursed(sx)) { 1301 if (what & SA_NOTRECURSED) 1302 panic("Lock %s recursed @ %s:%d\n", 1303 sx->lock_object.lo_name, file, line); 1304 } else if (what & SA_RECURSED) 1305 panic("Lock %s not recursed @ %s:%d\n", 1306 sx->lock_object.lo_name, file, line); 1307 break; 1308 case SA_UNLOCKED: 1309#ifdef WITNESS 1310 witness_assert(&sx->lock_object, what, file, line); 1311#else 1312 /* 1313 * If we hold an exclusve lock fail. We can't 1314 * reliably check to see if we hold a shared lock or 1315 * not. 
1316 */ 1317 if (sx_xholder(sx) == curthread) 1318 panic("Lock %s exclusively locked @ %s:%d\n", 1319 sx->lock_object.lo_name, file, line); 1320#endif 1321 break; 1322 default: 1323 panic("Unknown sx lock assertion: %d @ %s:%d", what, file, 1324 line); 1325 } 1326} 1327#endif /* INVARIANT_SUPPORT */ 1328 1329#ifdef DDB 1330static void 1331db_show_sx(const struct lock_object *lock) 1332{ 1333 struct thread *td; 1334 const struct sx *sx; 1335 1336 sx = (const struct sx *)lock; 1337 1338 db_printf(" state: "); 1339 if (sx->sx_lock == SX_LOCK_UNLOCKED) 1340 db_printf("UNLOCKED\n"); 1341 else if (sx->sx_lock == SX_LOCK_DESTROYED) { 1342 db_printf("DESTROYED\n"); 1343 return; 1344 } else if (sx->sx_lock & SX_LOCK_SHARED) 1345 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock)); 1346 else { 1347 td = sx_xholder(sx); 1348 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1349 td->td_tid, td->td_proc->p_pid, td->td_name); 1350 if (sx_recursed(sx)) 1351 db_printf(" recursed: %d\n", sx->sx_recurse); 1352 } 1353 1354 db_printf(" waiters: "); 1355 switch(sx->sx_lock & 1356 (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) { 1357 case SX_LOCK_SHARED_WAITERS: 1358 db_printf("shared\n"); 1359 break; 1360 case SX_LOCK_EXCLUSIVE_WAITERS: 1361 db_printf("exclusive\n"); 1362 break; 1363 case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS: 1364 db_printf("exclusive and shared\n"); 1365 break; 1366 default: 1367 db_printf("none\n"); 1368 } 1369} 1370 1371/* 1372 * Check to see if a thread that is blocked on a sleep queue is actually 1373 * blocked on an sx lock. If so, output some details and return true. 1374 * If the lock has an exclusive owner, return that in *ownerp. 1375 */ 1376int 1377sx_chain(struct thread *td, struct thread **ownerp) 1378{ 1379 struct sx *sx; 1380 1381 /* 1382 * Check to see if this thread is blocked on an sx lock. 1383 * First, we check the lock class. If that is ok, then we 1384 * compare the lock name against the wait message. 1385 */ 1386 sx = td->td_wchan; 1387 if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx || 1388 sx->lock_object.lo_name != td->td_wmesg) 1389 return (0); 1390 1391 /* We think we have an sx lock, so output some details. */ 1392 db_printf("blocked on sx \"%s\" ", td->td_wmesg); 1393 *ownerp = sx_xholder(sx); 1394 if (sx->sx_lock & SX_LOCK_SHARED) 1395 db_printf("SLOCK (count %ju)\n", 1396 (uintmax_t)SX_SHARERS(sx->sx_lock)); 1397 else 1398 db_printf("XLOCK\n"); 1399 return (1); 1400} 1401#endif