/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 255745 2013-09-20 23:06:21Z davide $");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1

#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif

#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
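/*
 * The GIANT_*() macros below let a path that may hold the Giant mutex
 * (possibly recursed) sleep safely: GIANT_SAVE() fully drops Giant while
 * recording the recursion depth, and GIANT_RESTORE() re-locks it the same
 * number of times.  A minimal usage sketch (illustrative only; it mirrors
 * the pattern used by sleeplk() below):
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();
 *	sleepq_wait(&lk->lock_object, pri);
 *	GIANT_RESTORE();
 */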

#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)

#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
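/*
 * How the lk_lock word encodes the lock state (a summary inferred from the
 * macros above and from sys/lockmgr.h; illustrative only): the low bits
 * hold the flags (LK_SHARE, the waiters bits and LK_EXCLUSIVE_SPINNERS),
 * while the remaining bits hold either the owning thread pointer (exclusive
 * mode) or the count of sharers (shared mode).  For example:
 *
 *	x = lk->lk_lock;
 *	if (x & LK_SHARE)
 *		nsharers = LK_SHARERS(x);		- share count -
 *	else
 *		owner = (struct thread *)LK_HOLDER(x);	- exclusive owner -
 */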

static void	assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(const struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(const struct lock_object *lock,
		    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};

#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
    "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This function assumes the sleepqueue chain lock is held on entry and
 * returns with it unheld.  It also assumes the generic interlock is sane
 * and has previously been checked.  If LK_INTERLOCK is specified, the
 * interlock is not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
		lk->lk_exslpfail++;
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decide which sleep primitive to use.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}

static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	u_int realexslp;
	int queue, wakeup_swapper;

	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners up.
		 * If interruptible sleeps left the exclusive queue empty,
		 * avoid starvation of the threads sleeping on the shared
		 * queue by giving them precedence and cleaning up the
		 * exclusive waiters bit anyway.
		 * Please note that the lk_exslpfail count may overstate the
		 * real number of waiters with the LK_SLEEPFAIL flag on
		 * because they may be used in conjunction with interruptible
		 * sleeps, so lk_exslpfail might be considered an upper limit
		 * bound, including the edge cases.
		 */
		realexslp = sleepq_sleepcnt(&lk->lock_object,
		    SQ_EXCLUSIVE_QUEUE);
		if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
			if (lk->lk_exslpfail < realexslp) {
				lk->lk_exslpfail = 0;
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				lk->lk_exslpfail = 0;
				LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
				    __func__, lk);
				LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
				    __func__, lk);
				wakeup_swapper =
				    sleepq_broadcast(&lk->lock_object,
				    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
				queue = SQ_SHARED_QUEUE;
			}

		} else {

			/*
			 * Exclusive waiters sleeping with LK_SLEEPFAIL on
			 * and using interruptible sleeps/timeout may have
			 * left spurious lk_exslpfail counts on, so clean
			 * it up anyway.
			 */
			lk->lk_exslpfail = 0;
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	return (wakeup_swapper);
}

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, uintptr_t how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	if (flags & LK_IS_VNODE)
		iflags |= LO_IS_VNODE;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_exslpfail = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	STACK_ZERO(lk);
}

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

	lockmgr_assert(lk, KA_XLOCKED);
	lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
	lock_destroy(&lk->lock_object);
}

int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op, realexslp;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
	    lk->lock_object.lo_name, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (lk->lock_object.lo_flags & LK_NOSHARE) {
		switch (op) {
		case LK_SHARED:
			op = LK_EXCLUSIVE;
			break;
		case LK_UPGRADE:
		case LK_DOWNGRADE:
			_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
			    file, line);
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			return (0);
		}
	}

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the lock is not expected to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

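			/*
			 * What follows is the adaptive spinning path (a
			 * summary of the logic below, illustrative only):
			 * when ADAPTIVE_LOCKMGRS is compiled in and the lock
			 * allows it (LK_CAN_ADAPT()), instead of sleeping
			 * right away we briefly busy-wait, on the theory
			 * that the owner is running on another CPU and will
			 * release the lock soon.  The shape of the loop is:
			 *
			 *	while (lock still held by a running owner)
			 *		cpu_spinwait();
			 *
			 * bounded by the debug.lockmgr.retries and
			 * debug.lockmgr.loops sysctls for the shared-owner
			 * case.  If spinning does not succeed we fall
			 * through to the sleepqueue path.
			 */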
#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.  We need to handle both possible states
			 * here because, after a failed acquisition, the lock
			 * can be held either in exclusive mode or in shared
			 * mode (for the writer starvation avoidance
			 * technique).
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are holding also an interlock drop it
				 * in order to avoid a deadlock if the lockmgr
				 * owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the turnstile
			 * chain lock.  If so, drop the turnstile lock and try
			 * again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If the lock operation is a try one, give
				 * up and return instead of panicking.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
		panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is not expected to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are holding also an interlock drop it
				 * in order to avoid a deadlock if the lockmgr
				 * owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
				GIANT_RESTORE();
				continue;
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				GIANT_RESTORE();
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the turnstile
			 * chain lock.  If so, drop the turnstile lock and try
			 * again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
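	/*
	 * LK_DOWNGRADE below converts an exclusive hold into a single shared
	 * hold with one atomic swap of the lock word, preserving any waiters
	 * bits.  A usage sketch (illustrative only):
	 *
	 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
	 *	... modify the protected data ...
	 *	lockmgr(&lk, LK_DOWNGRADE, NULL);
	 *	... keep reading it while new sharers may enter ...
	 *	lockmgr(&lk, LK_RELEASE, NULL);
	 */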
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

		/*
		 * Panic if the lock is recursed.
		 */
		if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it had no
			 * waiters.
			 * Fix-up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners up.
			 * If interruptible sleeps left the exclusive queue
			 * empty, avoid starvation of the threads sleeping
			 * on the shared queue by giving them precedence
			 * and cleaning up the exclusive waiters bit anyway.
			 * Please note that the lk_exslpfail count may
			 * overstate the real number of waiters with the
			 * LK_SLEEPFAIL flag on because they may be used in
			 * conjunction with interruptible sleeps, so
			 * lk_exslpfail might be considered an upper limit
			 * bound, including the edge cases.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			realexslp = sleepq_sleepcnt(&lk->lock_object,
			    SQ_EXCLUSIVE_QUEUE);
			if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
				if (lk->lk_exslpfail < realexslp) {
					lk->lk_exslpfail = 0;
					queue = SQ_EXCLUSIVE_QUEUE;
					v |= (x & LK_SHARED_WAITERS);
				} else {
					lk->lk_exslpfail = 0;
					LOCK_LOG2(lk,
				    "%s: %p has only LK_SLEEPFAIL sleepers",
					    __func__, lk);
					LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
					    __func__, lk);
					wakeup_swapper =
					    sleepq_broadcast(&lk->lock_object,
					    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
					queue = SQ_SHARED_QUEUE;
				}
			} else {

				/*
				 * Exclusive waiters sleeping with LK_SLEEPFAIL
				 * on and using interruptible sleeps/timeout
				 * may have left spurious lk_exslpfail counts
				 * on, so clean it up anyway.
				 */
				lk->lk_exslpfail = 0;
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
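	/*
	 * LK_DRAIN below acquires the lock exclusively while also waiting
	 * for every thread queued on the lock to go away, which is what a
	 * caller wants just before freeing the object the lock lives in
	 * (vnode reclamation is the classic consumer).  A usage sketch
	 * (illustrative only):
	 *
	 *	lockmgr(&lk, LK_DRAIN, NULL);
	 *	lockmgr(&lk, LK_RELEASE, NULL);
	 *	lockdestroy(&lk);
	 */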
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
			    ilk : NULL);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
#ifdef HWPMC_HOOKS
			PMC_SOFT_CALL( , , lock, failed);
#endif
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is not expected to sleep, just give up
			 * and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);

				/*
				 * If interruptible sleeps left the exclusive
				 * queue empty, avoid starvation of the
				 * threads sleeping on the shared queue by
				 * giving them precedence and cleaning up the
				 * exclusive waiters bit anyway.
				 * Please note that the lk_exslpfail count may
				 * overstate the real number of waiters with
				 * the LK_SLEEPFAIL flag on because they may
				 * be used in conjunction with interruptible
				 * sleeps, so lk_exslpfail might be considered
				 * an upper limit bound, including the edge
				 * cases.
				 */
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {

					/*
					 * Exclusive waiters sleeping with
					 * LK_SLEEPFAIL on and using
					 * interruptible sleeps/timeout may
					 * have left spurious lk_exslpfail
					 * counts on, so clean it up anyway.
					 */
					MPASS(v & LK_SHARED_WAITERS);
					lk->lk_exslpfail = 0;
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (queue == SQ_EXCLUSIVE_QUEUE) {
					realexslp =
					    sleepq_sleepcnt(&lk->lock_object,
					    SQ_EXCLUSIVE_QUEUE);
					if (lk->lk_exslpfail >= realexslp) {
						lk->lk_exslpfail = 0;
						queue = SQ_SHARED_QUEUE;
						v &= ~LK_SHARED_WAITERS;
						if (realexslp != 0) {
							LOCK_LOG2(lk,
					"%s: %p has only LK_SLEEPFAIL sleepers",
							    __func__, lk);
							LOCK_LOG2(lk,
			    "%s: %p waking up threads on the exclusive queue",
							    __func__, lk);
							wakeup_swapper =
							    sleepq_broadcast(
							    &lk->lock_object,
							    SLEEPQ_LK, 0,
							    SQ_EXCLUSIVE_QUEUE);
						}
					} else
						lk->lk_exslpfail = 0;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up we need
				 * to wait for one of them to acquire the lock
				 * before setting the exclusive waiters flag,
				 * in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}

void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	if (SCHEDULER_STOPPED())
		return;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED, file, line);

	/*
	 * Panic if the lock is recursed.
	 */
	if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
		panic("%s: disown a recursed lockmgr @ %s:%d\n",
		    __func__, file, line);

	/*
	 * If the owner is already LK_KERNPROC just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);
	STACK_SAVE(lk);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
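/*
 * A note on disowning (illustrative only): _lockmgr_disown() replaces the
 * owner thread with the LK_KERNPROC token so that an exclusive hold can
 * outlive the acquiring thread, e.g. when a buffer lock is handed off to
 * the kernel for asynchronous completion.  Through the lockmgr_disown()
 * wrapper from sys/lockmgr.h:
 *
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&lk);
 *	... any thread may later do: lockmgr(&lk, LK_RELEASE, NULL); ...
 */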

void
lockmgr_printinfo(const struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p "
		    "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}

int
lockstatus(const struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}
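/*
 * A minimal lockstatus() consumer, sketched for illustration (the pattern
 * mirrors the checks done by callers such as VFS assertion code):
 *
 *	switch (lockstatus(&lk)) {
 *	case LK_EXCLUSIVE:	- held exclusively by curthread or disowned -
 *	case LK_EXCLOTHER:	- held exclusively by another thread -
 *	case LK_SHARED:		- held in shared mode -
 *	case 0:			- unlocked -
 *	}
 */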

#ifdef INVARIANT_SUPPORT

FEATURE(invariant_support,
    "Support for modules compiled with INVARIANTS option");

#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is
		 * held in exclusive mode, even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif

#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}

static void
db_show_lockmgr(const struct lock_object *lock)
{
	struct thread *td;
	const struct lock *lk;

	lk = (const struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
#endif
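/*
 * The DDB helpers above are reached through the debugger's lock-aware
 * commands; a session sketch (illustrative only, the address and exact
 * output layout are made up):
 *
 *	db> show lock 0xfffff80012345678
 *	 state: XLOCK: LK_KERNPROC
 *	 waiters: none
 *	 spinners: none
 */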