kern_lock.c — FreeBSD sys/kern/kern_lock.c at SVN revision 189788 (2009-03-14). NOTE: this is an annotated (per-line blame) dump; each source line below is prefixed with the revision number and committer that last modified it.
1139804Simp/*- 2177957Sattilio * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org> 3177957Sattilio * All rights reserved. 424269Speter * 524269Speter * Redistribution and use in source and binary forms, with or without 624269Speter * modification, are permitted provided that the following conditions 724269Speter * are met: 824269Speter * 1. Redistributions of source code must retain the above copyright 9177957Sattilio * notice(s), this list of conditions and the following disclaimer as 10177957Sattilio * the first lines of this file unmodified other than the possible 11177957Sattilio * addition of one or more copyright notices. 1224269Speter * 2. Redistributions in binary form must reproduce the above copyright 13177957Sattilio * notice(s), this list of conditions and the following disclaimer in the 1424269Speter * documentation and/or other materials provided with the distribution. 1524269Speter * 16177957Sattilio * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY 17177957Sattilio * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18177957Sattilio * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19177957Sattilio * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY 20177957Sattilio * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21177957Sattilio * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22177957Sattilio * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23177957Sattilio * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2424269Speter * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25177957Sattilio * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 26177957Sattilio * DAMAGE. 
2724269Speter */ 2824269Speter 29177957Sattilio#include "opt_ddb.h" 30177957Sattilio 31116182Sobrien#include <sys/cdefs.h> 32116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_lock.c 189788 2009-03-14 11:43:02Z jeff $"); 33116182Sobrien 3424269Speter#include <sys/param.h> 3584812Sjhb#include <sys/ktr.h> 3624269Speter#include <sys/lock.h> 37177957Sattilio#include <sys/lock_profile.h> 38102477Sbde#include <sys/lockmgr.h> 3967353Sjhb#include <sys/mutex.h> 40102477Sbde#include <sys/proc.h> 41177957Sattilio#include <sys/sleepqueue.h> 42148668Sjeff#ifdef DEBUG_LOCKS 43148668Sjeff#include <sys/stack.h> 44148668Sjeff#endif 45177957Sattilio#include <sys/systm.h> 4624269Speter 47177957Sattilio#include <machine/cpu.h> 48176014Sattilio 49161322Sjhb#ifdef DDB 50161322Sjhb#include <ddb/ddb.h> 51161322Sjhb#endif 52161322Sjhb 53177957SattilioCTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) == 54177957Sattilio (LK_CANRECURSE | LK_NOSHARE)); 55177957Sattilio 56177957Sattilio#define SQ_EXCLUSIVE_QUEUE 0 57177957Sattilio#define SQ_SHARED_QUEUE 1 58177957Sattilio 59177957Sattilio#ifndef INVARIANTS 60177957Sattilio#define _lockmgr_assert(lk, what, file, line) 61177957Sattilio#define TD_LOCKS_INC(td) 62177957Sattilio#define TD_LOCKS_DEC(td) 63177957Sattilio#else 64177957Sattilio#define TD_LOCKS_INC(td) ((td)->td_locks++) 65177957Sattilio#define TD_LOCKS_DEC(td) ((td)->td_locks--) 66177957Sattilio#endif 67177957Sattilio#define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++) 68177957Sattilio#define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--) 69177957Sattilio 70177957Sattilio#ifndef DEBUG_LOCKS 71177957Sattilio#define STACK_PRINT(lk) 72177957Sattilio#define STACK_SAVE(lk) 73177957Sattilio#define STACK_ZERO(lk) 74177957Sattilio#else 75177957Sattilio#define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack) 76177957Sattilio#define STACK_SAVE(lk) stack_save(&(lk)->lk_stack) 77177957Sattilio#define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack) 78177957Sattilio#endif 79177957Sattilio 
80177957Sattilio#define LOCK_LOG2(lk, string, arg1, arg2) \ 81177957Sattilio if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \ 82177957Sattilio CTR2(KTR_LOCK, (string), (arg1), (arg2)) 83177957Sattilio#define LOCK_LOG3(lk, string, arg1, arg2, arg3) \ 84177957Sattilio if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \ 85177957Sattilio CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3)) 86177957Sattilio 87178159Sattilio#define GIANT_DECLARE \ 88178159Sattilio int _i = 0; \ 89178159Sattilio WITNESS_SAVE_DECL(Giant) 90178159Sattilio#define GIANT_RESTORE() do { \ 91178159Sattilio if (_i > 0) { \ 92178159Sattilio while (_i--) \ 93178159Sattilio mtx_lock(&Giant); \ 94178159Sattilio WITNESS_RESTORE(&Giant.lock_object, Giant); \ 95178159Sattilio } \ 96178159Sattilio} while (0) 97178159Sattilio#define GIANT_SAVE() do { \ 98178159Sattilio if (mtx_owned(&Giant)) { \ 99178159Sattilio WITNESS_SAVE(&Giant.lock_object, Giant); \ 100178159Sattilio while (mtx_owned(&Giant)) { \ 101178159Sattilio _i++; \ 102178159Sattilio mtx_unlock(&Giant); \ 103178159Sattilio } \ 104178159Sattilio } \ 105178159Sattilio} while (0) 106178159Sattilio 107177957Sattilio#define LK_CAN_SHARE(x) \ 108177957Sattilio (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 || \ 109177982Sattilio curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT))) 110178159Sattilio#define LK_TRYOP(x) \ 111178159Sattilio ((x) & LK_NOWAIT) 112177957Sattilio 113178159Sattilio#define LK_CAN_WITNESS(x) \ 114178159Sattilio (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x)) 115178159Sattilio#define LK_TRYWIT(x) \ 116178159Sattilio (LK_TRYOP(x) ? 
LOP_TRYLOCK : 0) 117178159Sattilio 118177957Sattilio#define lockmgr_disowned(lk) \ 119177957Sattilio (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC) 120177957Sattilio 121177957Sattilio#define lockmgr_xlocked(lk) \ 122177957Sattilio (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread) 123177957Sattilio 124177957Sattiliostatic void assert_lockmgr(struct lock_object *lock, int how); 125177957Sattilio#ifdef DDB 126177957Sattiliostatic void db_show_lockmgr(struct lock_object *lock); 127177957Sattilio#endif 128177957Sattiliostatic void lock_lockmgr(struct lock_object *lock, int how); 129177957Sattiliostatic int unlock_lockmgr(struct lock_object *lock); 130177957Sattilio 131164246Skmacystruct lock_class lock_class_lockmgr = { 132167366Sjhb .lc_name = "lockmgr", 133177957Sattilio .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE, 134173733Sattilio .lc_assert = assert_lockmgr, 135164246Skmacy#ifdef DDB 136167368Sjhb .lc_ddb_show = db_show_lockmgr, 137164246Skmacy#endif 138167368Sjhb .lc_lock = lock_lockmgr, 139177957Sattilio .lc_unlock = unlock_lockmgr 140164246Skmacy}; 141164246Skmacy 142177957Sattiliostatic __inline struct thread * 143177957Sattiliolockmgr_xholder(struct lock *lk) 144177957Sattilio{ 145177957Sattilio uintptr_t x; 146176249Sattilio 147177957Sattilio x = lk->lk_lock; 148177957Sattilio return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x)); 149177957Sattilio} 150177957Sattilio 15124269Speter/* 152177957Sattilio * It assumes sleepq_lock held and returns with this one unheld. 153177957Sattilio * It also assumes the generic interlock is sane and previously checked. 154177957Sattilio * If LK_INTERLOCK is specified the interlock is not reacquired after the 155177957Sattilio * sleep. 
15624269Speter */ 157177957Sattiliostatic __inline int 158177957Sattiliosleeplk(struct lock *lk, u_int flags, struct lock_object *ilk, 159177957Sattilio const char *wmesg, int pri, int timo, int queue) 160177957Sattilio{ 161178159Sattilio GIANT_DECLARE; 162177957Sattilio struct lock_class *class; 163177957Sattilio int catch, error; 16424269Speter 165177957Sattilio class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL; 166179306Sattilio catch = pri & PCATCH; 167177957Sattilio pri &= PRIMASK; 168177957Sattilio error = 0; 169177957Sattilio 170177957Sattilio LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk, 171177957Sattilio (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared"); 172177957Sattilio 173177957Sattilio if (flags & LK_INTERLOCK) 174177957Sattilio class->lc_unlock(ilk); 175178159Sattilio GIANT_SAVE(); 176177957Sattilio sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ? 177177957Sattilio SLEEPQ_INTERRUPTIBLE : 0), queue); 178177957Sattilio if ((flags & LK_TIMELOCK) && timo) 179177957Sattilio sleepq_set_timeout(&lk->lock_object, timo); 180177957Sattilio 181177957Sattilio /* 182177957Sattilio * Decisional switch for real sleeping. 
183177957Sattilio */ 184177957Sattilio if ((flags & LK_TIMELOCK) && timo && catch) 185177957Sattilio error = sleepq_timedwait_sig(&lk->lock_object, pri); 186177957Sattilio else if ((flags & LK_TIMELOCK) && timo) 187177957Sattilio error = sleepq_timedwait(&lk->lock_object, pri); 188177957Sattilio else if (catch) 189177957Sattilio error = sleepq_wait_sig(&lk->lock_object, pri); 190177957Sattilio else 191177957Sattilio sleepq_wait(&lk->lock_object, pri); 192178159Sattilio GIANT_RESTORE(); 193177957Sattilio if ((flags & LK_SLEEPFAIL) && error == 0) 194177957Sattilio error = ENOLCK; 195177957Sattilio 196177957Sattilio return (error); 197177957Sattilio} 198177957Sattilio 199181334Sjhbstatic __inline int 200177957Sattiliowakeupshlk(struct lock *lk, const char *file, int line) 201177957Sattilio{ 202177957Sattilio uintptr_t v, x; 203181334Sjhb int queue, wakeup_swapper; 204177957Sattilio 205177957Sattilio TD_LOCKS_DEC(curthread); 206177957Sattilio TD_SLOCKS_DEC(curthread); 207178159Sattilio WITNESS_UNLOCK(&lk->lock_object, 0, file, line); 208177957Sattilio LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line); 209177957Sattilio 210181334Sjhb wakeup_swapper = 0; 211177957Sattilio for (;;) { 212177957Sattilio x = lk->lk_lock; 213177957Sattilio 214177957Sattilio /* 215177957Sattilio * If there is more than one shared lock held, just drop one 216177957Sattilio * and return. 217177957Sattilio */ 218177957Sattilio if (LK_SHARERS(x) > 1) { 219177957Sattilio if (atomic_cmpset_ptr(&lk->lk_lock, x, 220177957Sattilio x - LK_ONE_SHARER)) 221177957Sattilio break; 222177957Sattilio continue; 223177957Sattilio } 224177957Sattilio 225177957Sattilio /* 226177957Sattilio * If there are not waiters on the exclusive queue, drop the 227177957Sattilio * lock quickly. 
228177957Sattilio */ 229177957Sattilio if ((x & LK_ALL_WAITERS) == 0) { 230177957Sattilio MPASS(x == LK_SHARERS_LOCK(1)); 231177957Sattilio if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1), 232177957Sattilio LK_UNLOCKED)) 233177957Sattilio break; 234177957Sattilio continue; 235177957Sattilio } 236177957Sattilio 237177957Sattilio /* 238177957Sattilio * We should have a sharer with waiters, so enter the hard 239177957Sattilio * path in order to handle wakeups correctly. 240177957Sattilio */ 241177957Sattilio sleepq_lock(&lk->lock_object); 242177957Sattilio x = lk->lk_lock & LK_ALL_WAITERS; 243177957Sattilio v = LK_UNLOCKED; 244177957Sattilio 245177957Sattilio /* 246177957Sattilio * If the lock has exclusive waiters, give them preference in 247177957Sattilio * order to avoid deadlock with shared runners up. 248177957Sattilio */ 249177957Sattilio if (x & LK_EXCLUSIVE_WAITERS) { 250177957Sattilio queue = SQ_EXCLUSIVE_QUEUE; 251177957Sattilio v |= (x & LK_SHARED_WAITERS); 252177957Sattilio } else { 253177957Sattilio MPASS(x == LK_SHARED_WAITERS); 254177957Sattilio queue = SQ_SHARED_QUEUE; 255177957Sattilio } 256177957Sattilio 257177957Sattilio if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x, 258177957Sattilio v)) { 259177957Sattilio sleepq_release(&lk->lock_object); 260177957Sattilio continue; 261177957Sattilio } 262177957Sattilio LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue", 263177957Sattilio __func__, lk, queue == SQ_SHARED_QUEUE ? 
"shared" : 264177957Sattilio "exclusive"); 265181334Sjhb wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 266181334Sjhb 0, queue); 267177957Sattilio sleepq_release(&lk->lock_object); 268177957Sattilio break; 269177957Sattilio } 270177957Sattilio 271177957Sattilio lock_profile_release_lock(&lk->lock_object); 272181334Sjhb return (wakeup_swapper); 273177957Sattilio} 274177957Sattilio 275177957Sattiliostatic void 276173733Sattilioassert_lockmgr(struct lock_object *lock, int what) 277173733Sattilio{ 278173733Sattilio 279173733Sattilio panic("lockmgr locks do not support assertions"); 280173733Sattilio} 281173733Sattilio 282177957Sattiliostatic void 283167368Sjhblock_lockmgr(struct lock_object *lock, int how) 284167368Sjhb{ 285167368Sjhb 286167368Sjhb panic("lockmgr locks do not support sleep interlocking"); 287167368Sjhb} 288167368Sjhb 289177957Sattiliostatic int 290167368Sjhbunlock_lockmgr(struct lock_object *lock) 291167368Sjhb{ 292167368Sjhb 293167368Sjhb panic("lockmgr locks do not support sleep interlocking"); 294167368Sjhb} 295167368Sjhb 296177957Sattiliovoid 297177957Sattiliolockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags) 298177957Sattilio{ 299177957Sattilio int iflags; 30029653Sdyson 301177957Sattilio MPASS((flags & ~LK_INIT_MASK) == 0); 30224269Speter 303177957Sattilio iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE; 304177957Sattilio if ((flags & LK_NODUP) == 0) 305177957Sattilio iflags |= LO_DUPOK; 306177957Sattilio if (flags & LK_NOPROFILE) 307177957Sattilio iflags |= LO_NOPROFILE; 308177957Sattilio if ((flags & LK_NOWITNESS) == 0) 309177957Sattilio iflags |= LO_WITNESS; 310177957Sattilio if (flags & LK_QUIET) 311177957Sattilio iflags |= LO_QUIET; 312177957Sattilio iflags |= flags & (LK_CANRECURSE | LK_NOSHARE); 313177957Sattilio 314177957Sattilio lk->lk_lock = LK_UNLOCKED; 315177957Sattilio lk->lk_recurse = 0; 316177957Sattilio lk->lk_timo = timo; 317177957Sattilio lk->lk_pri = pri; 318177957Sattilio 
lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags); 319177957Sattilio STACK_ZERO(lk); 32028345Sdyson} 32124269Speter 322177957Sattiliovoid 323177957Sattiliolockdestroy(struct lock *lk) 324177957Sattilio{ 32542453Seivind 326177957Sattilio KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held")); 327177957Sattilio KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed")); 328177957Sattilio lock_destroy(&lk->lock_object); 32928345Sdyson} 33028345Sdyson 331177957Sattilioint 332177957Sattilio__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk, 333177957Sattilio const char *wmesg, int pri, int timo, const char *file, int line) 334140711Sjeff{ 335178159Sattilio GIANT_DECLARE; 336177957Sattilio uint64_t waittime; 337177957Sattilio struct lock_class *class; 338176320Sattilio const char *iwmesg; 339177957Sattilio uintptr_t tid, v, x; 340177957Sattilio u_int op; 341181334Sjhb int contested, error, ipri, itimo, queue, wakeup_swapper; 342176320Sattilio 343177957Sattilio contested = 0; 344177957Sattilio error = 0; 345177957Sattilio waittime = 0; 346177957Sattilio tid = (uintptr_t)curthread; 347177957Sattilio op = (flags & LK_TYPE_MASK); 348177957Sattilio iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg; 349177957Sattilio ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri; 350177957Sattilio itimo = (timo == LK_TIMO_DEFAULT) ? 
lk->lk_timo : timo; 351176320Sattilio 352177957Sattilio MPASS((flags & ~LK_TOTAL_MASK) == 0); 353178150Sattilio KASSERT((op & (op - 1)) == 0, 354178150Sattilio ("%s: Invalid requested operation @ %s:%d", __func__, file, line)); 355177957Sattilio KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 || 356177957Sattilio (op != LK_DOWNGRADE && op != LK_RELEASE), 357177957Sattilio ("%s: Invalid flags in regard of the operation desired @ %s:%d", 358177957Sattilio __func__, file, line)); 359177957Sattilio KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL, 360177957Sattilio ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d", 361177957Sattilio __func__, file, line)); 36266615Sjasone 363177957Sattilio class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL; 364177957Sattilio if (panicstr != NULL) { 365177957Sattilio if (flags & LK_INTERLOCK) 366177957Sattilio class->lc_unlock(ilk); 367177957Sattilio return (0); 36828345Sdyson } 36928345Sdyson 370177957Sattilio if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE)) 371177957Sattilio op = LK_EXCLUSIVE; 372164159Skmacy 373181334Sjhb wakeup_swapper = 0; 374177957Sattilio switch (op) { 375177957Sattilio case LK_SHARED: 376178159Sattilio if (LK_CAN_WITNESS(flags)) 377178159Sattilio WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER, 378182914Sjhb file, line, ilk); 379177957Sattilio for (;;) { 380177957Sattilio x = lk->lk_lock; 381174948Sattilio 382177957Sattilio /* 383177957Sattilio * If no other thread has an exclusive lock, or 384177957Sattilio * no exclusive waiter is present, bump the count of 385177957Sattilio * sharers. Since we have to preserve the state of 386177957Sattilio * waiters, if we fail to acquire the shared lock 387177957Sattilio * loop back and retry. 
388177957Sattilio */ 389177957Sattilio if (LK_CAN_SHARE(x)) { 390177957Sattilio if (atomic_cmpset_acq_ptr(&lk->lk_lock, x, 391177957Sattilio x + LK_ONE_SHARER)) 392177957Sattilio break; 393177957Sattilio continue; 394177957Sattilio } 395177957Sattilio lock_profile_obtain_lock_failed(&lk->lock_object, 396177957Sattilio &contested, &waittime); 39728345Sdyson 398177957Sattilio /* 399180798Skib * If the lock is already held by curthread in 400177957Sattilio * exclusive way avoid a deadlock. 401177957Sattilio */ 402177957Sattilio if (LK_HOLDER(x) == tid) { 403177957Sattilio LOCK_LOG2(lk, 404180798Skib "%s: %p already held in exclusive mode", 405177957Sattilio __func__, lk); 406177957Sattilio error = EDEADLK; 407177957Sattilio break; 408177957Sattilio } 409140711Sjeff 410177957Sattilio /* 411177957Sattilio * If the lock is expected to not sleep just give up 412177957Sattilio * and return. 413177957Sattilio */ 414177957Sattilio if (LK_TRYOP(flags)) { 415177957Sattilio LOCK_LOG2(lk, "%s: %p fails the try operation", 416177957Sattilio __func__, lk); 417177957Sattilio error = EBUSY; 418177957Sattilio break; 419177957Sattilio } 42028345Sdyson 421177957Sattilio /* 422177957Sattilio * Acquire the sleepqueue chain lock because we 423177957Sattilio * probabilly will need to manipulate waiters flags. 424177957Sattilio */ 425177957Sattilio sleepq_lock(&lk->lock_object); 426177957Sattilio x = lk->lk_lock; 427111463Sjeff 428177957Sattilio /* 429177957Sattilio * if the lock can be acquired in shared mode, try 430177957Sattilio * again. 431177957Sattilio */ 432177957Sattilio if (LK_CAN_SHARE(x)) { 433177957Sattilio sleepq_release(&lk->lock_object); 434177957Sattilio continue; 435177957Sattilio } 43624269Speter 437177957Sattilio /* 438177957Sattilio * Try to set the LK_SHARED_WAITERS flag. If we fail, 439177957Sattilio * loop back and retry. 
440177957Sattilio */ 441177957Sattilio if ((x & LK_SHARED_WAITERS) == 0) { 442177957Sattilio if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x, 443177957Sattilio x | LK_SHARED_WAITERS)) { 444177957Sattilio sleepq_release(&lk->lock_object); 445177957Sattilio continue; 446177957Sattilio } 447177957Sattilio LOCK_LOG2(lk, "%s: %p set shared waiters flag", 448177957Sattilio __func__, lk); 449177957Sattilio } 45024269Speter 451177957Sattilio /* 452177957Sattilio * As far as we have been unable to acquire the 453177957Sattilio * shared lock and the shared waiters flag is set, 454177957Sattilio * we will sleep. 455177957Sattilio */ 456177957Sattilio error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, 457177957Sattilio SQ_SHARED_QUEUE); 458177957Sattilio flags &= ~LK_INTERLOCK; 459177957Sattilio if (error) { 460177957Sattilio LOCK_LOG3(lk, 461177957Sattilio "%s: interrupted sleep for %p with %d", 462177957Sattilio __func__, lk, error); 463177957Sattilio break; 464177957Sattilio } 465177957Sattilio LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 466177957Sattilio __func__, lk); 467177957Sattilio } 468177957Sattilio if (error == 0) { 469177957Sattilio lock_profile_obtain_lock_success(&lk->lock_object, 470177957Sattilio contested, waittime, file, line); 471177957Sattilio LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, 472176014Sattilio line); 473178159Sattilio WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, 474178159Sattilio line); 475177957Sattilio TD_LOCKS_INC(curthread); 476177957Sattilio TD_SLOCKS_INC(curthread); 477177957Sattilio STACK_SAVE(lk); 478177957Sattilio } 479177957Sattilio break; 480177957Sattilio case LK_UPGRADE: 481177957Sattilio _lockmgr_assert(lk, KA_SLOCKED, file, line); 482177957Sattilio x = lk->lk_lock & LK_ALL_WAITERS; 483177957Sattilio 48444681Sjulian /* 485177957Sattilio * Try to switch from one shared lock to an exclusive one. 486177957Sattilio * We need to preserve waiters flags during the operation. 
48744681Sjulian */ 488177957Sattilio if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x, 489177957Sattilio tid | x)) { 490177957Sattilio LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file, 491177957Sattilio line); 492178159Sattilio WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE | 493178159Sattilio LK_TRYWIT(flags), file, line); 494177957Sattilio TD_SLOCKS_DEC(curthread); 49524269Speter break; 49624269Speter } 497177957Sattilio 49824269Speter /* 499177957Sattilio * We have been unable to succeed in upgrading, so just 500177957Sattilio * give up the shared lock. 50124269Speter */ 502182010Sjhb wakeup_swapper |= wakeupshlk(lk, file, line); 50324269Speter 504177957Sattilio /* FALLTHROUGH */ 505177957Sattilio case LK_EXCLUSIVE: 506178159Sattilio if (LK_CAN_WITNESS(flags)) 507178159Sattilio WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER | 508182914Sjhb LOP_EXCLUSIVE, file, line, ilk); 50924269Speter 51024269Speter /* 511180798Skib * If curthread already holds the lock and this one is 512177957Sattilio * allowed to recurse, simply recurse on it. 51324269Speter */ 514177957Sattilio if (lockmgr_xlocked(lk)) { 515177957Sattilio if ((flags & LK_CANRECURSE) == 0 && 516177957Sattilio (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) { 517177957Sattilio 518177957Sattilio /* 519177957Sattilio * If the lock is expected to not panic just 520177957Sattilio * give up and return. 
521177957Sattilio */ 522177957Sattilio if (LK_TRYOP(flags)) { 523177957Sattilio LOCK_LOG2(lk, 524177957Sattilio "%s: %p fails the try operation", 525177957Sattilio __func__, lk); 526177957Sattilio error = EBUSY; 527177957Sattilio break; 528177957Sattilio } 529177957Sattilio if (flags & LK_INTERLOCK) 530177957Sattilio class->lc_unlock(ilk); 531177957Sattilio panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n", 532177957Sattilio __func__, iwmesg, file, line); 533177957Sattilio } 534177957Sattilio lk->lk_recurse++; 535177957Sattilio LOCK_LOG2(lk, "%s: %p recursing", __func__, lk); 536177957Sattilio LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, 537177957Sattilio lk->lk_recurse, file, line); 538178159Sattilio WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 539178159Sattilio LK_TRYWIT(flags), file, line); 540177957Sattilio TD_LOCKS_INC(curthread); 54124269Speter break; 54224269Speter } 543177957Sattilio 544177957Sattilio while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, 545177957Sattilio tid)) { 546177957Sattilio lock_profile_obtain_lock_failed(&lk->lock_object, 547177957Sattilio &contested, &waittime); 548177957Sattilio 54924269Speter /* 550177957Sattilio * If the lock is expected to not sleep just give up 551177957Sattilio * and return. 55224269Speter */ 553177957Sattilio if (LK_TRYOP(flags)) { 554177957Sattilio LOCK_LOG2(lk, "%s: %p fails the try operation", 555177957Sattilio __func__, lk); 556177957Sattilio error = EBUSY; 557177957Sattilio break; 558177957Sattilio } 55934194Sdyson 560177957Sattilio /* 561177957Sattilio * Acquire the sleepqueue chain lock because we 562177957Sattilio * probabilly will need to manipulate waiters flags. 563177957Sattilio */ 564177957Sattilio sleepq_lock(&lk->lock_object); 565177957Sattilio x = lk->lk_lock; 566177957Sattilio v = x & LK_ALL_WAITERS; 567177957Sattilio 568177957Sattilio /* 569177957Sattilio * if the lock has been released while we spun on 570177957Sattilio * the sleepqueue chain lock just try again. 
571177957Sattilio */ 572177957Sattilio if (x == LK_UNLOCKED) { 573177957Sattilio sleepq_release(&lk->lock_object); 574177957Sattilio continue; 575134365Skan } 57624269Speter 57724269Speter /* 578177957Sattilio * The lock can be in the state where there is a 579177957Sattilio * pending queue of waiters, but still no owner. 580177957Sattilio * This happens when the lock is contested and an 581177957Sattilio * owner is going to claim the lock. 582177957Sattilio * If curthread is the one successfully acquiring it 583177957Sattilio * claim lock ownership and return, preserving waiters 584177957Sattilio * flags. 58524269Speter */ 586177957Sattilio if (x == (LK_UNLOCKED | v)) { 587177957Sattilio if (atomic_cmpset_acq_ptr(&lk->lk_lock, x, 588177957Sattilio tid | v)) { 589177957Sattilio sleepq_release(&lk->lock_object); 590177957Sattilio LOCK_LOG2(lk, 591177957Sattilio "%s: %p claimed by a new writer", 592177957Sattilio __func__, lk); 593177957Sattilio break; 594177957Sattilio } 595177957Sattilio sleepq_release(&lk->lock_object); 596177957Sattilio continue; 597177957Sattilio } 598177957Sattilio 599177957Sattilio /* 600177957Sattilio * Try to set the LK_EXCLUSIVE_WAITERS flag. If we 601177957Sattilio * fail, loop back and retry. 602177957Sattilio */ 603177957Sattilio if ((x & LK_EXCLUSIVE_WAITERS) == 0) { 604177957Sattilio if (!atomic_cmpset_ptr(&lk->lk_lock, x, 605177957Sattilio x | LK_EXCLUSIVE_WAITERS)) { 606177957Sattilio sleepq_release(&lk->lock_object); 607177957Sattilio continue; 608177957Sattilio } 609177957Sattilio LOCK_LOG2(lk, "%s: %p set excl waiters flag", 610177957Sattilio __func__, lk); 611177957Sattilio } 612177957Sattilio 613177957Sattilio /* 614177957Sattilio * As far as we have been unable to acquire the 615177957Sattilio * exclusive lock and the exclusive waiters flag 616177957Sattilio * is set, we will sleep. 
617177957Sattilio */ 618177957Sattilio error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, 619177957Sattilio SQ_EXCLUSIVE_QUEUE); 620177957Sattilio flags &= ~LK_INTERLOCK; 621177957Sattilio if (error) { 622177957Sattilio LOCK_LOG3(lk, 623177957Sattilio "%s: interrupted sleep for %p with %d", 624177957Sattilio __func__, lk, error); 62548301Smckusick break; 62648301Smckusick } 627177957Sattilio LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 628177957Sattilio __func__, lk); 62924269Speter } 630177957Sattilio if (error == 0) { 631177957Sattilio lock_profile_obtain_lock_success(&lk->lock_object, 632177957Sattilio contested, waittime, file, line); 633177957Sattilio LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, 634177957Sattilio lk->lk_recurse, file, line); 635178159Sattilio WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 636178159Sattilio LK_TRYWIT(flags), file, line); 637177957Sattilio TD_LOCKS_INC(curthread); 638177957Sattilio STACK_SAVE(lk); 639177957Sattilio } 640177957Sattilio break; 641177957Sattilio case LK_DOWNGRADE: 642177957Sattilio _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line); 643178159Sattilio LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line); 644178159Sattilio WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line); 645178159Sattilio TD_SLOCKS_INC(curthread); 646177957Sattilio 64724269Speter /* 648177957Sattilio * In order to preserve waiters flags, just spin. 
64924269Speter */ 650177957Sattilio for (;;) { 651177957Sattilio x = lk->lk_lock & LK_ALL_WAITERS; 652177957Sattilio if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x, 653178159Sattilio LK_SHARERS_LOCK(1) | x)) 654177957Sattilio break; 655177957Sattilio cpu_spinwait(); 65624269Speter } 65724269Speter break; 658177957Sattilio case LK_RELEASE: 659177957Sattilio _lockmgr_assert(lk, KA_LOCKED, file, line); 660177957Sattilio x = lk->lk_lock; 66124269Speter 662177957Sattilio if ((x & LK_SHARE) == 0) { 663177957Sattilio 664177957Sattilio /* 665177957Sattilio * As first option, treact the lock as if it has not 666177957Sattilio * any waiter. 667177957Sattilio * Fix-up the tid var if the lock has been disowned. 668177957Sattilio */ 669177957Sattilio if (LK_HOLDER(x) == LK_KERNPROC) 670177957Sattilio tid = LK_KERNPROC; 671178159Sattilio else { 672178159Sattilio WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, 673178159Sattilio file, line); 674177957Sattilio TD_LOCKS_DEC(curthread); 675178159Sattilio } 676177957Sattilio LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, 677177957Sattilio lk->lk_recurse, file, line); 678177957Sattilio 679177957Sattilio /* 680177957Sattilio * The lock is held in exclusive mode. 681177957Sattilio * If the lock is recursed also, then unrecurse it. 
682177957Sattilio */ 683177957Sattilio if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) { 684177957Sattilio LOCK_LOG2(lk, "%s: %p unrecursing", __func__, 685177957Sattilio lk); 686177957Sattilio lk->lk_recurse--; 687177957Sattilio break; 688176014Sattilio } 689189788Sjeff if (tid != LK_KERNPROC) 690189788Sjeff lock_profile_release_lock(&lk->lock_object); 691177957Sattilio 692177957Sattilio if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid, 693177957Sattilio LK_UNLOCKED)) 694177957Sattilio break; 695177957Sattilio 696177957Sattilio sleepq_lock(&lk->lock_object); 697177957Sattilio x = lk->lk_lock & LK_ALL_WAITERS; 698177957Sattilio v = LK_UNLOCKED; 699177957Sattilio 700177957Sattilio /* 701177957Sattilio * If the lock has exclusive waiters, give them 702177957Sattilio * preference in order to avoid deadlock with 703177957Sattilio * shared runners up. 704177957Sattilio */ 705177957Sattilio if (x & LK_EXCLUSIVE_WAITERS) { 706177957Sattilio queue = SQ_EXCLUSIVE_QUEUE; 707177957Sattilio v |= (x & LK_SHARED_WAITERS); 708177957Sattilio } else { 709177957Sattilio MPASS(x == LK_SHARED_WAITERS); 710177957Sattilio queue = SQ_SHARED_QUEUE; 71124269Speter } 712149723Sssouhlal 713177957Sattilio LOCK_LOG3(lk, 714177957Sattilio "%s: %p waking up threads on the %s queue", 715177957Sattilio __func__, lk, queue == SQ_SHARED_QUEUE ? 
"shared" : 716177957Sattilio "exclusive"); 717177957Sattilio atomic_store_rel_ptr(&lk->lk_lock, v); 718181334Sjhb wakeup_swapper = sleepq_broadcast(&lk->lock_object, 719181334Sjhb SLEEPQ_LK, 0, queue); 720177957Sattilio sleepq_release(&lk->lock_object); 721177957Sattilio break; 722177957Sattilio } else 723181334Sjhb wakeup_swapper = wakeupshlk(lk, file, line); 72424269Speter break; 725177957Sattilio case LK_DRAIN: 726178159Sattilio if (LK_CAN_WITNESS(flags)) 727178159Sattilio WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER | 728182914Sjhb LOP_EXCLUSIVE, file, line, ilk); 72924269Speter 73024269Speter /* 731180798Skib * Trying to drain a lock we already own will result in a 732177957Sattilio * deadlock. 73324269Speter */ 734177957Sattilio if (lockmgr_xlocked(lk)) { 735177957Sattilio if (flags & LK_INTERLOCK) 736177957Sattilio class->lc_unlock(ilk); 737177957Sattilio panic("%s: draining %s with the lock held @ %s:%d\n", 738177957Sattilio __func__, iwmesg, file, line); 739177957Sattilio } 74028345Sdyson 741177957Sattilio while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) { 742177957Sattilio lock_profile_obtain_lock_failed(&lk->lock_object, 743177957Sattilio &contested, &waittime); 74424269Speter 745177957Sattilio /* 746177957Sattilio * If the lock is expected to not sleep just give up 747177957Sattilio * and return. 748177957Sattilio */ 749177957Sattilio if (LK_TRYOP(flags)) { 750177957Sattilio LOCK_LOG2(lk, "%s: %p fails the try operation", 751177957Sattilio __func__, lk); 752177957Sattilio error = EBUSY; 753177957Sattilio break; 754177957Sattilio } 75524269Speter 756177957Sattilio /* 757177957Sattilio * Acquire the sleepqueue chain lock because we 758177957Sattilio * probabilly will need to manipulate waiters flags. 
759177957Sattilio */ 760177957Sattilio sleepq_lock(&lk->lock_object); 761177957Sattilio x = lk->lk_lock; 762177957Sattilio v = x & LK_ALL_WAITERS; 76329653Sdyson 764177957Sattilio /* 765177957Sattilio * if the lock has been released while we spun on 766177957Sattilio * the sleepqueue chain lock just try again. 767177957Sattilio */ 768177957Sattilio if (x == LK_UNLOCKED) { 769177957Sattilio sleepq_release(&lk->lock_object); 770177957Sattilio continue; 771177957Sattilio } 772176320Sattilio 773177957Sattilio if (x == (LK_UNLOCKED | v)) { 774177957Sattilio v = x; 775177957Sattilio if (v & LK_EXCLUSIVE_WAITERS) { 776177957Sattilio queue = SQ_EXCLUSIVE_QUEUE; 777177957Sattilio v &= ~LK_EXCLUSIVE_WAITERS; 778177957Sattilio } else { 779177957Sattilio MPASS(v & LK_SHARED_WAITERS); 780177957Sattilio queue = SQ_SHARED_QUEUE; 781177957Sattilio v &= ~LK_SHARED_WAITERS; 782177957Sattilio } 783177957Sattilio if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) { 784177957Sattilio sleepq_release(&lk->lock_object); 785177957Sattilio continue; 786177957Sattilio } 787177957Sattilio LOCK_LOG3(lk, 788177957Sattilio "%s: %p waking up all threads on the %s queue", 789177957Sattilio __func__, lk, queue == SQ_SHARED_QUEUE ? 790177957Sattilio "shared" : "exclusive"); 791182010Sjhb wakeup_swapper |= sleepq_broadcast( 792181334Sjhb &lk->lock_object, SLEEPQ_LK, 0, queue); 793177957Sattilio 794177957Sattilio /* 795177957Sattilio * If shared waiters have been woken up we need 796177957Sattilio * to wait for one of them to acquire the lock 797177957Sattilio * before to set the exclusive waiters in 798177957Sattilio * order to avoid a deadlock. 
799177957Sattilio */ 800177957Sattilio if (queue == SQ_SHARED_QUEUE) { 801177957Sattilio for (v = lk->lk_lock; 802177957Sattilio (v & LK_SHARE) && !LK_SHARERS(v); 803177957Sattilio v = lk->lk_lock) 804177957Sattilio cpu_spinwait(); 805177957Sattilio } 806177957Sattilio } 807177957Sattilio 808177957Sattilio /* 809177957Sattilio * Try to set the LK_EXCLUSIVE_WAITERS flag. If we 810177957Sattilio * fail, loop back and retry. 811177957Sattilio */ 812177957Sattilio if ((x & LK_EXCLUSIVE_WAITERS) == 0) { 813177957Sattilio if (!atomic_cmpset_ptr(&lk->lk_lock, x, 814177957Sattilio x | LK_EXCLUSIVE_WAITERS)) { 815177957Sattilio sleepq_release(&lk->lock_object); 816177957Sattilio continue; 817177957Sattilio } 818177957Sattilio LOCK_LOG2(lk, "%s: %p set drain waiters flag", 819177957Sattilio __func__, lk); 820177957Sattilio } 821177957Sattilio 822177957Sattilio /* 823177957Sattilio * As far as we have been unable to acquire the 824177957Sattilio * exclusive lock and the exclusive waiters flag 825177957Sattilio * is set, we will sleep. 
826177957Sattilio */ 827177957Sattilio if (flags & LK_INTERLOCK) { 828177957Sattilio class->lc_unlock(ilk); 829177957Sattilio flags &= ~LK_INTERLOCK; 830177957Sattilio } 831178159Sattilio GIANT_SAVE(); 832177957Sattilio sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK, 833177957Sattilio SQ_EXCLUSIVE_QUEUE); 834177957Sattilio sleepq_wait(&lk->lock_object, ipri & PRIMASK); 835178159Sattilio GIANT_RESTORE(); 836177957Sattilio LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", 837177957Sattilio __func__, lk); 83829653Sdyson } 839177957Sattilio 840177957Sattilio if (error == 0) { 841177957Sattilio lock_profile_obtain_lock_success(&lk->lock_object, 842177957Sattilio contested, waittime, file, line); 843177957Sattilio LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0, 844177957Sattilio lk->lk_recurse, file, line); 845178159Sattilio WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | 846178159Sattilio LK_TRYWIT(flags), file, line); 847177957Sattilio TD_LOCKS_INC(curthread); 848177957Sattilio STACK_SAVE(lk); 849177957Sattilio } 850177957Sattilio break; 851177957Sattilio default: 852177957Sattilio if (flags & LK_INTERLOCK) 853177957Sattilio class->lc_unlock(ilk); 854177957Sattilio panic("%s: unknown lockmgr request 0x%x\n", __func__, op); 85529653Sdyson } 856177957Sattilio 857177957Sattilio if (flags & LK_INTERLOCK) 858177957Sattilio class->lc_unlock(ilk); 859181334Sjhb if (wakeup_swapper) 860181334Sjhb kick_proc0(); 861177957Sattilio 862177957Sattilio return (error); 86329653Sdyson} 86429653Sdyson 86529653Sdysonvoid 866177957Sattilio_lockmgr_disown(struct lock *lk, const char *file, int line) 86729653Sdyson{ 868177957Sattilio uintptr_t tid, x; 869176014Sattilio 870177957Sattilio tid = (uintptr_t)curthread; 871177957Sattilio _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line); 87229653Sdyson 873177957Sattilio /* 874180798Skib * If the owner is already LK_KERNPROC just skip the whole operation. 
875177957Sattilio */ 876177957Sattilio if (LK_HOLDER(lk->lk_lock) != tid) 877177957Sattilio return; 878189788Sjeff lock_profile_release_lock(&lk->lock_object); 879178159Sattilio LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line); 880178159Sattilio WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line); 881178159Sattilio TD_LOCKS_DEC(curthread); 88229653Sdyson 883177957Sattilio /* 884177957Sattilio * In order to preserve waiters flags, just spin. 885177957Sattilio */ 886177957Sattilio for (;;) { 887177957Sattilio x = lk->lk_lock & LK_ALL_WAITERS; 888178166Sattilio if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x, 889178159Sattilio LK_KERNPROC | x)) 890177957Sattilio return; 891177957Sattilio cpu_spinwait(); 892177957Sattilio } 89366615Sjasone} 89466615Sjasone 895175166Sattiliovoid 896177957Sattiliolockmgr_printinfo(struct lock *lk) 897175166Sattilio{ 898175166Sattilio struct thread *td; 899177957Sattilio uintptr_t x; 900175166Sattilio 901177957Sattilio if (lk->lk_lock == LK_UNLOCKED) 902188244Sjhb printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name); 903177957Sattilio else if (lk->lk_lock & LK_SHARE) 904188244Sjhb printf("lock type %s: SHARED (count %ju)\n", 905177957Sattilio lk->lock_object.lo_name, 906177957Sattilio (uintmax_t)LK_SHARERS(lk->lk_lock)); 907177957Sattilio else { 908177957Sattilio td = lockmgr_xholder(lk); 909188244Sjhb printf("lock type %s: EXCL by thread %p (pid %d)\n", 910177957Sattilio lk->lock_object.lo_name, td, td->td_proc->p_pid); 911177957Sattilio } 912175166Sattilio 913177957Sattilio x = lk->lk_lock; 914177957Sattilio if (x & LK_EXCLUSIVE_WAITERS) 915177957Sattilio printf(" with exclusive waiters pending\n"); 916177957Sattilio if (x & LK_SHARED_WAITERS) 917177957Sattilio printf(" with shared waiters pending\n"); 918177957Sattilio 919177957Sattilio STACK_PRINT(lk); 920175166Sattilio} 921175166Sattilio 92229653Sdysonint 923177957Sattiliolockstatus(struct lock *lk) 92429653Sdyson{ 925177957Sattilio uintptr_t v, x; 
926177957Sattilio int ret; 92729653Sdyson 928177957Sattilio ret = LK_SHARED; 929177957Sattilio x = lk->lk_lock; 930177957Sattilio v = LK_HOLDER(x); 931175635Sattilio 932177957Sattilio if ((x & LK_SHARE) == 0) { 933177957Sattilio if (v == (uintptr_t)curthread || v == LK_KERNPROC) 934177957Sattilio ret = LK_EXCLUSIVE; 93554444Seivind else 936177957Sattilio ret = LK_EXCLOTHER; 937177957Sattilio } else if (x == LK_UNLOCKED) 938177957Sattilio ret = 0; 93929653Sdyson 940177957Sattilio return (ret); 94124269Speter} 942161322Sjhb 943176249Sattilio#ifdef INVARIANT_SUPPORT 944176249Sattilio#ifndef INVARIANTS 945177957Sattilio#undef _lockmgr_assert 946176249Sattilio#endif 947176249Sattilio 948176249Sattiliovoid 949177957Sattilio_lockmgr_assert(struct lock *lk, int what, const char *file, int line) 950176249Sattilio{ 951176249Sattilio int slocked = 0; 952176249Sattilio 953176249Sattilio if (panicstr != NULL) 954176249Sattilio return; 955176249Sattilio switch (what) { 956176249Sattilio case KA_SLOCKED: 957176249Sattilio case KA_SLOCKED | KA_NOTRECURSED: 958176249Sattilio case KA_SLOCKED | KA_RECURSED: 959176249Sattilio slocked = 1; 960176249Sattilio case KA_LOCKED: 961176249Sattilio case KA_LOCKED | KA_NOTRECURSED: 962176249Sattilio case KA_LOCKED | KA_RECURSED: 963178159Sattilio#ifdef WITNESS 964178159Sattilio 965178159Sattilio /* 966178159Sattilio * We cannot trust WITNESS if the lock is held in exclusive 967178159Sattilio * mode and a call to lockmgr_disown() happened. 968178159Sattilio * Workaround this skipping the check if the lock is held in 969178159Sattilio * exclusive mode even for the KA_LOCKED case. 
970178159Sattilio */ 971178159Sattilio if (slocked || (lk->lk_lock & LK_SHARE)) { 972178159Sattilio witness_assert(&lk->lock_object, what, file, line); 973178159Sattilio break; 974178159Sattilio } 975178159Sattilio#endif 976177957Sattilio if (lk->lk_lock == LK_UNLOCKED || 977177957Sattilio ((lk->lk_lock & LK_SHARE) == 0 && (slocked || 978177957Sattilio (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))))) 979176249Sattilio panic("Lock %s not %slocked @ %s:%d\n", 980177957Sattilio lk->lock_object.lo_name, slocked ? "share" : "", 981176249Sattilio file, line); 982177957Sattilio 983177957Sattilio if ((lk->lk_lock & LK_SHARE) == 0) { 984177957Sattilio if (lockmgr_recursed(lk)) { 985176249Sattilio if (what & KA_NOTRECURSED) 986176249Sattilio panic("Lock %s recursed @ %s:%d\n", 987177957Sattilio lk->lock_object.lo_name, file, 988177957Sattilio line); 989176249Sattilio } else if (what & KA_RECURSED) 990176249Sattilio panic("Lock %s not recursed @ %s:%d\n", 991177957Sattilio lk->lock_object.lo_name, file, line); 992176249Sattilio } 993176249Sattilio break; 994176249Sattilio case KA_XLOCKED: 995176249Sattilio case KA_XLOCKED | KA_NOTRECURSED: 996176249Sattilio case KA_XLOCKED | KA_RECURSED: 997177957Sattilio if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)) 998176249Sattilio panic("Lock %s not exclusively locked @ %s:%d\n", 999177957Sattilio lk->lock_object.lo_name, file, line); 1000177957Sattilio if (lockmgr_recursed(lk)) { 1001176249Sattilio if (what & KA_NOTRECURSED) 1002176249Sattilio panic("Lock %s recursed @ %s:%d\n", 1003177957Sattilio lk->lock_object.lo_name, file, line); 1004176249Sattilio } else if (what & KA_RECURSED) 1005176249Sattilio panic("Lock %s not recursed @ %s:%d\n", 1006177957Sattilio lk->lock_object.lo_name, file, line); 1007176249Sattilio break; 1008176249Sattilio case KA_UNLOCKED: 1009177957Sattilio if (lockmgr_xlocked(lk) || lockmgr_disowned(lk)) 1010176249Sattilio panic("Lock %s exclusively locked @ %s:%d\n", 1011177957Sattilio 
lk->lock_object.lo_name, file, line); 1012176249Sattilio break; 1013176249Sattilio default: 1014177957Sattilio panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file, 1015177957Sattilio line); 1016176249Sattilio } 1017176249Sattilio} 1018177957Sattilio#endif 1019176249Sattilio 1020161322Sjhb#ifdef DDB 1021161337Sjhbint 1022161337Sjhblockmgr_chain(struct thread *td, struct thread **ownerp) 1023161337Sjhb{ 1024177957Sattilio struct lock *lk; 1025161337Sjhb 1026177957Sattilio lk = td->td_wchan; 1027161337Sjhb 1028177957Sattilio if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr) 1029177957Sattilio return (0); 1030177957Sattilio db_printf("blocked on lockmgr %s", lk->lock_object.lo_name); 1031177957Sattilio if (lk->lk_lock & LK_SHARE) 1032177957Sattilio db_printf("SHARED (count %ju)\n", 1033177957Sattilio (uintmax_t)LK_SHARERS(lk->lk_lock)); 1034177957Sattilio else 1035177957Sattilio db_printf("EXCL\n"); 1036177957Sattilio *ownerp = lockmgr_xholder(lk); 1037161337Sjhb 1038161337Sjhb return (1); 1039161337Sjhb} 1040161337Sjhb 1041177957Sattiliostatic void 1042164246Skmacydb_show_lockmgr(struct lock_object *lock) 1043161322Sjhb{ 1044161322Sjhb struct thread *td; 1045177957Sattilio struct lock *lk; 1046161322Sjhb 1047177957Sattilio lk = (struct lock *)lock; 1048161322Sjhb 1049168070Sjhb db_printf(" state: "); 1050177957Sattilio if (lk->lk_lock == LK_UNLOCKED) 1051161322Sjhb db_printf("UNLOCKED\n"); 1052177957Sattilio else if (lk->lk_lock & LK_SHARE) 1053177957Sattilio db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock)); 1054177957Sattilio else { 1055177957Sattilio td = lockmgr_xholder(lk); 1056177957Sattilio if (td == (struct thread *)LK_KERNPROC) 1057177957Sattilio db_printf("XLOCK: LK_KERNPROC\n"); 1058177957Sattilio else 1059177957Sattilio db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, 1060177957Sattilio td->td_tid, td->td_proc->p_pid, 1061177957Sattilio td->td_proc->p_comm); 1062177957Sattilio if (lockmgr_recursed(lk)) 1063177957Sattilio 
db_printf(" recursed: %d\n", lk->lk_recurse); 1064177957Sattilio } 1065177957Sattilio db_printf(" waiters: "); 1066177957Sattilio switch (lk->lk_lock & LK_ALL_WAITERS) { 1067177957Sattilio case LK_SHARED_WAITERS: 1068177957Sattilio db_printf("shared\n"); 1069177957Sattilio case LK_EXCLUSIVE_WAITERS: 1070177957Sattilio db_printf("exclusive\n"); 1071177957Sattilio break; 1072177957Sattilio case LK_ALL_WAITERS: 1073177957Sattilio db_printf("shared and exclusive\n"); 1074177957Sattilio break; 1075177957Sattilio default: 1076177957Sattilio db_printf("none\n"); 1077177957Sattilio } 1078161322Sjhb} 1079161322Sjhb#endif 1080