kern_rwlock.c revision 157846
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 157846 2006-04-18 18:27:54Z jhb $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif

struct lock_class lock_class_rw = {
	"rw",
	LC_SLEEPLOCK | LC_RECURSABLE /* | LC_UPGRADABLE */,
#ifdef DDB
	db_show_rwlock
#endif
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

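/*
 * A note on the lock word encoding assumed throughout this file (a
 * summary of the layout implied by the RW_* macros in <sys/rwlock.h>):
 * when RW_LOCK_READ is set, the remaining bits of rw_lock count the
 * outstanding read locks, and a completely unlocked lock is simply a
 * read lock with zero readers; when RW_LOCK_READ is clear, the
 * remaining bits hold the pointer to the owning (write-locking) thread.
 * The RW_LOCK_READ_WAITERS and RW_LOCK_WRITE_WAITERS bits record
 * whether readers or writers are blocked waiting for the lock.
 */
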
/*
 * Return a pointer to the owning thread for this lock that should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

void
rw_init(struct rwlock *rw, const char *name)
{

	rw->rw_lock = RW_UNLOCKED;

	lock_init(&rw->rw_object, &lock_class_rw, name, NULL, LO_WITNESS |
	    LO_RECURSABLE /* | LO_UPGRADABLE */);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	lock_destroy(&rw->rw_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}

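/*
 * Illustrative example (not part of the original file): typical consumer
 * usage of this API through the rw_rlock()/rw_wlock() wrapper macros
 * declared in <sys/rwlock.h>; "foo_lock" and "foo_count" are hypothetical
 * names.
 *
 *	static struct rwlock foo_lock;
 *	static int foo_count;
 *
 *	rw_init(&foo_lock, "foo");
 *
 *	rw_rlock(&foo_lock);		(readers may run concurrently)
 *	value = foo_count;
 *	rw_runlock(&foo_lock);
 *
 *	rw_wlock(&foo_lock);		(writers get exclusive access)
 *	foo_count++;
 *	rw_wunlock(&foo_lock);
 *
 *	rw_destroy(&foo_lock);
 */
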
void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->rw_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->rw_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	_rw_assert(rw, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->rw_object, 0, 0, file, line);
	__rw_wunlock(rw, curthread, file, line);
}

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
	volatile struct thread *owner;
	uintptr_t x;

	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->rw_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER, file, line);

	/*
	 * Note that we don't make any attempt to try to block read
	 * locks once a writer has blocked on the lock.  The reason is
	 * that we currently allow for read locks to recurse and we
	 * don't keep track of all the holders of read locks.  Thus, if
	 * we were to block readers once a writer blocked and a reader
	 * tried to recurse on their reader lock after a writer had
	 * blocked we would end up in a deadlock since the reader would
	 * be blocked on the writer, and the writer would be blocked
	 * waiting for the reader to release its original read lock.
	 */
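	/*
	 * A concrete illustration of that deadlock (not code from this
	 * file): thread A read-locks rw, thread B then blocks in
	 * rw_wlock(), and A calls rw_rlock() on the same lock again.  If
	 * new readers were held off because of the blocked writer, A
	 * would sleep behind B while B waits for A to drop its first
	 * read lock, and neither thread could ever make progress.
	 */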
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {

			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if another thread currently holds a write lock,
			 * and in that case RW_LOCK_READ should be clear.
			 */
			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
			    x + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)x,
					    (void *)(x + RW_ONE_READER));
				break;
			}
			cpu_spinwait();
			continue;
		}

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock, so acquire the turnstile lock so we can
		 * begin the process of blocking.
		 */
		turnstile_lock(&rw->rw_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if there is no
		 * longer a write lock.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {
			turnstile_release(&rw->rw_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * Ok, it's still a write lock.  If the RW_LOCK_READ_WAITERS
		 * flag is already set, then we can go ahead and block.  If
		 * it is not set then try to set it.  If we fail to set it
		 * drop the turnstile lock and restart the loop.
		 */
		if (!(x & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, x,
			    x | RW_LOCK_READ_WAITERS)) {
				turnstile_release(&rw->rw_object);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

#ifdef SMP
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		owner = (struct thread *)RW_OWNER(x);
		if (TD_IS_RUNNING(owner)) {
			turnstile_release(&rw->rw_object);
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(&rw->rw_object, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */

	LOCK_LOG_LOCK("RLOCK", &rw->rw_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->rw_object, 0, file, line);
}

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x;

	_rw_assert(rw, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->rw_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->rw_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}

		/*
		 * We should never have read waiters while at least one
		 * thread holds a read lock.  (See note above)
		 */
		KASSERT(!(x & RW_LOCK_READ_WAITERS),
		    ("%s: waiting readers", __func__));

		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WRITE_WAITERS)) {

			/*
			 * There shouldn't be any flags set and we should
			 * be the only read lock.  If we fail to release
			 * the single read lock, then another thread might
			 * have just acquired a read lock, so go back up
			 * to the multiple read locks case.
			 */
			MPASS(x == RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}

		/*
		 * There should just be one reader with one or more
		 * writers waiting.
		 */
		MPASS(x == (RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));

		/*
		 * Ok, we know we have a waiting writer and we think we
		 * are the last reader, so grab the turnstile lock.
		 */
		turnstile_lock(&rw->rw_object);

		/*
		 * Try to drop our lock, leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		if (!atomic_cmpset_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
			turnstile_release(&rw->rw_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->rw_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		break;
	}
}

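/*
 * How we reach the hard cases below (a summary; see <sys/rwlock.h> for
 * the exact definitions of the helpers): _rw_wlock() above goes through
 * __rw_wlock(), which first tries _rw_write_lock(), a single atomic
 * compare-and-set of rw_lock from RW_UNLOCKED to the owning thread
 * pointer, and only calls _rw_wlock_hard() when that fails.  Likewise,
 * _rw_wunlock_hard() is only reached when the fast-path compare-and-set
 * back to RW_UNLOCKED in __rw_wunlock() fails because a waiter flag is
 * set.
 */
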
/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	volatile struct thread *owner;
	uintptr_t v;

	if (LOCK_LOG_TEST(&rw->rw_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->rw_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		turnstile_lock(&rw->rw_object);
		v = rw->rw_lock;

		/*
		 * If the lock was released while spinning on the
		 * turnstile chain lock, try again.
		 */
		if (v == RW_UNLOCKED) {
			turnstile_release(&rw->rw_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the lock was released by a writer with both readers
		 * and writers waiting and a reader hasn't woken up and
		 * acquired the lock yet, rw_lock will be set to the
		 * value RW_UNLOCKED | RW_LOCK_WRITE_WAITERS.  If we see
		 * that value, try to acquire it once.  Note that we have
		 * to preserve the RW_LOCK_WRITE_WAITERS flag as there are
		 * other writers waiting still. If we fail, restart the
		 * loop.
		 */
		if (v == (RW_UNLOCKED | RW_LOCK_WRITE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&rw->rw_lock,
			    RW_UNLOCKED | RW_LOCK_WRITE_WAITERS,
			    tid | RW_LOCK_WRITE_WAITERS)) {
				turnstile_claim(&rw->rw_object);
				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR2(KTR_LOCK,
					    "%s: %p claimed by new writer",
					    __func__, rw);
				break;
			}
			turnstile_release(&rw->rw_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_release(&rw->rw_object);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}

#ifdef SMP
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			turnstile_release(&rw->rw_object);
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(&rw->rw_object, rw_owner(rw),
		    TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->rw_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_lock(&rw->rw_object);
	ts = turnstile_lookup(&rw->rw_object);

#ifdef SMP
	/*
	 * There might not be a turnstile for this lock if all of
	 * the waiters are adaptively spinning.  In that case, just
	 * reset the lock to the unlocked state and return.
	 */
	if (ts == NULL) {
		atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers", __func__, rw);
		turnstile_release(&rw->rw_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wakeup the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 *
	 * Note that in the SMP case, if both flags are set, there might
	 * not be any actual writers on the turnstile as they might all
	 * be spinning.  In that case, we don't want to preserve the
	 * RW_LOCK_WRITE_WAITERS flag as the turnstile is going to go
	 * away once we wakeup all the readers.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
		queue = TS_SHARED_QUEUE;
#ifdef SMP
		if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
		    !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
			v |= RW_LOCK_WRITE_WAITERS;
#else
		v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
#endif
	} else
		queue = TS_EXCLUSIVE_QUEUE;

#ifdef SMP
	/*
	 * We have to make sure that we actually have waiters to
	 * wakeup.  If they are all spinning, then we just need to
	 * disown the turnstile and return.
	 */
	if (turnstile_empty(ts, queue)) {
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
		atomic_store_rel_ptr(&rw->rw_lock, v);
		turnstile_disown(ts);
		return;
	}
#endif

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->rw_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

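/*
 * Illustrative usage note (assuming the rw_assert() wrapper macro from
 * <sys/rwlock.h>, which expands to a call to _rw_assert() when
 * INVARIANTS is defined): a function that requires its caller to hold a
 * hypothetical "foo_lock" for writing would typically begin with
 *
 *	rw_assert(&foo_lock, RA_WLOCKED);
 */
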
/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->rw_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->rw_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);
#endif
		break;
	case RA_WLOCKED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->rw_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->rw_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->rw_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %jd locks\n",
		    (intmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif