kern_rwlock.c revision 174629
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 174629 2007-12-15 23:13:31Z jeff $");

#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

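/*
 * RW_RECURSE is stored in the lock_object's lo_flags by rw_init_flags()
 * below and later tested there by _rw_wlock_hard(), so it must fit
 * within the bits reserved for lock classes; the CTASSERT enforces
 * that at compile time.
 */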
CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);

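/*
 * With ADAPTIVE_RWLOCKS in effect (SMP kernels built without the
 * NO_ADAPTIVE_RWLOCKS option), the contested paths below spin instead
 * of blocking while the write owner is running on another CPU.
 */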
#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif
static void	assert_rw(struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, int how);
static int	unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return true if the write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

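/*
 * A sketch of the rw_lock word encoding assumed by the macros above;
 * the authoritative definitions live in <sys/rwlock.h>.  When the lock
 * is read-locked, or unlocked (which is encoded as a read lock with
 * zero readers), RW_LOCK_READ is set and the upper bits hold the
 * reader count.  When write-locked, the word holds the owning thread
 * pointer, whose low bits are free due to alignment, combined with the
 * recursion and waiter flag bits.
 */
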
#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

void
assert_rw(struct lock_object *lock, int what)
{

	rw_assert((struct rwlock *)lock, what);
}

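/*
 * lock_rw() and unlock_rw() below are the generic lock class hooks.
 * By convention, unlock_rw() returns non-zero for a write lock so that
 * the value can later be handed back to lock_rw() as 'how' to
 * reacquire the lock in the same mode, e.g. by code that must drop a
 * lock across a sleep.
 */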
void
lock_rw(struct lock_object *lock, int how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_wlock(rw);
	else
		rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (0);
	} else {
		rw_wunlock(rw);
		return (1);
	}
}

void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
{
	int flags;

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE)) == 0);

	flags = LO_UPGRADABLE | LO_RECURSABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	flags |= opts & RW_RECURSE;

	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	KASSERT(rw->rw_recurse == 0, ("rw lock still recursed"));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}

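/*
 * Illustrative consumer usage, not part of this file; 'data_lock' and
 * "data lock" are made-up names:
 *
 *	struct rwlock data_lock;
 *
 *	rw_init(&data_lock, "data lock");
 *	rw_rlock(&data_lock);		shared (read) section
 *	rw_runlock(&data_lock);
 *	rw_wlock(&data_lock);		exclusive (write) section
 *	rw_wunlock(&data_lock);
 *	rw_destroy(&data_lock);
 *
 * Locks that must exist before their subsystem initializes can instead
 * be registered through rw_sysinit() via the RW_SYSINIT() macro in
 * <sys/rwlock.h>.
 */
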
int
rw_wowned(struct rwlock *rw)
{

	return (rw_wowner(rw) == curthread);
}

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	if (!rw_recursed(rw))
		lock_profile_release_lock(&rw->lock_object);
	__rw_wunlock(rw, curthread, file, line);
}

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uint64_t waittime = 0;
	int contested = 0;
	uintptr_t x;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);

	/*
	 * Note that we don't make any attempt to try to block read
	 * locks once a writer has blocked on the lock.  The reason is
	 * that we currently allow for read locks to recurse and we
	 * don't keep track of all the holders of read locks.  Thus, if
	 * we were to block readers once a writer blocked and a reader
	 * tried to recurse on their reader lock after a writer had
	 * blocked we would end up in a deadlock since the reader would
	 * be blocked on the writer, and the writer would be blocked
	 * waiting for the reader to release its original read lock.
	 */
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {

			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if another thread currently holds a write lock,
			 * and in that case RW_LOCK_READ should be clear.
			 */
			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
			    x + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x + RW_ONE_READER));
				break;
			}
			cpu_spinwait();
			continue;
		}
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		owner = (struct thread *)RW_OWNER(x);
		if (TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock, so acquire the turnstile lock so we can
		 * begin the process of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if there is no
		 * longer a write lock.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, quit the hard path and try to spin.
		 */
		owner = (struct thread *)RW_OWNER(x);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}
#endif

		/*
		 * Ok, it's still a write lock.  If the RW_LOCK_READ_WAITERS
		 * flag is already set, then we can go ahead and block.  If
		 * it is not set then try to set it.  If we fail to set it
		 * drop the turnstile lock and restart the loop.
		 */
		if (!(x & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, x,
			    x | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	lock_profile_obtain_lock_success(&rw->lock_object, contested,
	    waittime, file, line);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
}

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}

		/*
		 * We should never have read waiters while at least one
		 * thread holds a read lock.  (See note above)
		 */
		KASSERT(!(x & RW_LOCK_READ_WAITERS),
		    ("%s: waiting readers", __func__));

		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WRITE_WAITERS)) {

			/*
			 * There shouldn't be any flags set and we should
			 * be the only read lock.  If we fail to release
			 * the single read lock, then another thread might
			 * have just acquired a read lock, so go back up
			 * to the multiple read locks case.
			 */
			MPASS(x == RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}

		/*
		 * There should just be one reader with one or more
		 * writers waiting.
		 */
		MPASS(x == (RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));

		/*
		 * Ok, we know we have a waiting writer and we think we
		 * are the last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		if (!atomic_cmpset_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	lock_profile_release_lock(&rw->lock_object);
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uint64_t waittime = 0;
	uintptr_t v;
	int contested = 0;

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		atomic_set_ptr(&rw->rw_lock, RW_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		v = rw->rw_lock;
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

		/*
		 * If the lock was released while spinning on the
		 * turnstile chain lock, try again.
		 */
		if (v == RW_UNLOCKED) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the current owner of the lock is executing on another
		 * CPU, quit the hard path and try to spin.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
		}
#endif

		/*
		 * If the lock was released by a writer with both readers
		 * and writers waiting and a reader hasn't woken up and
		 * acquired the lock yet, rw_lock will be set to the
		 * value RW_UNLOCKED | RW_LOCK_WRITE_WAITERS.  If we see
		 * that value, try to acquire it once.  Note that we have
		 * to preserve the RW_LOCK_WRITE_WAITERS flag as there are
		 * other writers still waiting.  If we fail, restart the
		 * loop.
		 */
		if (v == (RW_UNLOCKED | RW_LOCK_WRITE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&rw->rw_lock,
			    RW_UNLOCKED | RW_LOCK_WRITE_WAITERS,
			    tid | RW_LOCK_WRITE_WAITERS)) {
				turnstile_claim(ts);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, rw);
				break;
			}
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
	lock_profile_obtain_lock_success(&rw->lock_object, contested, waittime,
	    file, line);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the two waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		if ((--rw->rw_recurse) == 0)
			atomic_clear_ptr(&rw->rw_lock, RW_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);

	MPASS(ts != NULL);

	/*
	 * Use the same algorithm as sx locks for now.  Prefer waking up
	 * shared waiters over writers if we have any.  This is probably not
	 * ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wakeup the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
		queue = TS_SHARED_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
	} else
		queue = TS_EXCLUSIVE_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
	uintptr_t v, tid;
	struct turnstile *ts;
	int success;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	if (!(rw->rw_lock & RW_LOCK_WRITE_WAITERS)) {
		success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
		    tid);
		goto out;
	}

	/*
	 * Ok, we think we have write waiters, so lock the
	 * turnstile.
	 */
	ts = turnstile_trywait(&rw->lock_object);

	/*
	 * Try to switch from one reader to a writer again.  This time
	 * we honor the current state of the RW_LOCK_WRITE_WAITERS
	 * flag.  If we obtain the lock with the flag set, then claim
	 * ownership of the turnstile.
	 */
	v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
	success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
	    tid | v);
	if (success && v)
		turnstile_claim(ts);
	else
		turnstile_cancel(ts);
out:
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success)
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	return (success);
}

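/*
 * Illustrative upgrade pattern (consumer code; 'foo_lock' is a made-up
 * name).  On failure the read lock is still held, so the caller
 * typically drops it, takes the write lock, and revalidates any state
 * derived while only the read lock was held:
 *
 *	rw_rlock(&foo_lock);
 *	if (!rw_try_upgrade(&foo_lock)) {
 *		rw_runlock(&foo_lock);
 *		rw_wlock(&foo_lock);
 *		(revalidate state here)
 *	}
 *	rw_wunlock(&foo_lock);
 */
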
/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile, "disown" the lock, and awaken any read
	 * waiters.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock;
	MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));

	/*
	 * Downgrade from a write lock while preserving
	 * RW_LOCK_WRITE_WAITERS and give up ownership of the
	 * turnstile.  If there are any read waiters, wake them up.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
	    (v & RW_LOCK_WRITE_WAITERS));
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	else if (ts)
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}

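/*
 * Illustrative downgrade pattern (consumer code; 'foo_lock' is a
 * made-up name): mutate under the write lock, then continue reading
 * without any window in which the lock is dropped:
 *
 *	rw_wlock(&foo_lock);
 *	(modify the protected data)
 *	rw_downgrade(&foo_lock);
 *	(read-only access continues)
 *	rw_runlock(&foo_lock);
 */
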
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

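/*
 * Illustrative usage (consumer code; 'foo_lock' is a made-up name):
 * assertions are normally placed at the entry of functions that
 * require a particular lock state, e.g.
 *
 *	rw_assert(&foo_lock, RA_WLOCKED);
 *	rw_assert(&foo_lock, RA_LOCKED | RA_NOTRECURSED);
 */
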
/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif