kern_rwlock.c revision 315378
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/kern_rwlock.c 315378 2017-03-16 06:45:36Z mjg $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/*
 * Return the rwlock address when the lock cookie address is provided.
 * This assumes that struct rwlock has a member named rw_lock.
 */
#define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))
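
/*
 * For illustration: the consumer-facing macros in sys/rwlock.h pass the
 * address of the lock word itself as the cookie, roughly along the lines
 * of
 *
 *	#define	rw_wowned(rw)	_rw_wowned(&(rw)->rw_lock)
 *
 * and rwlock2rw() recovers the enclosing rwlock from that pointer.
 */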

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(const struct lock_object *lock);
#endif
static void	assert_rw(const struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};

#ifdef ADAPTIVE_RWLOCKS
static int rowner_retries = 10;
static int rowner_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
    "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");

static struct lock_delay_config __read_mostly rw_delay;

SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_base, CTLFLAG_RW, &rw_delay.base,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
    0, "");

LOCK_DELAY_SYSINIT_DEFAULT(rw_delay);
#endif

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */

#define	lv_rw_wowner(v)							\
	((v) & RW_LOCK_READ ? NULL :					\
	 (struct thread *)RW_OWNER((v)))

#define	rw_wowner(rw)	lv_rw_wowner(RW_READ_VALUE(rw))
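
/*
 * For illustration (see sys/rwlock.h for the actual bit layout): the lock
 * word either counts readers or carries the owning thread pointer, so a
 * snapshot can be decoded as
 *
 *	uintptr_t v = RW_READ_VALUE(rw);
 *
 *	if (v & RW_LOCK_READ)
 *		printf("%ju readers\n", (uintmax_t)RW_READERS(v));
 *	else
 *		printf("write-owned by %p\n", (void *)RW_OWNER(v));
 *
 * An unlocked lock is encoded as a read lock with zero readers, so the
 * RW_LOCK_READ branch also covers RW_UNLOCKED.
 */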

/*
 * Returns whether the write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock, which should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	__rw_assert(c, what, file, line)
#endif

void
assert_rw(const struct lock_object *lock, int what)
{

	rw_assert((const struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}

uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}
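
/*
 * For illustration: lock_rw() and unlock_rw() back the lc_lock/lc_unlock
 * methods used by type-agnostic consumers (e.g. the condition variable
 * code) to drop a lock and later restore it in its previous mode.  The
 * value returned by unlock_rw() round-trips back into lock_rw():
 *
 *	struct lock_class *class = LOCK_CLASS(lock);
 *	uintptr_t how;
 *
 *	how = class->lc_unlock(lock);
 *	(sleep or run without the lock)
 *	class->lc_lock(lock, how);
 */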

#ifdef KDTRACE_HOOKS
int
owner_rw(const struct lock_object *lock, struct thread **owner)
{
	const struct rwlock *rw = (const struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE | RW_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	if (opts & RW_NEW)
		flags |= LO_NEW;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}
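
/*
 * For illustration, a hypothetical consumer's life cycle through the
 * rw_init_flags()/rw_destroy() macros that wrap this path:
 *
 *	static struct rwlock foo_lock;
 *
 *	rw_init_flags(&foo_lock, "foo lock", RW_RECURSE);
 *	rw_wlock(&foo_lock);
 *	rw_wlock(&foo_lock);		(recursion allowed by RW_RECURSE)
 *	rw_wunlock(&foo_lock);
 *	rw_wunlock(&foo_lock);
 *	rw_destroy(&foo_lock);
 */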

void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
	struct rw_args_flags *args = arg;

	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
	    args->ra_flags);
}
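
/*
 * For illustration: these handlers are normally reached through the
 * RW_SYSINIT()/RW_SYSINIT_FLAGS() helpers in sys/rwlock.h, which arrange
 * for a statically allocated lock to be initialized during boot, e.g.
 *
 *	static struct rwlock foo_lock;
 *	RW_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
 */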

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t tid, v;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	tid = (uintptr_t)curthread;
	v = RW_UNLOCKED;
	if (!_rw_write_lock_fetch(rw, &v, tid))
		_rw_wlock_hard(rw, v, tid, file, line);
	else
		LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw,
		    0, 0, file, line, LOCKSTAT_WRITER);

	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}

int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!rw_recursed(rw))
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}
	return (rval);
}

void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	if (rw->rw_recurse)
		rw->rw_recurse--;
	else
		_rw_wunlock_hard(rw, (uintptr_t)curthread, file, line);

	TD_LOCKS_DEC(curthread);
}

/*
 * Determines whether a new reader can acquire a lock.  Succeeds if the
 * reader already owns a read lock and the lock is locked for read, to
 * prevent deadlock from reader recursion.  Also succeeds if the lock
 * is unlocked and has no writer waiters or spinners.  Failing in all
 * other cases prioritizes writers over readers.
 */
#define	RW_CAN_READ(_rw)						\
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
    RW_LOCK_READ)
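
/*
 * For illustration, how the predicate evaluates for a few snapshots
 * (flag names as defined in sys/rwlock.h, owner_td being some writing
 * thread):
 *
 *	RW_CAN_READ(RW_UNLOCKED)		true (no waiters/spinners)
 *	RW_CAN_READ(RW_READERS_LOCK(2))		true
 *	RW_CAN_READ(RW_READERS_LOCK(1) |
 *	    RW_LOCK_WRITE_WAITERS)		true only if curthread
 *						already holds read locks
 *	RW_CAN_READ((uintptr_t)owner_td)	false (write-locked)
 */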

void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
#endif
	v = RW_READ_VALUE(rw);
#ifdef KDTRACE_HOOKS
	state = v;
#endif
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				do {
					lock_delay(&lda);
					v = RW_READ_VALUE(rw);
					owner = lv_rw_wowner(v);
				} while (owner != NULL && TD_IS_RUNNING(owner));
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				v = RW_READ_VALUE(rw);
				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
					break;
				cpu_spinwait();
			}
			v = RW_READ_VALUE(rw);
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i != rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present, so
		 * acquire the turnstile lock in order to begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = RW_READ_VALUE(rw);
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				v = RW_READ_VALUE(rw);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
		v = RW_READ_VALUE(rw);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	TD_LOCKS_INC(curthread);
	curthread->td_rw_rlocks++;
}

int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t x, v, queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */
	x = RW_READ_VALUE(rw);
	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		if (RW_READERS(x) > 1) {
			if (atomic_fcmpset_rel_ptr(&rw->rw_lock, &x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_fcmpset_rel_ptr(&rw->rw_lock, &x,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			x = RW_READ_VALUE(rw);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
	TD_LOCKS_DEC(curthread);
	curthread->td_rw_rlocks--;
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
    const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uintptr_t x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	rw = rwlock2rw(c);
	if (__predict_false(v == RW_UNLOCKED))
		v = RW_READ_VALUE(rw);

	if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = v;
#endif
	for (;;) {
		if (v == RW_UNLOCKED) {
			if (_rw_write_lock_fetch(rw, &v, tid))
				break;
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		owner = lv_rw_wowner(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			do {
				lock_delay(&lda);
				v = RW_READ_VALUE(rw);
				owner = lv_rw_wowner(v);
			} while (owner != NULL && TD_IS_RUNNING(owner));
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					v = RW_READ_VALUE(rw);
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
					break;
				cpu_spinwait();
			}
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			v = RW_READ_VALUE(rw);
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			if (i != rowner_loops)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = RW_READ_VALUE(rw);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif
		/*
		 * Check the waiters flags for this rwlock.
		 * If the lock was released without maintaining any pending
		 * waiters queue, simply try to acquire it.
		 * If a pending waiters queue is present, claim the lock
		 * ownership and preserve the pending queue.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			turnstile_cancel(ts);
			v = RW_READ_VALUE(rw);
			continue;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				v = RW_READ_VALUE(rw);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
		v = RW_READ_VALUE(rw);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);
	MPASS(!rw_recursed(rw));

	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw,
	    LOCKSTAT_WRITER);
	if (_rw_write_unlock(rw, tid))
		return;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algorithm as sx locks for now.  Prefer waking up shared
	 * waiters over writers if we have any.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(rw__upgrade, rw);
	}
	return (success);
}
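
/*
 * For illustration, the usual upgrade pattern in a hypothetical consumer:
 * on failure the read lock is still held, so drop it, take the write lock
 * outright and revalidate whatever the read lock was protecting:
 *
 *	rw_rlock(&foo_lock);
 *	if (!foo_needs_update()) {
 *		rw_runlock(&foo_lock);
 *		return;
 *	}
 *	if (!rw_try_upgrade(&foo_lock)) {
 *		rw_runlock(&foo_lock);
 *		rw_wlock(&foo_lock);
 *		(revalidate: the state may have changed while unlocked)
 *	}
 *	foo_update();
 *	rw_wunlock(&foo_lock);
 */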

/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(rw__downgrade, rw);
}
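
/*
 * For illustration, a hypothetical writer that publishes an update and
 * then keeps reading without ever leaving the lock unheld:
 *
 *	rw_wlock(&foo_lock);
 *	foo_modify();
 *	rw_downgrade(&foo_lock);
 *	foo_read_after_update();
 *	rw_runlock(&foo_lock);
 */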

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef __rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct rwlock *rw;

	if (panicstr != NULL)
		return;

	rw = rwlock2rw(c);

	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(const struct lock_object *lock)
{
	const struct rwlock *rw;
	struct thread *td;

	rw = (const struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif