/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/kern/kern_rwlock.c 303953 2016-08-11 09:28:49Z mjg $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/*
 * Return the rwlock address when the lock cookie address is provided.
 * This functionality assumes that struct rwlock has a member named rw_lock.
 */
#define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))
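/*
 * Note: the rw_*() wrapper macros in <sys/rwlock.h> pass &rw->rw_lock as the
 * lock cookie, so __containerof() recovers the enclosing struct rwlock.
 */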

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(const struct lock_object *lock);
#endif
static void	assert_rw(const struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};
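
/*
 * Informal usage sketch for consumers of this lock class (variable name is
 * hypothetical; see rw(9) for the authoritative API):
 *
 *	struct rwlock data_lock;
 *
 *	rw_init(&data_lock, "data lock");
 *	rw_rlock(&data_lock);		shared (read) access
 *	rw_runlock(&data_lock);
 *	rw_wlock(&data_lock);		exclusive (write) access
 *	rw_wunlock(&data_lock);
 *	rw_destroy(&data_lock);
 */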

#ifdef ADAPTIVE_RWLOCKS
static int rowner_retries = 10;
static int rowner_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
    "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");

static struct lock_delay_config rw_delay = {
	.initial	= 1000,
	.step		= 500,
	.min		= 100,
	.max		= 5000,
};

SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_initial, CTLFLAG_RW, &rw_delay.initial,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_step, CTLFLAG_RW, &rw_delay.step,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_min, CTLFLAG_RW, &rw_delay.min,
    0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, delay_max, CTLFLAG_RW, &rw_delay.max,
    0, "");

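/*
 * The sysinit below rescales these default spin-delay parameters by the
 * number of CPUs in the system.
 */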
static void
rw_delay_sysinit(void *dummy)
{

	rw_delay.initial = mp_ncpus * 25;
	rw_delay.step = (mp_ncpus * 25) / 2;
	rw_delay.min = mp_ncpus * 5;
	rw_delay.max = mp_ncpus * 25 * 10;
}
LOCK_DELAY_SYSINIT(rw_delay_sysinit);
#endif

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return whether the write owner has recursed on the lock.  Write ownership
 * is not assured here and should be checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	__rw_assert(c, what, file, line)
#endif

void
assert_rw(const struct lock_object *lock, int what)
{

	rw_assert((const struct rwlock *)lock, what);
}

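/*
 * lc_lock/lc_unlock glue: unlock_rw() reports how the lock was held (1 for
 * a read lock, 0 for a write lock), and that value is later handed back to
 * lock_rw() as "how" so the lock is reacquired in the same mode.
 */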
void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}

uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_rw(const struct lock_object *lock, struct thread **owner)
{
	const struct rwlock *rw = (const struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE | RW_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	if (opts & RW_NEW)
		flags |= LO_NEW;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}

void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
	struct rw_args_flags *args = arg;

	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
	    args->ra_flags);
}

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}

int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!rw_recursed(rw))
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}
	return (rval);
}

void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	__rw_wunlock(rw, curthread, file, line);
	TD_LOCKS_DEC(curthread);
}

/*
 * Determine whether a new reader can acquire the lock.  Succeed if the
 * reader already owns a read lock and the lock is locked for read, to
 * prevent deadlock from reader recursion.  Also succeed if the lock
 * is unlocked and has no writer waiters or spinners.  Otherwise fail,
 * which gives priority to waiting writers over new readers.
 */
#define	RW_CAN_READ(_rw)						\
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
    RW_LOCK_READ)
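/*
 * Concretely: a completely unlocked rwlock is encoded as RW_LOCK_READ with
 * zero readers and no waiter bits set, so it satisfies the second clause;
 * a thread whose td_rw_rlocks is nonzero is admitted while the lock stays
 * read-locked even if writers are queued, to avoid recursive-read deadlock.
 */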

void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			continue;
		}
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner))
					lock_delay(&lda);
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				v = rw->rw_lock;
				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
					break;
				cpu_spinwait();
			}
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i != rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present, so
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	TD_LOCKS_INC(curthread);
	curthread->td_rw_rlocks++;
}

int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t x, v, queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
	TD_LOCKS_DEC(curthread);
	curthread->td_rw_rlocks--;
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uintptr_t v, x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#if defined(ADAPTIVE_RWLOCKS) || defined(KDTRACE_HOOKS)
	struct lock_delay_arg lda;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	u_int sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

#if defined(ADAPTIVE_RWLOCKS)
	lock_delay_arg_init(&lda, &rw_delay);
#elif defined(KDTRACE_HOOKS)
	lock_delay_arg_init(&lda, NULL);
#endif
	rw = rwlock2rw(c);

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	for (;;) {
		if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
			break;
#ifdef KDTRACE_HOOKS
		lda.spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		v = rw->rw_lock;
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				lock_delay(&lda);
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
					break;
				cpu_spinwait();
			}
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
#ifdef KDTRACE_HOOKS
			lda.spin_cnt += rowner_loops - i;
#endif
			if (i != rowner_loops)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif
		/*
		 * Check the waiters flags for this rwlock.
		 * If the lock was released without leaving any pending
		 * waiters queued, simply try to acquire it.
		 * If a pending waiters queue is present, claim the lock
		 * ownership and preserve the pending queue.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			turnstile_cancel(ts);
			continue;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (lda.spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wakeup the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(rw__upgrade, rw);
	}
	return (success);
}

/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(rw__downgrade, rw);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef __rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct rwlock *rw;

	if (panicstr != NULL)
		return;

	rw = rwlock2rw(c);

	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(const struct lock_object *lock)
{
	const struct rwlock *rw;
	struct thread *td;

	rw = (const struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif