/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

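/*
 * A brief usage sketch: consumers normally go through the rwlock(9) macros
 * declared in <sys/rwlock.h> rather than calling the underscored functions
 * below directly, roughly:
 *
 *	struct rwlock lock;
 *
 *	rw_init(&lock, "example");
 *	rw_rlock(&lock);	...shared (read) access...
 *	rw_runlock(&lock);
 *	rw_wlock(&lock);	...exclusive (write) access...
 *	rw_wunlock(&lock);
 *	rw_destroy(&lock);
 *
 * See rwlock(9) and <sys/rwlock.h> for the authoritative interface.
 */
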
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 301157 2016-06-01 18:32:20Z mjg $");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/*
 * Return the rwlock address when the lock cookie address is provided.
 * This functionality assumes that struct rwlock has a member named rw_lock.
 */
#define	rwlock2rw(c)	(__containerof(c, struct rwlock, rw_lock))

#ifdef ADAPTIVE_RWLOCKS
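/*
 * Tunables bounding the adaptive spin paths below: rowner_retries caps how
 * many rounds of spinning are attempted while waiting for readers to drain,
 * and rowner_loops bounds the length of each round.  Both are exported via
 * the debug.rwlock sysctl node declared here.
 */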
static int rowner_retries = 10;
static int rowner_loops = 10000;
static SYSCTL_NODE(_debug, OID_AUTO, rwlock, CTLFLAG_RD, NULL,
    "rwlock debugging");
SYSCTL_INT(_debug_rwlock, OID_AUTO, retry, CTLFLAG_RW, &rowner_retries, 0, "");
SYSCTL_INT(_debug_rwlock, OID_AUTO, loops, CTLFLAG_RW, &rowner_loops, 0, "");
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(const struct lock_object *lock);
#endif
static void	assert_rw(const struct lock_object *lock, int what);
static void	lock_rw(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int	owner_rw(const struct lock_object *lock, struct thread **owner);
#endif
static uintptr_t unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
	.lc_assert = assert_rw,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rw,
#endif
};

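/*
 * A short note on the lock word, as a reading aid for the macros below:
 * rw_lock either encodes the owning thread pointer (when write-locked) or a
 * reader count tagged with RW_LOCK_READ, with the remaining low bits carrying
 * the RW_LOCK_READ_WAITERS, RW_LOCK_WRITE_WAITERS and RW_LOCK_WRITE_SPINNER
 * flags.  See <sys/rwlock.h> for the exact layout.
 */
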
/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Returns true if the write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	__rw_assert(c, what, file, line)
#endif

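/*
 * Lock class callbacks.  These adapt the generic lock_object interface
 * (lc_assert, lc_lock, lc_unlock and lc_owner in lock_class_rw above) to
 * the rwlock KPI implemented in this file.
 */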
void
assert_rw(const struct lock_object *lock, int what)
{

	rw_assert((const struct rwlock *)lock, what);
}

void
lock_rw(struct lock_object *lock, uintptr_t how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_rlock(rw);
	else
		rw_wlock(rw);
}

uintptr_t
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (1);
	} else {
		rw_wunlock(rw);
		return (0);
	}
}

#ifdef KDTRACE_HOOKS
int
owner_rw(const struct lock_object *lock, struct thread **owner)
{
	const struct rwlock *rw = (const struct rwlock *)lock;
	uintptr_t x = rw->rw_lock;

	*owner = rw_wowner(rw);
	return ((x & RW_LOCK_READ) != 0 ?  (RW_READERS(x) != 0) :
	    (*owner != NULL));
}
#endif

void
_rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
{
	struct rwlock *rw;
	int flags;

	rw = rwlock2rw(c);

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE | RW_NEW)) == 0);
	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
	    &rw->rw_lock));

	flags = LO_UPGRADABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	if (opts & RW_NEW)
		flags |= LO_NEW;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
}

void
_rw_destroy(volatile uintptr_t *c)
{
	struct rwlock *rw;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
	KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

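/*
 * SYSINIT callbacks used by the RW_SYSINIT() and RW_SYSINIT_FLAGS() macros
 * in <sys/rwlock.h> to initialize statically declared rwlocks during boot.
 */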
void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
}

void
rw_sysinit_flags(void *arg)
{
	struct rw_args_flags *args = arg;

	rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
	    args->ra_flags);
}

int
_rw_wowned(const volatile uintptr_t *c)
{

	return (rw_wowner(rwlock2rw(c)) == curthread);
}

void
_rw_wlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line, NULL);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_INC(curthread);
}

int
__rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	int rval;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_wlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));

	if (rw_wlocked(rw) &&
	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		rw->rw_recurse++;
		rval = 1;
	} else
		rval = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_UNLOCKED,
		    (uintptr_t)curthread);

	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
	if (rval) {
		WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		if (!rw_recursed(rw))
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_WRITER);
		TD_LOCKS_INC(curthread);
	}
	return (rval);
}

void
_rw_wunlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	__rw_wunlock(rw, curthread, file, line);
	TD_LOCKS_DEC(curthread);
}

/*
 * Determines whether a new reader can acquire a lock.  Succeeds if the
 * reader already owns a read lock and the lock is locked for read to
 * prevent deadlock from reader recursion.  Also succeeds if the lock
 * is unlocked and has no writer waiters or spinners.  Failing otherwise
 * prioritizes writers before readers.
 */
#define	RW_CAN_READ(_rw)						\
    ((curthread->td_rw_rlocks && (_rw) & RW_LOCK_READ) || ((_rw) &	\
    (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS | RW_LOCK_WRITE_SPINNER)) ==	\
    RW_LOCK_READ)

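/*
 * For example, RW_CAN_READ() is true for an unlocked lock (RW_UNLOCKED is
 * encoded as a read lock with zero readers and no waiters) and becomes false
 * as soon as a writer is waiting or spinning, unless the lock is still
 * read-locked and the calling thread already holds read locks
 * (td_rw_rlocks != 0), which is what avoids deadlock on reader recursion.
 */
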
void
__rw_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t v;
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("rw_rlock: wlock already held for %s @ %s:%d",
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if the lock has been unlocked and write waiters
			 * were present.
			 */
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v,
			    v + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)v,
					    (void *)(v + RW_ONE_READER));
				break;
			}
			continue;
		}
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, rw, owner);
				KTR_STATE1(KTR_SCHED, "thread",
				    sched_tdname(curthread), "spinning",
				    "lockname:\"%s\"", rw->lock_object.lo_name);
				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
				    owner && TD_IS_RUNNING(owner)) {
					cpu_spinwait();
#ifdef KDTRACE_HOOKS
					spin_cnt++;
#endif
				}
				KTR_STATE0(KTR_SCHED, "thread",
				    sched_tdname(curthread), "running");
				continue;
			}
		} else if (spintries < rowner_retries) {
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				v = rw->rw_lock;
				if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
					break;
				cpu_spinwait();
			}
#ifdef KDTRACE_HOOKS
			spin_cnt += rowner_loops - i;
#endif
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			if (i != rowner_loops)
				continue;
		}
#endif

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock or there are write waiters present,
		 * acquire the turnstile lock so we can begin the process
		 * of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if needed.
		 */
		v = rw->rw_lock;
		if (RW_CAN_READ(v)) {
			turnstile_cancel(ts);
			continue;
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if ((v & RW_LOCK_READ) == 0) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif

		/*
		 * The lock is held in write mode or it already has waiters.
		 */
		MPASS(!RW_CAN_READ(v));

		/*
		 * If the RW_LOCK_READ_WAITERS flag is already set, then
		 * we can go ahead and block.  If it is not set then try
		 * to set it.  If we fail to set it drop the turnstile
		 * lock and restart the loop.
		 */
		if (!(v & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_READER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_READER);
	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	TD_LOCKS_INC(curthread);
	curthread->td_rw_rlocks++;
}

int
__rw_try_rlock(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t x;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rw_try_rlock() by idle thread %p on rwlock %s @ %s:%d",
	    curthread, rw->lock_object.lo_name, file, line));

	for (;;) {
		x = rw->rw_lock;
		KASSERT(rw->rw_lock != RW_DESTROYED,
		    ("rw_try_rlock() of destroyed rwlock @ %s:%d", file, line));
		if (!(x & RW_LOCK_READ))
			break;
		if (atomic_cmpset_acq_ptr(&rw->rw_lock, x, x + RW_ONE_READER)) {
			LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file,
			    line);
			WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line);
			LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire,
			    rw, 0, 0, file, line, LOCKSTAT_READER);
			TD_LOCKS_INC(curthread);
			curthread->td_rw_rlocks++;
			return (1);
		}
	}

	LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line);
	return (0);
}

void
_rw_runlock_cookie(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t x, v, queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}
		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WAITERS)) {
			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
			    RW_READERS_LOCK(1));
			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}
		/*
		 * Ok, we know we have waiters and we think we are the
		 * last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);
		v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		MPASS(v & RW_LOCK_WAITERS);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wakeup actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wakeup all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		x = RW_UNLOCKED;
		if (v & RW_LOCK_WRITE_WAITERS) {
			queue = TS_EXCLUSIVE_QUEUE;
			x |= (v & RW_LOCK_READ_WAITERS);
		} else
			queue = TS_SHARED_QUEUE;
		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
		    x)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, queue);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
	LOCKSTAT_PROFILE_RELEASE_RWLOCK(rw__release, rw, LOCKSTAT_READER);
	TD_LOCKS_DEC(curthread);
	curthread->td_rw_rlocks--;
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
__rw_wlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
	int spintries = 0;
	int i;
#endif
	uintptr_t v, x;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef KDTRACE_HOOKS
	uintptr_t state;
	uint64_t spin_cnt = 0;
	uint64_t sleep_cnt = 0;
	int64_t sleep_time = 0;
	int64_t all_time = 0;
#endif

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

#ifdef KDTRACE_HOOKS
	all_time -= lockstat_nsecs(&rw->lock_object);
	state = rw->rw_lock;
#endif
	for (;;) {
		if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
			break;
#ifdef KDTRACE_HOOKS
		spin_cnt++;
#endif
#ifdef HWPMC_HOOKS
		PMC_SOFT_CALL( , , lock, failed);
#endif
		lock_profile_obtain_lock_failed(&rw->lock_object,
		    &contested, &waittime);
#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		v = rw->rw_lock;
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner)) {
				cpu_spinwait();
#ifdef KDTRACE_HOOKS
				spin_cnt++;
#endif
			}
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
			continue;
		}
		if ((v & RW_LOCK_READ) && RW_READERS(v) &&
		    spintries < rowner_retries) {
			if (!(v & RW_LOCK_WRITE_SPINNER)) {
				if (!atomic_cmpset_ptr(&rw->rw_lock, v,
				    v | RW_LOCK_WRITE_SPINNER)) {
					continue;
				}
			}
			spintries++;
			KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
			    "spinning", "lockname:\"%s\"",
			    rw->lock_object.lo_name);
			for (i = 0; i < rowner_loops; i++) {
				if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
					break;
				cpu_spinwait();
			}
			KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
			    "running");
#ifdef KDTRACE_HOOKS
			spin_cnt += rowner_loops - i;
#endif
			if (i != rowner_loops)
				continue;
		}
#endif
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * The current lock owner might have started executing
		 * on another CPU (or the lock could have changed
		 * owners) while we were waiting on the turnstile
		 * chain lock.  If so, drop the turnstile lock and try
		 * again.
		 */
		if (!(v & RW_LOCK_READ)) {
			owner = (struct thread *)RW_OWNER(v);
			if (TD_IS_RUNNING(owner)) {
				turnstile_cancel(ts);
				continue;
			}
		}
#endif
		/*
		 * Check the waiters flags of this rwlock.  If the lock was
		 * released without maintaining any pending waiters queue,
		 * simply try to acquire it.  If a pending waiters queue is
		 * present, claim the lock ownership and maintain the pending
		 * queue.
		 */
		x = v & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
		if ((v & ~x) == RW_UNLOCKED) {
			x &= ~RW_LOCK_WRITE_SPINNER;
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
				if (x)
					turnstile_claim(ts);
				else
					turnstile_cancel(ts);
				break;
			}
			turnstile_cancel(ts);
			continue;
		}
		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}
		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef KDTRACE_HOOKS
		sleep_time -= lockstat_nsecs(&rw->lock_object);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
		sleep_time += lockstat_nsecs(&rw->lock_object);
		sleep_cnt++;
#endif
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
#ifdef ADAPTIVE_RWLOCKS
		spintries = 0;
#endif
	}
#ifdef KDTRACE_HOOKS
	all_time += lockstat_nsecs(&rw->lock_object);
	if (sleep_time)
		LOCKSTAT_RECORD4(rw__block, rw, sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));

	/* Record only the loops spinning and not sleeping. */
	if (spin_cnt > sleep_cnt)
		LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time,
		    LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0,
		    (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state));
#endif
	LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(rw__acquire, rw, contested,
	    waittime, file, line, LOCKSTAT_WRITER);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
__rw_wunlock_hard(volatile uintptr_t *c, uintptr_t tid, const char *file,
    int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		rw->rw_recurse--;
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters if we have any over writers.  This is probably not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wakeup the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
		queue = TS_EXCLUSIVE_QUEUE;
		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
	} else
		queue = TS_SHARED_QUEUE;

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
__rw_try_upgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	uintptr_t v, x, tid;
	struct turnstile *ts;
	int success;

	if (SCHEDULER_STOPPED())
		return (1);

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	success = 0;
	for (;;) {
		v = rw->rw_lock;
		if (RW_READERS(v) > 1)
			break;
		if (!(v & RW_LOCK_WAITERS)) {
			success = atomic_cmpset_ptr(&rw->rw_lock, v, tid);
			if (!success)
				continue;
			break;
		}

		/*
		 * Ok, we think we have waiters, so lock the turnstile.
		 */
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;
		if (RW_READERS(v) > 1) {
			turnstile_cancel(ts);
			break;
		}
		/*
		 * Try to switch from one reader to a writer again.  This time
		 * we honor the current state of the waiters flags.
		 * If we obtain the lock with the flags set, then claim
		 * ownership of the turnstile.
		 */
		x = rw->rw_lock & RW_LOCK_WAITERS;
		success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
		if (success) {
			if (x)
				turnstile_claim(ts);
			else
				turnstile_cancel(ts);
			break;
		}
		turnstile_cancel(ts);
	}
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success) {
		curthread->td_rw_rlocks--;
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
		LOCKSTAT_RECORD0(rw__upgrade, rw);
	}
	return (success);
}

/*
 * Downgrade a write lock into a single read lock.
 */
void
__rw_downgrade(volatile uintptr_t *c, const char *file, int line)
{
	struct rwlock *rw;
	struct turnstile *ts;
	uintptr_t tid, v;
	int rwait, wwait;

	if (SCHEDULER_STOPPED())
		return;

	rw = rwlock2rw(c);

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	__rw_assert(c, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile and "disown" the lock.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock & RW_LOCK_WAITERS;
	rwait = v & RW_LOCK_READ_WAITERS;
	wwait = v & RW_LOCK_WRITE_WAITERS;
	MPASS(rwait | wwait);

	/*
	 * Downgrade from a write lock while preserving waiters flag
	 * and give up ownership of the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
	MPASS(ts != NULL);
	if (!wwait)
		v &= ~RW_LOCK_READ_WAITERS;
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
	/*
	 * Wake other readers if there are no writers pending.  Otherwise they
	 * won't be able to acquire the lock anyway.
	 */
	if (rwait && !wwait) {
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	} else
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	curthread->td_rw_rlocks++;
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
	LOCKSTAT_RECORD0(rw__downgrade, rw);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef __rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
__rw_assert(const volatile uintptr_t *c, int what, const char *file, int line)
{
	const struct rwlock *rw;

	if (panicstr != NULL)
		return;

	rw = rwlock2rw(c);

	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_rwlock(const struct lock_object *lock)
{
	const struct rwlock *rw;
	struct thread *td;

	rw = (const struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif