kern_rwlock.c revision 173617
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 173617 2007-11-14 21:21:48Z attilio $");

#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>

#include <machine/cpu.h>

CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif
static void	lock_rw(struct lock_object *lock, int how);
static int	unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return non-zero if the write owner is recursed.  Write ownership is not
 * assured here and should be checked beforehand.
 */
#define	rw_recursed(rw)		((rw)->rw_recurse != 0)

/*
 * Return true if curthread holds the lock.
 */
#define	rw_wlocked(rw)		(rw_wowner((rw)) == curthread)

/*
 * Return a pointer to the thread that owns this lock and should receive
 * any priority lent by threads that block on it.  Currently this is
 * identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

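/*
 * Informal sketch of the rw_lock word, for reference only; the
 * authoritative bit layout lives in <sys/rwlock.h>:
 *
 *	RW_UNLOCKED			unlocked (encoded as a read lock
 *					with no readers and no waiters)
 *	RW_READERS_LOCK(n) [| flags]	read-locked by n readers
 *	owner tid [| flags]		write-locked; RW_OWNER() recovers
 *					the owning thread pointer
 *
 * The flag bits used in this file are RW_LOCK_READ_WAITERS,
 * RW_LOCK_WRITE_WAITERS and, for a recursed write lock, RW_LOCK_RECURSED.
 */
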
void
lock_rw(struct lock_object *lock, int how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_wlock(rw);
	else
		rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (0);
	} else {
		rw_wunlock(rw);
		return (1);
	}
}

void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
{
	int flags;

	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
	    RW_RECURSE)) == 0);

	flags = LO_UPGRADABLE | LO_RECURSABLE;
	if (opts & RW_DUPOK)
		flags |= LO_DUPOK;
	if (opts & RW_NOPROFILE)
		flags |= LO_NOPROFILE;
	if (!(opts & RW_NOWITNESS))
		flags |= LO_WITNESS;
	if (opts & RW_QUIET)
		flags |= LO_QUIET;
	flags |= opts & RW_RECURSE;

	rw->rw_lock = RW_UNLOCKED;
	rw->rw_recurse = 0;
	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	KASSERT(rw->rw_recurse == 0, ("rw lock still recursed"));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}

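/*
 * Example (informal sketch): a typical consumer embeds or declares a
 * struct rwlock, initializes it once, and destroys it on teardown.  The
 * "example_lock" name below is a placeholder.
 *
 *	static struct rwlock example_lock;
 *
 *	rw_init(&example_lock, "example lock");
 *	...
 *	rw_destroy(&example_lock);
 *
 * A lock that must allow recursive write acquisition is initialized with
 * rw_init_flags(&example_lock, "example lock", RW_RECURSE) instead.  The
 * RW_SYSINIT() macro in <sys/rwlock.h> arranges for rw_sysinit() above to
 * run during boot for locks that need no explicit initialization call.
 */
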
int
rw_wowned(struct rwlock *rw)
{

	return (rw_wowner(rw) == curthread);
}

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file,
	    line);
	if (!rw_recursed(rw))
		lock_profile_release_lock(&rw->lock_object);
	__rw_wunlock(rw, curthread, file, line);
}

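/*
 * Example (informal sketch): callers reach _rw_wlock()/_rw_wunlock()
 * through the rw_wlock()/rw_wunlock() macros in <sys/rwlock.h>, which
 * supply the file and line arguments.  The names are placeholders.
 *
 *	rw_wlock(&example_lock);
 *	modify shared state here
 *	rw_wunlock(&example_lock);
 *
 * rw_wowned() is useful when a caller only needs to know whether it
 * already write-owns the lock, e.g. inside a KASSERT().
 */
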
void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
#ifdef LOCK_PROFILING_SHARED
	uint64_t waittime = 0;
	int contested = 0;
#endif
	uintptr_t x;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);

	/*
	 * Note that we don't make any attempt to try to block read
	 * locks once a writer has blocked on the lock.  The reason is
	 * that we currently allow for read locks to recurse and we
	 * don't keep track of all the holders of read locks.  Thus, if
	 * we were to block readers once a writer blocked and a reader
	 * tried to recurse on their reader lock after a writer had
	 * blocked we would end up in a deadlock since the reader would
	 * be blocked on the writer, and the writer would be blocked
	 * waiting for the reader to release its original read lock.
	 */
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {

			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if another thread currently holds a write lock,
			 * and in that case RW_LOCK_READ should be clear.
			 */
			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
			    x + RW_ONE_READER)) {
#ifdef LOCK_PROFILING_SHARED
				if (RW_READERS(x) == 0)
					lock_profile_obtain_lock_success(
					    &rw->lock_object, contested,
					    waittime, file, line);
#endif
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)x,
					    (void *)(x + RW_ONE_READER));
				break;
			}
			cpu_spinwait();
			continue;
		}

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock, so acquire the turnstile lock so we can
		 * begin the process of blocking.
		 */
		ts = turnstile_trywait(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if there is no
		 * longer a write lock.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

		/*
		 * Ok, it's still a write lock.  If the RW_LOCK_READ_WAITERS
		 * flag is already set, then we can go ahead and block.  If
		 * it is not set then try to set it.  If we fail to set it
		 * drop the turnstile lock and restart the loop.
		 */
		if (!(x & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, x,
			    x | RW_LOCK_READ_WAITERS)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		owner = (struct thread *)RW_OWNER(x);
		if (TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
#ifdef LOCK_PROFILING_SHARED
			lock_profile_obtain_lock_failed(&rw->lock_object,
			    &contested, &waittime);
#endif
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
#ifdef LOCK_PROFILING_SHARED
		lock_profile_obtain_lock_failed(&rw->lock_object, &contested,
		    &waittime);
#endif
		turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */

	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
}

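/*
 * Example (informal sketch): read locks are taken through the
 * rw_rlock()/rw_runlock() macros and, as the note at the top of
 * _rw_rlock() explains, may be acquired recursively by the same thread.
 * The names are placeholders.
 *
 *	rw_rlock(&example_lock);
 *	read shared state here
 *	rw_rlock(&example_lock);	(recursive read is permitted)
 *	...
 *	rw_runlock(&example_lock);
 *	rw_runlock(&example_lock);
 *
 * Taking a read lock while already holding the write lock is not
 * permitted and is caught by the KASSERT() at the top of _rw_rlock().
 */
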
void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}

		/*
		 * We should never have read waiters while at least one
		 * thread holds a read lock.  (See note above.)
		 */
		KASSERT(!(x & RW_LOCK_READ_WAITERS),
		    ("%s: waiting readers", __func__));
#ifdef LOCK_PROFILING_SHARED
		lock_profile_release_lock(&rw->lock_object);
#endif

		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WRITE_WAITERS)) {

			/*
			 * There shouldn't be any flags set and we should
			 * be the only read lock.  If we fail to release
			 * the single read lock, then another thread might
			 * have just acquired a read lock, so go back up
			 * to the multiple read locks case.
			 */
			MPASS(x == RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}

		/*
		 * There should just be one reader with one or more
		 * writers waiting.
		 */
		MPASS(x == (RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));

		/*
		 * Ok, we know we have a waiting writer and we think we
		 * are the last reader, so grab the turnstile lock.
		 */
		turnstile_chain_lock(&rw->lock_object);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		if (!atomic_cmpset_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
			turnstile_chain_unlock(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		turnstile_chain_unlock(&rw->lock_object);
		break;
	}
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uint64_t waittime = 0;
	uintptr_t v;
	int contested = 0;

	if (rw_wlocked(rw)) {
		KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
		    __func__, rw->lock_object.lo_name, file, line));
		rw->rw_recurse++;
		atomic_set_ptr(&rw->rw_lock, RW_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p recursing", __func__, rw);
		return;
	}

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		ts = turnstile_trywait(&rw->lock_object);
		v = rw->rw_lock;

		/*
		 * If the lock was released while spinning on the
		 * turnstile chain lock, try again.
		 */
		if (v == RW_UNLOCKED) {
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the lock was released by a writer with both readers
		 * and writers waiting and a reader hasn't woken up and
		 * acquired the lock yet, rw_lock will be set to the
		 * value RW_UNLOCKED | RW_LOCK_WRITE_WAITERS.  If we see
		 * that value, try to acquire it once.  Note that we have
		 * to preserve the RW_LOCK_WRITE_WAITERS flag as there are
		 * other writers still waiting.  If we fail, restart the
		 * loop.
		 */
		if (v == (RW_UNLOCKED | RW_LOCK_WRITE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&rw->rw_lock,
			    RW_UNLOCKED | RW_LOCK_WRITE_WAITERS,
			    tid | RW_LOCK_WRITE_WAITERS)) {
				turnstile_claim(ts);
				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
				    __func__, rw);
				break;
			}
			turnstile_cancel(ts);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_cancel(ts);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			turnstile_cancel(ts);
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			lock_profile_obtain_lock_failed(&rw->lock_object,
			    &contested, &waittime);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		lock_profile_obtain_lock_failed(&rw->lock_object, &contested,
		    &waittime);
		turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
	lock_profile_obtain_lock_success(&rw->lock_object, contested, waittime,
	    file, line);
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	if (rw_wlocked(rw) && rw_recursed(rw)) {
		if ((--rw->rw_recurse) == 0)
			atomic_clear_ptr(&rw->rw_lock, RW_LOCK_RECURSED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, rw);
		return;
	}

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_chain_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);

#ifdef ADAPTIVE_RWLOCKS
	/*
	 * There might not be a turnstile for this lock if all of
	 * the waiters are adaptively spinning.  In that case, just
	 * reset the lock to the unlocked state and return.
	 */
	if (ts == NULL) {
		atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers", __func__, rw);
		turnstile_chain_unlock(&rw->lock_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif

	/*
	 * Use the same algorithm as sx locks for now.  Prefer waking up
	 * shared waiters, if we have any, over writers.  This is probably
	 * not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 *
	 * Note that in the ADAPTIVE_RWLOCKS case, if both flags are
	 * set, there might not be any actual writers on the turnstile
	 * as they might all be spinning.  In that case, we don't want
	 * to preserve the RW_LOCK_WRITE_WAITERS flag as the turnstile
	 * is going to go away once we wake up all the readers.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
		queue = TS_SHARED_QUEUE;
#ifdef ADAPTIVE_RWLOCKS
		if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
		    !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
			v |= RW_LOCK_WRITE_WAITERS;
#else
		v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
#endif
	} else
		queue = TS_EXCLUSIVE_QUEUE;

#ifdef ADAPTIVE_RWLOCKS
	/*
	 * We have to make sure that we actually have waiters to
	 * wake up.  If they are all spinning, then we just need to
	 * disown the turnstile and return.
	 */
	if (turnstile_empty(ts, queue)) {
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
		atomic_store_rel_ptr(&rw->rw_lock, v);
		turnstile_disown(ts);
		turnstile_chain_unlock(&rw->lock_object);
		return;
	}
#endif

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	turnstile_chain_unlock(&rw->lock_object);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
	uintptr_t v, tid;
	struct turnstile *ts;
	int success;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	if (!(rw->rw_lock & RW_LOCK_WRITE_WAITERS)) {
		success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
		    tid);
		goto out;
	}

	/*
	 * Ok, we think we have write waiters, so lock the
	 * turnstile.
	 */
	ts = turnstile_trywait(&rw->lock_object);

	/*
	 * Try to switch from one reader to a writer again.  This time
	 * we honor the current state of the RW_LOCK_WRITE_WAITERS
	 * flag.  If we obtain the lock with the flag set, then claim
	 * ownership of the turnstile.  In the ADAPTIVE_RWLOCKS case
	 * it is possible for there to not be an associated turnstile
	 * even though there are waiters if all of the waiters are
	 * spinning.
	 */
	v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
	success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
	    tid | v);
#ifdef ADAPTIVE_RWLOCKS
	if (success && v && turnstile_lookup(&rw->lock_object) != NULL)
#else
	if (success && v)
#endif
		turnstile_claim(ts);
	else
		turnstile_cancel(ts);
out:
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success)
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	return (success);
}

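/*
 * Example (informal sketch): since the upgrade is non-blocking and may
 * fail, callers typically fall back to dropping the read lock, taking
 * the write lock, and revalidating any state that may have changed in
 * between.  The names are placeholders.
 *
 *	rw_rlock(&example_lock);
 *	if (!rw_try_upgrade(&example_lock)) {
 *		rw_runlock(&example_lock);
 *		rw_wlock(&example_lock);
 *		recheck the state; another thread may have run
 *	}
 *	modify shared state here
 *	rw_wunlock(&example_lock);
 */
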
/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
#ifndef INVARIANTS
	if (rw_recursed(rw))
		panic("downgrade of a recursed lock");
#endif

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile, "disown" the lock, and awaken any read
	 * waiters.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_chain_lock(&rw->lock_object);
	v = rw->rw_lock;
	MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));

	/*
	 * Downgrade from a write lock while preserving
	 * RW_LOCK_WRITE_WAITERS and give up ownership of the
	 * turnstile.  If there are any read waiters, wake them up.
	 *
	 * For ADAPTIVE_RWLOCKS, we have to allow for the fact that
	 * all of the read waiters might be spinning.  In that case,
	 * act as if RW_LOCK_READ_WAITERS is not set.  Also, only
	 * preserve the RW_LOCK_WRITE_WAITERS flag if at least one
	 * writer is blocked on the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
#ifdef ADAPTIVE_RWLOCKS
	if (ts == NULL)
		v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
	else if (v & RW_LOCK_READ_WAITERS &&
	    turnstile_empty(ts, TS_SHARED_QUEUE))
		v &= ~RW_LOCK_READ_WAITERS;
	else if (v & RW_LOCK_WRITE_WAITERS &&
	    turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
		v &= ~RW_LOCK_WRITE_WAITERS;
#else
	MPASS(ts != NULL);
#endif
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
	    (v & RW_LOCK_WRITE_WAITERS));
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	else if (ts)
		turnstile_disown(ts);
	turnstile_chain_unlock(&rw->lock_object);
out:
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}

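/*
 * Example (informal sketch): a thread that needs exclusive access for
 * only part of its work can shed the write lock without ever fully
 * releasing the lock.  The names are placeholders.
 *
 *	rw_wlock(&example_lock);
 *	update shared state here
 *	rw_downgrade(&example_lock);
 *	continue reading the updated state, now concurrently with
 *	other readers
 *	rw_runlock(&example_lock);
 */
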
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);

		if (!(rw->rw_lock & RW_LOCK_READ)) {
			if (rw_recursed(rw)) {
				if (what & RA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    rw->lock_object.lo_name, file,
					    line);
			} else if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		}
#endif
		break;
	case RA_WLOCKED:
	case RA_WLOCKED | RA_RECURSED:
	case RA_WLOCKED | RA_NOTRECURSED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		if (rw_recursed(rw)) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rw->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

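/*
 * Example (informal sketch): subsystems commonly document their locking
 * protocol with rw_assert() so that INVARIANTS (and WITNESS, where
 * available) kernels catch callers that enter without the expected lock.
 * The "example_softc" and "sc_lock" names are placeholders.
 *
 *	static void
 *	example_update(struct example_softc *sc)
 *	{
 *
 *		rw_assert(&sc->sc_lock, RA_WLOCKED);
 *		...
 *	}
 */
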
#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_name);
		if (rw_recursed(rw))
			db_printf(" recursed: %u\n", rw->rw_recurse);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif