kern_rwlock.c revision 167024
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 167024 2007-02-26 19:05:13Z rwatson $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif

struct lock_class lock_class_rw = {
	"rw",
	LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	db_show_rwlock
#endif
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return a pointer to the owning thread for this lock who should receive
 * any priority lent by threads that block on this lock.  Currently this
 * is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)
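
/*
 * An illustrative sketch (not normative; the exact bit layout lives in
 * <sys/rwlock.h>) of the rw_lock word states that the macros above
 * decode:
 *
 *	write-locked:	the owning thread pointer with RW_LOCK_READ
 *			clear; RW_LOCK_READ_WAITERS and/or
 *			RW_LOCK_WRITE_WAITERS may also be set
 *	read-locked:	RW_LOCK_READ set, with the reader count kept
 *			in the upper bits (RW_READERS()) and bumped by
 *			adding RW_ONE_READER
 *	unlocked:	RW_READERS_LOCK(0), i.e. a read lock held by
 *			zero readers, so RW_LOCK_READ is set here too
 */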

#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

void
rw_init(struct rwlock *rw, const char *name)
{

	rw->rw_lock = RW_UNLOCKED;

	lock_profile_object_init(&rw->rw_object, &lock_class_rw, name);
	lock_init(&rw->rw_object, &lock_class_rw, name, NULL, LO_WITNESS |
	    LO_RECURSABLE | LO_UPGRADABLE);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	lock_profile_object_destroy(&rw->rw_object);
	lock_destroy(&rw->rw_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}

int
rw_wowned(struct rwlock *rw)
{

	return (rw_wowner(rw) == curthread);
}
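
/*
 * Example (an illustrative sketch only; "foo_softc" and its members
 * are hypothetical): the typical life cycle of an rwlock, using the
 * public rw_*() macros that wrap the _rw_*() functions below.
 *
 *	struct foo_softc {
 *		struct rwlock	sc_lock;
 *		int		sc_count;
 *	} *sc;
 *
 *	rw_init(&sc->sc_lock, "foo softc");
 *
 *	rw_rlock(&sc->sc_lock);			(shared, for reading)
 *	value = sc->sc_count;
 *	rw_runlock(&sc->sc_lock);
 *
 *	rw_wlock(&sc->sc_lock);			(exclusive, for writing)
 *	sc->sc_count++;
 *	rw_wunlock(&sc->sc_lock);
 *
 *	rw_destroy(&sc->sc_lock);
 */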

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->rw_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->rw_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->rw_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->rw_object, 0, 0, file, line);
	lock_profile_release_lock(&rw->rw_object);
	__rw_wunlock(rw, curthread, file, line);
}

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
#ifdef SMP
	volatile struct thread *owner;
#endif
	uint64_t waitstart = 0;
	int contested = 0;
	uintptr_t x;

	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->rw_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->rw_object, LOP_NEWORDER, file, line);

	/*
	 * Note that we don't make any attempt to try to block read
	 * locks once a writer has blocked on the lock.  The reason is
	 * that we currently allow for read locks to recurse and we
	 * don't keep track of all the holders of read locks.  Thus, if
	 * we were to block readers once a writer blocked and a reader
	 * tried to recurse on their reader lock after a writer had
	 * blocked we would end up in a deadlock since the reader would
	 * be blocked on the writer, and the writer would be blocked
	 * waiting for the reader to release its original read lock.
	 */
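	/*
	 * For example (a hypothetical interleaving): thread A read
	 * locks 'rw' and thread B then blocks in rw_wlock().  If
	 * blocked writers gated new read locks, a recursive rw_rlock()
	 * by thread A would sleep behind B, while B sleeps waiting for
	 * A to drop its first read lock: deadlock.
	 */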
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {

			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if another thread currently holds a write lock,
			 * and in that case RW_LOCK_READ should be clear.
			 */
			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
			    x + RW_ONE_READER)) {
				lock_profile_obtain_lock_success(
				    &rw->rw_object, contested, waitstart,
				    file, line);
				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    rw, (void *)x,
					    (void *)(x + RW_ONE_READER));
				break;
			}
			lock_profile_obtain_lock_failed(&rw->rw_object,
			    &contested, &waitstart);
			cpu_spinwait();
			continue;
		}

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock, so acquire the turnstile lock so we can
		 * begin the process of blocking.
		 */
		turnstile_lock(&rw->rw_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if there is no
		 * longer a write lock.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {
			turnstile_release(&rw->rw_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * Ok, it's still a write lock.  If the RW_LOCK_READ_WAITERS
		 * flag is already set, then we can go ahead and block.  If
		 * it is not set then try to set it.  If we fail to set it,
		 * drop the turnstile lock and restart the loop.
		 */
		if (!(x & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, x,
			    x | RW_LOCK_READ_WAITERS)) {
				turnstile_release(&rw->rw_object);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

#ifdef SMP
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		owner = (struct thread *)RW_OWNER(x);
		if (TD_IS_RUNNING(owner)) {
			lock_profile_obtain_lock_failed(&rw->rw_object,
			    &contested, &waitstart);
			turnstile_release(&rw->rw_object);
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(&rw->rw_object, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */

	LOCK_LOG_LOCK("RLOCK", &rw->rw_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->rw_object, 0, file, line);
	curthread->td_locks++;
}

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x;

	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->rw_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->rw_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		} else
			lock_profile_release_lock(&rw->rw_object);

		/*
		 * We should never have read waiters while at least one
		 * thread holds a read lock.  (See note above)
		 */
		KASSERT(!(x & RW_LOCK_READ_WAITERS),
		    ("%s: waiting readers", __func__));

		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WRITE_WAITERS)) {

			/*
			 * There shouldn't be any flags set and we should
			 * be the only read lock.  If we fail to release
			 * the single read lock, then another thread might
			 * have just acquired a read lock, so go back up
			 * to the multiple read locks case.
			 */
			MPASS(x == RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}

		/*
		 * There should just be one reader with one or more
		 * writers waiting.
		 */
		MPASS(x == (RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));

		/*
		 * Ok, we know we have a waiting writer and we think we
		 * are the last reader, so grab the turnstile lock.
		 */
		turnstile_lock(&rw->rw_object);

		/*
		 * Try to drop our lock leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs and have the new thread
		 * "steal" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		if (!atomic_cmpset_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
			turnstile_release(&rw->rw_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->rw_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		break;
	}
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
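/*
 * For reference, the uncontested fast path (the __rw_wlock() macro in
 * <sys/rwlock.h>) amounts to roughly the following sketch; we only get
 * here once its single compare-and-set from RW_UNLOCKED to the owning
 * thread pointer has failed:
 *
 *	if (!_rw_write_lock(rw, tid))
 *		_rw_wlock_hard(rw, tid, file, line);
 */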
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
#ifdef SMP
	volatile struct thread *owner;
#endif
	uintptr_t v;

	if (LOCK_LOG_TEST(&rw->rw_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->rw_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		turnstile_lock(&rw->rw_object);
		v = rw->rw_lock;

		/*
		 * If the lock was released while spinning on the
		 * turnstile chain lock, try again.
		 */
		if (v == RW_UNLOCKED) {
			turnstile_release(&rw->rw_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the lock was released by a writer with both readers
		 * and writers waiting and a reader hasn't woken up and
		 * acquired the lock yet, rw_lock will be set to the
		 * value RW_UNLOCKED | RW_LOCK_WRITE_WAITERS.  If we see
		 * that value, try to acquire it once.  Note that we have
		 * to preserve the RW_LOCK_WRITE_WAITERS flag as there are
		 * other writers waiting still.  If we fail, restart the
		 * loop.
		 */
		if (v == (RW_UNLOCKED | RW_LOCK_WRITE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&rw->rw_lock,
			    RW_UNLOCKED | RW_LOCK_WRITE_WAITERS,
			    tid | RW_LOCK_WRITE_WAITERS)) {
				turnstile_claim(&rw->rw_object);
				if (LOCK_LOG_TEST(&rw->rw_object, 0))
					CTR2(KTR_LOCK,
					    "%s: %p claimed by new writer",
					    __func__, rw);
				break;
			}
			turnstile_release(&rw->rw_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_release(&rw->rw_object);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}

#ifdef SMP
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			turnstile_release(&rw->rw_object);
			if (LOCK_LOG_TEST(&rw->rw_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(&rw->rw_object, rw_owner(rw),
		    TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the 2 waiter bits must be set indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags is set", __func__));

	if (LOCK_LOG_TEST(&rw->rw_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_lock(&rw->rw_object);
	ts = turnstile_lookup(&rw->rw_object);

#ifdef SMP
	/*
	 * There might not be a turnstile for this lock if all of
	 * the waiters are adaptively spinning.  In that case, just
	 * reset the lock to the unlocked state and return.
	 */
	if (ts == NULL) {
		atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers", __func__, rw);
		turnstile_release(&rw->rw_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif

	/*
	 * Use the same algo as sx locks for now.  Prefer waking up shared
	 * waiters, if there are any, over writers.  This is probably not
	 * ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 *
	 * Note that in the SMP case, if both flags are set, there might
	 * not be any actual writers on the turnstile as they might all
	 * be spinning.  In that case, we don't want to preserve the
	 * RW_LOCK_WRITE_WAITERS flag as the turnstile is going to go
	 * away once we wake up all the readers.
	 */
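	/*
	 * Concretely (an illustrative trace): with both waiter bits set
	 * and at least one writer actually asleep, we wake the shared
	 * queue and store back RW_UNLOCKED | RW_LOCK_WRITE_WAITERS;
	 * _rw_wlock_hard() recognizes exactly that value and lets a new
	 * writer claim both the lock and the turnstile directly.
	 */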
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
		queue = TS_SHARED_QUEUE;
#ifdef SMP
		if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
		    !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
			v |= RW_LOCK_WRITE_WAITERS;
#else
		v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
#endif
	} else
		queue = TS_EXCLUSIVE_QUEUE;

#ifdef SMP
	/*
	 * We have to make sure that we actually have waiters to
	 * wake up.  If they are all spinning, then we just need to
	 * disown the turnstile and return.
	 */
	if (turnstile_empty(ts, queue)) {
		if (LOCK_LOG_TEST(&rw->rw_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
		atomic_store_rel_ptr(&rw->rw_lock, v);
		turnstile_disown(ts);
		return;
	}
#endif

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->rw_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
	uintptr_t v, tid;
	int success;

	_rw_assert(rw, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	if (!(rw->rw_lock & RW_LOCK_WRITE_WAITERS)) {
		success = atomic_cmpset_acq_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1), tid);
		goto out;
	}

	/*
	 * Ok, we think we have write waiters, so lock the
	 * turnstile.
	 */
	turnstile_lock(&rw->rw_object);

	/*
	 * Try to switch from one reader to a writer again.  This time
	 * we honor the current state of the RW_LOCK_WRITE_WAITERS
	 * flag.  If we obtain the lock with the flag set, then claim
	 * ownership of the turnstile.  In the SMP case it is possible
	 * for there to not be an associated turnstile even though there
	 * are waiters if all of the waiters are spinning.
	 */
	v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
	success = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
	    tid | v);
#ifdef SMP
	if (success && v && turnstile_lookup(&rw->rw_object) != NULL)
#else
	if (success && v)
#endif
		turnstile_claim(&rw->rw_object);
	else
		turnstile_release(&rw->rw_object);
out:
	LOCK_LOG_TRY("WUPGRADE", &rw->rw_object, 0, success, file, line);
	if (success)
		WITNESS_UPGRADE(&rw->rw_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	return (success);
}
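
/*
 * Example (an illustrative sketch; "sc" is hypothetical): the common
 * consumer pattern is to fall back to dropping the read lock and
 * taking the write lock from scratch when the upgrade fails,
 * revalidating any state that may have changed in the window where no
 * lock was held:
 *
 *	rw_rlock(&sc->sc_lock);
 *	if (!rw_try_upgrade(&sc->sc_lock)) {
 *		rw_runlock(&sc->sc_lock);
 *		rw_wlock(&sc->sc_lock);
 *		(revalidate state here)
 *	}
 *	(write-locked either way at this point)
 *	rw_wunlock(&sc->sc_lock);
 */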

/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;

	_rw_assert(rw, RA_WLOCKED, file, line);

	WITNESS_DOWNGRADE(&rw->rw_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile, "disown" the lock, and awaken any read
	 * waiters.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_lock(&rw->rw_object);
	v = rw->rw_lock;
	MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));

	/*
	 * Downgrade from a write lock while preserving
	 * RW_LOCK_WRITE_WAITERS and give up ownership of the
	 * turnstile.  If there are any read waiters, wake them up.
	 *
	 * For SMP, we have to allow for the fact that all of the
	 * read waiters might be spinning.  In that case, act as if
	 * RW_LOCK_READ_WAITERS is not set.  Also, only preserve
	 * the RW_LOCK_WRITE_WAITERS flag if at least one writer is
	 * blocked on the turnstile.
	 */
	ts = turnstile_lookup(&rw->rw_object);
#ifdef SMP
	if (ts == NULL)
		v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
	else if (v & RW_LOCK_READ_WAITERS &&
	    turnstile_empty(ts, TS_SHARED_QUEUE))
		v &= ~RW_LOCK_READ_WAITERS;
	else if (v & RW_LOCK_WRITE_WAITERS &&
	    turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
		v &= ~RW_LOCK_WRITE_WAITERS;
#else
	MPASS(ts != NULL);
#endif
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
	    (v & RW_LOCK_WRITE_WAITERS));
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
#ifdef SMP
	else if (ts == NULL)
		turnstile_release(&rw->rw_object);
#endif
	else
		turnstile_disown(ts);
out:
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->rw_object, 0, 0, file, line);
}
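
/*
 * Example (an illustrative sketch; "sc" is hypothetical): a thread
 * that finishes its modifications but still needs a consistent view
 * can downgrade rather than unlock and relock, which would open a
 * window for another writer to slip in:
 *
 *	rw_wlock(&sc->sc_lock);
 *	sc->sc_count++;
 *	rw_downgrade(&sc->sc_lock);
 *	(read-only use of sc; other readers may now run)
 *	rw_runlock(&sc->sc_lock);
 */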

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->rw_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->rw_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);
#endif
		break;
	case RA_WLOCKED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->rw_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->rw_object, what, file, line);
#else
		/*
		 * If we hold a write lock, fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->rw_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
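
/*
 * Example (an illustrative sketch; "foo_modify" is hypothetical): a
 * subsystem can document and enforce its locking contract by asserting
 * it at the top of each routine:
 *
 *	static void
 *	foo_modify(struct foo_softc *sc)
 *	{
 *
 *		rw_assert(&sc->sc_lock, RA_WLOCKED);
 *		sc->sc_count++;
 *	}
 */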

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %jd locks\n",
		    (intmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif