/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 169675 2007-05-18 15:04:59Z jhb $");

#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif
static void	lock_rw(struct lock_object *lock, int how);
static int	unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return a pointer to the thread that should receive any priority lent
 * by threads that block on this lock.  Currently this is identical to
 * rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

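/*
 * For reference, a sketch of how the rw_lock word encodes lock state,
 * reconstructed from the RW_* macros used in this file (illustrative,
 * not a normative definition; see <sys/rwlock.h> for the real one):
 *
 *	read-locked:	RW_LOCK_READ is set and the reader count lives in
 *			the upper bits (extracted with RW_READERS() and
 *			adjusted in units of RW_ONE_READER).  A completely
 *			unlocked lock (RW_UNLOCKED) is just a read lock
 *			with zero readers and no waiters.
 *	write-locked:	RW_LOCK_READ is clear and the upper bits hold the
 *			owning thread pointer, recovered via RW_OWNER().
 *	waiter flags:	RW_LOCK_READ_WAITERS and RW_LOCK_WRITE_WAITERS
 *			live in the low bits in either state.
 */
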
#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

void
lock_rw(struct lock_object *lock, int how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_wlock(rw);
	else
		rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (0);
	} else {
		rw_wunlock(rw);
		return (1);
	}
}
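
/*
 * Note that lock_rw() and unlock_rw() are not called directly; they back
 * the lc_lock/lc_unlock hooks in lock_class_rw above so that generic code
 * can drop and reacquire an rwlock it was handed without knowing the lock
 * type.  A hypothetical caller, sketched for illustration only:
 *
 *	struct lock_class *class = LOCK_CLASS(lock);
 *	int how;
 *
 *	how = class->lc_unlock(lock);	// remembers read vs. write
 *	... sleep, or otherwise give up the lock ...
 *	class->lc_lock(lock, how);	// reacquire in the same mode
 */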

void
rw_init(struct rwlock *rw, const char *name)
{

	rw->rw_lock = RW_UNLOCKED;

	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, LO_WITNESS |
	    LO_RECURSABLE | LO_UPGRADABLE);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	rw->rw_lock = RW_DESTROYED;
	lock_destroy(&rw->lock_object);
}
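
/*
 * Example consumer lifecycle (an illustrative sketch only; "foo" and its
 * lock are hypothetical and not part of this file):
 *
 *	static struct rwlock foo_lock;
 *
 *	rw_init(&foo_lock, "foo");	// once, before first use
 *
 *	rw_rlock(&foo_lock);		// shared (read) access
 *	... read foo state ...
 *	rw_runlock(&foo_lock);
 *
 *	rw_wlock(&foo_lock);		// exclusive (write) access
 *	... modify foo state ...
 *	rw_wunlock(&foo_lock);
 *
 *	rw_destroy(&foo_lock);		// lock must be unlocked here
 */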

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}
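
/*
 * rw_sysinit() is the SYSINIT handler behind the RW_SYSINIT() macro in
 * <sys/rwlock.h>, which arranges for a lock to be initialized during
 * boot, e.g. (hypothetical lock, for illustration):
 *
 *	static struct rwlock foo_lock;
 *	RW_SYSINIT(foo_lock_init, &foo_lock, "foo");
 */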

int
rw_wowned(struct rwlock *rw)
{

	return (rw_wowner(rw) == curthread);
}

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, 0, file, line);
	lock_profile_release_lock(&rw->lock_object);
	__rw_wunlock(rw, curthread, file, line);
}

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uint64_t waittime = 0;
	int contested = 0;
	uintptr_t x;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);

	/*
	 * Note that we don't make any attempt to block new read
	 * locks once a writer has blocked on the lock.  The reason is
	 * that we currently allow read locks to recurse and we don't
	 * keep track of all the holders of read locks.  Thus, if we
	 * were to block readers once a writer blocked, a reader trying
	 * to recurse on its read lock would deadlock: the reader would
	 * be blocked on the writer, and the writer would be blocked
	 * waiting for the reader to release its original read lock.
	 */
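	/*
	 * A concrete (hypothetical) interleaving of the deadlock that
	 * the note above is guarding against, had we blocked readers
	 * behind waiting writers:
	 *
	 *	thread A: rw_rlock(rw)	// first read lock
	 *	thread B: rw_wlock(rw)	// blocks waiting for A
	 *	thread A: rw_rlock(rw)	// recursive read lock; would
	 *				// block behind B while B is
	 *				// blocked waiting for A
	 */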
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {

			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if another thread currently holds a write lock,
			 * and in that case RW_LOCK_READ should be clear.
			 */
			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
			    x + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p", __func__,
					    rw, (void *)x,
					    (void *)(x + RW_ONE_READER));
				if (RW_READERS(x) == 0)
					lock_profile_obtain_lock_success(
					    &rw->lock_object, contested, waittime,
					    file, line);
				break;
			}
			cpu_spinwait();
			continue;
		}
		lock_profile_obtain_lock_failed(&rw->lock_object, &contested,
		    &waittime);

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock, so acquire the turnstile lock so we can
		 * begin the process of blocking.
		 */
		turnstile_lock(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if there is no
		 * longer a write lock.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {
			turnstile_release(&rw->lock_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * Ok, it's still a write lock.  If the RW_LOCK_READ_WAITERS
		 * flag is already set, then we can go ahead and block.  If
		 * it is not set, then try to set it.  If we fail to set it,
		 * drop the turnstile lock and restart the loop.
		 */
		if (!(x & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, x,
			    x | RW_LOCK_READ_WAITERS)) {
				turnstile_release(&rw->lock_object);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		owner = (struct thread *)RW_OWNER(x);
		if (TD_IS_RUNNING(owner)) {
			turnstile_release(&rw->lock_object);
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(&rw->lock_object, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */

	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
}

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}

		/*
		 * We should never have read waiters while at least one
		 * thread holds a read lock.  (See note above)
		 */
		KASSERT(!(x & RW_LOCK_READ_WAITERS),
		    ("%s: waiting readers", __func__));

		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WRITE_WAITERS)) {

			/*
			 * There shouldn't be any flags set and we should
			 * be the only read lock.  If we fail to release
			 * the single read lock, then another thread might
			 * have just acquired a read lock, so go back up
			 * to the multiple read locks case.
			 */
			MPASS(x == RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}

		/*
		 * There should just be one reader with one or more
		 * writers waiting.
		 */
		MPASS(x == (RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));

		/*
		 * Ok, we know we have a waiting writer and we think we
		 * are the last reader, so grab the turnstile lock.
		 */
		turnstile_lock(&rw->lock_object);

		/*
		 * Try to drop our lock, leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal(),
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you woke up actually runs and the new thread
		 * "steals" the lock.  For now it's a lot simpler to just
		 * wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		if (!atomic_cmpset_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
			turnstile_release(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		break;
	}
	lock_profile_release_lock(&rw->lock_object);
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uintptr_t v;

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		turnstile_lock(&rw->lock_object);
		v = rw->rw_lock;

		/*
		 * If the lock was released while spinning on the
		 * turnstile chain lock, try again.
		 */
		if (v == RW_UNLOCKED) {
			turnstile_release(&rw->lock_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the lock was released by a writer with both readers
		 * and writers waiting and a reader hasn't woken up and
		 * acquired the lock yet, rw_lock will be set to the
		 * value RW_UNLOCKED | RW_LOCK_WRITE_WAITERS.  If we see
		 * that value, try to acquire it once.  Note that we have
		 * to preserve the RW_LOCK_WRITE_WAITERS flag as there are
		 * other writers still waiting.  If we fail, restart the
		 * loop.
		 */
		if (v == (RW_UNLOCKED | RW_LOCK_WRITE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&rw->rw_lock,
			    RW_UNLOCKED | RW_LOCK_WRITE_WAITERS,
			    tid | RW_LOCK_WRITE_WAITERS)) {
				turnstile_claim(&rw->lock_object);
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK,
					    "%s: %p claimed by new writer",
					    __func__, rw);
				break;
			}
			turnstile_release(&rw->lock_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_release(&rw->lock_object);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			turnstile_release(&rw->lock_object);
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
			    TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(&rw->lock_object, rw_owner(rw),
		    TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the two waiter bits must be set, indicating that at
 * least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags is set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);

#ifdef ADAPTIVE_RWLOCKS
	/*
	 * There might not be a turnstile for this lock if all of
	 * the waiters are adaptively spinning.  In that case, just
	 * reset the lock to the unlocked state and return.
	 */
	if (ts == NULL) {
		atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers", __func__, rw);
		turnstile_release(&rw->lock_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif

	/*
	 * Use the same algorithm as sx locks for now.  Prefer waking up
	 * shared waiters over writers if we have any.  This is probably
	 * not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 *
	 * Note that in the ADAPTIVE_RWLOCKS case, if both flags are
	 * set, there might not be any actual writers on the turnstile
	 * as they might all be spinning.  In that case, we don't want
	 * to preserve the RW_LOCK_WRITE_WAITERS flag as the turnstile
	 * is going to go away once we wake up all the readers.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
		queue = TS_SHARED_QUEUE;
#ifdef ADAPTIVE_RWLOCKS
		if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
		    !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
			v |= RW_LOCK_WRITE_WAITERS;
#else
		v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
#endif
	} else
		queue = TS_EXCLUSIVE_QUEUE;

#ifdef ADAPTIVE_RWLOCKS
	/*
	 * We have to make sure that we actually have waiters to
	 * wake up.  If they are all spinning, then we just need to
	 * disown the turnstile and return.
	 */
	if (turnstile_empty(ts, queue)) {
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
		atomic_store_rel_ptr(&rw->rw_lock, v);
		turnstile_disown(ts);
		return;
	}
#endif

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
	uintptr_t v, tid;
	int success;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	if (!(rw->rw_lock & RW_LOCK_WRITE_WAITERS)) {
		success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
		    tid);
		goto out;
	}

	/*
	 * Ok, we think we have write waiters, so lock the
	 * turnstile.
	 */
	turnstile_lock(&rw->lock_object);

	/*
	 * Try to switch from one reader to a writer again.  This time
	 * we honor the current state of the RW_LOCK_WRITE_WAITERS
	 * flag.  If we obtain the lock with the flag set, then claim
	 * ownership of the turnstile.  In the ADAPTIVE_RWLOCKS case
	 * there may be no associated turnstile even though there are
	 * waiters, if all of the waiters are spinning.
	 */
	v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
	success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
	    tid | v);
#ifdef ADAPTIVE_RWLOCKS
	if (success && v && turnstile_lookup(&rw->lock_object) != NULL)
#else
	if (success && v)
#endif
		turnstile_claim(&rw->lock_object);
	else
		turnstile_release(&rw->lock_object);
out:
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success)
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	return (success);
}
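
/*
 * A typical consumer pattern (an illustrative sketch; "foo_lock" is a
 * hypothetical lock, not part of this file):
 *
 *	rw_rlock(&foo_lock);
 *	if (... write access turns out to be needed ...) {
 *		if (!rw_try_upgrade(&foo_lock)) {
 *			// Upgrade failed: drop the read lock, take the
 *			// write lock the slow way, and revalidate any
 *			// state read under the old read lock.
 *			rw_runlock(&foo_lock);
 *			rw_wlock(&foo_lock);
 *		}
 *		... modify state (write-locked either way) ...
 *		rw_wunlock(&foo_lock);
 *	} else
 *		rw_runlock(&foo_lock);
 */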

/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;

	KASSERT(rw->rw_lock != RW_DESTROYED,
	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
	_rw_assert(rw, RA_WLOCKED, file, line);

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile, "disown" the lock, and awaken any read
	 * waiters.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_lock(&rw->lock_object);
	v = rw->rw_lock;
	MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));

	/*
	 * Downgrade from a write lock while preserving
	 * RW_LOCK_WRITE_WAITERS and give up ownership of the
	 * turnstile.  If there are any read waiters, wake them up.
	 *
	 * For ADAPTIVE_RWLOCKS, we have to allow for the fact that
	 * all of the read waiters might be spinning.  In that case,
	 * act as if RW_LOCK_READ_WAITERS is not set.  Also, only
	 * preserve the RW_LOCK_WRITE_WAITERS flag if at least one
	 * writer is blocked on the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
#ifdef ADAPTIVE_RWLOCKS
	if (ts == NULL)
		v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
	else if (v & RW_LOCK_READ_WAITERS &&
	    turnstile_empty(ts, TS_SHARED_QUEUE))
		v &= ~RW_LOCK_READ_WAITERS;
	else if (v & RW_LOCK_WRITE_WAITERS &&
	    turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
		v &= ~RW_LOCK_WRITE_WAITERS;
#else
	MPASS(ts != NULL);
#endif
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
	    (v & RW_LOCK_WRITE_WAITERS));
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
#ifdef ADAPTIVE_RWLOCKS
	else if (ts == NULL)
		turnstile_release(&rw->lock_object);
#endif
	else
		turnstile_disown(ts);
out:
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}
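
/*
 * A typical consumer pattern (an illustrative sketch; "foo_lock" is a
 * hypothetical lock, not part of this file):
 *
 *	rw_wlock(&foo_lock);
 *	... modify state ...
 *	rw_downgrade(&foo_lock);	// atomically become a reader; no
 *					// writer can slip in between
 *	... keep reading the state just written ...
 *	rw_runlock(&foo_lock);
 */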

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | LA_NOTRECURSED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);
#endif
		break;
	case RA_WLOCKED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock, fail.  We can't reliably check
		 * to see if we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
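
/*
 * Example use of the assertion interface (an illustrative sketch;
 * foo_access() and foo_lock are hypothetical):
 *
 *	static void
 *	foo_access(struct foo *f)
 *	{
 *
 *		// Caller must hold at least a read lock.
 *		rw_assert(&foo_lock, RA_RLOCKED);
 *		...
 *	}
 */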

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock == RW_DESTROYED) {
		db_printf("DESTROYED\n");
		return;
	} else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif