kern_rwlock.c revision 167801
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 167801 2007-03-22 16:09:23Z jhb $");

#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
#define	ADAPTIVE_RWLOCKS
#endif

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_rwlock(struct lock_object *lock);
#endif
static void	lock_rw(struct lock_object *lock, int how);
static int	unlock_rw(struct lock_object *lock);

struct lock_class lock_class_rw = {
	.lc_name = "rw",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	.lc_ddb_show = db_show_rwlock,
#endif
	.lc_lock = lock_rw,
	.lc_unlock = unlock_rw,
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.
 */
#define	rw_wowner(rw)							\
	((rw)->rw_lock & RW_LOCK_READ ? NULL :				\
	    (struct thread *)RW_OWNER((rw)->rw_lock))

/*
 * Return a pointer to the thread owning this lock, i.e. the thread that
 * should receive any priority lent by threads that block on this lock.
 * Currently this is identical to rw_wowner().
 */
#define	rw_owner(rw)		rw_wowner(rw)

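/*
 * A rough sketch of how the rw_lock word is laid out (see sys/rwlock.h
 * for the authoritative definitions; this comment is illustrative only):
 *
 *	write-locked:	[ owning thread pointer | flag bits ]
 *	read-locked:	[ reader count | RW_LOCK_READ and waiter flags ]
 *
 * An unlocked lock is encoded as a read lock with zero readers and no
 * waiter flags, which is why the "easy" path in _rw_rlock() below also
 * handles a completely unlocked lock.
 */
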
#ifndef INVARIANTS
#define	_rw_assert(rw, what, file, line)
#endif

void
lock_rw(struct lock_object *lock, int how)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	if (how)
		rw_wlock(rw);
	else
		rw_rlock(rw);
}

int
unlock_rw(struct lock_object *lock)
{
	struct rwlock *rw;

	rw = (struct rwlock *)lock;
	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
	if (rw->rw_lock & RW_LOCK_READ) {
		rw_runlock(rw);
		return (0);
	} else {
		rw_wunlock(rw);
		return (1);
	}
}
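
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * lock_rw() and unlock_rw() let generic code drop and reacquire an
 * rwlock of unknown mode around a sleep via the lock class interface.
 * The value returned by unlock_rw() records the mode and is passed
 * back to lock_rw() to reacquire the lock the same way:
 *
 *	int how;
 *
 *	how = unlock_rw(lock);		(0 if read-locked, 1 if write)
 *	... sleep or run other code ...
 *	lock_rw(lock, how);		(reacquire in the original mode)
 */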

void
rw_init(struct rwlock *rw, const char *name)
{

	rw->rw_lock = RW_UNLOCKED;

	lock_profile_object_init(&rw->lock_object, &lock_class_rw, name);
	lock_init(&rw->lock_object, &lock_class_rw, name, NULL, LO_WITNESS |
	    LO_RECURSABLE | LO_UPGRADABLE);
}

void
rw_destroy(struct rwlock *rw)
{

	KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
	lock_profile_object_destroy(&rw->lock_object);
	lock_destroy(&rw->lock_object);
}

void
rw_sysinit(void *arg)
{
	struct rw_args *args = arg;

	rw_init(args->ra_rw, args->ra_desc);
}
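
/*
 * Illustrative usage sketch (hypothetical "foo" consumer, not part of
 * this file): the usual lifecycle pairs rw_init() with rw_destroy()
 * and matches each acquire with a release of the same type:
 *
 *	static struct rwlock foo_lock;
 *
 *	rw_init(&foo_lock, "foo");
 *
 *	rw_rlock(&foo_lock);		(shared, reader access)
 *	... read foo state ...
 *	rw_runlock(&foo_lock);
 *
 *	rw_wlock(&foo_lock);		(exclusive, writer access)
 *	... modify foo state ...
 *	rw_wunlock(&foo_lock);
 *
 *	rw_destroy(&foo_lock);
 *
 * Locks needed before explicit initialization code can run may use the
 * RW_SYSINIT() macro instead, which arranges for rw_sysinit() above to
 * be invoked during boot.
 */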

int
rw_wowned(struct rwlock *rw)
{

	return (rw_wowner(rw) == curthread);
}

void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);
	__rw_wlock(rw, curthread, file, line);
	LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;
}

void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{

	MPASS(curthread != NULL);
	_rw_assert(rw, RA_WLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, 0, file, line);
	lock_profile_release_lock(&rw->lock_object);
	__rw_wunlock(rw, curthread, file, line);
}

void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uint64_t waittime = 0;
	int contested = 0;
	uintptr_t x;

	KASSERT(rw_wowner(rw) != curthread,
	    ("%s (%s): wlock already held @ %s:%d", __func__,
	    rw->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);

	/*
	 * Note that we don't make any attempt to block new read
	 * locks once a writer has blocked on the lock.  The reason is
	 * that we currently allow read locks to recurse and we don't
	 * keep track of all the holders of read locks.  Thus, if we
	 * were to block readers once a writer blocked and a reader
	 * then tried to recurse on its read lock, we would deadlock:
	 * the reader would be blocked on the writer, and the writer
	 * would be blocked waiting for the reader to release its
	 * original read lock.
	 */
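	/*
	 * A concrete interleaving of the deadlock described above, with
	 * hypothetical threads A and B:
	 *
	 *	A: rw_rlock(rw);	(A now holds a read lock)
	 *	B: rw_wlock(rw);	(B blocks behind A's read lock)
	 *	A: rw_rlock(rw);	(recursed read; if this blocked
	 *				 behind B, neither thread could
	 *				 ever make progress)
	 */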
	for (;;) {
		/*
		 * Handle the easy case.  If no other thread has a write
		 * lock, then try to bump up the count of read locks.  Note
		 * that we have to preserve the current state of the
		 * RW_LOCK_WRITE_WAITERS flag.  If we fail to acquire a
		 * read lock, then rw_lock must have changed, so restart
		 * the loop.  Note that this handles the case of a
		 * completely unlocked rwlock since such a lock is encoded
		 * as a read lock with no waiters.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {

			/*
			 * The RW_LOCK_READ_WAITERS flag should only be set
			 * if another thread currently holds a write lock,
			 * and in that case RW_LOCK_READ should be clear.
			 */
			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
			    x + RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x + RW_ONE_READER));
				if (RW_READERS(x) == 0)
					lock_profile_obtain_lock_success(
					    &rw->lock_object, contested, waittime,
					    file, line);
				break;
			}
			cpu_spinwait();
			continue;
		}
		lock_profile_obtain_lock_failed(&rw->lock_object, &contested,
		    &waittime);

		/*
		 * Okay, now it's the hard case.  Some other thread already
		 * has a write lock, so acquire the turnstile lock so we can
		 * begin the process of blocking.
		 */
		turnstile_lock(&rw->lock_object);

		/*
		 * The lock might have been released while we spun, so
		 * recheck its state and restart the loop if there is no
		 * longer a write lock.
		 */
		x = rw->rw_lock;
		if (x & RW_LOCK_READ) {
			turnstile_release(&rw->lock_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * Ok, it's still a write lock.  If the RW_LOCK_READ_WAITERS
		 * flag is already set, then we can go ahead and block.  If
		 * it is not set then try to set it.  If we fail to set it
		 * drop the turnstile lock and restart the loop.
		 */
		if (!(x & RW_LOCK_READ_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, x,
			    x | RW_LOCK_READ_WAITERS)) {
				turnstile_release(&rw->lock_object);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set read waiters flag",
				    __func__, rw);
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
		owner = (struct thread *)RW_OWNER(x);
		if (TD_IS_RUNNING(owner)) {
			turnstile_release(&rw->lock_object);
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) ==
			    owner && TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the read waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(&rw->lock_object, rw_owner(rw), TS_SHARED_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}

	/*
	 * TODO: acquire "owner of record" here.  Here be turnstile dragons
	 * however.  turnstiles don't like owners changing between calls to
	 * turnstile_wait() currently.
	 */

	LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&rw->lock_object, 0, file, line);
	curthread->td_locks++;
}

void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t x;

	_rw_assert(rw, RA_RLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);

	/* TODO: drop "owner of record" here. */

	for (;;) {
		/*
		 * See if there is more than one read lock held.  If so,
		 * just drop one and return.
		 */
		x = rw->rw_lock;
		if (RW_READERS(x) > 1) {
			if (atomic_cmpset_ptr(&rw->rw_lock, x,
			    x - RW_ONE_READER)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeeded %p -> %p",
					    __func__, rw, (void *)x,
					    (void *)(x - RW_ONE_READER));
				break;
			}
			continue;
		}

		/*
		 * We should never have read waiters while at least one
		 * thread holds a read lock.  (See note above)
		 */
		KASSERT(!(x & RW_LOCK_READ_WAITERS),
		    ("%s: waiting readers", __func__));

		/*
		 * If there aren't any waiters for a write lock, then try
		 * to drop it quickly.
		 */
		if (!(x & RW_LOCK_WRITE_WAITERS)) {

			/*
			 * There shouldn't be any flags set and we should
			 * be the only read lock.  If we fail to release
			 * the single read lock, then another thread might
			 * have just acquired a read lock, so go back up
			 * to the multiple read locks case.
			 */
			MPASS(x == RW_READERS_LOCK(1));
			if (atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1),
			    RW_UNLOCKED)) {
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK, "%s: %p last succeeded",
					    __func__, rw);
				break;
			}
			continue;
		}

		/*
		 * There should just be one reader with one or more
		 * writers waiting.
		 */
		MPASS(x == (RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));

		/*
		 * Ok, we know we have a waiting writer and we think we
		 * are the last reader, so grab the turnstile lock.
		 */
		turnstile_lock(&rw->lock_object);

		/*
		 * Try to drop our lock, leaving the lock in an unlocked
		 * state.
		 *
		 * If you wanted to do explicit lock handoff you'd have to
		 * do it here.  You'd also want to use turnstile_signal()
		 * and you'd have to handle the race where a higher
		 * priority thread blocks on the write lock before the
		 * thread you wake up actually runs, and have the new
		 * thread "steal" the lock.  For now it's a lot simpler
		 * to just wake up all of the waiters.
		 *
		 * As above, if we fail, then another thread might have
		 * acquired a read lock, so drop the turnstile lock and
		 * restart.
		 */
		if (!atomic_cmpset_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
			turnstile_release(&rw->lock_object);
			continue;
		}
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p last succeeded with waiters",
			    __func__, rw);

		/*
		 * Ok.  The lock is released and all that's left is to
		 * wake up the waiters.  Note that the lock might not be
		 * free anymore, but in that case the writers will just
		 * block again if they run before the new lock holder(s)
		 * release the lock.
		 */
		ts = turnstile_lookup(&rw->lock_object);
		MPASS(ts != NULL);
		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_SHARED_LOCK);
		break;
	}
	lock_profile_release_lock(&rw->lock_object);
}

/*
 * This function is called when we are unable to obtain a write lock on the
 * first try.  This means that at least one other thread holds either a
 * read or write lock.
 */
void
_rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
#ifdef ADAPTIVE_RWLOCKS
	volatile struct thread *owner;
#endif
	uintptr_t v;

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);

	while (!_rw_write_lock(rw, tid)) {
		turnstile_lock(&rw->lock_object);
		v = rw->rw_lock;

		/*
		 * If the lock was released while spinning on the
		 * turnstile chain lock, try again.
		 */
		if (v == RW_UNLOCKED) {
			turnstile_release(&rw->lock_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the lock was released by a writer with both readers
		 * and writers waiting and no reader has woken up and
		 * acquired the lock yet, rw_lock will be set to the
		 * value RW_UNLOCKED | RW_LOCK_WRITE_WAITERS.  If we see
		 * that value, try to acquire it once.  Note that we have
		 * to preserve the RW_LOCK_WRITE_WAITERS flag as there
		 * are other writers still waiting.  If we fail, restart
		 * the loop.
		 */
		if (v == (RW_UNLOCKED | RW_LOCK_WRITE_WAITERS)) {
			if (atomic_cmpset_acq_ptr(&rw->rw_lock,
			    RW_UNLOCKED | RW_LOCK_WRITE_WAITERS,
			    tid | RW_LOCK_WRITE_WAITERS)) {
				turnstile_claim(&rw->lock_object);
				if (LOCK_LOG_TEST(&rw->lock_object, 0))
					CTR2(KTR_LOCK,
					    "%s: %p claimed by new writer",
					    __func__, rw);
				break;
			}
			turnstile_release(&rw->lock_object);
			cpu_spinwait();
			continue;
		}

		/*
		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
		 * set it.  If we fail to set it, then loop back and try
		 * again.
		 */
		if (!(v & RW_LOCK_WRITE_WAITERS)) {
			if (!atomic_cmpset_ptr(&rw->rw_lock, v,
			    v | RW_LOCK_WRITE_WAITERS)) {
				turnstile_release(&rw->lock_object);
				cpu_spinwait();
				continue;
			}
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR2(KTR_LOCK, "%s: %p set write waiters flag",
				    __func__, rw);
		}

#ifdef ADAPTIVE_RWLOCKS
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		owner = (struct thread *)RW_OWNER(v);
		if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
			turnstile_release(&rw->lock_object);
			if (LOCK_LOG_TEST(&rw->lock_object, 0))
				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
				    __func__, rw, owner);
			while ((struct thread *)RW_OWNER(rw->rw_lock) ==
			    owner && TD_IS_RUNNING(owner))
				cpu_spinwait();
			continue;
		}
#endif

		/*
		 * We were unable to acquire the lock and the write waiters
		 * flag is set, so we must block on the turnstile.
		 */
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on turnstile", __func__,
			    rw);
		turnstile_wait(&rw->lock_object, rw_owner(rw),
		    TS_EXCLUSIVE_QUEUE);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
			    __func__, rw);
	}
}

/*
 * This function is called if the first try at releasing a write lock failed.
 * This means that one of the two waiter bits must be set, indicating that
 * at least one thread is waiting on this lock.
 */
void
_rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t v;
	int queue;

	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
	    ("%s: neither of the waiter flags are set", __func__));

	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);

	turnstile_lock(&rw->lock_object);
	ts = turnstile_lookup(&rw->lock_object);

#ifdef ADAPTIVE_RWLOCKS
	/*
	 * There might not be a turnstile for this lock if all of
	 * the waiters are adaptively spinning.  In that case, just
	 * reset the lock to the unlocked state and return.
	 */
	if (ts == NULL) {
		atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers", __func__, rw);
		turnstile_release(&rw->lock_object);
		return;
	}
#else
	MPASS(ts != NULL);
#endif

	/*
	 * Use the same algorithm as sx locks for now: prefer waking up
	 * shared waiters over writers if we have any.  This is probably
	 * not ideal.
	 *
	 * 'v' is the value we are going to write back to rw_lock.  If we
	 * have waiters on both queues, we need to preserve the state of
	 * the waiter flag for the queue we don't wake up.  For now this is
	 * hardcoded for the algorithm mentioned above.
	 *
	 * In the case of both readers and writers waiting, we wake up the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 *
	 * Note that in the ADAPTIVE_RWLOCKS case, if both flags are
	 * set, there might not be any actual writers on the turnstile
	 * as they might all be spinning.  In that case, we don't want
	 * to preserve the RW_LOCK_WRITE_WAITERS flag as the turnstile
	 * is going to go away once we wake up all the readers.
	 */
	v = RW_UNLOCKED;
	if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
		queue = TS_SHARED_QUEUE;
#ifdef ADAPTIVE_RWLOCKS
		if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
		    !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
			v |= RW_LOCK_WRITE_WAITERS;
#else
		v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
#endif
	} else
		queue = TS_EXCLUSIVE_QUEUE;

#ifdef ADAPTIVE_RWLOCKS
	/*
	 * We have to make sure that we actually have waiters to
	 * wake up.  If they are all spinning, then we just need to
	 * disown the turnstile and return.
	 */
	if (turnstile_empty(ts, queue)) {
		if (LOCK_LOG_TEST(&rw->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);
		atomic_store_rel_ptr(&rw->rw_lock, v);
		turnstile_disown(ts);
		return;
	}
#endif

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
}

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
	uintptr_t v, tid;
	int success;

	_rw_assert(rw, RA_RLOCKED, file, line);

	/*
	 * Attempt to switch from one reader to a writer.  If there
	 * are any write waiters, then we will have to lock the
	 * turnstile first to prevent races with another writer
	 * calling turnstile_wait() before we have claimed this
	 * turnstile.  So, do the simple case of no waiters first.
	 */
	tid = (uintptr_t)curthread;
	if (!(rw->rw_lock & RW_LOCK_WRITE_WAITERS)) {
		success = atomic_cmpset_acq_ptr(&rw->rw_lock,
		    RW_READERS_LOCK(1), tid);
		goto out;
	}

	/*
	 * Ok, we think we have write waiters, so lock the
	 * turnstile.
	 */
	turnstile_lock(&rw->lock_object);

	/*
	 * Try to switch from one reader to a writer again.  This time
	 * we honor the current state of the RW_LOCK_WRITE_WAITERS
	 * flag.  If we obtain the lock with the flag set, then claim
	 * ownership of the turnstile.  In the ADAPTIVE_RWLOCKS case
	 * it is possible for there to not be an associated turnstile
	 * even though there are waiters if all of the waiters are
	 * spinning.
	 */
	v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
	success = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
	    tid | v);
#ifdef ADAPTIVE_RWLOCKS
	if (success && v && turnstile_lookup(&rw->lock_object) != NULL)
#else
	if (success && v)
#endif
		turnstile_claim(&rw->lock_object);
	else
		turnstile_release(&rw->lock_object);
out:
	LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
	if (success)
		WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);
	return (success);
}
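
/*
 * Illustrative sketch (hypothetical consumer, not part of this file):
 * since the upgrade may fail, callers typically fall back to dropping
 * the read lock and taking the write lock outright, re-validating any
 * state that may have changed while the lock was not held:
 *
 *	rw_rlock(&foo_lock);
 *	... decide that foo state must be modified ...
 *	if (!rw_try_upgrade(&foo_lock)) {
 *		rw_runlock(&foo_lock);
 *		rw_wlock(&foo_lock);
 *		... re-check foo state; another thread may have run ...
 *	}
 *	... modify foo state under the write lock ...
 *	rw_wunlock(&foo_lock);
 */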

/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
	struct turnstile *ts;
	uintptr_t tid, v;

	_rw_assert(rw, RA_WLOCKED, file, line);

	WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line);

	/*
	 * Convert from a writer to a single reader.  First we handle
	 * the easy case with no waiters.  If there are any waiters, we
	 * lock the turnstile, "disown" the lock, and awaken any read
	 * waiters.
	 */
	tid = (uintptr_t)curthread;
	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
		goto out;

	/*
	 * Ok, we think we have waiters, so lock the turnstile so we can
	 * read the waiter flags without any races.
	 */
	turnstile_lock(&rw->lock_object);
	v = rw->rw_lock;
	MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));

	/*
	 * Downgrade from a write lock while preserving
	 * RW_LOCK_WRITE_WAITERS and give up ownership of the
	 * turnstile.  If there are any read waiters, wake them up.
	 *
	 * For ADAPTIVE_RWLOCKS, we have to allow for the fact that
	 * all of the read waiters might be spinning.  In that case,
	 * act as if RW_LOCK_READ_WAITERS is not set.  Also, only
	 * preserve the RW_LOCK_WRITE_WAITERS flag if at least one
	 * writer is blocked on the turnstile.
	 */
	ts = turnstile_lookup(&rw->lock_object);
#ifdef ADAPTIVE_RWLOCKS
	if (ts == NULL)
		v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
	else if (v & RW_LOCK_READ_WAITERS &&
	    turnstile_empty(ts, TS_SHARED_QUEUE))
		v &= ~RW_LOCK_READ_WAITERS;
	else if (v & RW_LOCK_WRITE_WAITERS &&
	    turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
		v &= ~RW_LOCK_WRITE_WAITERS;
#else
	MPASS(ts != NULL);
#endif
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_broadcast(ts, TS_SHARED_QUEUE);
	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
	    (v & RW_LOCK_WRITE_WAITERS));
	if (v & RW_LOCK_READ_WAITERS)
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
#ifdef ADAPTIVE_RWLOCKS
	else if (ts == NULL)
		turnstile_release(&rw->lock_object);
#endif
	else
		turnstile_disown(ts);
out:
	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
}
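
/*
 * Illustrative sketch (hypothetical consumer, not part of this file):
 * downgrading suits an update followed by a long read-only section,
 * since it lets other readers in without ever dropping the lock:
 *
 *	rw_wlock(&foo_lock);
 *	... modify foo state ...
 *	rw_downgrade(&foo_lock);
 *	... long read-mostly work; other readers may now enter ...
 *	rw_runlock(&foo_lock);
 */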

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*
 * thread owns an rlock.
 */
void
_rw_assert(struct rwlock *rw, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | LA_NOTRECURSED:
	case RA_RLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If some other thread has a write lock or we have one
		 * and are asserting a read lock, fail.  Also, if no one
		 * has a lock at all, fail.
		 */
		if (rw->rw_lock == RW_UNLOCKED ||
		    (!(rw->rw_lock & RW_LOCK_READ) && (what == RA_RLOCKED ||
		    rw_wowner(rw) != curthread)))
			panic("Lock %s not %slocked @ %s:%d\n",
			    rw->lock_object.lo_name, (what == RA_RLOCKED) ?
			    "read " : "", file, line);
#endif
		break;
	case RA_WLOCKED:
		if (rw_wowner(rw) != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
#ifdef WITNESS
		witness_assert(&rw->lock_object, what, file, line);
#else
		/*
		 * If we hold a write lock, fail.  We can't reliably
		 * check whether we hold a read lock or not.
		 */
		if (rw_wowner(rw) == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rw->lock_object.lo_name, file, line);
#endif
		break;
	default:
		panic("Unknown rw lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */
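
/*
 * Illustrative sketch (hypothetical consumer, not part of this file):
 * rw_assert() is typically used to document and enforce a locking
 * protocol at function entry:
 *
 *	static void
 *	foo_modify(struct foo *f)
 *	{
 *
 *		rw_assert(&foo_lock, RA_WLOCKED);
 *		... modify state protected by foo_lock ...
 *	}
 */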

#ifdef DDB
void
db_show_rwlock(struct lock_object *lock)
{
	struct rwlock *rw;
	struct thread *td;

	rw = (struct rwlock *)lock;

	db_printf(" state: ");
	if (rw->rw_lock == RW_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (rw->rw_lock & RW_LOCK_READ)
		db_printf("RLOCK: %ju locks\n",
		    (uintmax_t)(RW_READERS(rw->rw_lock)));
	else {
		td = rw_wowner(rw);
		db_printf("WLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
	}
	db_printf(" waiters: ");
	switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) {
	case RW_LOCK_READ_WAITERS:
		db_printf("readers\n");
		break;
	case RW_LOCK_WRITE_WAITERS:
		db_printf("writers\n");
		break;
	case RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS:
		db_printf("readers and writers\n");
		break;
	default:
		db_printf("none\n");
		break;
	}
}

#endif
