/*-
 * Copyright (c) 2007 Stephan Uphoff <ups@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */
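
/*
 * Illustrative usage sketch (assumes the rm_* wrapper macros declared in
 * <sys/rmlock.h>; the tracker must remain valid, e.g. on the caller's
 * stack, for as long as the read lock is held):
 *
 *	static struct rmlock example_rm;
 *	struct rm_priotracker tracker;
 *
 *	rm_init(&example_rm, "example");
 *	rm_rlock(&example_rm, &tracker);	(cheap per-CPU read path)
 *	...
 *	rm_runlock(&example_rm, &tracker);
 *
 *	rm_wlock(&example_rm);			(expensive, revokes read tokens)
 *	...
 *	rm_wunlock(&example_rm);
 */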

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rmlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/turnstile.h>
#include <sys/lock_profile.h>
#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * A cookie to mark destroyed rmlocks.  This is stored in the head of
 * rm_activeReaders.
 */
#define	RM_DESTROYED	((void *)0xdead)

#define	rm_destroyed(rm)						\
	(LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)

#define RMPF_ONQUEUE	1
#define RMPF_SIGNAL	2

#ifndef INVARIANTS
#define	_rm_assert(c, what, file, line)
#endif

static void	assert_rm(struct lock_object *lock, int what);
#ifdef DDB
static void	db_show_rm(struct lock_object *lock);
#endif
static void	lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_rm(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
	.lc_name = "rm",
	.lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

struct lock_class lock_class_rm_sleepable = {
	.lc_name = "sleepable rm",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE,
	.lc_assert = assert_rm,
#ifdef DDB
	.lc_ddb_show = db_show_rm,
#endif
	.lc_lock = lock_rm,
	.lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_rm,
#endif
};

static void
assert_rm(struct lock_object *lock, int what)
{

	rm_assert((struct rmlock *)lock, what);
}

/*
 * These do not support read locks because it would be hard to make
 * the tracker work correctly with the current lock_class API as you
 * would need to have the tracker pointer available when calling
 * rm_rlock() in lock_rm().
 */
static void
lock_rm(struct lock_object *lock, int how)
{
	struct rmlock *rm;

	rm = (struct rmlock *)lock;
	if (how)
		rm_wlock(rm);
#ifdef INVARIANTS
	else
		panic("lock_rm called in read mode");
#endif
}

static int
unlock_rm(struct lock_object *lock)
{
	struct rmlock *rm;

	rm = (struct rmlock *)lock;
	rm_wunlock(rm);
	return (1);
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{
	struct rmlock *rm;
	struct lock_class *lc;

	rm = (struct rmlock *)lock;
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	return (lc->lc_owner(&rm->rm_wlock_object, owner));
}
#endif

static struct mtx rm_spinlock;

MTX_SYSINIT(rm_spinlock, &rm_spinlock, "rm_spinlock", MTX_SPIN);

/*
 * Add or remove tracker from per-cpu list.
 *
 * The per-cpu list can be traversed at any time in forward direction from an
 * interrupt on the *local* cpu.
 */
static void inline
rm_tracker_add(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next;

	/* Initialize all tracker pointers */
	tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue;
	next = pc->pc_rm_queue.rmq_next;
	tracker->rmp_cpuQueue.rmq_next = next;

	/* rmq_prev is not used during forward traversal. */
	next->rmq_prev = &tracker->rmp_cpuQueue;

	/* Update pointer to first element. */
	pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue;
}

/*
 * Return a count of the number of trackers the thread 'td' already
 * has on this CPU for the lock 'rm'.
 */
static int
rm_trackers_present(const struct pcpu *pc, const struct rmlock *rm,
    const struct thread *td)
{
	struct rm_queue *queue;
	struct rm_priotracker *tracker;
	int count;

	count = 0;
	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td))
			count++;
	}
	return (count);
}

static void inline
rm_tracker_remove(struct pcpu *pc, struct rm_priotracker *tracker)
{
	struct rm_queue *next, *prev;

	next = tracker->rmp_cpuQueue.rmq_next;
	prev = tracker->rmp_cpuQueue.rmq_prev;

	/* Not used during forward traversal. */
	next->rmq_prev = prev;

	/* Remove from list. */
	prev->rmq_next = next;
}

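/*
 * IPI handler, run from _rm_wlock() on each CPU that may still hold a
 * read token.  Flag every unflagged tracker for this lock on the local
 * per-CPU queue and put it on rm_activeReaders so the writer can wait
 * for those readers to drain.
 */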
static void
rm_cleanIPI(void *arg)
{
	struct pcpu *pc;
	struct rmlock *rm = arg;
	struct rm_priotracker *tracker;
	struct rm_queue *queue;

	pc = pcpu_find(curcpu);

	for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
	    queue = queue->rmq_next) {
		tracker = (struct rm_priotracker *)queue;
		if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) {
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			mtx_unlock_spin(&rm_spinlock);
		}
	}
}

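/*
 * Initialize an rmlock.  RM_SLEEPABLE selects the sleepable lock class
 * and an sx lock as the backing write lock; otherwise a mutex is used.
 */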
void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
	struct lock_class *lc;
	int liflags;

	liflags = 0;
	if (!(opts & RM_NOWITNESS))
		liflags |= LO_WITNESS;
	if (opts & RM_RECURSE)
		liflags |= LO_RECURSABLE;
	rm->rm_writecpus = all_cpus;
	LIST_INIT(&rm->rm_activeReaders);
	if (opts & RM_SLEEPABLE) {
		liflags |= LO_SLEEPABLE;
		lc = &lock_class_rm_sleepable;
		sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_NOWITNESS);
	} else {
		lc = &lock_class_rm;
		mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
	}
	lock_init(&rm->lock_object, lc, name, NULL, liflags);
}

void
rm_init(struct rmlock *rm, const char *name)
{

	rm_init_flags(rm, name, 0);
}

void
rm_destroy(struct rmlock *rm)
{

	rm_assert(rm, RA_UNLOCKED);
	LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED;
	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_destroy(&rm->rm_lock_sx);
	else
		mtx_destroy(&rm->rm_lock_mtx);
	lock_destroy(&rm->lock_object);
}

int
rm_wowned(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		return (sx_xlocked(&rm->rm_lock_sx));
	else
		return (mtx_owned(&rm->rm_lock_mtx));
}

void
rm_sysinit(void *arg)
{
	struct rm_args *args = arg;

	rm_init(args->ra_rm, args->ra_desc);
}

void
rm_sysinit_flags(void *arg)
{
	struct rm_args_flags *args = arg;

	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_opts);
}

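/*
 * Slow path for read locking, taken when this CPU does not hold a read
 * token for 'rm' (or a preemption is pending).  Re-check the conditions
 * inside a critical section, grant recursive readers immediately, and
 * otherwise acquire and release the backing write lock to obtain a fresh
 * read token for this CPU.
 */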
static int
_rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct pcpu *pc;

	critical_enter();
	pc = pcpu_find(curcpu);

	/* Check if we just need to do a proper critical_exit. */
	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
		critical_exit();
		return (1);
	}

	/* Remove our tracker from the per-cpu list. */
	rm_tracker_remove(pc, tracker);

	/* Check to see if the IPI granted us the lock after all. */
	if (tracker->rmp_flags) {
		/* Just add back tracker - we hold the lock. */
		rm_tracker_add(pc, tracker);
		critical_exit();
		return (1);
	}

	/*
	 * We allow readers to acquire a lock even if a writer is blocked if
	 * the lock is recursive and the reader already holds the lock.
	 */
	if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) {
		/*
		 * Just grant the lock if this thread already has a tracker
		 * for this lock on the per-cpu queue.
		 */
		if (rm_trackers_present(pc, rm, curthread) != 0) {
			mtx_lock_spin(&rm_spinlock);
			LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker,
			    rmp_qentry);
			tracker->rmp_flags = RMPF_ONQUEUE;
			mtx_unlock_spin(&rm_spinlock);
			rm_tracker_add(pc, tracker);
			critical_exit();
			return (1);
		}
	}

	sched_unpin();
	critical_exit();

	if (trylock) {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE) {
			if (!sx_try_xlock(&rm->rm_lock_sx))
				return (0);
		} else {
			if (!mtx_trylock(&rm->rm_lock_mtx))
				return (0);
		}
	} else {
		if (rm->lock_object.lo_flags & LO_SLEEPABLE)
			sx_xlock(&rm->rm_lock_sx);
		else
			mtx_lock(&rm->rm_lock_mtx);
	}

	critical_enter();
	pc = pcpu_find(curcpu);
	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
	rm_tracker_add(pc, tracker);
	sched_pin();
	critical_exit();

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);

	return (1);
}

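/*
 * Fast path for read locking: register the tracker on the local CPU's
 * queue from a pseudo-critical section (td_critnest is bumped by hand)
 * and pin the thread.  If this CPU already holds a read token and no
 * preemption is pending we are done; otherwise fall back to
 * _rm_rlock_hard().
 */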
int
_rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
{
	struct thread *td = curthread;
	struct pcpu *pc;

	if (SCHEDULER_STOPPED())
		return (1);

	tracker->rmp_flags  = 0;
	tracker->rmp_thread = td;
	tracker->rmp_rmlock = rm;

	td->td_critnest++;	/* critical_enter(); */

	__compiler_membar();

	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */

	rm_tracker_add(pc, tracker);

	sched_pin();

	__compiler_membar();

	td->td_critnest--;

	/*
	 * Fast path to combine two common conditions into a single
	 * conditional jump.
	 */
	if (0 == (td->td_owepreempt |
	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
		return (1);

	/* We do not have a read token and need to acquire one. */
	return (_rm_rlock_hard(rm, tracker, trylock));
}

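/*
 * Slow path for read unlocking: handle a pending preemption and, if this
 * tracker was flagged by rm_cleanIPI, take it off rm_activeReaders and,
 * when a writer asked to be signalled, wake it up via the turnstile.
 */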
static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{

	if (td->td_owepreempt) {
		td->td_critnest++;
		critical_exit();
	}

	if (!tracker->rmp_flags)
		return;

	mtx_lock_spin(&rm_spinlock);
	LIST_REMOVE(tracker, rmp_qentry);

	if (tracker->rmp_flags & RMPF_SIGNAL) {
		struct rmlock *rm;
		struct turnstile *ts;

		rm = tracker->rmp_rmlock;

		turnstile_chain_lock(&rm->lock_object);
		mtx_unlock_spin(&rm_spinlock);

		ts = turnstile_lookup(&rm->lock_object);

		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
		turnstile_chain_unlock(&rm->lock_object);
	} else
		mtx_unlock_spin(&rm_spinlock);
}

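/*
 * Fast path for read unlocking: take the tracker off the local CPU's
 * queue and unpin.  The slow path is only needed when a preemption is
 * pending or a writer has flagged this tracker.
 */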
void
_rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
{
	struct pcpu *pc;
	struct thread *td = tracker->rmp_thread;

	if (SCHEDULER_STOPPED())
		return;

	td->td_critnest++;	/* critical_enter(); */
	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
	rm_tracker_remove(pc, tracker);
	td->td_critnest--;
	sched_unpin();

	if (0 == (td->td_owepreempt | tracker->rmp_flags))
		return;

	_rm_unlock_hard(td, tracker);
}

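/*
 * Write locking: acquire the backing mutex or sx lock, then revoke all
 * outstanding read tokens.  rm_cleanIPI flags readers that are still
 * active so that we can sleep on the turnstile until each of them has
 * dropped the lock.
 */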
void
_rm_wlock(struct rmlock *rm)
{
	struct rm_priotracker *prio;
	struct turnstile *ts;
	cpuset_t readcpus;

	if (SCHEDULER_STOPPED())
		return;

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xlock(&rm->rm_lock_sx);
	else
		mtx_lock(&rm->rm_lock_mtx);

	if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
		/* Get all read tokens back. */
		readcpus = all_cpus;
		CPU_NAND(&readcpus, &rm->rm_writecpus);
		rm->rm_writecpus = all_cpus;

		/*
		 * Assumes rm->rm_writecpus update is visible on other CPUs
		 * before rm_cleanIPI is called.
		 */
#ifdef SMP
		smp_rendezvous_cpus(readcpus,
		    smp_no_rendevous_barrier,
		    rm_cleanIPI,
		    smp_no_rendevous_barrier,
		    rm);

#else
		rm_cleanIPI(rm);
#endif

		mtx_lock_spin(&rm_spinlock);
		while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
			ts = turnstile_trywait(&rm->lock_object);
			prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
			mtx_unlock_spin(&rm_spinlock);
			turnstile_wait(ts, prio->rmp_thread,
			    TS_EXCLUSIVE_QUEUE);
			mtx_lock_spin(&rm_spinlock);
		}
		mtx_unlock_spin(&rm_spinlock);
	}
}

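/*
 * Write unlocking just drops the backing lock; read tokens are handed
 * back lazily as readers go through _rm_rlock_hard().
 */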
void
_rm_wunlock(struct rmlock *rm)
{

	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
		sx_xunlock(&rm->rm_lock_sx);
	else
		mtx_unlock(&rm->rm_lock_mtx);
}

#ifdef LOCK_DEBUG

void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_wlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_wlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_UNLOCKED, file, line);

	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
	    file, line, NULL);

	_rm_wlock(rm);

	LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line);

	WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);

	curthread->td_locks++;
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_wunlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_WLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line);
	LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_wunlock(rm);
	curthread->td_locks--;
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return (1);

#ifdef INVARIANTS
	if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) {
		critical_enter();
		KASSERT(rm_trackers_present(pcpu_find(curcpu), rm,
		    curthread) == 0,
		    ("rm_rlock: recursed on non-recursive rmlock %s @ %s:%d\n",
		    rm->lock_object.lo_name, file, line));
		critical_exit();
	}
#endif
	KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
	    ("rm_rlock() by idle thread %p on rmlock %s @ %s:%d",
	    curthread, rm->lock_object.lo_name, file, line));
	KASSERT(!rm_destroyed(rm),
	    ("rm_rlock() of destroyed rmlock @ %s:%d", file, line));
	if (!trylock) {
		KASSERT(!rm_wowned(rm),
		    ("rm_rlock: wlock already held for %s @ %s:%d",
		    rm->lock_object.lo_name, file, line));
		WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER, file, line,
		    NULL);
	}

	if (_rm_rlock(rm, tracker, trylock)) {
		if (trylock)
			LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file,
			    line);
		else
			LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file,
			    line);
		WITNESS_LOCK(&rm->lock_object, 0, file, line);

		curthread->td_locks++;

		return (1);
	} else if (trylock)
		LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line);

	return (0);
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	if (SCHEDULER_STOPPED())
		return;

	KASSERT(!rm_destroyed(rm),
	    ("rm_runlock() of destroyed rmlock @ %s:%d", file, line));
	_rm_assert(rm, RA_RLOCKED, file, line);
	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
	_rm_runlock(rm, tracker);
	curthread->td_locks--;
}

#else

/*
 * Just strip out file and line arguments if no lock debugging is enabled in
 * the kernel - we are called from a kernel module.
 */
void
_rm_wlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wlock(rm);
}

void
_rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
{

	_rm_wunlock(rm);
}

int
_rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    int trylock, const char *file, int line)
{

	return (_rm_rlock(rm, tracker, trylock));
}

void
_rm_runlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
    const char *file, int line)
{

	_rm_runlock(rm, tracker);
}

#endif

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rm_assert
#endif

/*
 * Note that this does not need to use witness_assert() for read lock
 * assertions since an exact count of read locks held by this thread
 * is computable.
 */
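/*
 * For example, a (hypothetical) caller holding a read lock would use
 * rm_assert(&example_rm, RA_RLOCKED), which ends up here with what ==
 * RA_RLOCKED and panics unless curthread holds at least one read lock
 * on the rmlock.
 */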
void
_rm_assert(struct rmlock *rm, int what, const char *file, int line)
{
	int count;

	if (panicstr != NULL)
		return;
	switch (what) {
	case RA_LOCKED:
	case RA_LOCKED | RA_RECURSED:
	case RA_LOCKED | RA_NOTRECURSED:
	case RA_RLOCKED:
	case RA_RLOCKED | RA_RECURSED:
	case RA_RLOCKED | RA_NOTRECURSED:
		/*
		 * Handle the write-locked case.  Unlike other
		 * primitives, writers can never recurse.
		 */
		if (rm_wowned(rm)) {
			if (what & RA_RLOCKED)
				panic("Lock %s exclusively locked @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			if (what & RA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
			break;
		}

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count == 0)
			panic("Lock %s not %slocked @ %s:%d\n",
			    rm->lock_object.lo_name, (what & RA_RLOCKED) ?
			    "read " : "", file, line);
		if (count > 1) {
			if (what & RA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    rm->lock_object.lo_name, file, line);
		} else if (what & RA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_WLOCKED:
		if (!rm_wowned(rm))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	case RA_UNLOCKED:
		if (rm_wowned(rm))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);

		critical_enter();
		count = rm_trackers_present(pcpu_find(curcpu), rm, curthread);
		critical_exit();

		if (count != 0)
			panic("Lock %s read locked @ %s:%d\n",
			    rm->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown rm lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
static void
print_tracker(struct rm_priotracker *tr)
{
	struct thread *td;

	td = tr->rmp_thread;
	db_printf("   thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
	if (tr->rmp_flags & RMPF_ONQUEUE) {
		db_printf("ONQUEUE");
		if (tr->rmp_flags & RMPF_SIGNAL)
			db_printf(",SIGNAL");
	} else
		db_printf("0");
	db_printf("}\n");
}

static void
db_show_rm(struct lock_object *lock)
{
	struct rm_priotracker *tr;
	struct rm_queue *queue;
	struct rmlock *rm;
	struct lock_class *lc;
	struct pcpu *pc;

	rm = (struct rmlock *)lock;
	db_printf(" writecpus: ");
	ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus));
	db_printf("\n");
	db_printf(" per-CPU readers:\n");
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu)
		for (queue = pc->pc_rm_queue.rmq_next;
		    queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
			tr = (struct rm_priotracker *)queue;
			if (tr->rmp_rmlock == rm)
				print_tracker(tr);
		}
	db_printf(" active readers:\n");
	LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry)
		print_tracker(tr);
	lc = LOCK_CLASS(&rm->rm_wlock_object);
	db_printf("Backing write-lock (%s):\n", lc->lc_name);
	lc->lc_ddb_show(&rm->rm_wlock_object);
}
#endif