/*
 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_sx.c 74912 2001-03-28 09:03:24Z jhb $
 */

/*
 * Shared/exclusive locks.  This implementation ensures deterministic lock
 * granting behavior, so that slocks and xlocks are interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so it should not be relied upon in combination with sx locks.
 *
 * The witness code cannot detect lock cycles (yet).
 *
 * XXX: When witness is made to function with sx locks, it will need to
 * XXX: be taught to deal with these situations, as they are more involved:
 *   slock --> xlock (deadlock)
 *   slock --> slock (slock recursion, not fatal)
 */
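
/*
 * A minimal usage sketch (illustration only; "foo_lock" and the data it
 * protects are hypothetical).  Callers normally go through the
 * sx_slock()/sx_sunlock() and sx_xlock()/sx_xunlock() wrapper macros from
 * <sys/sx.h>, which supply the file/line arguments to the _sx_*() functions
 * below:
 *
 *	struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo");		(once, at setup time)
 *
 *	sx_slock(&foo_lock);			(shared/read access)
 *	...
 *	sx_sunlock(&foo_lock);
 *
 *	sx_xlock(&foo_lock);			(exclusive/write access)
 *	...
 *	sx_xunlock(&foo_lock);
 *
 *	sx_destroy(&foo_lock);			(once, at teardown time)
 */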

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>

/*
 * XXX: We don't implement the LO_RECURSED flag for this lock yet.
 * We could do this by walking p_sleeplocks if we really wanted to.
 */
struct lock_class lock_class_sx = {
	"sx",
	LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE
};

void
sx_init(struct sx *sx, const char *description)
{
	struct lock_object *lock;

	bzero(sx, sizeof(*sx));
	lock = &sx->sx_object;
	lock->lo_class = &lock_class_sx;
	lock->lo_name = description;
	lock->lo_flags = LO_WITNESS | LO_SLEEPABLE;
	mtx_init(&sx->sx_lock, "sx backing lock",
	    MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
	sx->sx_cnt = 0;
	cv_init(&sx->sx_shrd_cv, description);
	sx->sx_shrd_wcnt = 0;
	cv_init(&sx->sx_excl_cv, description);
	sx->sx_excl_wcnt = 0;
	sx->sx_xholder = NULL;

	LOCK_LOG_INIT(lock, 0);

	WITNESS_INIT(lock);
}

void
sx_destroy(struct sx *sx)
{

	LOCK_LOG_DESTROY(&sx->sx_object, 0);

	KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
	    0), ("%s (%s): holders or waiters\n", __FUNCTION__,
	    sx->sx_object.lo_name));

	mtx_destroy(&sx->sx_lock);
	cv_destroy(&sx->sx_shrd_cv);
	cv_destroy(&sx->sx_excl_cv);

	WITNESS_DESTROY(&sx->sx_object);
}

void
_sx_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(&sx->sx_lock);
	KASSERT(sx->sx_xholder != curproc,
	    ("%s (%s): trying to get slock while xlock is held\n", __FUNCTION__,
	    sx->sx_object.lo_name));

	/*
	 * Loop in case we lose the race for lock acquisition.
	 */
	while (sx->sx_cnt < 0) {
		sx->sx_shrd_wcnt++;
		cv_wait(&sx->sx_shrd_cv, &sx->sx_lock);
		sx->sx_shrd_wcnt--;
	}

	/* Acquire a shared lock. */
	sx->sx_cnt++;

#ifdef WITNESS
	sx->sx_object.lo_flags |= LO_LOCKED;
#endif
	LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, 0, file, line);

	mtx_unlock(&sx->sx_lock);
}

void
_sx_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(&sx->sx_lock);

	/*
	 * With sx locks, we're absolutely not permitted to recurse on
	 * xlocks, as it is fatal (deadlock). Normally, recursion is handled
	 * by WITNESS, but as it is not semantically correct to hold the
	 * xlock while in here, we consider it API abuse and put it under
	 * INVARIANTS.
	 */
	KASSERT(sx->sx_xholder != curproc,
	    ("%s (%s): xlock already held @ %s:%d", __FUNCTION__,
	    sx->sx_object.lo_name, file, line));

	/* Loop in case we lose the race for lock acquisition. */
	while (sx->sx_cnt != 0) {
		sx->sx_excl_wcnt++;
		cv_wait(&sx->sx_excl_cv, &sx->sx_lock);
		sx->sx_excl_wcnt--;
	}

	MPASS(sx->sx_cnt == 0);

	/* Acquire an exclusive lock. */
	sx->sx_cnt--;
	sx->sx_xholder = curproc;

#ifdef WITNESS
	sx->sx_object.lo_flags |= LO_LOCKED;
#endif
	LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, 0, file, line);

	mtx_unlock(&sx->sx_lock);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(&sx->sx_lock);
	_SX_ASSERT_SLOCKED(sx);

#ifdef WITNESS
	/* Clear LO_LOCKED only when the last shared holder is releasing. */
	if (sx->sx_cnt == 1)
		sx->sx_object.lo_flags &= ~LO_LOCKED;
#endif
	WITNESS_UNLOCK(&sx->sx_object, 0, file, line);

	/* Release. */
	sx->sx_cnt--;

	/*
	 * If we just released the last shared lock, wake any waiters up, giving
	 * exclusive lockers precedence.  In order to make sure that exclusive
	 * lockers won't be blocked forever, don't wake shared lock waiters if
	 * there are exclusive lock waiters.
	 */
	if (sx->sx_excl_wcnt > 0) {
		if (sx->sx_cnt == 0)
			cv_signal(&sx->sx_excl_cv);
	} else if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(&sx->sx_lock);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(&sx->sx_lock);
	_SX_ASSERT_XLOCKED(sx);
	MPASS(sx->sx_cnt == -1);

#ifdef WITNESS
	sx->sx_object.lo_flags &= ~LO_LOCKED;
#endif
	WITNESS_UNLOCK(&sx->sx_object, 0, file, line);

	/* Release. */
	sx->sx_cnt++;
	sx->sx_xholder = NULL;

	/*
	 * Wake up waiters if there are any.  Give precedence to slock waiters.
	 */
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);
	else if (sx->sx_excl_wcnt > 0)
		cv_signal(&sx->sx_excl_cv);

	LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(&sx->sx_lock);
}
234