kern_sx.c revision 87594
/*
 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_sx.c 87594 2001-12-10 05:51:45Z obrien $
 */

/*
 * Shared/exclusive locks.  This implementation assures deterministic lock
 * granting behavior, so that slocks and xlocks are interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
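
/*
 * Illustrative sketch of typical usage, via the sx_*() wrapper macros
 * declared in <sys/sx.h>; "data_lock" and the data it protects are
 * hypothetical:
 *
 *	struct sx data_lock;
 *
 *	sx_init(&data_lock, "data lock");
 *
 *	sx_slock(&data_lock);		read the protected data
 *	sx_sunlock(&data_lock);
 *
 *	sx_xlock(&data_lock);		modify the protected data
 *	sx_xunlock(&data_lock);
 *
 *	sx_destroy(&data_lock);
 */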

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>

struct lock_class lock_class_sx = {
	"sx",
	LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

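/*
 * Initialize an sx lock: register it with the lock/witness machinery,
 * hand it a pool mutex to protect its internal state, and set up the
 * condition variables that shared and exclusive waiters sleep on.
 */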
void
sx_init(struct sx *sx, const char *description)
{
	struct lock_object *lock;

	lock = &sx->sx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("sx lock %s %p already initialized", description, sx));
	bzero(sx, sizeof(*sx));
	lock->lo_class = &lock_class_sx;
	lock->lo_name = description;
	lock->lo_flags = LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE |
	    LO_UPGRADABLE;
	sx->sx_lock = mtx_pool_find(sx);
	sx->sx_cnt = 0;
	cv_init(&sx->sx_shrd_cv, description);
	sx->sx_shrd_wcnt = 0;
	cv_init(&sx->sx_excl_cv, description);
	sx->sx_excl_wcnt = 0;
	sx->sx_xholder = NULL;

	LOCK_LOG_INIT(lock, 0);

	WITNESS_INIT(lock);
}

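/*
 * Destroy an sx lock.  The lock must have no holders and no waiters.
 */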
void
sx_destroy(struct sx *sx)
{

	LOCK_LOG_DESTROY(&sx->sx_object, 0);

	KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
	    0), ("%s (%s): holders or waiters\n", __func__,
	    sx->sx_object.lo_name));

	sx->sx_lock = NULL;
	cv_destroy(&sx->sx_shrd_cv);
	cv_destroy(&sx->sx_excl_cv);

	WITNESS_DESTROY(&sx->sx_object);
}

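/*
 * Acquire a shared lock, sleeping for as long as an exclusive holder
 * exists.
 */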
void
_sx_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
	    sx->sx_object.lo_name, file, line));

	/*
	 * Loop in case we lose the race for lock acquisition.
	 */
	while (sx->sx_cnt < 0) {
		sx->sx_shrd_wcnt++;
		cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
		sx->sx_shrd_wcnt--;
	}

	/* Acquire a shared lock. */
	sx->sx_cnt++;

	LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

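/*
 * Try to acquire a shared lock without sleeping.  Returns 1 on success
 * and 0 if the lock is held exclusively.
 */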
int
_sx_try_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt >= 0) {
		sx->sx_cnt++;
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

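/*
 * Acquire an exclusive lock, sleeping for as long as any holders
 * (shared or exclusive) exist.
 */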
void
_sx_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);

	/*
	 * With sx locks, we're absolutely not permitted to recurse on
	 * xlocks, as it is fatal (deadlock). Normally, recursion is handled
	 * by WITNESS, but as it is not semantically correct to hold the
	 * xlock while in here, we consider it API abuse and put it under
	 * INVARIANTS.
	 */
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): xlock already held @ %s:%d", __func__,
	    sx->sx_object.lo_name, file, line));

	/* Loop in case we lose the race for lock acquisition. */
	while (sx->sx_cnt != 0) {
		sx->sx_excl_wcnt++;
		cv_wait(&sx->sx_excl_cv, sx->sx_lock);
		sx->sx_excl_wcnt--;
	}

	MPASS(sx->sx_cnt == 0);

	/* Acquire an exclusive lock. */
	sx->sx_cnt--;
	sx->sx_xholder = curthread;

	LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

	mtx_unlock(sx->sx_lock);
}

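/*
 * Try to acquire an exclusive lock without sleeping.  Returns 1 on
 * success and 0 if any holders exist.
 */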
int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt == 0) {
		sx->sx_cnt--;
		sx->sx_xholder = curthread;
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
		    line);
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

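/*
 * Release a shared lock and wake up any waiters, giving exclusive
 * waiters precedence.
 */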
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	WITNESS_UNLOCK(&sx->sx_object, 0, file, line);

	/* Release. */
	sx->sx_cnt--;

	/*
	 * If we just released the last shared lock, wake any waiters up, giving
	 * exclusive lockers precedence.  In order to make sure that exclusive
	 * lockers won't be blocked forever, don't wake shared lock waiters if
	 * there are exclusive lock waiters.
	 */
	if (sx->sx_excl_wcnt > 0) {
		if (sx->sx_cnt == 0)
			cv_signal(&sx->sx_excl_cv);
	} else if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

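/*
 * Release an exclusive lock and wake up any waiters, giving shared
 * waiters precedence.
 */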
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

	/* Release. */
	sx->sx_cnt++;
	sx->sx_xholder = NULL;

	/*
	 * Wake up waiters if there are any.  Give precedence to slock waiters.
	 */
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);
	else if (sx->sx_excl_wcnt > 0)
		cv_signal(&sx->sx_excl_cv);

	LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

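/*
 * Try to upgrade a shared lock to an exclusive lock.  This only succeeds
 * if the caller holds the sole shared reference; returns 1 on success
 * and 0 otherwise.
 */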
int
_sx_try_upgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	if (sx->sx_cnt == 1) {
		sx->sx_cnt = -1;
		sx->sx_xholder = curthread;

		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
		WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

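/*
 * Downgrade an exclusive lock to a shared lock and wake up any shared
 * waiters.
 */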
void
_sx_downgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	WITNESS_DOWNGRADE(&sx->sx_object, 0, file, line);

	sx->sx_cnt = 1;
	sx->sx_xholder = NULL;
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{

	switch (what) {
	case SX_LOCKED:
	case SX_SLOCKED:
#ifdef WITNESS
		witness_assert(&sx->sx_object, what, file, line);
#else
		mtx_lock(sx->sx_lock);
		if (sx->sx_cnt <= 0 &&
		    (what == SX_SLOCKED || sx->sx_xholder == curthread))
			printf("Lock %s not %slocked @ %s:%d",
			    sx->sx_object.lo_name, (what == SX_SLOCKED) ?
			    "share " : "", file, line);
		mtx_unlock(sx->sx_lock);
#endif
		break;
	case SX_XLOCKED:
		mtx_lock(sx->sx_lock);
		if (sx->sx_xholder != curthread)
			printf("Lock %s not exclusively locked @ %s:%d",
			    sx->sx_object.lo_name, file, line);
		mtx_unlock(sx->sx_lock);
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */