/* kern_sx.c revision 85412 */
1/*
2 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.  All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice(s), this list of conditions and the following disclaimer as
9 *    the first lines of this file unmodified other than the possible
10 *    addition of one or more copyright notices.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice(s), this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
19 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
25 * DAMAGE.
26 *
27 * $FreeBSD: head/sys/kern/kern_sx.c 85412 2001-10-24 14:18:33Z jhb $
28 */
29
30/*
31 * Shared/exclusive locks.  This implementation assures deterministic lock
32 * granting behavior, so that slocks and xlocks are interleaved.
33 *
34 * Priority propagation will not generally raise the priority of lock holders,
35 * so should not be relied upon in combination with sx locks.
36 */
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/ktr.h>
41#include <sys/condvar.h>
42#include <sys/lock.h>
43#include <sys/mutex.h>
44#include <sys/sx.h>
45
/*
 * Lock class descriptor consumed by the lock/WITNESS machinery: sx is a
 * sleepable sleep lock that may be recursed on and upgraded (shared ->
 * exclusive), matching the LO_* flags set on each lock in sx_init().
 */
struct lock_class lock_class_sx = {
	"sx",
	LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE
};
50
/*
 * Without INVARIANTS the lock-state assertions compile away entirely;
 * the real _sx_assert() is defined under INVARIANT_SUPPORT below.
 */
#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif
54
55void
56sx_init(struct sx *sx, const char *description)
57{
58	struct lock_object *lock;
59
60	lock = &sx->sx_object;
61	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
62	    ("sx lock %s %p already initialized", description, sx));
63	bzero(sx, sizeof(*sx));
64	lock->lo_class = &lock_class_sx;
65	lock->lo_name = description;
66	lock->lo_flags = LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE |
67	    LO_UPGRADABLE;
68	mtx_init(&sx->sx_lock, "sx backing lock",
69	    MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
70	sx->sx_cnt = 0;
71	cv_init(&sx->sx_shrd_cv, description);
72	sx->sx_shrd_wcnt = 0;
73	cv_init(&sx->sx_excl_cv, description);
74	sx->sx_excl_wcnt = 0;
75	sx->sx_xholder = NULL;
76
77	LOCK_LOG_INIT(lock, 0);
78
79	WITNESS_INIT(lock);
80}
81
82void
83sx_destroy(struct sx *sx)
84{
85
86	LOCK_LOG_DESTROY(&sx->sx_object, 0);
87
88	KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
89	    0), ("%s (%s): holders or waiters\n", __FUNCTION__,
90	    sx->sx_object.lo_name));
91
92	mtx_destroy(&sx->sx_lock);
93	cv_destroy(&sx->sx_shrd_cv);
94	cv_destroy(&sx->sx_excl_cv);
95
96	WITNESS_DESTROY(&sx->sx_object);
97}
98
/*
 * Acquire a shared (read) lock; may sleep.  All state transitions are
 * made under the backing mutex sx_lock.  sx_cnt < 0 encodes an
 * exclusive holder, so we sleep on the shared-waiters condition
 * variable until the exclusive holder goes away.
 */
void
_sx_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(&sx->sx_lock);
	/*
	 * Taking a shared lock while this thread already holds the
	 * exclusive lock would self-deadlock: we would sleep waiting
	 * for our own xlock to be released.
	 */
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): slock while xlock is held @ %s:%d\n", __FUNCTION__,
	    sx->sx_object.lo_name, file, line));

	/*
	 * Loop in case we lose the race for lock acquisition.
	 */
	while (sx->sx_cnt < 0) {
		sx->sx_shrd_wcnt++;
		cv_wait(&sx->sx_shrd_cv, &sx->sx_lock);
		sx->sx_shrd_wcnt--;
	}

	/* Acquire a shared lock. */
	sx->sx_cnt++;

	LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, 0, file, line);

	mtx_unlock(&sx->sx_lock);
}
125
126int
127_sx_try_slock(struct sx *sx, const char *file, int line)
128{
129
130	mtx_lock(&sx->sx_lock);
131	if (sx->sx_cnt >= 0) {
132		sx->sx_cnt++;
133		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
134		WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
135		mtx_unlock(&sx->sx_lock);
136		return (1);
137	} else {
138		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
139		mtx_unlock(&sx->sx_lock);
140		return (0);
141	}
142}
143
/*
 * Acquire the exclusive (write) lock; may sleep.  Waits on the
 * exclusive-waiters condition variable until the lock is completely
 * idle (sx_cnt == 0), then marks it exclusively held (sx_cnt == -1)
 * and records the owning thread.
 */
void
_sx_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(&sx->sx_lock);

	/*
	 * With sx locks, we're absolutely not permitted to recurse on
	 * xlocks, as it is fatal (deadlock). Normally, recursion is handled
	 * by WITNESS, but as it is not semantically correct to hold the
	 * xlock while in here, we consider it API abuse and put it under
	 * INVARIANTS.
	 */
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): xlock already held @ %s:%d", __FUNCTION__,
	    sx->sx_object.lo_name, file, line));

	/* Loop in case we lose the race for lock acquisition. */
	while (sx->sx_cnt != 0) {
		sx->sx_excl_wcnt++;
		cv_wait(&sx->sx_excl_cv, &sx->sx_lock);
		sx->sx_excl_wcnt--;
	}

	MPASS(sx->sx_cnt == 0);

	/* Acquire an exclusive lock (sx_cnt goes to -1). */
	sx->sx_cnt--;
	sx->sx_xholder = curthread;

	LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

	mtx_unlock(&sx->sx_lock);
}
179
180int
181_sx_try_xlock(struct sx *sx, const char *file, int line)
182{
183
184	mtx_lock(&sx->sx_lock);
185	if (sx->sx_cnt == 0) {
186		sx->sx_cnt--;
187		sx->sx_xholder = curthread;
188		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
189		WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
190		    line);
191		mtx_unlock(&sx->sx_lock);
192		return (1);
193	} else {
194		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
195		mtx_unlock(&sx->sx_lock);
196		return (0);
197	}
198}
199
/*
 * Release one shared hold.  The wakeup policy below, together with the
 * reader-precedence in _sx_xunlock(), implements the interleaving
 * described at the top of this file.
 */
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(&sx->sx_lock);

	WITNESS_UNLOCK(&sx->sx_object, 0, file, line);

	/* Release. */
	sx->sx_cnt--;

	/*
	 * If we just released the last shared lock, wake any waiters up, giving
	 * exclusive lockers precedence.  In order to make sure that exclusive
	 * lockers won't be blocked forever, don't wake shared lock waiters if
	 * there are exclusive lock waiters.
	 */
	if (sx->sx_excl_wcnt > 0) {
		if (sx->sx_cnt == 0)
			cv_signal(&sx->sx_excl_cv);
	} else if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(&sx->sx_lock);
}
228
/*
 * Release the exclusive lock.  Shared waiters are preferred here
 * (mirror image of _sx_sunlock(), which prefers exclusive waiters),
 * which yields the interleaved grant order promised at the top of
 * this file.
 */
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(&sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

	/* Release (sx_cnt goes from -1 back to 0). */
	sx->sx_cnt++;
	sx->sx_xholder = NULL;

	/*
	 * Wake up waiters if there are any.  Give precedence to slock waiters.
	 */
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);
	else if (sx->sx_excl_wcnt > 0)
		cv_signal(&sx->sx_excl_cv);

	LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(&sx->sx_lock);
}
255
256int
257_sx_try_upgrade(struct sx *sx, const char *file, int line)
258{
259
260	_sx_assert(sx, SX_SLOCKED, file, line);
261	mtx_lock(&sx->sx_lock);
262
263	if (sx->sx_cnt == 1) {
264		sx->sx_cnt = -1;
265		sx->sx_xholder = curthread;
266
267		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
268		WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
269		    file, line);
270
271		mtx_unlock(&sx->sx_lock);
272		return (1);
273	} else {
274		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
275		mtx_unlock(&sx->sx_lock);
276		return (0);
277	}
278}
279
280void
281_sx_downgrade(struct sx *sx, const char *file, int line)
282{
283
284	_sx_assert(sx, SX_XLOCKED, file, line);
285	mtx_lock(&sx->sx_lock);
286	MPASS(sx->sx_cnt == -1);
287
288	WITNESS_DOWNGRADE(&sx->sx_object, 0, file, line);
289
290	sx->sx_cnt = 1;
291	sx->sx_xholder = NULL;
292        if (sx->sx_shrd_wcnt > 0)
293                cv_broadcast(&sx->sx_shrd_cv);
294
295	LOCK_LOG_LOCK("XDOWNGRADE", &sx->sx_object, 0, 0, file, line);
296
297	mtx_unlock(&sx->sx_lock);
298}
299
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * Assert that the lock is in the state named by 'what' (SX_LOCKED,
 * SX_SLOCKED, or SX_XLOCKED); complains via printf() on mismatch and
 * panics on an unknown assertion code.
 *
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 *
 * Fix: both printf() diagnostics were missing a trailing '\n'; the
 * kernel printf does not terminate lines itself, so the following
 * console message would be fused onto the warning.
 */
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{

	switch (what) {
	case SX_LOCKED:
	case SX_SLOCKED:
#ifdef WITNESS
		witness_assert(&sx->sx_object, what, file, line);
#else
		/*
		 * sx_cnt > 0 means some thread holds a shared lock;
		 * for SX_LOCKED an exclusive hold by this thread also
		 * satisfies the assertion.
		 */
		mtx_lock(&sx->sx_lock);
		if (sx->sx_cnt <= 0 &&
		    (what == SX_SLOCKED || sx->sx_xholder == curthread))
			printf("Lock %s not %slocked @ %s:%d\n",
			    sx->sx_object.lo_name, (what == SX_SLOCKED) ?
			    "share " : "", file, line);
		mtx_unlock(&sx->sx_lock);
#endif
		break;
	case SX_XLOCKED:
		mtx_lock(&sx->sx_lock);
		if (sx->sx_xholder != curthread)
			printf("Lock %s not exclusively locked @ %s:%d\n",
			    sx->sx_object.lo_name, file, line);
		mtx_unlock(&sx->sx_lock);
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */
342