/*-
 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation assures deterministic lock
 * granting behavior, so that slocks and xlocks are interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
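
/*
 * Illustrative sketch of typical sx(9) consumer usage (not part of this
 * file; the "foo" names below are hypothetical):
 *
 *	static struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo lock");		(once, at initialization)
 *
 *	sx_slock(&foo_lock);			(shared, read-only access)
 *	... read foo's state ...
 *	sx_sunlock(&foo_lock);
 *
 *	sx_xlock(&foo_lock);			(exclusive, read-write access)
 *	... modify foo's state ...
 *	sx_xunlock(&foo_lock);
 *
 *	sx_destroy(&foo_lock);			(once, at teardown)
 */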

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_sx.c 154077 2006-01-06 18:07:32Z jhb $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>

#include <ddb/ddb.h>

#ifdef DDB
static void	db_show_sx(struct lock_object *lock);
#endif

struct lock_class lock_class_sx = {
	"sx",
	LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	db_show_sx
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

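/*
 * SYSINIT callback used to initialize an sx lock from a struct sx_args
 * at boot time (see the SX_SYSINIT() macro in sys/sx.h).
 */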
void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init(sargs->sa_sx, sargs->sa_desc);
}

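/*
 * Initialize an sx lock: clear the structure, attach a pool mutex to
 * protect the counters below, and register the lock with WITNESS.
 */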
void
sx_init(struct sx *sx, const char *description)
{
	struct lock_object *lock;

	lock = &sx->sx_object;
	KASSERT((lock->lo_flags & LO_INITIALIZED) == 0,
	    ("sx lock %s %p already initialized", description, sx));
	bzero(sx, sizeof(*sx));
	lock->lo_flags = LOCK_CLASS_SX << LO_CLASSSHIFT;
	lock->lo_type = lock->lo_name = description;
	lock->lo_flags |= LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE |
	    LO_UPGRADABLE;
	sx->sx_lock = mtx_pool_find(mtxpool_lockbuilder, sx);
	sx->sx_cnt = 0;
	cv_init(&sx->sx_shrd_cv, description);
	sx->sx_shrd_wcnt = 0;
	cv_init(&sx->sx_excl_cv, description);
	sx->sx_excl_wcnt = 0;
	sx->sx_xholder = NULL;

	LOCK_LOG_INIT(lock, 0);

	WITNESS_INIT(lock);
}

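/*
 * Tear down an sx lock.  The lock must be idle: no holders and no
 * threads waiting for either shared or exclusive access.
 */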
void
sx_destroy(struct sx *sx)
{

	LOCK_LOG_DESTROY(&sx->sx_object, 0);

	KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
	    0), ("%s (%s): holders or waiters\n", __func__,
	    sx->sx_object.lo_name));

	sx->sx_lock = NULL;
	cv_destroy(&sx->sx_shrd_cv);
	cv_destroy(&sx->sx_excl_cv);

	WITNESS_DESTROY(&sx->sx_object);
}

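/*
 * Acquire a shared lock, sleeping on the shared-waiters condition
 * variable for as long as an exclusive holder is present.
 */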
void
_sx_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
	    sx->sx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER, file, line);

	/*
	 * Loop in case we lose the race for lock acquisition.
	 */
	while (sx->sx_cnt < 0) {
		sx->sx_shrd_wcnt++;
		cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
		sx->sx_shrd_wcnt--;
	}

	/* Acquire a shared lock. */
	sx->sx_cnt++;

	LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

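/*
 * Try to acquire a shared lock without sleeping; returns 0 if an
 * exclusive holder is present.
 */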
int
_sx_try_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt >= 0) {
		sx->sx_cnt++;
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

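/*
 * Acquire an exclusive lock, sleeping on the exclusive-waiters condition
 * variable until there are no shared or exclusive holders left.
 */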
void
_sx_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);

	/*
	 * With sx locks, we're absolutely not permitted to recurse on
	 * xlocks, as it is fatal (deadlock). Normally, recursion is handled
	 * by WITNESS, but as it is not semantically correct to hold the
	 * xlock while in here, we consider it API abuse and put it under
	 * INVARIANTS.
	 */
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): xlock already held @ %s:%d", __func__,
	    sx->sx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);

	/* Loop in case we lose the race for lock acquisition. */
	while (sx->sx_cnt != 0) {
		sx->sx_excl_wcnt++;
		cv_wait(&sx->sx_excl_cv, sx->sx_lock);
		sx->sx_excl_wcnt--;
	}

	MPASS(sx->sx_cnt == 0);

	/* Acquire an exclusive lock. */
	sx->sx_cnt--;
	sx->sx_xholder = curthread;

	LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

	mtx_unlock(sx->sx_lock);
}

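/*
 * Try to acquire an exclusive lock without sleeping; returns 0 if the
 * lock is currently held shared or exclusive.
 */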
int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt == 0) {
		sx->sx_cnt--;
		sx->sx_xholder = curthread;
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
		    line);
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

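/*
 * Release a shared lock, waking up waiters according to the policy
 * described in the comment below.
 */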
void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	WITNESS_UNLOCK(&sx->sx_object, 0, file, line);

	/* Release. */
	sx->sx_cnt--;

	/*
	 * If we just released the last shared lock, wake any waiters up, giving
	 * exclusive lockers precedence.  In order to make sure that exclusive
	 * lockers won't be blocked forever, don't wake shared lock waiters if
	 * there are exclusive lock waiters.
	 */
	if (sx->sx_excl_wcnt > 0) {
		if (sx->sx_cnt == 0)
			cv_signal(&sx->sx_excl_cv);
	} else if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

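/*
 * Release the exclusive lock and wake up waiters, preferring shared
 * waiters over exclusive ones.
 */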
void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

	/* Release. */
	sx->sx_cnt++;
	sx->sx_xholder = NULL;

	/*
	 * Wake up waiters if there are any.  Give precedence to slock waiters.
	 */
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);
	else if (sx->sx_excl_wcnt > 0)
		cv_signal(&sx->sx_excl_cv);

	LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

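/*
 * Try to upgrade a shared lock to an exclusive lock; this succeeds only
 * if the caller holds the sole shared reference.
 */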
int
_sx_try_upgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	if (sx->sx_cnt == 1) {
		sx->sx_cnt = -1;
		sx->sx_xholder = curthread;

		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
		WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

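/*
 * Downgrade an exclusive lock to a shared lock and wake up any threads
 * waiting for shared access.
 */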
void
_sx_downgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	WITNESS_DOWNGRADE(&sx->sx_object, 0, file, line);

	sx->sx_cnt = 1;
	sx->sx_xholder = NULL;
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case SX_LOCKED:
	case SX_SLOCKED:
#ifdef WITNESS
		witness_assert(&sx->sx_object, what, file, line);
#else
		mtx_lock(sx->sx_lock);
		if (sx->sx_cnt <= 0 &&
		    (what == SX_SLOCKED || sx->sx_xholder != curthread))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->sx_object.lo_name, (what == SX_SLOCKED) ?
			    "share " : "", file, line);
		mtx_unlock(sx->sx_lock);
#endif
		break;
	case SX_XLOCKED:
		mtx_lock(sx->sx_lock);
		if (sx->sx_xholder != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->sx_object.lo_name, file, line);
		mtx_unlock(sx->sx_lock);
		break;
	case SX_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->sx_object, what, file, line);
#else
		/*
		 * We can only check the exclusive lock here; we cannot
		 * assert that *this* thread does not hold an slock.
		 */
		mtx_lock(sx->sx_lock);
		if (sx->sx_xholder == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->sx_object.lo_name, file, line);
		mtx_unlock(sx->sx_lock);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */

#ifdef DDB
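/*
 * DDB hook to dump the state of an sx lock: print the current holder
 * (or the share count) and the number of waiters of each kind.
 */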
void
db_show_sx(struct lock_object *lock)
{
	struct thread *td;
	struct sx *sx;

	sx = (struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_cnt < 0) {
		td = sx->sx_xholder;
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
	} else if (sx->sx_cnt > 0)
		db_printf("SLOCK: %d locks\n", sx->sx_cnt);
	else
		db_printf("UNLOCKED\n");
	db_printf(" waiters: %d shared, %d exclusive\n", sx->sx_shrd_wcnt,
	    sx->sx_excl_wcnt);
}
#endif