/*-
 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation assures deterministic lock
 * granting behavior, so that slocks and xlocks are interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
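
/*
 * Usage sketch (illustrative only; "foo_lock", foo_find(), and
 * foo_modify() are hypothetical names, not part of this interface):
 *
 *	static struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo lock");
 *
 *	sx_slock(&foo_lock);		(readers; the lock may be held
 *	foo = foo_find(name);		 across sleeps)
 *	sx_sunlock(&foo_lock);
 *
 *	sx_xlock(&foo_lock);		(writers exclude readers and
 *	foo_modify(foo);		 other writers)
 *	sx_xunlock(&foo_lock);
 *
 *	sx_destroy(&foo_lock);
 */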
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_sx.c 167787 2007-03-21 21:20:51Z jhb $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/lock_profile.h>

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_sx(struct lock_object *lock);
#endif
static void	lock_sx(struct lock_object *lock, int how);
static int	unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
	.lc_name = "sx",
	.lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	.lc_ddb_show = db_show_sx,
#endif
	.lc_lock = lock_sx,
	.lc_unlock = unlock_sx,
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif
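
/*
 * Lock class callbacks.  Generic lock_class consumers (for example, the
 * sleep queue code) use these to release and reacquire an sx lock
 * without knowing its type: unlock_sx() returns non-zero if the lock
 * was held exclusively, and that value is later passed back to
 * lock_sx() as "how" so the lock is reacquired in the same mode.
 */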
static void
lock_sx(struct lock_object *lock, int how)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	if (how)
		sx_xlock(sx);
	else
		sx_slock(sx);
}

static int
unlock_sx(struct lock_object *lock)
{
	struct sx *sx;

	sx = (struct sx *)lock;
	sx_assert(sx, SX_LOCKED | LA_NOTRECURSED);
	if (sx_xlocked(sx)) {
		sx_xunlock(sx);
		return (1);
	} else {
		sx_sunlock(sx);
		return (0);
	}
}

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init(sargs->sa_sx, sargs->sa_desc);
}
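
/*
 * sx_sysinit() backs the SX_SYSINIT() helper macro in <sys/sx.h>, which
 * initializes a lock during boot via SYSINIT(9), e.g. (illustrative):
 *
 *	static struct sx foo_lock;
 *	SX_SYSINIT(foo_lock_init, &foo_lock, "foo lock");
 */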

void
sx_init(struct sx *sx, const char *description)
{

	sx->sx_lock = mtx_pool_find(mtxpool_lockbuilder, sx);
	sx->sx_cnt = 0;
	cv_init(&sx->sx_shrd_cv, description);
	sx->sx_shrd_wcnt = 0;
	cv_init(&sx->sx_excl_cv, description);
	sx->sx_excl_wcnt = 0;
	sx->sx_xholder = NULL;
	lock_profile_object_init(&sx->lock_object, &lock_class_sx, description);
	lock_init(&sx->lock_object, &lock_class_sx, description, NULL,
	    LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
}

void
sx_destroy(struct sx *sx)
{

	KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
	    0), ("%s (%s): holders or waiters\n", __func__,
	    sx->lock_object.lo_name));

	sx->sx_lock = NULL;
	cv_destroy(&sx->sx_shrd_cv);
	cv_destroy(&sx->sx_excl_cv);

	lock_profile_object_destroy(&sx->lock_object);
	lock_destroy(&sx->lock_object);
}

void
_sx_slock(struct sx *sx, const char *file, int line)
{
	uint64_t waittime = 0;
	int contested = 0;

	mtx_lock(sx->sx_lock);
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
	    sx->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line);

	/*
	 * Loop in case we lose the race for lock acquisition.
	 */
	while (sx->sx_cnt < 0) {
		sx->sx_shrd_wcnt++;
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);
		cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
		sx->sx_shrd_wcnt--;
	}

	/* Acquire a shared lock. */
	sx->sx_cnt++;

	if (sx->sx_cnt == 1)
		lock_profile_obtain_lock_success(&sx->lock_object, contested,
		    waittime, file, line);

	LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->lock_object, 0, file, line);
	curthread->td_locks++;

	mtx_unlock(sx->sx_lock);
}

int
_sx_try_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt >= 0) {
		sx->sx_cnt++;
		LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
		curthread->td_locks++;
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}
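
/*
 * Try-lock sketch (illustrative; foo_lock is hypothetical): callers
 * must be prepared for failure rather than spinning on the lock, e.g.:
 *
 *	if (sx_try_slock(&foo_lock) == 0)
 *		return (EWOULDBLOCK);
 *	...
 *	sx_sunlock(&foo_lock);
 */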

void
_sx_xlock(struct sx *sx, const char *file, int line)
{
	int contested = 0;
	uint64_t waittime = 0;

	mtx_lock(sx->sx_lock);

	/*
	 * With sx locks, we're absolutely not permitted to recurse on
	 * xlocks, as it is fatal (deadlock).  Normally, recursion is handled
	 * by WITNESS, but as it is not semantically correct to hold the
	 * xlock while in here, we consider it API abuse and put it under
	 * INVARIANTS.
	 */
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): xlock already held @ %s:%d", __func__,
	    sx->lock_object.lo_name, file, line));
	WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);

	/* Loop in case we lose the race for lock acquisition. */
	while (sx->sx_cnt != 0) {
		sx->sx_excl_wcnt++;
		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
		    &waittime);
		cv_wait(&sx->sx_excl_cv, sx->sx_lock);
		sx->sx_excl_wcnt--;
	}

	MPASS(sx->sx_cnt == 0);

	/* Acquire an exclusive lock. */
	sx->sx_cnt--;
	sx->sx_xholder = curthread;

	lock_profile_obtain_lock_success(&sx->lock_object, contested, waittime,
	    file, line);
	LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;

	mtx_unlock(sx->sx_lock);
}

int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt == 0) {
		sx->sx_cnt--;
		sx->sx_xholder = curthread;
		LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
		    line);
		curthread->td_locks++;
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{
	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);

	/* Release. */
	sx->sx_cnt--;

	if (sx->sx_cnt == 0)
		lock_profile_release_lock(&sx->lock_object);

	/*
	 * If we just released the last shared lock, wake any waiters up,
	 * giving exclusive lockers precedence.  In order to make sure that
	 * exclusive lockers won't be blocked forever, don't wake shared lock
	 * waiters if there are exclusive lock waiters.
	 */
	if (sx->sx_excl_wcnt > 0) {
		if (sx->sx_cnt == 0)
			cv_signal(&sx->sx_excl_cv);
	} else if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{
	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);

	/* Release. */
	sx->sx_cnt++;
	sx->sx_xholder = NULL;

	/*
	 * Wake up waiters if there are any.  Give precedence to slock waiters.
	 */
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);
	else if (sx->sx_excl_wcnt > 0)
		cv_signal(&sx->sx_excl_cv);

	LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, 0, file, line);

	lock_profile_release_lock(&sx->lock_object);
	mtx_unlock(sx->sx_lock);
}

int
_sx_try_upgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	if (sx->sx_cnt == 1) {
		sx->sx_cnt = -1;
		sx->sx_xholder = curthread;

		LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, 1, file, line);
		WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}
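
/*
 * Upgrade sketch (illustrative; foo_lock is hypothetical): on failure,
 * drop the shared lock and take the exclusive lock from scratch, then
 * revalidate any state that may have changed while no lock was held:
 *
 *	sx_slock(&foo_lock);
 *	if (!sx_try_upgrade(&foo_lock)) {
 *		sx_sunlock(&foo_lock);
 *		sx_xlock(&foo_lock);
 *		(revalidate state here)
 *	}
 *	...
 *	sx_xunlock(&foo_lock);
 */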

void
_sx_downgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

	sx->sx_cnt = 1;
	sx->sx_xholder = NULL;
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}
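
/*
 * Since a downgrade never blocks, a writer that has finished modifying
 * state but still needs a stable view of it can let readers in early
 * (illustrative; foo_lock is hypothetical):
 *
 *	sx_xlock(&foo_lock);
 *	(modify state)
 *	sx_downgrade(&foo_lock);
 *	(read state while other readers proceed)
 *	sx_sunlock(&foo_lock);
 */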

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case SX_LOCKED:
	case SX_LOCKED | LA_NOTRECURSED:
	case SX_SLOCKED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		mtx_lock(sx->sx_lock);
		if (sx->sx_cnt <= 0 &&
		    (what == SX_SLOCKED || sx->sx_xholder != curthread))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->lock_object.lo_name, (what == SX_SLOCKED) ?
			    "share " : "", file, line);
		mtx_unlock(sx->sx_lock);
#endif
		break;
	case SX_XLOCKED:
		mtx_lock(sx->sx_lock);
		if (sx->sx_xholder != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		mtx_unlock(sx->sx_lock);
		break;
	case SX_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->lock_object, what, file, line);
#else
		/*
		 * We can only check for an exclusive lock here; there
		 * is no way to assert that *this* thread holds no slock.
		 */
		mtx_lock(sx->sx_lock);
		if (sx->sx_xholder == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->lock_object.lo_name, file, line);
		mtx_unlock(sx->sx_lock);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */
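
/*
 * Consumers typically use sx_assert() to document and enforce locking
 * contracts at function entry, e.g. (illustrative; foo_lock and
 * foo_remove() are hypothetical):
 *
 *	void
 *	foo_remove(struct foo *foo)
 *	{
 *
 *		sx_assert(&foo_lock, SX_XLOCKED);
 *		...
 *	}
 */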

#ifdef DDB
void
db_show_sx(struct lock_object *lock)
{
	struct thread *td;
	struct sx *sx;

	sx = (struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_cnt < 0) {
		td = sx->sx_xholder;
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
	} else if (sx->sx_cnt > 0)
		db_printf("SLOCK: %d locks\n", sx->sx_cnt);
	else
		db_printf("UNLOCKED\n");
	db_printf(" waiters: %d shared, %d exclusive\n", sx->sx_shrd_wcnt,
	    sx->sx_excl_wcnt);
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;
	struct cv *cv;

	/*
	 * First, see if it looks like td is blocked on a condition
	 * variable.
	 */
	cv = td->td_wchan;
	if (cv->cv_description != td->td_wmesg)
		return (0);

	/*
	 * Next, see if it looks like td is blocked on the exclusive
	 * condition variable.
	 */
	sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_excl_cv));
	if (LOCK_CLASS(&sx->lock_object) == &lock_class_sx &&
	    sx->sx_excl_wcnt > 0)
		goto ok;

	/*
	 * Otherwise, see if it looks like td is blocked on the shared
	 * condition variable.
	 */
	sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_shrd_cv));
	if (LOCK_CLASS(&sx->lock_object) == &lock_class_sx &&
	    sx->sx_shrd_wcnt > 0)
		goto ok;

	/* Doesn't seem to be an sx lock. */
	return (0);

ok:
	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	if (sx->sx_cnt >= 0) {
		db_printf("SLOCK (count %d)\n", sx->sx_cnt);
		*ownerp = NULL;
	} else {
		db_printf("XLOCK\n");
		*ownerp = sx->sx_xholder;
	}
	return (1);
}
#endif