/*-
 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation assures deterministic lock
 * granting behavior, so that slocks and xlocks are interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
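
/*
 * Typical consumer usage, as a minimal sketch (the lock name and the calls
 * around it are hypothetical; the sx_*() macros wrapping the _sx_*()
 * functions below are declared in sys/sx.h):
 *
 *	struct sx foo_lock;
 *
 *	sx_init(&foo_lock, "foo data");
 *
 *	sx_slock(&foo_lock);		(read-only access to the data)
 *	sx_sunlock(&foo_lock);
 *
 *	sx_xlock(&foo_lock);		(read/write access to the data)
 *	sx_xunlock(&foo_lock);
 *
 *	sx_destroy(&foo_lock);
 */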

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_sx.c 167054 2007-02-27 06:42:05Z kmacy $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/lock_profile.h>

#ifdef DDB
#include <ddb/ddb.h>

static void	db_show_sx(struct lock_object *lock);
#endif

struct lock_class lock_class_sx = {
	"sx",
	LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
	db_show_sx
#endif
};

#ifndef INVARIANTS
#define	_sx_assert(sx, what, file, line)
#endif

void
sx_sysinit(void *arg)
{
	struct sx_args *sargs = arg;

	sx_init(sargs->sa_sx, sargs->sa_desc);
}

void
sx_init(struct sx *sx, const char *description)
{

	sx->sx_lock = mtx_pool_find(mtxpool_lockbuilder, sx);
	sx->sx_cnt = 0;
	cv_init(&sx->sx_shrd_cv, description);
	sx->sx_shrd_wcnt = 0;
	cv_init(&sx->sx_excl_cv, description);
	sx->sx_excl_wcnt = 0;
	sx->sx_xholder = NULL;
	lock_profile_object_init(&sx->sx_object, &lock_class_sx, description);
	lock_init(&sx->sx_object, &lock_class_sx, description, NULL,
	    LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
}

void
sx_destroy(struct sx *sx)
{

	KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
	    0), ("%s (%s): holders or waiters\n", __func__,
	    sx->sx_object.lo_name));

	sx->sx_lock = NULL;
	cv_destroy(&sx->sx_shrd_cv);
	cv_destroy(&sx->sx_excl_cv);

	lock_profile_object_destroy(&sx->sx_object);
	lock_destroy(&sx->sx_object);
}

void
_sx_slock(struct sx *sx, const char *file, int line)
{
	uint64_t waittime = 0;
	int contested = 0;

	mtx_lock(sx->sx_lock);
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
	    sx->sx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER, file, line);

	/*
	 * Loop in case we lose the race for lock acquisition.
	 */
	while (sx->sx_cnt < 0) {
		sx->sx_shrd_wcnt++;
		lock_profile_obtain_lock_failed(&sx->sx_object, &contested,
		    &waittime);
		cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
		sx->sx_shrd_wcnt--;
	}

	/* Acquire a shared lock. */
	sx->sx_cnt++;

	if (sx->sx_cnt == 1)
		lock_profile_obtain_lock_success(&sx->sx_object, contested,
		    waittime, file, line);

	LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, 0, file, line);
	curthread->td_locks++;

	mtx_unlock(sx->sx_lock);
}

int
_sx_try_slock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt >= 0) {
		sx->sx_cnt++;
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
		curthread->td_locks++;
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

void
_sx_xlock(struct sx *sx, const char *file, int line)
{
	int contested = 0;
	uint64_t waittime = 0;

	mtx_lock(sx->sx_lock);

	/*
	 * With sx locks, we're absolutely not permitted to recurse on
	 * xlocks, as it is fatal (deadlock). Normally, recursion is handled
	 * by WITNESS, but as it is not semantically correct to hold the
	 * xlock while in here, we consider it API abuse and put it under
	 * INVARIANTS.
	 */
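	/*
	 * For illustration only (a hypothetical caller, not code in this
	 * file): a thread that did
	 *
	 *	sx_xlock(&foo_lock);
	 *	sx_xlock(&foo_lock);
	 *
	 * would sleep on sx_excl_cv in the second call and never wake up,
	 * since sx_cnt cannot return to 0 while the owner is asleep here.
	 */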
	KASSERT(sx->sx_xholder != curthread,
	    ("%s (%s): xlock already held @ %s:%d", __func__,
	    sx->sx_object.lo_name, file, line));
	WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
	    line);

	/* Loop in case we lose the race for lock acquisition. */
	while (sx->sx_cnt != 0) {
		sx->sx_excl_wcnt++;
		lock_profile_obtain_lock_failed(&sx->sx_object, &contested,
		    &waittime);
		cv_wait(&sx->sx_excl_cv, sx->sx_lock);
		sx->sx_excl_wcnt--;
	}

	MPASS(sx->sx_cnt == 0);

	/* Acquire an exclusive lock. */
	sx->sx_cnt--;
	sx->sx_xholder = curthread;

	lock_profile_obtain_lock_success(&sx->sx_object, contested, waittime,
	    file, line);
	LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
	WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
	curthread->td_locks++;

	mtx_unlock(sx->sx_lock);
}

int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{

	mtx_lock(sx->sx_lock);
	if (sx->sx_cnt == 0) {
		sx->sx_cnt--;
		sx->sx_xholder = curthread;
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
		WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
		    line);
		curthread->td_locks++;
		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{
	struct lock_object lo;
	int count = -1;

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->sx_object, 0, file, line);

	/* Release. */
	sx->sx_cnt--;

#ifdef LOCK_PROFILING
	if (sx->sx_cnt == 0) {
		memcpy(&lo, &sx->sx_object, sizeof(lo));
		sx->sx_object.lo_flags &= ~LO_CONTESTED;
		count = 0;
	}
#endif
	/*
	 * If we just released the last shared lock, wake any waiters up,
	 * giving exclusive lockers precedence.  In order to make sure that
	 * exclusive lockers won't be blocked forever, don't wake shared lock
	 * waiters if there are exclusive lock waiters.
	 */
	if (sx->sx_excl_wcnt > 0) {
		if (sx->sx_cnt == 0)
			cv_signal(&sx->sx_excl_cv);
	} else if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
	if (count == 0)
		lock_profile_release_lock(&lo);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{
	struct lock_object lo;

	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);

	/* Release. */
	sx->sx_cnt++;
	sx->sx_xholder = NULL;

#ifdef LOCK_PROFILING
	memcpy(&lo, &sx->sx_object, sizeof(lo));
	sx->sx_object.lo_flags &= ~LO_CONTESTED;
#endif
	/*
	 * Wake up waiters if there are any.  Give precedence to slock waiters.
	 */
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);
	else if (sx->sx_excl_wcnt > 0)
		cv_signal(&sx->sx_excl_cv);

	LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
	lock_profile_release_lock(&lo);
}

int
_sx_try_upgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_SLOCKED, file, line);
	mtx_lock(sx->sx_lock);

	if (sx->sx_cnt == 1) {
		sx->sx_cnt = -1;
		sx->sx_xholder = curthread;

		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
		WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
		    file, line);

		mtx_unlock(sx->sx_lock);
		return (1);
	} else {
		LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
		mtx_unlock(sx->sx_lock);
		return (0);
	}
}

void
_sx_downgrade(struct sx *sx, const char *file, int line)
{

	_sx_assert(sx, SX_XLOCKED, file, line);
	mtx_lock(sx->sx_lock);
	MPASS(sx->sx_cnt == -1);

	WITNESS_DOWNGRADE(&sx->sx_object, 0, file, line);

	sx->sx_cnt = 1;
	sx->sx_xholder = NULL;
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	LOCK_LOG_LOCK("XDOWNGRADE", &sx->sx_object, 0, 0, file, line);

	mtx_unlock(sx->sx_lock);
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
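/*
 * Example (a hypothetical caller; the lock name is made up): a routine that
 * must be entered with the lock held exclusively would begin with
 *
 *	sx_assert(&foo_lock, SX_XLOCKED);
 *
 * which expands to a call to this function when INVARIANTS is defined and
 * to nothing otherwise.
 */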
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{

	if (panicstr != NULL)
		return;
	switch (what) {
	case SX_LOCKED:
	case SX_SLOCKED:
#ifdef WITNESS
		witness_assert(&sx->sx_object, what, file, line);
#else
		mtx_lock(sx->sx_lock);
		if (sx->sx_cnt <= 0 &&
		    (what == SX_SLOCKED || sx->sx_xholder != curthread))
			panic("Lock %s not %slocked @ %s:%d\n",
			    sx->sx_object.lo_name, (what == SX_SLOCKED) ?
			    "share " : "", file, line);
		mtx_unlock(sx->sx_lock);
#endif
		break;
	case SX_XLOCKED:
		mtx_lock(sx->sx_lock);
		if (sx->sx_xholder != curthread)
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    sx->sx_object.lo_name, file, line);
		mtx_unlock(sx->sx_lock);
		break;
	case SX_UNLOCKED:
#ifdef WITNESS
		witness_assert(&sx->sx_object, what, file, line);
#else
		/*
		 * We can only check for the exclusive lock here; we cannot
		 * assert that *this* thread does not hold an slock.
		 */
		mtx_lock(sx->sx_lock);
		if (sx->sx_xholder == curthread)
			panic("Lock %s exclusively locked @ %s:%d\n",
			    sx->sx_object.lo_name, file, line);
		mtx_unlock(sx->sx_lock);
#endif
		break;
	default:
		panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
		    line);
	}
}
#endif	/* INVARIANT_SUPPORT */

#ifdef DDB
void
db_show_sx(struct lock_object *lock)
{
	struct thread *td;
	struct sx *sx;

	sx = (struct sx *)lock;

	db_printf(" state: ");
	if (sx->sx_cnt < 0) {
		td = sx->sx_xholder;
		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
		    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
	} else if (sx->sx_cnt > 0)
		db_printf("SLOCK: %d locks\n", sx->sx_cnt);
	else
		db_printf("UNLOCKED\n");
	db_printf(" waiters: %d shared, %d exclusive\n", sx->sx_shrd_wcnt,
	    sx->sx_excl_wcnt);
}

/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
	struct sx *sx;
	struct cv *cv;

	/*
	 * First, see if it looks like td is blocked on a condition
	 * variable.
	 */
	cv = td->td_wchan;
	if (cv->cv_description != td->td_wmesg)
		return (0);

	/*
	 * Ok, see if it looks like td is blocked on the exclusive
	 * condition variable.
	 */
	sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_excl_cv));
	if (LOCK_CLASS(&sx->sx_object) == &lock_class_sx &&
	    sx->sx_excl_wcnt > 0)
		goto ok;

	/*
	 * Second, see if it looks like td is blocked on the shared
	 * condition variable.
	 */
	sx = (struct sx *)((char *)cv - offsetof(struct sx, sx_shrd_cv));
	if (LOCK_CLASS(&sx->sx_object) == &lock_class_sx &&
	    sx->sx_shrd_wcnt > 0)
		goto ok;

	/* Doesn't seem to be an sx lock. */
	return (0);

ok:
	/* We think we have an sx lock, so output some details. */
	db_printf("blocked on sx \"%s\" ", td->td_wmesg);
	if (sx->sx_cnt >= 0) {
		db_printf("SLOCK (count %d)\n", sx->sx_cnt);
		*ownerp = NULL;
	} else {
		db_printf("XLOCK\n");
		*ownerp = sx->sx_xholder;
	}
	return (1);
}
#endif