kern_sx.c: diff of revisions 154484 and 160771 (full view)
Full listing of sys/kern/kern_sx.c at revision 160771. Lines marked "+" were added in 160771; the line marked "-" is the revision 154484 line it replaces.
1/*-
2 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice(s), this list of conditions and the following disclaimer as
9 * the first lines of this file unmodified other than the possible
10 * addition of one or more copyright notices.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice(s), this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
19 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
25 * DAMAGE.
26 */
27
28/*
29 * Shared/exclusive locks. This implementation assures deterministic lock
30 * granting behavior, so that slocks and xlocks are interleaved.
31 *
32 * Priority propagation will not generally raise the priority of lock holders,
33 * so should not be relied upon in combination with sx locks.
34 */
35
36#include <sys/cdefs.h>
37 -__FBSDID("$FreeBSD: head/sys/kern/kern_sx.c 154484 2006-01-17 16:55:17Z jhb $");
37 +__FBSDID("$FreeBSD: head/sys/kern/kern_sx.c 160771 2006-07-27 21:45:55Z jhb $");
38
39#include "opt_ddb.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/ktr.h>
44#include <sys/linker_set.h>
45#include <sys/condvar.h>
46#include <sys/lock.h>
47#include <sys/mutex.h>
48#include <sys/proc.h>
49#include <sys/sx.h>
50
51#include <ddb/ddb.h>
52
53#ifdef DDB
54static void db_show_sx(struct lock_object *lock);
55#endif
56
57struct lock_class lock_class_sx = {
58 "sx",
59 LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
60#ifdef DDB
61 db_show_sx
62#endif
63};
64
65#ifndef INVARIANTS
66#define _sx_assert(sx, what, file, line)
67#endif
68
69void
70sx_sysinit(void *arg)
71{
72 struct sx_args *sargs = arg;
73
74 sx_init(sargs->sa_sx, sargs->sa_desc);
75}
76
77void
78sx_init(struct sx *sx, const char *description)
79{
80
81 sx->sx_lock = mtx_pool_find(mtxpool_lockbuilder, sx);
82 sx->sx_cnt = 0;
83 cv_init(&sx->sx_shrd_cv, description);
84 sx->sx_shrd_wcnt = 0;
85 cv_init(&sx->sx_excl_cv, description);
86 sx->sx_excl_wcnt = 0;
87 sx->sx_xholder = NULL;
88 lock_init(&sx->sx_object, &lock_class_sx, description, NULL,
89 LO_WITNESS | LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE);
90}
91
92void
93sx_destroy(struct sx *sx)
94{
95
96 KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
97 0), ("%s (%s): holders or waiters\n", __func__,
98 sx->sx_object.lo_name));
99
100 sx->sx_lock = NULL;
101 cv_destroy(&sx->sx_shrd_cv);
102 cv_destroy(&sx->sx_excl_cv);
103
104 lock_destroy(&sx->sx_object);
105}
106
107void
108_sx_slock(struct sx *sx, const char *file, int line)
109{
110
111 mtx_lock(sx->sx_lock);
112 KASSERT(sx->sx_xholder != curthread,
113 ("%s (%s): slock while xlock is held @ %s:%d\n", __func__,
114 sx->sx_object.lo_name, file, line));
115 WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER, file, line);
116
117 /*
118 * Loop in case we lose the race for lock acquisition.
119 */
120 while (sx->sx_cnt < 0) {
121 sx->sx_shrd_wcnt++;
122 cv_wait(&sx->sx_shrd_cv, sx->sx_lock);
123 sx->sx_shrd_wcnt--;
124 }
125
126 /* Acquire a shared lock. */
127 sx->sx_cnt++;
128
129 LOCK_LOG_LOCK("SLOCK", &sx->sx_object, 0, 0, file, line);
130 WITNESS_LOCK(&sx->sx_object, 0, file, line);
131 +	curthread->td_locks++;
132
133 mtx_unlock(sx->sx_lock);
134}
135
136int
137_sx_try_slock(struct sx *sx, const char *file, int line)
138{
139
140 mtx_lock(sx->sx_lock);
141 if (sx->sx_cnt >= 0) {
142 sx->sx_cnt++;
143 LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 1, file, line);
144 WITNESS_LOCK(&sx->sx_object, LOP_TRYLOCK, file, line);
145 +	curthread->td_locks++;
146 mtx_unlock(sx->sx_lock);
147 return (1);
148 } else {
149 LOCK_LOG_TRY("SLOCK", &sx->sx_object, 0, 0, file, line);
150 mtx_unlock(sx->sx_lock);
151 return (0);
152 }
153}
154
155void
156_sx_xlock(struct sx *sx, const char *file, int line)
157{
158
159 mtx_lock(sx->sx_lock);
160
161 /*
162 * With sx locks, we're absolutely not permitted to recurse on
163 * xlocks, as it is fatal (deadlock). Normally, recursion is handled
164 * by WITNESS, but as it is not semantically correct to hold the
165 * xlock while in here, we consider it API abuse and put it under
166 * INVARIANTS.
167 */
168 KASSERT(sx->sx_xholder != curthread,
169 ("%s (%s): xlock already held @ %s:%d", __func__,
170 sx->sx_object.lo_name, file, line));
171 WITNESS_CHECKORDER(&sx->sx_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
172 line);
173
174 /* Loop in case we lose the race for lock acquisition. */
175 while (sx->sx_cnt != 0) {
176 sx->sx_excl_wcnt++;
177 cv_wait(&sx->sx_excl_cv, sx->sx_lock);
178 sx->sx_excl_wcnt--;
179 }
180
181 MPASS(sx->sx_cnt == 0);
182
183 /* Acquire an exclusive lock. */
184 sx->sx_cnt--;
185 sx->sx_xholder = curthread;
186
187 LOCK_LOG_LOCK("XLOCK", &sx->sx_object, 0, 0, file, line);
188 WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
189 +	curthread->td_locks++;
190
191 mtx_unlock(sx->sx_lock);
192}
193
194int
195_sx_try_xlock(struct sx *sx, const char *file, int line)
196{
197
198 mtx_lock(sx->sx_lock);
199 if (sx->sx_cnt == 0) {
200 sx->sx_cnt--;
201 sx->sx_xholder = curthread;
202 LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 1, file, line);
203 WITNESS_LOCK(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK, file,
204 line);
205 +	curthread->td_locks++;
206 mtx_unlock(sx->sx_lock);
207 return (1);
208 } else {
209 LOCK_LOG_TRY("XLOCK", &sx->sx_object, 0, 0, file, line);
210 mtx_unlock(sx->sx_lock);
211 return (0);
212 }
213}
214
215void
216_sx_sunlock(struct sx *sx, const char *file, int line)
217{
218
219 _sx_assert(sx, SX_SLOCKED, file, line);
220 mtx_lock(sx->sx_lock);
221
222 +	curthread->td_locks--;
223 WITNESS_UNLOCK(&sx->sx_object, 0, file, line);
224
225 /* Release. */
226 sx->sx_cnt--;
227
228 /*
229 * If we just released the last shared lock, wake any waiters up, giving
230 * exclusive lockers precedence. In order to make sure that exclusive
231 * lockers won't be blocked forever, don't wake shared lock waiters if
232 * there are exclusive lock waiters.
233 */
234 if (sx->sx_excl_wcnt > 0) {
235 if (sx->sx_cnt == 0)
236 cv_signal(&sx->sx_excl_cv);
237 } else if (sx->sx_shrd_wcnt > 0)
238 cv_broadcast(&sx->sx_shrd_cv);
239
240 LOCK_LOG_LOCK("SUNLOCK", &sx->sx_object, 0, 0, file, line);
241
242 mtx_unlock(sx->sx_lock);
243}
244
245void
246_sx_xunlock(struct sx *sx, const char *file, int line)
247{
248
249 _sx_assert(sx, SX_XLOCKED, file, line);
250 mtx_lock(sx->sx_lock);
251 MPASS(sx->sx_cnt == -1);
252
253 +	curthread->td_locks--;
254 WITNESS_UNLOCK(&sx->sx_object, LOP_EXCLUSIVE, file, line);
255
256 /* Release. */
257 sx->sx_cnt++;
258 sx->sx_xholder = NULL;
259
260 /*
261 * Wake up waiters if there are any. Give precedence to slock waiters.
262 */
263 if (sx->sx_shrd_wcnt > 0)
264 cv_broadcast(&sx->sx_shrd_cv);
265 else if (sx->sx_excl_wcnt > 0)
266 cv_signal(&sx->sx_excl_cv);
267
268 LOCK_LOG_LOCK("XUNLOCK", &sx->sx_object, 0, 0, file, line);
269
270 mtx_unlock(sx->sx_lock);
271}
272
273int
274_sx_try_upgrade(struct sx *sx, const char *file, int line)
275{
276
277 _sx_assert(sx, SX_SLOCKED, file, line);
278 mtx_lock(sx->sx_lock);
279
280 if (sx->sx_cnt == 1) {
281 sx->sx_cnt = -1;
282 sx->sx_xholder = curthread;
283
284 LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 1, file, line);
285 WITNESS_UPGRADE(&sx->sx_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
286 file, line);
287
288 mtx_unlock(sx->sx_lock);
289 return (1);
290 } else {
291 LOCK_LOG_TRY("XUPGRADE", &sx->sx_object, 0, 0, file, line);
292 mtx_unlock(sx->sx_lock);
293 return (0);
294 }
295}
296
297void
298_sx_downgrade(struct sx *sx, const char *file, int line)
299{
300
301 _sx_assert(sx, SX_XLOCKED, file, line);
302 mtx_lock(sx->sx_lock);
303 MPASS(sx->sx_cnt == -1);
304
305 WITNESS_DOWNGRADE(&sx->sx_object, 0, file, line);
306
307 sx->sx_cnt = 1;
308 sx->sx_xholder = NULL;
309 if (sx->sx_shrd_wcnt > 0)
310 cv_broadcast(&sx->sx_shrd_cv);
311
312 LOCK_LOG_LOCK("XDOWNGRADE", &sx->sx_object, 0, 0, file, line);
313
314 mtx_unlock(sx->sx_lock);
315}
316
317#ifdef INVARIANT_SUPPORT
318#ifndef INVARIANTS
319#undef _sx_assert
320#endif
321
322/*
323 * In the non-WITNESS case, sx_assert() can only detect that at least
324 * *some* thread owns an slock, but it cannot guarantee that *this*
325 * thread owns an slock.
326 */
327void
328_sx_assert(struct sx *sx, int what, const char *file, int line)
329{
330
331 if (panicstr != NULL)
332 return;
333 switch (what) {
334 case SX_LOCKED:
335 case SX_SLOCKED:
336#ifdef WITNESS
337 witness_assert(&sx->sx_object, what, file, line);
338#else
339 mtx_lock(sx->sx_lock);
340 if (sx->sx_cnt <= 0 &&
341 (what == SX_SLOCKED || sx->sx_xholder != curthread))
342 panic("Lock %s not %slocked @ %s:%d\n",
343 sx->sx_object.lo_name, (what == SX_SLOCKED) ?
344 "share " : "", file, line);
345 mtx_unlock(sx->sx_lock);
346#endif
347 break;
348 case SX_XLOCKED:
349 mtx_lock(sx->sx_lock);
350 if (sx->sx_xholder != curthread)
351 panic("Lock %s not exclusively locked @ %s:%d\n",
352 sx->sx_object.lo_name, file, line);
353 mtx_unlock(sx->sx_lock);
354 break;
355 case SX_UNLOCKED:
356#ifdef WITNESS
357 witness_assert(&sx->sx_object, what, file, line);
358#else
 359		 * We can only check for an exclusive lock here; we cannot
 360		 * assert that *this* thread owns an slock.
361 * we cannot assert that *this* thread owns slock.
362 */
363 mtx_lock(sx->sx_lock);
364 if (sx->sx_xholder == curthread)
365 panic("Lock %s exclusively locked @ %s:%d\n",
366 sx->sx_object.lo_name, file, line);
367 mtx_unlock(sx->sx_lock);
368#endif
369 break;
370 default:
371 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
372 line);
373 }
374}
375#endif /* INVARIANT_SUPPORT */
376
377#ifdef DDB
378void
379db_show_sx(struct lock_object *lock)
380{
381 struct thread *td;
382 struct sx *sx;
383
384 sx = (struct sx *)lock;
385
386 db_printf(" state: ");
387 if (sx->sx_cnt < 0) {
388 td = sx->sx_xholder;
389 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
390 td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
391 } else if (sx->sx_cnt > 0)
392 db_printf("SLOCK: %d locks\n", sx->sx_cnt);
393 else
394 db_printf("UNLOCKED\n");
395 db_printf(" waiters: %d shared, %d exclusive\n", sx->sx_shrd_wcnt,
396 sx->sx_excl_wcnt);
397}
398#endif
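
Usage sketch (illustration only, not part of kern_sx.c): the fragment below shows how kernel code typically drives the interface implemented above, assuming the sx(9) wrapper macros from <sys/sx.h> (sx_init(), sx_slock(), sx_xlock(), sx_sunlock(), sx_xunlock(), sx_try_upgrade(), sx_downgrade(), sx_assert(), SX_SYSINIT()), which expand to the _sx_*() functions above with file and line information supplied. The example_lock, example_value and example_*() names are hypothetical.

/* Illustrative only: a hypothetical consumer of the sx(9) API above. */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx example_lock;
static int example_value;

/* Initialize the lock at boot time via sx_sysinit()/sx_init(). */
SX_SYSINIT(example_lock_init, &example_lock, "example lock");

/* Readers share the lock; any number may hold it at once. */
static int
example_read(void)
{
	int v;

	sx_slock(&example_lock);
	v = example_value;
	sx_sunlock(&example_lock);
	return (v);
}

/* Writers take the exclusive lock; sleeping while holding it is allowed. */
static void
example_write(int v)
{

	sx_xlock(&example_lock);
	sx_assert(&example_lock, SX_XLOCKED);
	example_value = v;
	sx_xunlock(&example_lock);
}

/*
 * A reader that decides it must modify the data may try to upgrade.
 * If the upgrade fails (other shared holders exist), it has to drop
 * the slock and reacquire exclusively; as the comments above note,
 * recursing on the xlock would deadlock.
 */
static void
example_update(int v)
{

	sx_slock(&example_lock);
	if (example_value != v) {
		if (!sx_try_upgrade(&example_lock)) {
			sx_sunlock(&example_lock);
			sx_xlock(&example_lock);
		}
		example_value = v;
		/* Let readers back in as soon as possible. */
		sx_downgrade(&example_lock);
	}
	sx_sunlock(&example_lock);
}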