1/*-
2 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
3 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice(s), this list of conditions and the following disclaimer as
11 * the first lines of this file unmodified other than the possible
12 * addition of one or more copyright notices.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice(s), this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
18 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
20 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
21 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
23 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
27 * DAMAGE.
28 */
29
30/*
31 * Shared/exclusive locks. This implementation attempts to ensure
32 * deterministic lock granting behavior, so that slocks and xlocks are
33 * interleaved.
34 *
35 * Priority propagation will not generally raise the priority of lock holders,
36 * so should not be relied upon in combination with sx locks.
37 */
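
A minimal consumer-side sketch of the sx API implemented in this file may help orient the reader. It uses only entry points that appear in this file (sx_init(), sx_slock()/sx_sunlock(), sx_xlock()/sx_xunlock(), sx_destroy()); the lock and the counter it protects are hypothetical names, not part of kern_sx.c.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx example_lock;		/* hypothetical lock */
static int example_value;		/* hypothetical data it protects */

static void
example_setup(void)
{

	sx_init(&example_lock, "example");
}

static int
example_read(void)
{
	int v;

	sx_slock(&example_lock);	/* shared: concurrent readers allowed */
	v = example_value;
	sx_sunlock(&example_lock);
	return (v);
}

static void
example_write(int v)
{

	sx_xlock(&example_lock);	/* exclusive: single writer */
	example_value = v;
	sx_xunlock(&example_lock);
}

static void
example_teardown(void)
{

	sx_destroy(&example_lock);	/* lock must be unheld here */
}
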
38
39#include "opt_adaptive_sx.h"
40#include "opt_ddb.h"
41
42#include <sys/cdefs.h>
43__FBSDID("$FreeBSD: head/sys/kern/kern_sx.c 169394 2007-05-08 21:51:37Z jhb $");
44
45#include <sys/param.h>
46#include <sys/ktr.h>
47#include <sys/lock.h>
48#include <sys/lock_profile.h>
49#include <sys/mutex.h>
50#include <sys/proc.h>
51#include <sys/sleepqueue.h>
52#include <sys/sx.h>
53#include <sys/systm.h>
54
55#ifdef ADAPTIVE_SX
56#include <machine/cpu.h>
57#endif
58
59#ifdef DDB
60#include <ddb/ddb.h>
61#endif
62
63#if !defined(SMP) && defined(ADAPTIVE_SX)
64#error "You must have SMP to enable the ADAPTIVE_SX option"
65#endif
66
67/* Handy macros for sleep queues. */
68#define SQ_EXCLUSIVE_QUEUE 0
69#define SQ_SHARED_QUEUE 1
70
71/*
72 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file. We
73 * drop Giant anytime we have to sleep or if we adaptively spin.
74 */
75#define GIANT_DECLARE \
76 int _giantcnt = 0; \
77 WITNESS_SAVE_DECL(Giant) \
78
79#define GIANT_SAVE() do { \
80 if (mtx_owned(&Giant)) { \
81 WITNESS_SAVE(&Giant.lock_object, Giant); \
82 while (mtx_owned(&Giant)) { \
83 _giantcnt++; \
84 mtx_unlock(&Giant); \
85 } \
86 } \
87} while (0)
88
89#define GIANT_RESTORE() do { \
90 if (_giantcnt > 0) { \
91 mtx_assert(&Giant, MA_NOTOWNED); \
92 while (_giantcnt--) \
93 mtx_lock(&Giant); \
94 WITNESS_RESTORE(&Giant.lock_object, Giant); \
95 } \
96} while (0)
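
The three Giant macros above are always used together in the hard-path functions later in this file; a condensed sketch of that pattern, assuming nothing beyond the macros themselves and cpu_spinwait()/sleepq_wait() as used below:

static void
giant_drop_sketch(void)
{
	GIANT_DECLARE;		/* _giantcnt counter + WITNESS save slot */

	/* ... decide that we are about to spin or sleep ... */
	GIANT_SAVE();		/* fully release Giant, even if recursed */
	/* ... cpu_spinwait() loop or sleepq_wait() goes here ... */
	GIANT_RESTORE();	/* reacquire Giant to its saved depth */
}
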
97
98/*
 99 * Returns true if an exclusive lock is recursed.  It assumes
 100 * curthread currently has an exclusive lock.
101 */
102#define sx_recursed(sx) ((sx)->sx_recurse != 0)
103
104/*
105 * Return a pointer to the owning thread if the lock is exclusively
106 * locked.
107 */
108#define sx_xholder(sx) \
109 ((sx)->sx_lock & SX_LOCK_SHARED ? NULL : \
110 (struct thread *)SX_OWNER((sx)->sx_lock))
111
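
The sx_lock word is interpreted throughout this file with macros from sys/sx.h: SX_LOCK_SHARED marks a shared hold whose count is read with SX_SHARERS(), otherwise the word carries the owning thread pointer read with SX_OWNER(), and SX_LOCK_SHARED_WAITERS, SX_LOCK_EXCLUSIVE_WAITERS and SX_LOCK_RECURSED are flag bits. A decoding sketch along the lines of db_show_sx() near the end of this file, using only those macros (the exact bit layout lives in sys/sx.h, not here):

static void
sx_describe_sketch(struct sx *sx)
{
	uintptr_t x;

	x = sx->sx_lock;	/* unlocked snapshot; may be stale */
	if (x == SX_LOCK_UNLOCKED)
		printf("unlocked");
	else if (x & SX_LOCK_SHARED)
		printf("shared, %ju holder(s)", (uintmax_t)SX_SHARERS(x));
	else
		printf("exclusive, owner %p", (void *)SX_OWNER(x));
	if (x & SX_LOCK_SHARED_WAITERS)
		printf(", shared waiters");
	if (x & SX_LOCK_EXCLUSIVE_WAITERS)
		printf(", exclusive waiters");
	printf("\n");
}
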
112#ifdef DDB
113static void db_show_sx(struct lock_object *lock);
114#endif
115static void lock_sx(struct lock_object *lock, int how);
116static int unlock_sx(struct lock_object *lock);
117
118struct lock_class lock_class_sx = {
119 .lc_name = "sx",
120 .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
121#ifdef DDB
122 .lc_ddb_show = db_show_sx,
123#endif
124 .lc_lock = lock_sx,
125 .lc_unlock = unlock_sx,
126};
127
128#ifndef INVARIANTS
129#define _sx_assert(sx, what, file, line)
130#endif
131
132void
133lock_sx(struct lock_object *lock, int how)
134{
135 struct sx *sx;
136
137 sx = (struct sx *)lock;
138 if (how)
139 sx_xlock(sx);
140 else
141 sx_slock(sx);
142}
143
144int
145unlock_sx(struct lock_object *lock)
146{
147 struct sx *sx;
148
149 sx = (struct sx *)lock;
150 sx_assert(sx, SX_LOCKED | SX_NOTRECURSED);
151 if (sx_xlocked(sx)) {
152 sx_xunlock(sx);
153 return (1);
154 } else {
155 sx_sunlock(sx);
156 return (0);
157 }
158}
159
160void
161sx_sysinit(void *arg)
162{
163 struct sx_args *sargs = arg;
164
165 sx_init(sargs->sa_sx, sargs->sa_desc);
166}
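
sx_sysinit() is written as a boot-time callback taking a struct sx_args (sa_sx, sa_desc). A sketch of how a caller might register a lock for initialization at boot; sys/sx.h presumably wraps this in a convenience macro, and the SYSINIT() registration with SI_SUB_LOCK/SI_ORDER_MIDDLE is an assumption about the usual ordering, not something shown in this file:

#include <sys/kernel.h>

static struct sx boot_lock;			/* hypothetical lock */
static struct sx_args boot_lock_args = {
	.sa_sx = &boot_lock,
	.sa_desc = "boot_lock",
};
SYSINIT(boot_lock_sx, SI_SUB_LOCK, SI_ORDER_MIDDLE, sx_sysinit,
    &boot_lock_args);
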
167
168void
169sx_init_flags(struct sx *sx, const char *description, int opts)
170{
171 int flags;
172
173 flags = LO_SLEEPABLE | LO_UPGRADABLE | LO_RECURSABLE;
174 if (opts & SX_DUPOK)
175 flags |= LO_DUPOK;
176 if (opts & SX_NOPROFILE)
177 flags |= LO_NOPROFILE;
178 if (!(opts & SX_NOWITNESS))
179 flags |= LO_WITNESS;
180 if (opts & SX_QUIET)
181 flags |= LO_QUIET;
182
183 flags |= opts & SX_ADAPTIVESPIN;
184 sx->sx_lock = SX_LOCK_UNLOCKED;
185 sx->sx_recurse = 0;
186 lock_profile_object_init(&sx->lock_object, &lock_class_sx, description);
187 lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
188}
189
190void
191sx_destroy(struct sx *sx)
192{
193
194 KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
195 KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
196 sx->sx_lock = SX_LOCK_DESTROYED;
197 lock_profile_object_destroy(&sx->lock_object);
198 lock_destroy(&sx->lock_object);
199}
200
201void
202_sx_slock(struct sx *sx, const char *file, int line)
203{
204
205 MPASS(curthread != NULL);
206 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
207 ("sx_slock() of destroyed sx @ %s:%d", file, line));
208 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line);
209 __sx_slock(sx, file, line);
210 LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
211 WITNESS_LOCK(&sx->lock_object, 0, file, line);
212 curthread->td_locks++;
213}
214
215int
216_sx_try_slock(struct sx *sx, const char *file, int line)
217{
218 uintptr_t x;
219
220 x = sx->sx_lock;
221 KASSERT(x != SX_LOCK_DESTROYED,
222 ("sx_try_slock() of destroyed sx @ %s:%d", file, line));
223 if ((x & SX_LOCK_SHARED) && atomic_cmpset_acq_ptr(&sx->sx_lock, x,
224 x + SX_ONE_SHARER)) {
225 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
226 WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
227 curthread->td_locks++;
228 return (1);
229 }
230
231 LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
232 return (0);
233}
234
235void
236_sx_xlock(struct sx *sx, const char *file, int line)
237{
238
239 MPASS(curthread != NULL);
240 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
241 ("sx_xlock() of destroyed sx @ %s:%d", file, line));
242 WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
243 line);
244 __sx_xlock(sx, curthread, file, line);
245 LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse, file, line);
246 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
247 curthread->td_locks++;
248}
249
250int
251_sx_try_xlock(struct sx *sx, const char *file, int line)
252{
253 int rval;
254
255 MPASS(curthread != NULL);
256 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
257 ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
258
259 if (sx_xlocked(sx)) {
260 sx->sx_recurse++;
261 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
262 rval = 1;
263 } else
264 rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
265 (uintptr_t)curthread);
266 LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
267 if (rval) {
268 WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
269 file, line);
270 curthread->td_locks++;
271 }
272
273 return (rval);
274}
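
The try variants return non-zero on success, so a caller that must not sleep typically probes and backs off. A sketch, assuming sx_try_xlock() and sx_xunlock() are the usual sys/sx.h wrappers around the functions in this file:

static int
example_try_update(struct sx *sx, int *valuep, int newval)
{

	if (!sx_try_xlock(sx))
		return (0);		/* busy: tell the caller to retry */
	*valuep = newval;		/* protected update */
	sx_xunlock(sx);
	return (1);
}
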
275
276void
277_sx_sunlock(struct sx *sx, const char *file, int line)
278{
279
280 MPASS(curthread != NULL);
281 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
282 ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
283 _sx_assert(sx, SX_SLOCKED, file, line);
284 curthread->td_locks--;
285 WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
286 LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
287 if (SX_SHARERS(sx->sx_lock) == 0)
288 lock_profile_release_lock(&sx->lock_object);
289 __sx_sunlock(sx, file, line);
290}
291
292void
293_sx_xunlock(struct sx *sx, const char *file, int line)
294{
295
296 MPASS(curthread != NULL);
297 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
298 ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
299 _sx_assert(sx, SX_XLOCKED, file, line);
300 curthread->td_locks--;
301 WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
302 LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
303 line);
304 if (!sx_recursed(sx))
305 lock_profile_release_lock(&sx->lock_object);
306 __sx_xunlock(sx, curthread, file, line);
307}
308
309/*
310 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
311 * This will only succeed if this thread holds a single shared lock.
 312 * Return 1 if the upgrade succeeded, 0 otherwise.
313 */
314int
315_sx_try_upgrade(struct sx *sx, const char *file, int line)
316{
317 uintptr_t x;
318 int success;
319
320 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
321 ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
322 _sx_assert(sx, SX_SLOCKED, file, line);
323
324 /*
325 * Try to switch from one shared lock to an exclusive lock. We need
326 * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
327 * we will wake up the exclusive waiters when we drop the lock.
328 */
329 x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
330 success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
331 (uintptr_t)curthread | x);
332 LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
333 if (success)
334 WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
335 file, line);
336 return (success);
337}
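
Because the upgrade can fail whenever another sharer or a competing writer is present, callers normally pair it with a drop-and-relock fallback that re-validates whatever was read, and may use sx_downgrade() (below) to return to shared mode afterwards. A sketch, assuming sx_slock(), sx_try_upgrade() and sx_downgrade() are the sys/sx.h wrappers for the functions defined here:

static void
example_upgrade(struct sx *sx, int *valuep)
{

	sx_slock(sx);
	if (*valuep != 0) {		/* common read-only case */
		sx_sunlock(sx);
		return;
	}
	if (!sx_try_upgrade(sx)) {
		/* Upgrade raced; drop, relock exclusively, re-validate. */
		sx_sunlock(sx);
		sx_xlock(sx);
		if (*valuep != 0) {
			sx_xunlock(sx);
			return;
		}
	}
	*valuep = 1;			/* exclusive on both paths here */
	sx_downgrade(sx);		/* back to shared for further reads */
	/* ... continue reading under the shared lock ... */
	sx_sunlock(sx);
}
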
338
339/*
340 * Downgrade an unrecursed exclusive lock into a single shared lock.
341 */
342void
343_sx_downgrade(struct sx *sx, const char *file, int line)
344{
345 uintptr_t x;
346
347 KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
348 ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
349 _sx_assert(sx, SX_XLOCKED | SX_NOTRECURSED, file, line);
350#ifndef INVARIANTS
351 if (sx_recursed(sx))
352 panic("downgrade of a recursed lock");
353#endif
354
355 WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);
356
357 /*
358 * Try to switch from an exclusive lock with no shared waiters
359 * to one sharer with no shared waiters. If there are
360 * exclusive waiters, we don't need to lock the sleep queue so
361 * long as we preserve the flag. We do one quick try and if
362 * that fails we grab the sleepq lock to keep the flags from
363 * changing and do it the slow way.
364 *
365 * We have to lock the sleep queue if there are shared waiters
366 * so we can wake them up.
367 */
368 x = sx->sx_lock;
369 if (!(x & SX_LOCK_SHARED_WAITERS) &&
370 atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
371 (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
372 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
373 return;
374 }
375
376 /*
377 * Lock the sleep queue so we can read the waiters bits
378 * without any races and wakeup any shared waiters.
379 */
380 sleepq_lock(&sx->lock_object);
381
382 /*
383 * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
384 * shared lock. If there are any shared waiters, wake them up.
385 */
386 x = sx->sx_lock;
387 atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
388 (x & SX_LOCK_EXCLUSIVE_WAITERS));
389 if (x & SX_LOCK_SHARED_WAITERS)
390 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
391 SQ_SHARED_QUEUE);
392 else
393 sleepq_release(&sx->lock_object);
394
395 LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
396}
397
398/*
399 * This function represents the so-called 'hard case' for sx_xlock
400 * operation. All 'easy case' failures are redirected to this. Note
401 * that ideally this would be a static function, but it needs to be
402 * accessible from at least sx.h.
403 */
404void
405_sx_xlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
406{
407 GIANT_DECLARE;
408#ifdef ADAPTIVE_SX
409 volatile struct thread *owner;
410#endif
411 uintptr_t x;
412 int contested = 0;
413 uint64_t waitstart = 0;
414
415 /* If we already hold an exclusive lock, then recurse. */
416 if (sx_xlocked(sx)) {
417 sx->sx_recurse++;
418 atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
419 if (LOCK_LOG_TEST(&sx->lock_object, 0))
420 CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
421 return;
422 }
423 lock_profile_obtain_lock_failed(&(sx)->lock_object,
424 &contested, &waitstart);
425
426 if (LOCK_LOG_TEST(&sx->lock_object, 0))
427 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
428 sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
429
430 while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
431#ifdef ADAPTIVE_SX
432 /*
433 * If the lock is write locked and the owner is
434 * running on another CPU, spin until the owner stops
435 * running or the state of the lock changes.
436 */
437 x = sx->sx_lock;
438 if (!(x & SX_LOCK_SHARED) &&
439 (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
440 x = SX_OWNER(x);
441 owner = (struct thread *)x;
442 if (TD_IS_RUNNING(owner)) {
443 if (LOCK_LOG_TEST(&sx->lock_object, 0))
444 CTR3(KTR_LOCK,
445 "%s: spinning on %p held by %p",
446 __func__, sx, owner);
447 GIANT_SAVE();
448 while (SX_OWNER(sx->sx_lock) == x &&
449 TD_IS_RUNNING(owner))
450 cpu_spinwait();
451 continue;
452 }
453 }
454#endif
455
456 sleepq_lock(&sx->lock_object);
457 x = sx->sx_lock;
458
459 /*
460 * If the lock was released while spinning on the
461 * sleep queue chain lock, try again.
462 */
463 if (x == SX_LOCK_UNLOCKED) {
464 sleepq_release(&sx->lock_object);
465 continue;
466 }
467
468#ifdef ADAPTIVE_SX
469 /*
470 * The current lock owner might have started executing
471 * on another CPU (or the lock could have changed
472 * owners) while we were waiting on the sleep queue
473 * chain lock. If so, drop the sleep queue lock and try
474 * again.
475 */
476 if (!(x & SX_LOCK_SHARED) &&
477 (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
478 owner = (struct thread *)SX_OWNER(x);
479 if (TD_IS_RUNNING(owner)) {
480 sleepq_release(&sx->lock_object);
481 continue;
482 }
483 }
484#endif
485
486 /*
487 * If an exclusive lock was released with both shared
488 * and exclusive waiters and a shared waiter hasn't
489 * woken up and acquired the lock yet, sx_lock will be
490 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
491 * If we see that value, try to acquire it once. Note
492 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
493 * as there are other exclusive waiters still. If we
494 * fail, restart the loop.
495 */
496 if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
497 if (atomic_cmpset_acq_ptr(&sx->sx_lock,
498 SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
499 tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
500 sleepq_release(&sx->lock_object);
501 CTR2(KTR_LOCK, "%s: %p claimed by new writer",
502 __func__, sx);
503 break;
504 }
505 sleepq_release(&sx->lock_object);
506 continue;
507 }
508
509 /*
 510 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we fail,
 511 * then loop back and retry.
512 */
513 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
514 if (!atomic_cmpset_ptr(&sx->sx_lock, x,
515 x | SX_LOCK_EXCLUSIVE_WAITERS)) {
516 sleepq_release(&sx->lock_object);
517 continue;
518 }
519 if (LOCK_LOG_TEST(&sx->lock_object, 0))
520 CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
521 __func__, sx);
522 }
523
524 /*
525 * Since we have been unable to acquire the exclusive
526 * lock and the exclusive waiters flag is set, we have
527 * to sleep.
528 */
529 if (LOCK_LOG_TEST(&sx->lock_object, 0))
530 CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
531 __func__, sx);
532
533 GIANT_SAVE();
534 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
535 SLEEPQ_SX, SQ_EXCLUSIVE_QUEUE);
536 sleepq_wait(&sx->lock_object);
537
538 if (LOCK_LOG_TEST(&sx->lock_object, 0))
539 CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
540 __func__, sx);
541 }
542
543 GIANT_RESTORE();
544 lock_profile_obtain_lock_success(&(sx)->lock_object, contested,
545 waitstart, file, line);
546}
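
The "easy case" itself lives in sys/sx.h and is not shown in this file. Judging from how this function is entered and from the cmpset it retries (SX_LOCK_UNLOCKED exchanged for the thread pointer), the fast path presumably reduces to something like the following sketch; this is an assumption about sx.h, not a quote of it:

/* ASSUMED sketch; the real fast path is a macro in <sys/sx.h>. */
#define	__sx_xlock_sketch(sx, td, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(td);				\
									\
	/* Uncontested case: swing the word from 0 to curthread. */	\
	if (!atomic_cmpset_acq_ptr(&(sx)->sx_lock, SX_LOCK_UNLOCKED,	\
	    _tid))							\
		_sx_xlock_hard((sx), _tid, (file), (line));		\
} while (0)
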
547
548/*
549 * This function represents the so-called 'hard case' for sx_xunlock
550 * operation. All 'easy case' failures are redirected to this. Note
551 * that ideally this would be a static function, but it needs to be
552 * accessible from at least sx.h.
553 */
554void
555_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
556{
557 uintptr_t x;
558 int queue;
559
560 MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
561
562 /* If the lock is recursed, then unrecurse one level. */
563 if (sx_xlocked(sx) && sx_recursed(sx)) {
564 if ((--sx->sx_recurse) == 0)
565 atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
566 if (LOCK_LOG_TEST(&sx->lock_object, 0))
567 CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
568 return;
569 }
570 MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
571 SX_LOCK_EXCLUSIVE_WAITERS));
572 if (LOCK_LOG_TEST(&sx->lock_object, 0))
573 CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);
574
575 sleepq_lock(&sx->lock_object);
576 x = SX_LOCK_UNLOCKED;
577
578 /*
579 * The wake up algorithm here is quite simple and probably not
580 * ideal. It gives precedence to shared waiters if they are
 581 * present.  In that case we have to preserve the state of the
 582 * exclusive waiters flag.
583 */
584 if (sx->sx_lock & SX_LOCK_SHARED_WAITERS) {
585 queue = SQ_SHARED_QUEUE;
586 x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
587 } else
588 queue = SQ_EXCLUSIVE_QUEUE;
589
590 /* Wake up all the waiters for the specific queue. */
591 if (LOCK_LOG_TEST(&sx->lock_object, 0))
592 CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
593 __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
594 "exclusive");
595 atomic_store_rel_ptr(&sx->sx_lock, x);
596 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1, queue);
597}
598
599/*
600 * This function represents the so-called 'hard case' for sx_slock
601 * operation. All 'easy case' failures are redirected to this. Note
602 * that ideally this would be a static function, but it needs to be
603 * accessible from at least sx.h.
604 */
605void
606_sx_slock_hard(struct sx *sx, const char *file, int line)
607{
608 GIANT_DECLARE;
609#ifdef ADAPTIVE_SX
610 volatile struct thread *owner;
611#endif
612 uintptr_t x;
613 uint64_t waitstart = 0;
614 int contested = 0;
615 /*
 616 * As with rwlocks, we make no attempt to block new shared
 617 * lock requests once there is an exclusive waiter.
618 */
619
620 for (;;) {
621 x = sx->sx_lock;
622
623 /*
624 * If no other thread has an exclusive lock then try to bump up
625 * the count of sharers. Since we have to preserve the state
626 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
627 * shared lock loop back and retry.
628 */
629 if (x & SX_LOCK_SHARED) {
630 MPASS(!(x & SX_LOCK_SHARED_WAITERS));
631 if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
632 x + SX_ONE_SHARER)) {
633 if (SX_SHARERS(x) == 0)
634 lock_profile_obtain_lock_success(
635 &sx->lock_object, contested,
636 waitstart, file, line);
637 if (LOCK_LOG_TEST(&sx->lock_object, 0))
638 CTR4(KTR_LOCK,
639 "%s: %p succeed %p -> %p", __func__,
640 sx, (void *)x,
641 (void *)(x + SX_ONE_SHARER));
642 break;
643 }
644 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
645 &waitstart);
646
647 continue;
648 }
649
650#ifdef ADAPTIVE_SX
651 /*
652 * If the owner is running on another CPU, spin until
653 * the owner stops running or the state of the lock
654 * changes.
655 */
656 else if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
657 x = SX_OWNER(x);
658 owner = (struct thread *)x;
659 if (TD_IS_RUNNING(owner)) {
660 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
661 &waitstart);
662 if (LOCK_LOG_TEST(&sx->lock_object, 0))
663 CTR3(KTR_LOCK,
664 "%s: spinning on %p held by %p",
665 __func__, sx, owner);
666 GIANT_SAVE();
667 while (SX_OWNER(sx->sx_lock) == x &&
668 TD_IS_RUNNING(owner))
669 cpu_spinwait();
670 continue;
671 }
672 }
673#endif
674 else
675 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
676 &waitstart);
677
678 /*
679 * Some other thread already has an exclusive lock, so
680 * start the process of blocking.
681 */
682 sleepq_lock(&sx->lock_object);
683 x = sx->sx_lock;
684
685 /*
686 * The lock could have been released while we spun.
687 * In this case loop back and retry.
688 */
689 if (x & SX_LOCK_SHARED) {
690 sleepq_release(&sx->lock_object);
691 continue;
692 }
693
694#ifdef ADAPTIVE_SX
695 /*
696 * If the owner is running on another CPU, spin until
697 * the owner stops running or the state of the lock
698 * changes.
699 */
700 if (!(x & SX_LOCK_SHARED) &&
701 (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
702 owner = (struct thread *)SX_OWNER(x);
703 if (TD_IS_RUNNING(owner)) {
704 sleepq_release(&sx->lock_object);
705 continue;
706 }
707 }
708#endif
709
710 /*
711 * Try to set the SX_LOCK_SHARED_WAITERS flag. If we
712 * fail to set it drop the sleep queue lock and loop
713 * back.
714 */
715 if (!(x & SX_LOCK_SHARED_WAITERS)) {
716 if (!atomic_cmpset_ptr(&sx->sx_lock, x,
717 x | SX_LOCK_SHARED_WAITERS)) {
718 sleepq_release(&sx->lock_object);
719 continue;
720 }
721 if (LOCK_LOG_TEST(&sx->lock_object, 0))
722 CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
723 __func__, sx);
724 }
725
726 /*
727 * Since we have been unable to acquire the shared lock,
728 * we have to sleep.
729 */
730 if (LOCK_LOG_TEST(&sx->lock_object, 0))
731 CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
732 __func__, sx);
733
734 GIANT_SAVE();
735 sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
736 SLEEPQ_SX, SQ_SHARED_QUEUE);
737 sleepq_wait(&sx->lock_object);
738
739 if (LOCK_LOG_TEST(&sx->lock_object, 0))
740 CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
741 __func__, sx);
742 }
743
744 GIANT_RESTORE();
745}
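
Symmetrically, the shared-lock fast path in sys/sx.h presumably mirrors _sx_try_slock() above: bump the sharer count with a single cmpset when SX_LOCK_SHARED is set, and fall into this function otherwise. A sketch under that assumption:

/* ASSUMED sketch; the real shared fast path lives in <sys/sx.h>. */
static __inline void
__sx_slock_sketch(struct sx *sx, const char *file, int line)
{
	uintptr_t x;

	x = sx->sx_lock;
	if (!(x & SX_LOCK_SHARED) ||
	    !atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER))
		_sx_slock_hard(sx, file, line);
}
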
746
747/*
748 * This function represents the so-called 'hard case' for sx_sunlock
749 * operation. All 'easy case' failures are redirected to this. Note
750 * that ideally this would be a static function, but it needs to be
751 * accessible from at least sx.h.
752 */
753void
754_sx_sunlock_hard(struct sx *sx, const char *file, int line)
755{
756 uintptr_t x;
757
758 for (;;) {
759 x = sx->sx_lock;
760
761 /*
 762 * We should never have waiting sharers while at least one
 763 * thread holds a shared lock.
764 */
765 KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
766 ("%s: waiting sharers", __func__));
767
768 /*
769 * See if there is more than one shared lock held. If
770 * so, just drop one and return.
771 */
772 if (SX_SHARERS(x) > 1) {
773 if (atomic_cmpset_ptr(&sx->sx_lock, x,
774 x - SX_ONE_SHARER)) {
775 if (LOCK_LOG_TEST(&sx->lock_object, 0))
776 CTR4(KTR_LOCK,
777 "%s: %p succeeded %p -> %p",
778 __func__, sx, (void *)x,
779 (void *)(x - SX_ONE_SHARER));
780 break;
781 }
782 continue;
783 }
784
785 /*
786 * If there aren't any waiters for an exclusive lock,
787 * then try to drop it quickly.
788 */
789 if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
790 MPASS(x == SX_SHARERS_LOCK(1));
791 if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1),
792 SX_LOCK_UNLOCKED)) {
793 lock_profile_release_lock(&sx->lock_object);
794 if (LOCK_LOG_TEST(&sx->lock_object, 0))
795 CTR2(KTR_LOCK, "%s: %p last succeeded",
796 __func__, sx);
797 break;
798 }
799 continue;
800 }
801
802 /*
803 * At this point, there should just be one sharer with
804 * exclusive waiters.
805 */
806 MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));
807
808 lock_profile_release_lock(&sx->lock_object);
809 sleepq_lock(&sx->lock_object);
810
811 /*
 812 * The wakeup semantics here are quite simple: just wake up
 813 * all the exclusive waiters.  Note that the state of the lock
 814 * could have changed, so if the cmpset fails, loop back and
 815 * retry.
816 */
817 if (!atomic_cmpset_ptr(&sx->sx_lock,
818 SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
819 SX_LOCK_UNLOCKED)) {
820 sleepq_release(&sx->lock_object);
821 continue;
822 }
823 if (LOCK_LOG_TEST(&sx->lock_object, 0))
 824 CTR2(KTR_LOCK, "%s: %p waking up all threads on"
 825 " exclusive queue", __func__, sx);
826 sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
827 SQ_EXCLUSIVE_QUEUE);
828 break;
829 }
830}
831
832#ifdef INVARIANT_SUPPORT
833#ifndef INVARIANTS
834#undef _sx_assert
835#endif
836
837/*
838 * In the non-WITNESS case, sx_assert() can only detect that at least
839 * *some* thread owns an slock, but it cannot guarantee that *this*
840 * thread owns an slock.
841 */
842void
843_sx_assert(struct sx *sx, int what, const char *file, int line)
844{
845#ifndef WITNESS
846 int slocked = 0;
847#endif
848
849 if (panicstr != NULL)
850 return;
851 switch (what) {
852 case SX_SLOCKED:
853 case SX_SLOCKED | SX_NOTRECURSED:
854 case SX_SLOCKED | SX_RECURSED:
855#ifndef WITNESS
856 slocked = 1;
857 /* FALLTHROUGH */
858#endif
859 case SX_LOCKED:
860 case SX_LOCKED | SX_NOTRECURSED:
861 case SX_LOCKED | SX_RECURSED:
862#ifdef WITNESS
863 witness_assert(&sx->lock_object, what, file, line);
864#else
865 /*
866 * If some other thread has an exclusive lock or we
867 * have one and are asserting a shared lock, fail.
868 * Also, if no one has a lock at all, fail.
869 */
870 if (sx->sx_lock == SX_LOCK_UNLOCKED ||
871 (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
872 sx_xholder(sx) != curthread)))
873 panic("Lock %s not %slocked @ %s:%d\n",
874 sx->lock_object.lo_name, slocked ? "share " : "",
875 file, line);
876
877 if (!(sx->sx_lock & SX_LOCK_SHARED)) {
878 if (sx_recursed(sx)) {
879 if (what & SX_NOTRECURSED)
880 panic("Lock %s recursed @ %s:%d\n",
881 sx->lock_object.lo_name, file,
882 line);
883 } else if (what & SX_RECURSED)
884 panic("Lock %s not recursed @ %s:%d\n",
885 sx->lock_object.lo_name, file, line);
886 }
887#endif
888 break;
889 case SX_XLOCKED:
890 case SX_XLOCKED | SX_NOTRECURSED:
891 case SX_XLOCKED | SX_RECURSED:
892 if (sx_xholder(sx) != curthread)
893 panic("Lock %s not exclusively locked @ %s:%d\n",
894 sx->lock_object.lo_name, file, line);
895 if (sx_recursed(sx)) {
896 if (what & SX_NOTRECURSED)
897 panic("Lock %s recursed @ %s:%d\n",
898 sx->lock_object.lo_name, file, line);
899 } else if (what & SX_RECURSED)
900 panic("Lock %s not recursed @ %s:%d\n",
901 sx->lock_object.lo_name, file, line);
902 break;
903 case SX_UNLOCKED:
904#ifdef WITNESS
905 witness_assert(&sx->lock_object, what, file, line);
906#else
907 /*
 908 * If we hold an exclusive lock, fail.  We can't
909 * reliably check to see if we hold a shared lock or
910 * not.
911 */
912 if (sx_xholder(sx) == curthread)
913 panic("Lock %s exclusively locked @ %s:%d\n",
914 sx->lock_object.lo_name, file, line);
915#endif
916 break;
917 default:
918 panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
919 line);
920 }
921}
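
The usual consumer of this checking code is the sx_assert() wrapper (used earlier in unlock_sx()), which lets a function state and enforce its locking contract. A brief sketch with a hypothetical protected field:

static void
example_requires_xlock(struct sx *sx, int *valuep)
{

	sx_assert(sx, SX_XLOCKED);	/* caller must hold the exclusive lock */
	*valuep = 0;
}
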
922#endif /* INVARIANT_SUPPORT */
923
924#ifdef DDB
925static void
926db_show_sx(struct lock_object *lock)
927{
928 struct thread *td;
929 struct sx *sx;
930
931 sx = (struct sx *)lock;
932
933 db_printf(" state: ");
934 if (sx->sx_lock == SX_LOCK_UNLOCKED)
935 db_printf("UNLOCKED\n");
936 else if (sx->sx_lock == SX_LOCK_DESTROYED) {
937 db_printf("DESTROYED\n");
938 return;
939 } else if (sx->sx_lock & SX_LOCK_SHARED)
940 db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
941 else {
942 td = sx_xholder(sx);
943 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
944 td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
945 if (sx_recursed(sx))
946 db_printf(" recursed: %d\n", sx->sx_recurse);
947 }
948
949 db_printf(" waiters: ");
950 switch(sx->sx_lock &
951 (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
952 case SX_LOCK_SHARED_WAITERS:
953 db_printf("shared\n");
954 break;
955 case SX_LOCK_EXCLUSIVE_WAITERS:
956 db_printf("exclusive\n");
957 break;
958 case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
959 db_printf("exclusive and shared\n");
960 break;
961 default:
962 db_printf("none\n");
963 }
964}
965
966/*
967 * Check to see if a thread that is blocked on a sleep queue is actually
968 * blocked on an sx lock. If so, output some details and return true.
969 * If the lock has an exclusive owner, return that in *ownerp.
970 */
971int
972sx_chain(struct thread *td, struct thread **ownerp)
973{
974 struct sx *sx;
975
976 /*
977 * Check to see if this thread is blocked on an sx lock.
978 * First, we check the lock class. If that is ok, then we
979 * compare the lock name against the wait message.
980 */
981 sx = td->td_wchan;
982 if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
983 sx->lock_object.lo_name != td->td_wmesg)
984 return (0);
985
986 /* We think we have an sx lock, so output some details. */
987 db_printf("blocked on sx \"%s\" ", td->td_wmesg);
988 *ownerp = sx_xholder(sx);
989 if (sx->sx_lock & SX_LOCK_SHARED)
990 db_printf("SLOCK (count %ju)\n",
991 (uintmax_t)SX_SHARERS(sx->sx_lock));
992 else
993 db_printf("XLOCK\n");
994 return (1);
995}
996#endif