/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Shared/exclusive locks.  This implementation attempts to ensure
 * deterministic lock granting behavior, so that slocks and xlocks are
 * interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */
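
/*
 * Illustrative consumer usage of the sx(9) interface implemented below.
 * This is only a sketch, not part of this file; "foo_lock" and the
 * surrounding code are made-up names:
 *
 *      static struct sx foo_lock;
 *
 *      sx_init(&foo_lock, "foo lock");         once, at setup time
 *
 *      sx_slock(&foo_lock);                    shared (read) access
 *      ...
 *      sx_sunlock(&foo_lock);
 *
 *      sx_xlock(&foo_lock);                    exclusive (write) access
 *      ...
 *      sx_xunlock(&foo_lock);
 *
 *      sx_destroy(&foo_lock);                  once, at teardown time
 */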

#include "opt_adaptive_sx.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_sx.c 168333 2007-04-04 00:11:22Z kmacy $");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>
#include <sys/systm.h>

#ifdef ADAPTIVE_SX
#include <machine/cpu.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#if !defined(SMP) && defined(ADAPTIVE_SX)
#error "You must have SMP to enable the ADAPTIVE_SX option"
#endif

/* Handy macros for sleep queues. */
#define SQ_EXCLUSIVE_QUEUE      0
#define SQ_SHARED_QUEUE         1

/*
 * Variations on DROP_GIANT()/PICKUP_GIANT() for use in this file.  We
 * drop Giant anytime we have to sleep or if we adaptively spin.
 */
#define GIANT_DECLARE \
        int _giantcnt = 0; \
        WITNESS_SAVE_DECL(Giant) \

#define GIANT_SAVE() do { \
        if (mtx_owned(&Giant)) { \
                WITNESS_SAVE(&Giant.lock_object, Giant); \
                while (mtx_owned(&Giant)) { \
                        _giantcnt++; \
                        mtx_unlock(&Giant); \
                } \
        } \
} while (0)

#define GIANT_RESTORE() do { \
        if (_giantcnt > 0) { \
                mtx_assert(&Giant, MA_NOTOWNED); \
                while (_giantcnt--) \
                        mtx_lock(&Giant); \
                WITNESS_RESTORE(&Giant.lock_object, Giant); \
        } \
} while (0)

/*
 * Returns true if an exclusive lock is recursed.  It assumes
 * curthread currently has an exclusive lock.
 */
#define sx_recursed(sx)         ((sx)->sx_recurse != 0)

/*
 * Return a pointer to the owning thread if the lock is exclusively
 * locked.
 */
#define sx_xholder(sx) \
        ((sx)->sx_lock & SX_LOCK_SHARED ? NULL : \
        (struct thread *)SX_OWNER((sx)->sx_lock))

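/*
 * A rough sketch of the sx_lock word encoding as it is used throughout
 * this file (the authoritative definitions and exact bit values live in
 * sys/sx.h):
 *
 *      - When held exclusively, sx_lock contains the owning thread
 *        pointer, possibly with SX_LOCK_RECURSED and/or waiter flag
 *        bits or'd into the low bits; SX_OWNER() masks those bits off.
 *      - When held shared, SX_LOCK_SHARED is set and the number of
 *        sharers is encoded in the upper bits; SX_SHARERS() extracts
 *        the count, and adding or subtracting SX_ONE_SHARER adjusts it.
 *      - SX_LOCK_SHARED_WAITERS and SX_LOCK_EXCLUSIVE_WAITERS record
 *        blocked readers and writers respectively, and SX_LOCK_UNLOCKED
 *        is the released state.
 */
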
#ifdef DDB
static void     db_show_sx(struct lock_object *lock);
#endif
static void     lock_sx(struct lock_object *lock, int how);
static int      unlock_sx(struct lock_object *lock);

struct lock_class lock_class_sx = {
        .lc_name = "sx",
        .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE,
#ifdef DDB
        .lc_ddb_show = db_show_sx,
#endif
        .lc_lock = lock_sx,
        .lc_unlock = unlock_sx,
};

#ifndef INVARIANTS
#define _sx_assert(sx, what, file, line)
#endif

void
lock_sx(struct lock_object *lock, int how)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        if (how)
                sx_xlock(sx);
        else
                sx_slock(sx);
}

int
unlock_sx(struct lock_object *lock)
{
        struct sx *sx;

        sx = (struct sx *)lock;
        sx_assert(sx, SX_LOCKED | SX_NOTRECURSED);
        if (sx_xlocked(sx)) {
                sx_xunlock(sx);
                return (1);
        } else {
                sx_sunlock(sx);
                return (0);
        }
}

void
sx_sysinit(void *arg)
{
        struct sx_args *sargs = arg;

        sx_init(sargs->sa_sx, sargs->sa_desc);
}

void
sx_init_flags(struct sx *sx, const char *description, int opts)
{
        int flags;

        flags = LO_SLEEPABLE | LO_UPGRADABLE | LO_RECURSABLE;
        if (opts & SX_DUPOK)
                flags |= LO_DUPOK;
        if (opts & SX_NOPROFILE)
                flags |= LO_NOPROFILE;
        if (!(opts & SX_NOWITNESS))
                flags |= LO_WITNESS;
        if (opts & SX_QUIET)
                flags |= LO_QUIET;

        flags |= opts & SX_ADAPTIVESPIN;
        sx->sx_lock = SX_LOCK_UNLOCKED;
        sx->sx_recurse = 0;
        lock_profile_object_init(&sx->lock_object, &lock_class_sx, description);
        lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
}
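
/*
 * Example initialization (a hypothetical consumer, shown only to
 * illustrate the option flags handled above):
 *
 *      struct sx foo_lock;
 *
 *      sx_init_flags(&foo_lock, "foo lock", SX_ADAPTIVESPIN | SX_DUPOK);
 *
 * SX_NOWITNESS, SX_NOPROFILE and SX_QUIET may be or'd in the same way to
 * suppress WITNESS checking, lock profiling and KTR logging respectively.
 */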

void
sx_destroy(struct sx *sx)
{

        KASSERT(sx->sx_lock == SX_LOCK_UNLOCKED, ("sx lock still held"));
        KASSERT(sx->sx_recurse == 0, ("sx lock still recursed"));
        lock_profile_object_destroy(&sx->lock_object);
        lock_destroy(&sx->lock_object);
}

void
_sx_slock(struct sx *sx, const char *file, int line)
{

        MPASS(curthread != NULL);
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line);
        __sx_slock(sx, file, line);
        LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&sx->lock_object, 0, file, line);
        curthread->td_locks++;
}

int
_sx_try_slock(struct sx *sx, const char *file, int line)
{
        uintptr_t x;

        x = sx->sx_lock;
        if ((x & SX_LOCK_SHARED) && atomic_cmpset_acq_ptr(&sx->sx_lock, x,
            x + SX_ONE_SHARER)) {
                LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 1, file, line);
                WITNESS_LOCK(&sx->lock_object, LOP_TRYLOCK, file, line);
                curthread->td_locks++;
                return (1);
        }

        LOCK_LOG_TRY("SLOCK", &sx->lock_object, 0, 0, file, line);
        return (0);
}

void
_sx_xlock(struct sx *sx, const char *file, int line)
{

        MPASS(curthread != NULL);
        WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
            line);
        __sx_xlock(sx, curthread, file, line);
        LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse, file, line);
        WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
        curthread->td_locks++;
}

int
_sx_try_xlock(struct sx *sx, const char *file, int line)
{
        int rval;

        MPASS(curthread != NULL);

        if (sx_xlocked(sx)) {
                sx->sx_recurse++;
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                rval = 1;
        } else
                rval = atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED,
                    (uintptr_t)curthread);
        LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                curthread->td_locks++;
        }

        return (rval);
}

void
_sx_sunlock(struct sx *sx, const char *file, int line)
{

        MPASS(curthread != NULL);
        _sx_assert(sx, SX_SLOCKED, file, line);
        curthread->td_locks--;
        WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
        LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
        if (SX_SHARERS(sx->sx_lock) == 0)
                lock_profile_release_lock(&sx->lock_object);
        __sx_sunlock(sx, file, line);
}

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

        MPASS(curthread != NULL);
        _sx_assert(sx, SX_XLOCKED, file, line);
        curthread->td_locks--;
        WITNESS_UNLOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
        LOCK_LOG_LOCK("XUNLOCK", &sx->lock_object, 0, sx->sx_recurse, file,
            line);
        if (!sx_recursed(sx))
                lock_profile_release_lock(&sx->lock_object);
        __sx_xunlock(sx, curthread, file, line);
}

/*
 * Try to do a non-blocking upgrade from a shared lock to an exclusive lock.
 * This will only succeed if this thread holds a single shared lock.
 * Return 1 if the upgrade succeeded, 0 otherwise.
 */
int
_sx_try_upgrade(struct sx *sx, const char *file, int line)
{
        uintptr_t x;
        int success;

        _sx_assert(sx, SX_SLOCKED, file, line);

        /*
         * Try to switch from one shared lock to an exclusive lock.  We need
         * to maintain the SX_LOCK_EXCLUSIVE_WAITERS flag if set so that
         * we will wake up the exclusive waiters when we drop the lock.
         */
        x = sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS;
        success = atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) | x,
            (uintptr_t)curthread | x);
        LOCK_LOG_TRY("XUPGRADE", &sx->lock_object, 0, success, file, line);
        if (success)
                WITNESS_UPGRADE(&sx->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
        return (success);
}
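
/*
 * Typical caller pattern for the upgrade path above (a sketch with a
 * hypothetical "foo_lock"; callers must be prepared for the upgrade to
 * fail, since it only succeeds when the caller is the sole sharer):
 *
 *      sx_slock(&foo_lock);
 *      ...
 *      if (!sx_try_upgrade(&foo_lock)) {
 *              sx_sunlock(&foo_lock);
 *              sx_xlock(&foo_lock);
 *              ... re-validate any state read under the shared lock ...
 *      }
 *      ...
 *      sx_xunlock(&foo_lock);
 */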

/*
 * Downgrade an unrecursed exclusive lock into a single shared lock.
 */
void
_sx_downgrade(struct sx *sx, const char *file, int line)
{
        uintptr_t x;

        _sx_assert(sx, SX_XLOCKED | SX_NOTRECURSED, file, line);
#ifndef INVARIANTS
        if (sx_recursed(sx))
                panic("downgrade of a recursed lock");
#endif

        WITNESS_DOWNGRADE(&sx->lock_object, 0, file, line);

        /*
         * Try to switch from an exclusive lock with no shared waiters
         * to one sharer with no shared waiters.  If there are
         * exclusive waiters, we don't need to lock the sleep queue so
         * long as we preserve the flag.  We do one quick try and if
         * that fails we grab the sleepq lock to keep the flags from
         * changing and do it the slow way.
         *
         * We have to lock the sleep queue if there are shared waiters
         * so we can wake them up.
         */
        x = sx->sx_lock;
        if (!(x & SX_LOCK_SHARED_WAITERS) &&
            atomic_cmpset_rel_ptr(&sx->sx_lock, x, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS))) {
                LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
                return;
        }

        /*
         * Lock the sleep queue so we can read the waiters bits
         * without any races and wakeup any shared waiters.
         */
        sleepq_lock(&sx->lock_object);

        /*
         * Preserve SX_LOCK_EXCLUSIVE_WAITERS while downgraded to a single
         * shared lock.  If there are any shared waiters, wake them up.
         */
        x = sx->sx_lock;
        atomic_store_rel_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1) |
            (x & SX_LOCK_EXCLUSIVE_WAITERS));
        if (x & SX_LOCK_SHARED_WAITERS)
                sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
                    SQ_SHARED_QUEUE);
        else
                sleepq_release(&sx->lock_object);

        LOCK_LOG_LOCK("XDOWNGRADE", &sx->lock_object, 0, 0, file, line);
}

/*
 * This function represents the so-called 'hard case' for sx_xlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
        GIANT_DECLARE;
#ifdef ADAPTIVE_SX
        volatile struct thread *owner;
#endif
        uintptr_t x;
        int contested = 0;
        uint64_t waitstart = 0;

        /* If we already hold an exclusive lock, then recurse. */
        if (sx_xlocked(sx)) {
                sx->sx_recurse++;
                atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
                return;
        }
        lock_profile_obtain_lock_failed(&(sx)->lock_object,
            &contested, &waitstart);

        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

        while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
#ifdef ADAPTIVE_SX
                /*
                 * If the lock is write locked and the owner is
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
                x = sx->sx_lock;
                if (!(x & SX_LOCK_SHARED) &&
                    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
                        x = SX_OWNER(x);
                        owner = (struct thread *)x;
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, sx, owner);
                                GIANT_SAVE();
                                while (SX_OWNER(sx->sx_lock) == x &&
                                    TD_IS_RUNNING(owner))
                                        cpu_spinwait();
                                continue;
                        }
                }
#endif

                sleepq_lock(&sx->lock_object);
                x = sx->sx_lock;

                /*
                 * If the lock was released while spinning on the
                 * sleep queue chain lock, try again.
                 */
                if (x == SX_LOCK_UNLOCKED) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }

#ifdef ADAPTIVE_SX
                /*
                 * The current lock owner might have started executing
                 * on another CPU (or the lock could have changed
                 * owners) while we were waiting on the sleep queue
                 * chain lock.  If so, drop the sleep queue lock and try
                 * again.
                 */
                if (!(x & SX_LOCK_SHARED) &&
                    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                }
#endif

                /*
                 * If an exclusive lock was released with both shared
                 * and exclusive waiters and a shared waiter hasn't
                 * woken up and acquired the lock yet, sx_lock will be
                 * set to SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS.
                 * If we see that value, try to acquire it once.  Note
                 * that we have to preserve SX_LOCK_EXCLUSIVE_WAITERS
                 * as there are other exclusive waiters still.  If we
                 * fail, restart the loop.
                 */
                if (x == (SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS)) {
                        if (atomic_cmpset_acq_ptr(&sx->sx_lock,
                            SX_LOCK_UNLOCKED | SX_LOCK_EXCLUSIVE_WAITERS,
                            tid | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                sleepq_release(&sx->lock_object);
                                CTR2(KTR_LOCK, "%s: %p claimed by new writer",
                                    __func__, sx);
                                break;
                        }
                        sleepq_release(&sx->lock_object);
                        continue;
                }

                /*
                 * Try to set the SX_LOCK_EXCLUSIVE_WAITERS flag.  If we
                 * fail, then loop back and retry.
                 */
                if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set excl waiters flag",
                                    __func__, sx);
                }

                /*
                 * Since we have been unable to acquire the exclusive
                 * lock and the exclusive waiters flag is set, we have
                 * to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

                GIANT_SAVE();
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX, SQ_EXCLUSIVE_QUEUE);
                sleepq_wait(&sx->lock_object);

                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
        }

        GIANT_RESTORE();
        lock_profile_obtain_lock_success(&(sx)->lock_object, contested,
            waitstart, file, line);
}

/*
 * This function represents the so-called 'hard case' for sx_xunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
{
        uintptr_t x;
        int queue;

        MPASS(!(sx->sx_lock & SX_LOCK_SHARED));

        /* If the lock is recursed, then unrecurse one level. */
        if (sx_xlocked(sx) && sx_recursed(sx)) {
                if ((--sx->sx_recurse) == 0)
                        atomic_clear_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
                return;
        }
        MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
            SX_LOCK_EXCLUSIVE_WAITERS));
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR2(KTR_LOCK, "%s: %p contested", __func__, sx);

        sleepq_lock(&sx->lock_object);
        x = SX_LOCK_UNLOCKED;

        /*
         * The wake up algorithm here is quite simple and probably not
         * ideal.  It gives precedence to shared waiters if they are
         * present.  For this condition, we have to preserve the
         * state of the exclusive waiters flag.
         */
        if (sx->sx_lock & SX_LOCK_SHARED_WAITERS) {
                queue = SQ_SHARED_QUEUE;
                x |= (sx->sx_lock & SX_LOCK_EXCLUSIVE_WAITERS);
        } else
                queue = SQ_EXCLUSIVE_QUEUE;

        /* Wake up all the waiters for the specific queue. */
        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up all threads on %s queue",
                    __func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
                    "exclusive");
        atomic_store_rel_ptr(&sx->sx_lock, x);
        sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1, queue);
}
581
582/*
583 * This function represents the so-called 'hard case' for sx_slock
584 * operation. All 'easy case' failures are redirected to this. Note
585 * that ideally this would be a static function, but it needs to be
586 * accessible from at least sx.h.
587 */
588void
589_sx_slock_hard(struct sx *sx, const char *file, int line)
590{
591 GIANT_DECLARE;
592#ifdef ADAPTIVE_SX
593 volatile struct thread *owner;
594#endif
595 uintptr_t x;
596 uint64_t waitstart = 0;
597 int contested = 0;
598 /*
599 * As with rwlocks, we don't make any attempt to try to block
600 * shared locks once there is an exclusive waiter.
601 */
602
603 for (;;) {
604 x = sx->sx_lock;
605
606 /*
607 * If no other thread has an exclusive lock then try to bump up
608 * the count of sharers. Since we have to preserve the state
609 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
610 * shared lock loop back and retry.
611 */
612 if (x & SX_LOCK_SHARED) {
613 MPASS(!(x & SX_LOCK_SHARED_WAITERS));
614 if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
615 x + SX_ONE_SHARER)) {
616 if (SX_SHARERS(x) == 0)
617 lock_profile_obtain_lock_success(
618 &sx->lock_object, contested,
619 waitstart, file, line);
620 if (LOCK_LOG_TEST(&sx->lock_object, 0))
621 CTR4(KTR_LOCK,
622 "%s: %p succeed %p -> %p", __func__,
623 sx, (void *)x,
624 (void *)(x + SX_ONE_SHARER));
625 break;
626 }
627 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
628 &waitstart);
629
630 continue;
631 }
632
633#ifdef ADAPTIVE_SX
634 /*
635 * If the owner is running on another CPU, spin until
636 * the owner stops running or the state of the lock
637 * changes.
638 */
639 else if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
640 x = SX_OWNER(x);
641 owner = (struct thread *)x;
642 if (TD_IS_RUNNING(owner)) {
643 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
644 &waitstart);
645 if (LOCK_LOG_TEST(&sx->lock_object, 0))
646 CTR3(KTR_LOCK,
647 "%s: spinning on %p held by %p",
648 __func__, sx, owner);
649 GIANT_SAVE();
650 while (SX_OWNER(sx->sx_lock) == x &&
651 TD_IS_RUNNING(owner))
652 cpu_spinwait();
653 continue;
654 }
655 }
656#endif
657 else
658 lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
659 &waitstart);
660
661 /*
662 * Some other thread already has an exclusive lock, so
663 * start the process of blocking.
664 */
665 sleepq_lock(&sx->lock_object);
666 x = sx->sx_lock;
667
668 /*
669 * The lock could have been released while we spun.
670 * In this case loop back and retry.
671 */
672 if (x & SX_LOCK_SHARED) {
673 sleepq_release(&sx->lock_object);
674 continue;
675 }
676
677#ifdef ADAPTIVE_SX
678 /*
679 * If the owner is running on another CPU, spin until
680 * the owner stops running or the state of the lock
681 * changes.
682 */
683 if (!(x & SX_LOCK_SHARED) &&
684 (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
685 owner = (struct thread *)SX_OWNER(x);
686 if (TD_IS_RUNNING(owner)) {
687 sleepq_release(&sx->lock_object);
688 continue;
689 }
690 }
691#endif
692
                /*
                 * Try to set the SX_LOCK_SHARED_WAITERS flag.  If we
                 * fail to set it, drop the sleep queue lock and loop
                 * back.
                 */
                if (!(x & SX_LOCK_SHARED_WAITERS)) {
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_SHARED_WAITERS)) {
                                sleepq_release(&sx->lock_object);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                CTR2(KTR_LOCK, "%s: %p set shared waiters flag",
                                    __func__, sx);
                }

                /*
                 * Since we have been unable to acquire the shared lock,
                 * we have to sleep.
                 */
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
                            __func__, sx);

                GIANT_SAVE();
                sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
                    SLEEPQ_SX, SQ_SHARED_QUEUE);
                sleepq_wait(&sx->lock_object);

                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
        }

        GIANT_RESTORE();
}

/*
 * This function represents the so-called 'hard case' for sx_sunlock
 * operation.  All 'easy case' failures are redirected to this.  Note
 * that ideally this would be a static function, but it needs to be
 * accessible from at least sx.h.
 */
void
_sx_sunlock_hard(struct sx *sx, const char *file, int line)
{
        uintptr_t x;

        for (;;) {
                x = sx->sx_lock;

                /*
                 * We should never have shared waiters while at least one
                 * thread holds a shared lock.
                 */
                KASSERT(!(x & SX_LOCK_SHARED_WAITERS),
                    ("%s: waiting sharers", __func__));

                /*
                 * See if there is more than one shared lock held.  If
                 * so, just drop one and return.
                 */
                if (SX_SHARERS(x) > 1) {
                        if (atomic_cmpset_ptr(&sx->sx_lock, x,
                            x - SX_ONE_SHARER)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR4(KTR_LOCK,
                                            "%s: %p succeeded %p -> %p",
                                            __func__, sx, (void *)x,
                                            (void *)(x - SX_ONE_SHARER));
                                break;
                        }
                        continue;
                }

                /*
                 * If there aren't any waiters for an exclusive lock,
                 * then try to drop it quickly.
                 */
                if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
                        MPASS(x == SX_SHARERS_LOCK(1));
                        if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1),
                            SX_LOCK_UNLOCKED)) {
                                lock_profile_release_lock(&sx->lock_object);
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR2(KTR_LOCK, "%s: %p last succeeded",
                                            __func__, sx);
                                break;
                        }
                        continue;
                }

                /*
                 * At this point, there should just be one sharer with
                 * exclusive waiters.
                 */
                MPASS(x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS));

                lock_profile_release_lock(&sx->lock_object);
                sleepq_lock(&sx->lock_object);

                /*
                 * Wake up semantics here are quite simple: just wake up
                 * all the exclusive waiters.  Note that the state of the
                 * lock could have changed, so if the cmpset fails, loop
                 * back and retry.
                 */
                if (!atomic_cmpset_ptr(&sx->sx_lock,
                    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
                    SX_LOCK_UNLOCKED)) {
                        sleepq_release(&sx->lock_object);
                        continue;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p waking up all threads on "
                            "exclusive queue", __func__, sx);
                sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, -1,
                    SQ_EXCLUSIVE_QUEUE);
                break;
        }
}

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _sx_assert
#endif

/*
 * In the non-WITNESS case, sx_assert() can only detect that at least
 * *some* thread owns an slock, but it cannot guarantee that *this*
 * thread owns an slock.
 */
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{
#ifndef WITNESS
        int slocked = 0;
#endif

        if (panicstr != NULL)
                return;
        switch (what) {
        case SX_SLOCKED:
        case SX_SLOCKED | SX_NOTRECURSED:
        case SX_SLOCKED | SX_RECURSED:
#ifndef WITNESS
                slocked = 1;
                /* FALLTHROUGH */
#endif
        case SX_LOCKED:
        case SX_LOCKED | SX_NOTRECURSED:
        case SX_LOCKED | SX_RECURSED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If some other thread has an exclusive lock or we
                 * have one and are asserting a shared lock, fail.
                 * Also, if no one has a lock at all, fail.
                 */
                if (sx->sx_lock == SX_LOCK_UNLOCKED ||
                    (!(sx->sx_lock & SX_LOCK_SHARED) && (slocked ||
                    sx_xholder(sx) != curthread)))
                        panic("Lock %s not %slocked @ %s:%d\n",
                            sx->lock_object.lo_name, slocked ? "share " : "",
                            file, line);

                if (!(sx->sx_lock & SX_LOCK_SHARED)) {
                        if (sx_recursed(sx)) {
                                if (what & SX_NOTRECURSED)
                                        panic("Lock %s recursed @ %s:%d\n",
                                            sx->lock_object.lo_name, file,
                                            line);
                        } else if (what & SX_RECURSED)
                                panic("Lock %s not recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                }
#endif
                break;
        case SX_XLOCKED:
        case SX_XLOCKED | SX_NOTRECURSED:
        case SX_XLOCKED | SX_RECURSED:
                if (sx_xholder(sx) != curthread)
                        panic("Lock %s not exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                if (sx_recursed(sx)) {
                        if (what & SX_NOTRECURSED)
                                panic("Lock %s recursed @ %s:%d\n",
                                    sx->lock_object.lo_name, file, line);
                } else if (what & SX_RECURSED)
                        panic("Lock %s not recursed @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
                break;
        case SX_UNLOCKED:
#ifdef WITNESS
                witness_assert(&sx->lock_object, what, file, line);
#else
                /*
                 * If we hold an exclusive lock, fail.  We can't
                 * reliably check to see if we hold a shared lock or
                 * not.
                 */
                if (sx_xholder(sx) == curthread)
                        panic("Lock %s exclusively locked @ %s:%d\n",
                            sx->lock_object.lo_name, file, line);
#endif
                break;
        default:
                panic("Unknown sx lock assertion: %d @ %s:%d", what, file,
                    line);
        }
}
#endif /* INVARIANT_SUPPORT */

#ifdef DDB
static void
db_show_sx(struct lock_object *lock)
{
        struct thread *td;
        struct sx *sx;

        sx = (struct sx *)lock;

        db_printf(" state: ");
        if (sx->sx_lock == SX_LOCK_UNLOCKED)
                db_printf("UNLOCKED\n");
        else if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK: %ju\n", (uintmax_t)SX_SHARERS(sx->sx_lock));
        else {
                td = sx_xholder(sx);
                db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
                    td->td_tid, td->td_proc->p_pid, td->td_proc->p_comm);
                if (sx_recursed(sx))
                        db_printf(" recursed: %d\n", sx->sx_recurse);
        }

        db_printf(" waiters: ");
        switch (sx->sx_lock &
            (SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS)) {
        case SX_LOCK_SHARED_WAITERS:
                db_printf("shared\n");
                break;
        case SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive\n");
                break;
        case SX_LOCK_SHARED_WAITERS | SX_LOCK_EXCLUSIVE_WAITERS:
                db_printf("exclusive and shared\n");
                break;
        default:
                db_printf("none\n");
        }
}

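/*
 * For reference, the DDB output produced by db_show_sx() above looks
 * roughly like this for a contended, exclusively held lock (the values
 * are made up for illustration):
 *
 *       state: XLOCK: 0xc4a5d000 (tid 100042, pid 731, "foo")
 *       waiters: exclusive
 */
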
/*
 * Check to see if a thread that is blocked on a sleep queue is actually
 * blocked on an sx lock.  If so, output some details and return true.
 * If the lock has an exclusive owner, return that in *ownerp.
 */
int
sx_chain(struct thread *td, struct thread **ownerp)
{
        struct sx *sx;

        /*
         * Check to see if this thread is blocked on an sx lock.
         * First, we check the lock class.  If that is ok, then we
         * compare the lock name against the wait message.
         */
        sx = td->td_wchan;
        if (LOCK_CLASS(&sx->lock_object) != &lock_class_sx ||
            sx->lock_object.lo_name != td->td_wmesg)
                return (0);

        /* We think we have an sx lock, so output some details. */
        db_printf("blocked on sx \"%s\" ", td->td_wmesg);
        *ownerp = sx_xholder(sx);
        if (sx->sx_lock & SX_LOCK_SHARED)
                db_printf("SLOCK (count %ju)\n",
                    (uintmax_t)SX_SHARERS(sx->sx_lock));
        else
                db_printf("XLOCK\n");
        return (1);
}
#endif