--- kern_sx.c (173733)
+++ kern_sx.c (174629)
/*-
 * Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
 * Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:

--- 26 unchanged lines hidden ---

 * Priority propagation will not generally raise the priority of lock holders,
 * so should not be relied upon in combination with sx locks.
 */

#include "opt_adaptive_sx.h"
#include "opt_ddb.h"

#include <sys/cdefs.h>
43__FBSDID("$FreeBSD: head/sys/kern/kern_sx.c 173733 2007-11-18 14:43:53Z attilio $");
43__FBSDID("$FreeBSD: head/sys/kern/kern_sx.c 174629 2007-12-15 23:13:31Z jeff $");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#include <sys/sx.h>

--- 245 unchanged lines hidden ---

	MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
	_sx_assert(sx, SA_SLOCKED, file, line);
	curthread->td_locks--;
	WITNESS_UNLOCK(&sx->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &sx->lock_object, 0, 0, file, line);
-#ifdef LOCK_PROFILING_SHARED
-	if (SX_SHARERS(sx->sx_lock) == 1)
-		lock_profile_release_lock(&sx->lock_object);
-#endif
	__sx_sunlock(sx, file, line);
+	lock_profile_release_lock(&sx->lock_object);
}
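The hunk above inverts the shared-unlock profiling policy: r173733 reported a release only when the kernel was built with LOCK_PROFILING_SHARED and the caller was the last sharer, while r174629 reports every shared unlock, after __sx_sunlock() completes. A minimal userland sketch of the two policies, using hypothetical stand-ins (toy_sx, profile_release) rather than the kernel API:

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-ins; the real code keys off the sx_lock state word. */
struct toy_sx { atomic_int sharers; };

static void profile_release(const char *why) { printf("profiled: %s\n", why); }

/* r173733 policy: only the last sharer out was profiled. */
static void sunlock_old(struct toy_sx *sx) {
	if (atomic_fetch_sub(&sx->sharers, 1) == 1)	/* previous count 1 */
		profile_release("last sharer");
}

/* r174629 policy: every shared unlock is profiled. */
static void sunlock_new(struct toy_sx *sx) {
	atomic_fetch_sub(&sx->sharers, 1);
	profile_release("every sharer");
}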

void
_sx_xunlock(struct sx *sx, const char *file, int line)
{

	MPASS(curthread != NULL);
	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,

--- 127 unchanged lines hidden ---

		return (0);
	}

	if (LOCK_LOG_TEST(&sx->lock_object, 0))
		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);

	while (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid)) {
+		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
+		    &waittime);
#ifdef ADAPTIVE_SX
		/*
		 * If the lock is write locked and the owner is
		 * running on another CPU, spin until the owner stops
		 * running or the state of the lock changes.
		 */
		x = sx->sx_lock;
		if (!(x & SX_LOCK_SHARED) &&
		    (sx->lock_object.lo_flags & SX_ADAPTIVESPIN)) {
			x = SX_OWNER(x);
			owner = (struct thread *)x;
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				GIANT_SAVE();
-				lock_profile_obtain_lock_failed(
-				    &sx->lock_object, &contested, &waittime);
				while (SX_OWNER(sx->sx_lock) == x &&
				    TD_IS_RUNNING(owner))
					cpu_spinwait();
				continue;
			}
		}
#endif

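The ADAPTIVE_SX block above is the adaptive-spin heuristic: if the writer that owns the lock is currently on a CPU, briefly busy-wait instead of paying for a sleepqueue round trip, since the owner is likely to release soon; if the owner has been switched out, spinning would only burn cycles. A compressed sketch of the idea, assuming a hypothetical owner_is_running() in place of the scheduler's TD_IS_RUNNING():

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

extern bool owner_is_running(uintptr_t owner);	/* hypothetical */

/* Returns true if we spun and should retry the acquire, false if the
 * caller should fall through to blocking on the sleep queue. */
static bool
adaptive_spin(_Atomic uintptr_t *word)
{
	uintptr_t owner = atomic_load(word);

	if (owner == 0 || !owner_is_running(owner))
		return (false);
	while (atomic_load(word) == owner && owner_is_running(owner))
		sched_yield();		/* stands in for cpu_spinwait() */
	return (true);			/* state changed; retry acquire */
}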

--- 70 unchanged lines hidden ---

		 * lock and the exclusive waiters flag is set, we have
		 * to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

		GIANT_SAVE();
-		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
-		    &waittime);
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object);
		else
			error = sleepq_wait_sig(&sx->lock_object);

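Note the wait split above: SX_INTERRUPTIBLE selects sleepq_wait_sig(), which can return an error when a signal arrives, while plain sleepq_wait() always resumes so the loop retries. The pattern, reduced to a sketch with hypothetical prototypes standing in for the sleepqueue(9) calls:

/* Hypothetical stand-ins for the sleepqueue(9) calls used above. */
void	toy_sleepq_wait(void *wchan);		/* uninterruptible */
int	toy_sleepq_wait_sig(void *wchan);	/* 0, or error on signal */

/* Interruptible sleeps can fail; the error must propagate so the
 * acquire loop can bail out instead of retrying forever. */
static int
block_on(void *wchan, int interruptible)
{
	if (!interruptible) {
		toy_sleepq_wait(wchan);
		return (0);
	}
	return (toy_sleepq_wait_sig(wchan));
}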

--- 75 unchanged lines hidden ---

 */
int
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
{
	GIANT_DECLARE;
#ifdef ADAPTIVE_SX
	volatile struct thread *owner;
#endif
-#ifdef LOCK_PROFILING_SHARED
	uint64_t waittime = 0;
	int contested = 0;
-#endif
	uintptr_t x;
	int error = 0;

	/*
	 * As with rwlocks, we don't make any attempt to try to block
	 * shared locks once there is an exclusive waiter.
	 */
	for (;;) {

--- 4 unchanged lines hidden ---

		 * the count of sharers.  Since we have to preserve the state
		 * of SX_LOCK_EXCLUSIVE_WAITERS, if we fail to acquire the
		 * shared lock loop back and retry.
		 */
		if (x & SX_LOCK_SHARED) {
			MPASS(!(x & SX_LOCK_SHARED_WAITERS));
			if (atomic_cmpset_acq_ptr(&sx->sx_lock, x,
			    x + SX_ONE_SHARER)) {
-#ifdef LOCK_PROFILING_SHARED
-				if (SX_SHARERS(x) == 0)
-					lock_profile_obtain_lock_success(
-					    &sx->lock_object, contested,
-					    waittime, file, line);
-#endif
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR4(KTR_LOCK,
					    "%s: %p succeed %p -> %p", __func__,
					    sx, (void *)x,
					    (void *)(x + SX_ONE_SHARER));
				break;
			}
			continue;
		}
+		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
+		    &waittime);

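The fast path above grows the sharer count with a single compare-and-swap on the whole lock word: because the CAS compares every bit, it preserves SX_LOCK_EXCLUSIVE_WAITERS for free, and any concurrent change to the word simply fails the exchange so the loop retries. A minimal model, with hypothetical TOY_* constants in place of the SX_LOCK_* encoding:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define	TOY_SHARED	0x01u	/* lock is in shared mode */
#define	TOY_ONE_SHARER	0x10u	/* sharer count lives in the high bits */

static bool
try_sshare(_Atomic uintptr_t *word)
{
	uintptr_t x = atomic_load(word);

	if (!(x & TOY_SHARED))
		return (false);		/* write-locked: take slow path */
	/* Full-word CAS: waiter flags carry over into x + TOY_ONE_SHARER,
	 * and any racing change to the word fails the exchange. */
	return (atomic_compare_exchange_strong(word, &x,
	    x + TOY_ONE_SHARER));
}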
#ifdef ADAPTIVE_SX
		/*
		 * If the owner is running on another CPU, spin until
		 * the owner stops running or the state of the lock
		 * changes.
		 */
-		else if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
+		if (sx->lock_object.lo_flags & SX_ADAPTIVESPIN) {
			x = SX_OWNER(x);
			owner = (struct thread *)x;
			if (TD_IS_RUNNING(owner)) {
				if (LOCK_LOG_TEST(&sx->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, sx, owner);
				GIANT_SAVE();
-#ifdef LOCK_PROFILING_SHARED
-				lock_profile_obtain_lock_failed(
-				    &sx->lock_object, &contested, &waittime);
-#endif
				while (SX_OWNER(sx->sx_lock) == x &&
				    TD_IS_RUNNING(owner))
					cpu_spinwait();
				continue;
			}
		}
#endif

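A side effect of hoisting the profiling call is visible in the `else if` to `if` hunk above: the old `else if` chained directly off the `if (x & SX_LOCK_SHARED)` block, but once a statement sits between the two branches the chain is broken syntactically. The rewrite is behavior-preserving because the shared branch always ends in break or continue and never falls through. Schematically, with hypothetical helpers (only the control flow matters):

#include <stdbool.h>

extern bool can_take_shared(void);
extern bool adaptive_spin_enabled(void);
extern void profile_failed(void);
extern void spin(void);

/* r173733 shape: the spin test chained off the fast path. */
static void
iteration_old(void)
{
	if (can_take_shared())
		return;			/* fast path taken */
	else if (adaptive_spin_enabled())
		spin();
}

/* r174629 shape: a statement between the branches forces a plain
 * `if`; semantics are unchanged because the fast path never falls
 * through to the spin test. */
static void
iteration_new(void)
{
	if (can_take_shared())
		return;
	profile_failed();		/* runs on every slow iteration */
	if (adaptive_spin_enabled())
		spin();
}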

--- 49 unchanged lines hidden ---

		 * Since we have been unable to acquire the shared lock,
		 * we have to sleep.
		 */
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p blocking on sleep queue",
			    __func__, sx);

		GIANT_SAVE();
-#ifdef LOCK_PROFILING_SHARED
-		lock_profile_obtain_lock_failed(&sx->lock_object, &contested,
-		    &waittime);
-#endif
		sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
		    SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
		    SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
		if (!(opts & SX_INTERRUPTIBLE))
			sleepq_wait(&sx->lock_object);
		else
			error = sleepq_wait_sig(&sx->lock_object);

		if (error) {
			if (LOCK_LOG_TEST(&sx->lock_object, 0))
				CTR2(KTR_LOCK,
			"%s: interruptible sleep by %p suspended by signal",
				    __func__, sx);
			break;
		}
		if (LOCK_LOG_TEST(&sx->lock_object, 0))
			CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
			    __func__, sx);
	}
+	if (error == 0)
+		lock_profile_obtain_lock_success(&sx->lock_object, contested,
+		    waittime, file, line);

	GIANT_RESTORE();
	return (error);
}
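The tail hunk completes the new profiling discipline for _sx_slock_hard(): failed attempts are tallied on every trip through the retry loop, and a single success record is emitted after the loop, but only when the lock was actually obtained (error == 0), so an interrupted sleep does not log a bogus acquisition. The overall shape, as a sketch with hypothetical profiler hooks mirroring the calls above:

#include <stdbool.h>
#include <stdint.h>

extern void profile_failed(int *contested, uint64_t *waittime);
extern void profile_success(int contested, uint64_t waittime);
extern bool try_acquire(void);
extern int block_for_lock(void);	/* 0, or error if interrupted */

static int
acquire_profiled(void)
{
	uint64_t waittime = 0;
	int contested = 0, error = 0;

	while (!try_acquire()) {
		profile_failed(&contested, &waittime);
		if ((error = block_for_lock()) != 0)
			break;		/* signal: give up, no success */
	}
	if (error == 0)
		profile_success(contested, waittime);
	return (error);
}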

/*
 * This function represents the so-called 'hard case' for sx_sunlock
 * operation.  All 'easy case' failures are redirected to this.  Note

--- 245 unchanged lines hidden ---