--- kern_lock.c	(r278693)
+++ kern_lock.c	(r278694)
 /*-
  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright

--- 18 unchanged lines hidden ---

  */

 #include "opt_adaptive_lockmgrs.h"
 #include "opt_ddb.h"
 #include "opt_hwpmc_hooks.h"
 #include "opt_kdtrace.h"

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/10/sys/kern/kern_lock.c 278693 2015-02-13 18:45:44Z sbruno $");
+__FBSDID("$FreeBSD: stable/10/sys/kern/kern_lock.c 278694 2015-02-13 19:06:22Z sbruno $");

 #include <sys/param.h>
 #include <sys/kdb.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
 #include <sys/lock_profile.h>
 #include <sys/lockmgr.h>
 #include <sys/mutex.h>

--- 534 unchanged lines hidden ---

 			 */
 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
 			    LK_HOLDER(x) != LK_KERNPROC) {
 				owner = (struct thread *)LK_HOLDER(x);
 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
 					CTR3(KTR_LOCK,
 					    "%s: spinning on %p held by %p",
 					    __func__, lk, owner);
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(td), "spinning",
+				    "lockname:\"%s\"", lk->lock_object.lo_name);

 				/*
 				 * If we are holding also an interlock drop it
 				 * in order to avoid a deadlock if the lockmgr
 				 * owner is adaptively spinning on the
 				 * interlock itself.
 				 */
 				if (flags & LK_INTERLOCK) {
 					class->lc_unlock(ilk);
 					flags &= ~LK_INTERLOCK;
 				}
 				GIANT_SAVE();
 				while (LK_HOLDER(lk->lk_lock) ==
 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
 					cpu_spinwait();
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(td), "running");
 				GIANT_RESTORE();
 				continue;
 			} else if (LK_CAN_ADAPT(lk, flags) &&
 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
 			    spintries < alk_retries) {
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(td), "spinning",
+				    "lockname:\"%s\"", lk->lock_object.lo_name);
 				if (flags & LK_INTERLOCK) {
 					class->lc_unlock(ilk);
 					flags &= ~LK_INTERLOCK;
 				}
 				GIANT_SAVE();
 				spintries++;
 				for (i = 0; i < alk_loops; i++) {
 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
 						CTR4(KTR_LOCK,
 						    "%s: shared spinning on %p with %u and %u",
 						    __func__, lk, spintries, i);
 					x = lk->lk_lock;
 					if ((x & LK_SHARE) == 0 ||
 					    LK_CAN_SHARE(x, flags) != 0)
 						break;
 					cpu_spinwait();
 				}
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(td), "running");
 				GIANT_RESTORE();
 				if (i != alk_loops)
 					continue;
 			}
 #endif

 		/*
 		 * Acquire the sleepqueue chain lock because we

--- 179 unchanged lines hidden ---

 			x = lk->lk_lock;
 			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
 			    LK_HOLDER(x) != LK_KERNPROC) {
 				owner = (struct thread *)LK_HOLDER(x);
 				if (LOCK_LOG_TEST(&lk->lock_object, 0))
 					CTR3(KTR_LOCK,
 					    "%s: spinning on %p held by %p",
 					    __func__, lk, owner);
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(td), "spinning",
+				    "lockname:\"%s\"", lk->lock_object.lo_name);

 				/*
 				 * If we are holding also an interlock drop it
 				 * in order to avoid a deadlock if the lockmgr
 				 * owner is adaptively spinning on the
 				 * interlock itself.
 				 */
 				if (flags & LK_INTERLOCK) {
 					class->lc_unlock(ilk);
 					flags &= ~LK_INTERLOCK;
 				}
 				GIANT_SAVE();
 				while (LK_HOLDER(lk->lk_lock) ==
 				    (uintptr_t)owner && TD_IS_RUNNING(owner))
 					cpu_spinwait();
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(td), "running");
 				GIANT_RESTORE();
 				continue;
 			} else if (LK_CAN_ADAPT(lk, flags) &&
 			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
 			    spintries < alk_retries) {
 				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
 				    !atomic_cmpset_ptr(&lk->lk_lock, x,
 				    x | LK_EXCLUSIVE_SPINNERS))
 					continue;
+				KTR_STATE1(KTR_SCHED, "thread",
+				    sched_tdname(td), "spinning",
+				    "lockname:\"%s\"", lk->lock_object.lo_name);
 				if (flags & LK_INTERLOCK) {
 					class->lc_unlock(ilk);
 					flags &= ~LK_INTERLOCK;
 				}
 				GIANT_SAVE();
 				spintries++;
 				for (i = 0; i < alk_loops; i++) {
 					if (LOCK_LOG_TEST(&lk->lock_object, 0))
 						CTR4(KTR_LOCK,
 						    "%s: shared spinning on %p with %u and %u",
 						    __func__, lk, spintries, i);
 					if ((lk->lk_lock &
 					    LK_EXCLUSIVE_SPINNERS) == 0)
 						break;
 					cpu_spinwait();
 				}
+				KTR_STATE0(KTR_SCHED, "thread",
+				    sched_tdname(td), "running");
 				GIANT_RESTORE();
 				if (i != alk_loops)
 					continue;
 			}
 #endif

 		/*
 		 * Acquire the sleepqueue chain lock because we

--- 667 unchanged lines hidden ---
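
The entire change in r278694 is the four KTR_STATE1()/KTR_STATE0() pairs marked with '+' above: each adaptive-spin path in lockmgr (spinning on an exclusive owner and the bounded shared-spin loop, on both the shared and the exclusive acquisition paths) now reports a "spinning" state on entry and a "running" state on exit through the KTR_SCHED event class. The sketch below distills that pattern into a self-contained form; example_adaptive_spin() is a hypothetical function and the plain TD_IS_RUNNING() loop condition is a simplification (kern_lock.c also re-reads the lock word each iteration), so treat it as an illustration rather than an excerpt.

/*
 * Sketch of the tracing pattern r278694 applies: bracket the busy-wait
 * with a "spinning" event carrying the lock name and a matching
 * "running" event, both under the KTR_SCHED mask, so a trace
 * post-processor can attribute time burned in cpu_spinwait() to the
 * lock being waited on.  example_adaptive_spin() is illustrative, not
 * a kern_lock.c symbol.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <machine/cpu.h>

static void
example_adaptive_spin(struct lock_object *lo, struct thread *owner)
{
	struct thread *td = curthread;

	/* Entering the spin: this thread's state becomes "spinning". */
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "spinning",
	    "lockname:\"%s\"", lo->lo_name);

	/*
	 * Simplified wait condition; the real code also re-checks the
	 * lock word (LK_HOLDER(lk->lk_lock) == owner) each iteration.
	 */
	while (TD_IS_RUNNING(owner))
		cpu_spinwait();

	/* Leaving the spin: state reverts to "running". */
	KTR_STATE0(KTR_SCHED, "thread", sched_tdname(td), "running");
}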
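These events compile to nothing unless the kernel is built with ktr(4) support; with KTR_SCHED included in the KTR_COMPILE and KTR_MASK kernel options, the trace buffer can be dumped with ktrdump(8) or rendered with tools/sched/schedgraph.py, which draws per-thread state transitions, so time spent busy-waiting in lockmgr becomes visible alongside the equivalent "spinning" events the mutex and rwlock code already emits. The exact option set is a configuration detail to verify against ktr(4) for the release in use.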