--- kern_rwlock.c	(278693)
+++ kern_rwlock.c	(278694)
 /*-
  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright

--- 15 unchanged lines hidden ---

  * SUCH DAMAGE.
  */

 /*
  * Machine independent bits of reader/writer lock implementation.
  */

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/10/sys/kern/kern_rwlock.c 278693 2015-02-13 18:45:44Z sbruno $");
+__FBSDID("$FreeBSD: stable/10/sys/kern/kern_rwlock.c 278694 2015-02-13 19:06:22Z sbruno $");

 #include "opt_ddb.h"
 #include "opt_hwpmc_hooks.h"
 #include "opt_kdtrace.h"
 #include "opt_no_adaptive_rwlocks.h"

 #include <sys/param.h>
 #include <sys/kdb.h>
 #include <sys/ktr.h>
 #include <sys/kernel.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
+#include <sys/sched.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/turnstile.h>

 #include <machine/cpu.h>

 #if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
 #define ADAPTIVE_RWLOCKS

--- 363 unchanged lines hidden ---

          */
         if ((v & RW_LOCK_READ) == 0) {
             owner = (struct thread *)RW_OWNER(v);
             if (TD_IS_RUNNING(owner)) {
                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
                     CTR3(KTR_LOCK,
                         "%s: spinning on %p held by %p",
                         __func__, rw, owner);
+                KTR_STATE1(KTR_SCHED, "thread",
+                    sched_tdname(curthread), "spinning",
+                    "lockname:\"%s\"", rw->lock_object.lo_name);
                 while ((struct thread*)RW_OWNER(rw->rw_lock) ==
                     owner && TD_IS_RUNNING(owner)) {
                     cpu_spinwait();
 #ifdef KDTRACE_HOOKS
                     spin_cnt++;
 #endif
                 }
+                KTR_STATE0(KTR_SCHED, "thread",
+                    sched_tdname(curthread), "running");
                 continue;
             }
         } else if (spintries < rowner_retries) {
             spintries++;
+            KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+                "spinning", "lockname:\"%s\"",
+                rw->lock_object.lo_name);
             for (i = 0; i < rowner_loops; i++) {
                 v = rw->rw_lock;
                 if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
                     break;
                 cpu_spinwait();
             }
+            KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+                "running");
             if (i != rowner_loops)
                 continue;
         }
 #endif

         /*
          * Okay, now it's the hard case.  Some other thread already
          * has a write lock or there are write waiters present,

--- 303 unchanged lines hidden ---

          * running or the state of the lock changes.
          */
         v = rw->rw_lock;
         owner = (struct thread *)RW_OWNER(v);
         if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
             if (LOCK_LOG_TEST(&rw->lock_object, 0))
                 CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
                     __func__, rw, owner);
+            KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+                "spinning", "lockname:\"%s\"",
+                rw->lock_object.lo_name);
             while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
                 TD_IS_RUNNING(owner)) {
                 cpu_spinwait();
 #ifdef KDTRACE_HOOKS
                 spin_cnt++;
 #endif
             }
+            KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+                "running");
             continue;
         }
         if ((v & RW_LOCK_READ) && RW_READERS(v) &&
             spintries < rowner_retries) {
             if (!(v & RW_LOCK_WRITE_SPINNER)) {
                 if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                     v | RW_LOCK_WRITE_SPINNER)) {
                     continue;
                 }
             }
             spintries++;
+            KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
+                "spinning", "lockname:\"%s\"",
+                rw->lock_object.lo_name);
             for (i = 0; i < rowner_loops; i++) {
                 if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
                     break;
                 cpu_spinwait();
             }
+            KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
+                "running");
 #ifdef KDTRACE_HOOKS
             spin_cnt += rowner_loops - i;
 #endif
             if (i != rowner_loops)
                 continue;
         }
 #endif
         ts = turnstile_trywait(&rw->lock_object);

--- 437 unchanged lines hidden ---
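
For context, the change above brackets each adaptive-spin loop with KTR_SCHED state events ("spinning" on entry, "running" on exit) and adds <sys/sched.h> for sched_tdname(), so schedgraph-style KTR consumers can attribute busy-wait time to the lock by name. The sketch below only illustrates that bracketing pattern; it is not code from kern_rwlock.c, the helper spin_while_owner_runs() and its arguments are hypothetical, and the KTR_STATE* macros are effectively no-ops unless the kernel is built with KTR tracing.

/*
 * Illustrative sketch only (not part of kern_rwlock.c): a hypothetical
 * helper showing the KTR_STATE1()/KTR_STATE0() bracketing that this
 * revision applies around the rwlock adaptive-spin loops.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>

#include <machine/cpu.h>

static void
spin_while_owner_runs(struct rwlock *rw, struct thread *owner)
{

    /* Mark this thread as spinning on the named lock for KTR_SCHED. */
    KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
        "spinning", "lockname:\"%s\"", rw->lock_object.lo_name);
    /* Busy-wait while the owning writer is still running on a CPU. */
    while ((struct thread *)RW_OWNER(rw->rw_lock) == owner &&
        TD_IS_RUNNING(owner))
        cpu_spinwait();
    /* Back to "running" once the spin ends, so trace viewers see the span. */
    KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running");
}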