--- kern_rwlock.c (167787)
+++ kern_rwlock.c (167801)
 /*-
  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright

--- 18 unchanged lines hidden ---

  * SUCH DAMAGE.
  */

 /*
  * Machine independent bits of reader/writer lock implementation.
  */

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 167787 2007-03-21 21:20:51Z jhb $");
+__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 167801 2007-03-22 16:09:23Z jhb $");
 
 #include "opt_ddb.h"
+#include "opt_no_adaptive_rwlocks.h"
 
 #include <sys/param.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
 #include <sys/systm.h>
 #include <sys/turnstile.h>
 #include <sys/lock_profile.h>
 #include <machine/cpu.h>
 
+#if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
+#define ADAPTIVE_RWLOCKS
+#endif
+
 #ifdef DDB
 #include <ddb/ddb.h>
 
 static void db_show_rwlock(struct lock_object *lock);
 #endif
 static void lock_rw(struct lock_object *lock, int how);
 static int unlock_rw(struct lock_object *lock);
 
--- 116 unchanged lines hidden ---

         LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, 0, file, line);
         lock_profile_release_lock(&rw->lock_object);
         __rw_wunlock(rw, curthread, file, line);
 }
 
 void
 _rw_rlock(struct rwlock *rw, const char *file, int line)
 {
-#ifdef SMP
+#ifdef ADAPTIVE_RWLOCKS
         volatile struct thread *owner;
 #endif
         uint64_t waittime = 0;
         int contested = 0;
         uintptr_t x;
 
         KASSERT(rw_wowner(rw) != curthread,
             ("%s (%s): wlock already held @ %s:%d", __func__,

--- 82 unchanged lines hidden ---

                                 cpu_spinwait();
                                 continue;
                         }
                         if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                 CTR2(KTR_LOCK, "%s: %p set read waiters flag",
                                     __func__, rw);
                 }
 
-#ifdef SMP
+#ifdef ADAPTIVE_RWLOCKS
                 /*
                  * If the owner is running on another CPU, spin until
                  * the owner stops running or the state of the lock
                  * changes.
                  */
                 owner = (struct thread *)RW_OWNER(x);
                 if (TD_IS_RUNNING(owner)) {
                         turnstile_release(&rw->lock_object);

--- 151 unchanged lines hidden ---

 /*
  * This function is called when we are unable to obtain a write lock on the
  * first try.  This means that at least one other thread holds either a
  * read or write lock.
  */
 void
 _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
 {
-#ifdef SMP
+#ifdef ADAPTIVE_RWLOCKS
         volatile struct thread *owner;
 #endif
         uintptr_t v;
 
         if (LOCK_LOG_TEST(&rw->lock_object, 0))
                 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
                     rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
 
--- 47 unchanged lines hidden ---

                                 cpu_spinwait();
                                 continue;
                         }
                         if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                 CTR2(KTR_LOCK, "%s: %p set write waiters flag",
                                     __func__, rw);
                 }
 
-#ifdef SMP
+#ifdef ADAPTIVE_RWLOCKS
                 /*
                  * If the lock is write locked and the owner is
                  * running on another CPU, spin until the owner stops
                  * running or the state of the lock changes.
                  */
                 owner = (struct thread *)RW_OWNER(v);
                 if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
                         turnstile_release(&rw->lock_object);

--- 38 unchanged lines hidden ---

             ("%s: neither of the waiter flags are set", __func__));
 
         if (LOCK_LOG_TEST(&rw->lock_object, 0))
                 CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);
 
         turnstile_lock(&rw->lock_object);
         ts = turnstile_lookup(&rw->lock_object);
 
-#ifdef SMP
+#ifdef ADAPTIVE_RWLOCKS
         /*
          * There might not be a turnstile for this lock if all of
          * the waiters are adaptively spinning.  In that case, just
          * reset the lock to the unlocked state and return.
          */
         if (ts == NULL) {
                 atomic_store_rel_ptr(&rw->rw_lock, RW_UNLOCKED);
                 if (LOCK_LOG_TEST(&rw->lock_object, 0))

--- 16 unchanged lines hidden ---

          *
          * In the case of both readers and writers waiting we wakeup the
          * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
          * new writer comes in before a reader it will claim the lock up
          * above.  There is probably a potential priority inversion in
          * there that could be worked around either by waking both queues
          * of waiters or doing some complicated lock handoff gymnastics.
          *
-         * Note that in the SMP case, if both flags are set, there might
-         * not be any actual writers on the turnstile as they might all
-         * be spinning.  In that case, we don't want to preserve the
-         * RW_LOCK_WRITE_WAITERS flag as the turnstile is going to go
-         * away once we wakeup all the readers.
+         * Note that in the ADAPTIVE_RWLOCKS case, if both flags are
+         * set, there might not be any actual writers on the turnstile
+         * as they might all be spinning.  In that case, we don't want
+         * to preserve the RW_LOCK_WRITE_WAITERS flag as the turnstile
+         * is going to go away once we wakeup all the readers.
          */
         v = RW_UNLOCKED;
         if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
                 queue = TS_SHARED_QUEUE;
-#ifdef SMP
+#ifdef ADAPTIVE_RWLOCKS
                 if (rw->rw_lock & RW_LOCK_WRITE_WAITERS &&
                     !turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
                         v |= RW_LOCK_WRITE_WAITERS;
 #else
                 v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
 #endif
         } else
                 queue = TS_EXCLUSIVE_QUEUE;
 
-#ifdef SMP
+#ifdef ADAPTIVE_RWLOCKS
         /*
          * We have to make sure that we actually have waiters to
          * wakeup.  If they are all spinning, then we just need to
          * disown the turnstile and return.
          */
         if (turnstile_empty(ts, queue)) {
                 if (LOCK_LOG_TEST(&rw->lock_object, 0))
                         CTR2(KTR_LOCK, "%s: %p no sleepers 2", __func__, rw);

--- 44 unchanged lines hidden ---

          * turnstile.
          */
         turnstile_lock(&rw->lock_object);
 
         /*
          * Try to switch from one reader to a writer again.  This time
          * we honor the current state of the RW_LOCK_WRITE_WAITERS
          * flag.  If we obtain the lock with the flag set, then claim
-         * ownership of the turnstile.  In the SMP case it is possible
-         * for there to not be an associated turnstile even though there
-         * are waiters if all of the waiters are spinning.
+         * ownership of the turnstile.  In the ADAPTIVE_RWLOCKS case
+         * it is possible for there to not be an associated turnstile
+         * even though there are waiters if all of the waiters are
+         * spinning.
          */
         v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
         success = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
             tid | v);
-#ifdef SMP
+#ifdef ADAPTIVE_RWLOCKS
         if (success && v && turnstile_lookup(&rw->lock_object) != NULL)
 #else
         if (success && v)
 #endif
                 turnstile_claim(&rw->lock_object);
         else
                 turnstile_release(&rw->lock_object);
 out:

--- 35 unchanged lines hidden ---

         v = rw->rw_lock;
         MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));
 
         /*
          * Downgrade from a write lock while preserving
          * RW_LOCK_WRITE_WAITERS and give up ownership of the
          * turnstile.  If there are any read waiters, wake them up.
          *
-         * For SMP, we have to allow for the fact that all of the
-         * read waiters might be spinning.  In that case, act as if
-         * RW_LOCK_READ_WAITERS is not set.  Also, only preserve
-         * the RW_LOCK_WRITE_WAITERS flag if at least one writer is
-         * blocked on the turnstile.
+         * For ADAPTIVE_RWLOCKS, we have to allow for the fact that
+         * all of the read waiters might be spinning.  In that case,
+         * act as if RW_LOCK_READ_WAITERS is not set.  Also, only
+         * preserve the RW_LOCK_WRITE_WAITERS flag if at least one
+         * writer is blocked on the turnstile.
          */
         ts = turnstile_lookup(&rw->lock_object);
-#ifdef SMP
+#ifdef ADAPTIVE_RWLOCKS
         if (ts == NULL)
                 v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
         else if (v & RW_LOCK_READ_WAITERS &&
             turnstile_empty(ts, TS_SHARED_QUEUE))
                 v &= ~RW_LOCK_READ_WAITERS;
         else if (v & RW_LOCK_WRITE_WAITERS &&
             turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
                 v &= ~RW_LOCK_WRITE_WAITERS;
 #else
         MPASS(ts != NULL);
 #endif
         if (v & RW_LOCK_READ_WAITERS)
                 turnstile_broadcast(ts, TS_SHARED_QUEUE);
         atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
             (v & RW_LOCK_WRITE_WAITERS));
         if (v & RW_LOCK_READ_WAITERS)
                 turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
-#ifdef SMP
+#ifdef ADAPTIVE_RWLOCKS
         else if (ts == NULL)
                 turnstile_release(&rw->lock_object);
 #endif
         else
                 turnstile_disown(ts);
 out:
         LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
 }
 
--- 100 unchanged lines hidden ---
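
The change is mechanical: adaptive spinning on contested rwlocks, previously compiled into every SMP kernel, is now gated behind a separate ADAPTIVE_RWLOCKS define that SMP kernels can turn off via the new NO_ADAPTIVE_RWLOCKS option (the added opt_no_adaptive_rwlocks.h header is, assuming the standard config(8) opt_*.h mechanism, what "options NO_ADAPTIVE_RWLOCKS" in a kernel configuration file generates). As a minimal sketch of the technique these #ifdef blocks guard, not the kernel's code, the userland fragment below approximates adaptive spinning with C11 atomics. The names sketch_rwlock, sketch_wlock, and SPIN_LIMIT are hypothetical stand-ins: the real code spins only while TD_IS_RUNNING(owner) is true and otherwise sleeps on a turnstile, whereas userland cannot see the owner's run state and must approximate with a bounded spin.

/*
 * Minimal userland sketch of adaptive spinning; illustrative only.
 * A bounded busy-wait followed by sched_yield() stands in for the
 * kernel's TD_IS_RUNNING(owner) check and turnstile sleep.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

#define SPIN_LIMIT      1000    /* arbitrary stand-in for "owner still running" */

struct sketch_rwlock {
        atomic_uintptr_t rw_lock;       /* 0 == unlocked, else the owner's id */
};

static void
sketch_wlock(struct sketch_rwlock *rw, uintptr_t tid)
{
        uintptr_t v = 0;
        int spins = 0;

        while (!atomic_compare_exchange_weak(&rw->rw_lock, &v, tid)) {
                v = 0;                  /* a failed CAS rewrote v; reset it */
                if (++spins < SPIN_LIMIT)
                        continue;       /* busy-wait, like cpu_spinwait() */
                sched_yield();          /* like blocking on the turnstile */
                spins = 0;
        }
}

The trade-off the new option exposes: while the owner is running on another CPU, a short spin is cheaper than the two context switches of a sleep/wakeup cycle, but when owners are frequently preempted or hold the lock for long stretches the spinning only burns cycles, which is presumably why a kernel can now opt out per configuration.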