kern_rwlock.c: revision 157851 (old) vs. revision 157882 (new)
Unchanged (lines 1-34):

/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

--- 18 unchanged lines hidden ---

 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
Deleted (old line 35):

__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 157851 2006-04-18 20:32:42Z wkoszek $");

Added (new line 35):

__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 157882 2006-04-19 21:06:52Z jhb $");
Unchanged (lines 36-57):

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

--- 6 unchanged lines hidden ---

#ifdef DDB
#include <ddb/ddb.h>

static void db_show_rwlock(struct lock_object *lock);
#endif

struct lock_class lock_class_rw = {
        "rw",
Deleted (old line 58):

        LC_SLEEPLOCK | LC_RECURSABLE /* | LC_UPGRADABLE */,

Added (new line 58):

        LC_SLEEPLOCK | LC_RECURSABLE | LC_UPGRADABLE,
Unchanged (lines 59-89):

#ifdef DDB
        db_show_rwlock
#endif
};

/*
 * Return a pointer to the owning thread if the lock is write-locked or
 * NULL if the lock is unlocked or read-locked.

--- 15 unchanged lines hidden ---

void
rw_init(struct rwlock *rw, const char *name)
{

        rw->rw_lock = RW_UNLOCKED;

        lock_init(&rw->rw_object, &lock_class_rw, name, NULL, LO_WITNESS |
Deleted (old line 90):

            LO_RECURSABLE /* | LO_UPGRADABLE */);

Added (new line 90):

            LO_RECURSABLE | LO_UPGRADABLE);
Unchanged (lines 91-587):

}

void
rw_destroy(struct rwlock *rw)
{

        KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
        lock_destroy(&rw->rw_object);

--- 481 unchanged lines hidden ---

        if (LOCK_LOG_TEST(&rw->rw_object, 0))
                CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
                    queue == TS_SHARED_QUEUE ? "read" : "write");
        turnstile_broadcast(ts, queue);
        atomic_store_rel_ptr(&rw->rw_lock, v);
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
}
Added (new lines 588-647): _rw_try_upgrade()

/*
 * Attempt to do a non-blocking upgrade from a read lock to a write
 * lock.  This will only succeed if this thread holds a single read
 * lock.  Returns true if the upgrade succeeded and false otherwise.
 */
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
        uintptr_t v, tid;
        int success;

        _rw_assert(rw, RA_RLOCKED, file, line);

        /*
         * Attempt to switch from one reader to a writer.  If there
         * are any write waiters, then we will have to lock the
         * turnstile first to prevent races with another writer
         * calling turnstile_wait() before we have claimed this
         * turnstile.  So, do the simple case of no waiters first.
         */
        tid = (uintptr_t)curthread;
        if (!(rw->rw_lock & RW_LOCK_WRITE_WAITERS)) {
                success = atomic_cmpset_acq_ptr(&rw->rw_lock,
                    RW_READERS_LOCK(1), tid);
                goto out;
        }

        /*
         * Ok, we think we have write waiters, so lock the
         * turnstile.
         */
        turnstile_lock(&rw->rw_object);

        /*
         * Try to switch from one reader to a writer again.  This time
         * we honor the current state of the RW_LOCK_WRITE_WAITERS
         * flag.  If we obtain the lock with the flag set, then claim
         * ownership of the turnstile.  In the SMP case it is possible
         * for there to not be an associated turnstile even though there
         * are waiters if all of the waiters are spinning.
         */
        v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
        success = atomic_cmpset_acq_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
            tid | v);
#ifdef SMP
        if (success && v && turnstile_lookup(&rw->rw_object) != NULL)
#else
        if (success && v)
#endif
                turnstile_claim(&rw->rw_object);
        else
                turnstile_release(&rw->rw_object);
out:
        LOCK_LOG_TRY("WUPGRADE", &rw->rw_object, 0, success, file, line);
        if (success)
                WITNESS_UPGRADE(&rw->rw_object, LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
        return (success);
}
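As an illustration (not taken from this diff), a minimal sketch of how a caller might use the new upgrade path, assuming the usual rw_rlock()/rw_try_upgrade()/rw_wlock() wrapper macros from <sys/rwlock.h>; foo_lock and the foo comments are hypothetical:

static struct rwlock foo_lock;  /* assumed to be rw_init()'d elsewhere */

static void
foo_update(void)
{

        rw_rlock(&foo_lock);
        /* ... read-side checks on the shared state ... */
        if (!rw_try_upgrade(&foo_lock)) {
                /*
                 * The upgrade only succeeds while we are the sole
                 * reader, so fall back to dropping the read lock and
                 * taking the write lock; anything examined under the
                 * read lock must then be revalidated.
                 */
                rw_runlock(&foo_lock);
                rw_wlock(&foo_lock);
        }
        /* ... modify the shared state ... */
        rw_wunlock(&foo_lock);
}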
Added (new lines 648-718): _rw_downgrade()

/*
 * Downgrade a write lock into a single read lock.
 */
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
        struct turnstile *ts;
        uintptr_t tid, v;

        _rw_assert(rw, RA_WLOCKED, file, line);

        WITNESS_DOWNGRADE(&rw->rw_object, 0, file, line);

        /*
         * Convert from a writer to a single reader.  First we handle
         * the easy case with no waiters.  If there are any waiters, we
         * lock the turnstile, "disown" the lock, and awaken any read
         * waiters.
         */
        tid = (uintptr_t)curthread;
        if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
                goto out;

        /*
         * Ok, we think we have waiters, so lock the turnstile so we can
         * read the waiter flags without any races.
         */
        turnstile_lock(&rw->rw_object);
        v = rw->rw_lock;
        MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));

        /*
         * Downgrade from a write lock while preserving
         * RW_LOCK_WRITE_WAITERS and give up ownership of the
         * turnstile.  If there are any read waiters, wake them up.
         *
         * For SMP, we have to allow for the fact that all of the
         * read waiters might be spinning.  In that case, act as if
         * RW_LOCK_READ_WAITERS is not set.  Also, only preserve
         * the RW_LOCK_WRITE_WAITERS flag if at least one writer is
         * blocked on the turnstile.
         */
        ts = turnstile_lookup(&rw->rw_object);
#ifdef SMP
        if (ts == NULL)
                v &= ~(RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
        else if (v & RW_LOCK_READ_WAITERS &&
            turnstile_empty(ts, TS_SHARED_QUEUE))
                v &= ~RW_LOCK_READ_WAITERS;
        else if (v & RW_LOCK_WRITE_WAITERS &&
            turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
                v &= ~RW_LOCK_WRITE_WAITERS;
#else
        MPASS(ts != NULL);
#endif
        if (v & RW_LOCK_READ_WAITERS)
                turnstile_broadcast(ts, TS_SHARED_QUEUE);
        atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
            (v & RW_LOCK_WRITE_WAITERS));
        if (v & RW_LOCK_READ_WAITERS)
                turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
#ifdef SMP
        else if (ts == NULL)
                turnstile_release(&rw->rw_object);
#endif
        else
                turnstile_disown(ts);
out:
        LOCK_LOG_LOCK("WDOWNGRADE", &rw->rw_object, 0, 0, file, line);
}
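Similarly, a minimal sketch of the downgrade path (illustrative only), assuming the rw_wlock()/rw_downgrade()/rw_runlock() wrappers from <sys/rwlock.h> and reusing the hypothetical foo_lock from the sketch above:

static void
foo_publish(void)
{

        rw_wlock(&foo_lock);
        /* ... modifications that need exclusive access ... */
        rw_downgrade(&foo_lock);
        /*
         * The thread now holds a single read lock and other readers
         * may run, but there was no window between the update above
         * and the reads below in which the lock was dropped.
         */
        /* ... continue reading the shared state ... */
        rw_runlock(&foo_lock);
}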
Unchanged (remainder of file; old lines 588+, new lines 719+):

#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef _rw_assert
#endif

/*
 * In the non-WITNESS case, rw_assert() can only detect that at least
 * *some* thread owns an rlock, but it cannot guarantee that *this*

--- 90 unchanged lines hidden ---
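The trailing comment (continued in the hidden lines) notes that without WITNESS, rw_assert() can only verify that some thread holds a read lock, not that the current thread does. As a hedged illustration of the usual calling pattern, again reusing the hypothetical foo_lock; foo_value is likewise hypothetical:

static int foo_value;

static int
foo_value_get(void)
{

        /*
         * Callers must hold foo_lock; RA_LOCKED accepts either a read
         * or a write lock.  Per the comment above, without WITNESS the
         * read-lock check is necessarily approximate.
         */
        rw_assert(&foo_lock, RA_LOCKED);
        return (foo_value);
}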