kern_rwlock.c: diff of revisions 176017 (old) -> 176076 (new); removed lines are marked "-", added lines "+"
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

--- 18 unchanged lines hidden ---

 * SUCH DAMAGE.
 */

/*
 * Machine independent bits of reader/writer lock implementation.
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 176017 2008-02-06 01:02:13Z jeff $");
+__FBSDID("$FreeBSD: head/sys/kern/kern_rwlock.c 176076 2008-02-07 06:16:54Z jeff $");

#include "opt_ddb.h"
#include "opt_no_adaptive_rwlocks.h"

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>

--- 630 unchanged lines hidden ---

	 * In the case of both readers and writers waiting we wakeup the
	 * readers but leave the RW_LOCK_WRITE_WAITERS flag set.  If a
	 * new writer comes in before a reader it will claim the lock up
	 * above.  There is probably a potential priority inversion in
	 * there that could be worked around either by waking both queues
	 * of waiters or doing some complicated lock handoff gymnastics.
	 */
	v = RW_UNLOCKED;
-	if (rw->rw_lock & RW_LOCK_READ_WAITERS) {
-		queue = TS_SHARED_QUEUE;
-		v |= (rw->rw_lock & RW_LOCK_WRITE_WAITERS);
-	} else
-		queue = TS_EXCLUSIVE_QUEUE;
+	if (rw->rw_lock & RW_LOCK_WRITE_WAITERS) {
+		queue = TS_EXCLUSIVE_QUEUE;
+		v |= (rw->rw_lock & RW_LOCK_READ_WAITERS);
+	} else
+		queue = TS_SHARED_QUEUE;
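
The substantive change in this revision is the hunk above. When _rw_wunlock_hard() finds waiters on both queues, r176017 woke the shared (reader) queue and carried the RW_LOCK_WRITE_WAITERS bit over in v; r176076 wakes the exclusive (writer) queue and carries RW_LOCK_READ_WAITERS instead, presumably so that a steady stream of arriving readers cannot starve a blocked writer. (The unchanged comment above the hunk still describes the old reader-first behavior.) Below is a minimal standalone sketch of the two policies; the flag and queue names are simplified stand-ins for the kernel's RW_LOCK_* and TS_*_QUEUE definitions, not the real API:

	/*
	 * Sketch only: LK_* and select_queue_*() are hypothetical
	 * stand-ins, not the kernel's rwlock implementation.
	 */
	#include <stdio.h>

	#define	LK_READ_WAITERS		0x1
	#define	LK_WRITE_WAITERS	0x2

	enum queue { SHARED_QUEUE, EXCLUSIVE_QUEUE };

	/* r176017 policy: prefer readers; preserve the write-waiters bit. */
	static enum queue
	select_queue_old(unsigned lockword, unsigned *v)
	{

		if (lockword & LK_READ_WAITERS) {
			*v |= (lockword & LK_WRITE_WAITERS);
			return (SHARED_QUEUE);
		}
		return (EXCLUSIVE_QUEUE);
	}

	/* r176076 policy: prefer writers; preserve the read-waiters bit. */
	static enum queue
	select_queue_new(unsigned lockword, unsigned *v)
	{

		if (lockword & LK_WRITE_WAITERS) {
			*v |= (lockword & LK_READ_WAITERS);
			return (EXCLUSIVE_QUEUE);
		}
		return (SHARED_QUEUE);
	}

	int
	main(void)
	{
		unsigned lockword, v_old, v_new;

		/* Both queues populated: the interesting case. */
		lockword = LK_READ_WAITERS | LK_WRITE_WAITERS;
		v_old = v_new = 0;
		printf("old: wake %s, residual flags 0x%x\n",
		    select_queue_old(lockword, &v_old) == SHARED_QUEUE ?
		    "readers" : "writers", v_old);
		printf("new: wake %s, residual flags 0x%x\n",
		    select_queue_new(lockword, &v_new) == SHARED_QUEUE ?
		    "readers" : "writers", v_new);
		return (0);
	}

With both waiter bits set, the old policy wakes readers and leaves 0x2 (the write-waiters bit) in the lock word, while the new one wakes writers and leaves 0x1.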

	/* Wake up all waiters for the specific queue. */
	if (LOCK_LOG_TEST(&rw->lock_object, 0))
		CTR3(KTR_LOCK, "%s: %p waking up %s waiters", __func__, rw,
		    queue == TS_SHARED_QUEUE ? "read" : "write");
	turnstile_broadcast(ts, queue);
	atomic_store_rel_ptr(&rw->rw_lock, v);
	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);

--- 262 unchanged lines hidden ---
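
The wakeup tail is shared by both revisions: turnstile_broadcast() moves every waiter on the chosen queue onto the pending list, the release store publishes the new lock word (RW_UNLOCKED plus the residual waiter bit), and turnstile_unpend() makes the pending threads runnable. A rough userland analogue of the ordering guarantee, using C11 atomics and a hypothetical wake_all() in place of the turnstile calls:

	#include <stdatomic.h>

	static _Atomic unsigned long lockword;

	/* Hypothetical stand-in for the turnstile calls, not kernel API. */
	static void
	wake_all(int queue)
	{

		(void)queue;
	}

	static void
	wunlock_tail(int queue, unsigned long v)
	{

		wake_all(queue);
		/*
		 * Release store, as atomic_store_rel_ptr() in the kernel:
		 * every write made while the lock was held happens-before
		 * any subsequent acquire load of the lock word by a woken
		 * waiter.
		 */
		atomic_store_explicit(&lockword, v, memory_order_release);
	}

	int
	main(void)
	{

		wunlock_tail(0, 0);
		return (0);
	}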