/*
 * Copyright (C) 2001 Jason Evans <jasone@freebsd.org>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * $FreeBSD: head/sys/kern/kern_sx.c 73901 2001-03-06 23:13:15Z jhb $
 */

/*
 * Shared/exclusive locks. This implementation ensures deterministic lock
 * granting behavior, so that slocks and xlocks are interleaved.
 *
 * Priority propagation will not generally raise the priority of lock holders,
 * so it should not be relied upon in combination with sx locks.
 *
 * --- 5 unchanged lines hidden ---
 * slock --> xlock (deadlock)
 * slock --> slock (slock recursion, not fatal)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ktr.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/sx.h>

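/*
 * A sketch of the state encoding used below (derived from the code, not a
 * normative part of the interface): sx_cnt is protected by the sx_lock
 * mutex and encodes the lock state:
 *
 *   sx_cnt > 0    held shared, by sx_cnt concurrent holders
 *   sx_cnt == 0   unowned
 *   sx_cnt == -1  held exclusively; sx_xholder is the owning process
 *
 * Blocked shared and exclusive lockers sleep on sx_shrd_cv and sx_excl_cv,
 * and sx_shrd_wcnt and sx_excl_wcnt count how many of each are sleeping.
 */
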
void
sx_init(struct sx *sx, const char *description)
{

	mtx_init(&sx->sx_lock, description, MTX_DEF);
	sx->sx_cnt = 0;
	cv_init(&sx->sx_shrd_cv, description);
	sx->sx_shrd_wcnt = 0;
	cv_init(&sx->sx_excl_cv, description);
	sx->sx_descr = description;
	sx->sx_excl_wcnt = 0;
	sx->sx_xholder = NULL;
}

void
sx_destroy(struct sx *sx)
{

	KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
	    0), ("%s (%s): holders or waiters\n", __FUNCTION__, sx->sx_descr));

	mtx_destroy(&sx->sx_lock);
	cv_destroy(&sx->sx_shrd_cv);
	cv_destroy(&sx->sx_excl_cv);
}
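
/*
 * Illustrative sketch, editorial and not part of the original file: a
 * typical lifecycle for an sx lock. The names "example_lock",
 * "example_count", and "example" are hypothetical, and the block is
 * guarded with #if 0 so it is never compiled.
 */
#if 0
static struct sx example_lock;
static int example_count;

static void
example(void)
{

	sx_init(&example_lock, "example");

	/* Any number of readers may hold the lock concurrently. */
	sx_slock(&example_lock);
	(void)example_count;		/* read shared state */
	sx_sunlock(&example_lock);

	/* A writer excludes both readers and other writers. */
	sx_xlock(&example_lock);
	example_count++;		/* modify shared state */
	sx_xunlock(&example_lock);

	/* Legal only with no holders or waiters (see the KASSERT above). */
	sx_destroy(&example_lock);
}
#endif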

void
sx_slock(struct sx *sx)
{

	mtx_lock(&sx->sx_lock);
	KASSERT(sx->sx_xholder != curproc,
	    ("%s (%s): trying to get slock while xlock is held\n", __FUNCTION__,
	    sx->sx_descr));

	/*
	 * Loop in case we lose the race for lock acquisition: cv_wait()
	 * drops sx_lock while we sleep, so an exclusive locker may grab
	 * the lock again before we reacquire it.
	 */
	while (sx->sx_cnt < 0) {
		sx->sx_shrd_wcnt++;
		cv_wait(&sx->sx_shrd_cv, &sx->sx_lock);
		sx->sx_shrd_wcnt--;
	}

	/* Acquire a shared lock. */
	sx->sx_cnt++;

	mtx_unlock(&sx->sx_lock);
}

void
sx_xlock(struct sx *sx)
{

	mtx_lock(&sx->sx_lock);

	/*
	 * With sx locks, we're absolutely not permitted to recurse on
	 * xlocks, as it is fatal (deadlock). Normally, recursion is handled
	 * by WITNESS, but as it is not semantically correct to hold the
	 * xlock while in here, we consider it API abuse and put it under
	 * INVARIANTS.
	 */
	KASSERT(sx->sx_xholder != curproc,
	    ("%s (%s): xlock already held", __FUNCTION__, sx->sx_descr));

	/* Loop in case we lose the race for lock acquisition. */
	while (sx->sx_cnt != 0) {
		sx->sx_excl_wcnt++;
		cv_wait(&sx->sx_excl_cv, &sx->sx_lock);
		sx->sx_excl_wcnt--;
	}

	MPASS(sx->sx_cnt == 0);

	/* Acquire an exclusive lock. */
	sx->sx_cnt--;
	sx->sx_xholder = curproc;

	mtx_unlock(&sx->sx_lock);
}
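
/*
 * Illustrative sketch of the misuse the header comment warns about; the
 * function "bad_upgrade" is hypothetical and the block is never compiled.
 * "Upgrading" a shared lock by calling sx_xlock() deadlocks silently: the
 * KASSERT above does not fire (sx_xholder is NULL), and sx_xlock() sleeps
 * until sx_cnt reaches 0, which our own slock prevents.
 */
#if 0
static void
bad_upgrade(struct sx *sx)
{

	sx_slock(sx);
	sx_xlock(sx);	/* deadlock: waits out our own slock, forever */
	sx_xunlock(sx);
	sx_sunlock(sx);
}
#endif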

void
sx_sunlock(struct sx *sx)
{

	mtx_lock(&sx->sx_lock);
	_SX_ASSERT_SLOCKED(sx);

	/* Release. */
	sx->sx_cnt--;

	/*
	 * If we just released the last shared lock, wake any waiters,
	 * giving exclusive lockers precedence. To ensure that exclusive
	 * lockers won't be blocked forever, don't wake shared lock waiters
	 * while there are exclusive lock waiters.
	 */
	if (sx->sx_excl_wcnt > 0) {
		if (sx->sx_cnt == 0)
			cv_signal(&sx->sx_excl_cv);
	} else if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);

	mtx_unlock(&sx->sx_lock);
}
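
/*
 * Worked example of the wakeup policy above, derived from the code: with
 * two shared holders and one exclusive waiter, the first sx_sunlock()
 * leaves sx_cnt == 1 and wakes nobody; the second leaves sx_cnt == 0 and
 * signals the exclusive waiter. Shared waiters, if any, stay asleep until
 * no exclusive waiters remain.
 */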

void
sx_xunlock(struct sx *sx)
{

	mtx_lock(&sx->sx_lock);
	_SX_ASSERT_XLOCKED(sx);
	MPASS(sx->sx_cnt == -1);

	/* Release. */
	sx->sx_cnt++;
	sx->sx_xholder = NULL;

	/*
	 * Wake up waiters if there are any. Give precedence to slock waiters.
	 */
	if (sx->sx_shrd_wcnt > 0)
		cv_broadcast(&sx->sx_shrd_cv);
	else if (sx->sx_excl_wcnt > 0)
		cv_signal(&sx->sx_excl_cv);

	mtx_unlock(&sx->sx_lock);
}
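
/*
 * Editorial summary of the interleaving claim in the header comment:
 * sx_sunlock() prefers exclusive waiters and sx_xunlock() prefers shared
 * waiters, so under contention grants alternate between a batch of shared
 * holders (cv_broadcast) and a single exclusive holder (cv_signal),
 * keeping either class from monopolizing the lock.
 */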