--- head/lib/libthr/thread/thr_rwlock.c (revision 195403)
+++ head/lib/libthr/thread/thr_rwlock.c (revision 213241)
 /*-
  * Copyright (c) 1998 Alex Nash
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: head/lib/libthr/thread/thr_rwlock.c 195403 2009-07-06 09:31:04Z attilio $
+ * $FreeBSD: head/lib/libthr/thread/thr_rwlock.c 213241 2010-09-28 04:57:56Z davidxu $
  */
 
 #include <errno.h>
 #include <limits.h>
 #include <stdlib.h>
 
 #include "namespace.h"
 #include <pthread.h>
 #include "un-namespace.h"
 #include "thr_private.h"
 
 __weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
 __weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
 __weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
 __weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
 __weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
 __weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
 __weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
 __weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
 __weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
 
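
A note on the __weak_reference() block above: each line exports the internal _pthread_* implementation under its public pthread_* name as a weak symbol, so another library (or the application) may interpose its own definition. A rough sketch of the mechanism using the GCC/Clang alias attribute; this illustrates the idea only, not libthr's actual macro, which emits .weak/.equ assembler directives:

    /* Hypothetical stand-in for __weak_reference(impl, name):
     * "name" becomes a weak alias for the private "impl". */
    int impl(void) { return (0); }                        /* strong definition */
    int name(void) __attribute__((weak, alias("impl"))); /* public weak alias */

The first functional change of r213241 follows: a macro shared by all the lock entry points.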
+#define CHECK_AND_INIT_RWLOCK \
+    if (__predict_false((prwlock = (*rwlock)) <= THR_RWLOCK_DESTROYED)) { \
+        if (prwlock == THR_RWLOCK_INITIALIZER) { \
+            int ret; \
+            ret = init_static(_get_curthread(), rwlock); \
+            if (ret) \
+                return (ret); \
+        } else if (prwlock == THR_RWLOCK_DESTROYED) { \
+            return (EINVAL); \
+        } \
+        prwlock = *rwlock; \
+    }
+
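
The CHECK_AND_INIT_RWLOCK macro above is the heart of this revision: every entry point now funnels through a single predicted-false pointer comparison instead of an open-coded NULL test. The `<=` works because THR_RWLOCK_INITIALIZER and THR_RWLOCK_DESTROYED are presumably small sentinel constants that sort below any real heap address; a statically initialized lock is lazily allocated via init_static(), and a destroyed one is rejected with EINVAL. A minimal caller-side sketch (hypothetical names) of the path that exercises the lazy initialization:

    /* First use of a statically initialized rwlock runs through
     * CHECK_AND_INIT_RWLOCK, which calls init_static() to allocate
     * the real struct pthread_rwlock under _rwlock_static_lock. */
    #include <pthread.h>

    static pthread_rwlock_t maplock = PTHREAD_RWLOCK_INITIALIZER;

    void
    reader(void)
    {
        pthread_rwlock_rdlock(&maplock);  /* first caller triggers init_static() */
        /* ... read the shared structure ... */
        pthread_rwlock_unlock(&maplock);
    }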
 /*
  * Prototypes
  */
 
 static int
 rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
 {
     pthread_rwlock_t prwlock;
 
     prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock));
     if (prwlock == NULL)
         return (ENOMEM);
     *rwlock = prwlock;
     return (0);
 }
 
 int
 _pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
 {
+    pthread_rwlock_t prwlock;
     int ret;
 
-    if (rwlock == NULL)
+    prwlock = *rwlock;
+    if (prwlock == THR_RWLOCK_INITIALIZER)
+        ret = 0;
+    else if (prwlock == THR_RWLOCK_DESTROYED)
         ret = EINVAL;
     else {
-        pthread_rwlock_t prwlock;
+        *rwlock = THR_RWLOCK_DESTROYED;
 
-        prwlock = *rwlock;
-        *rwlock = NULL;
-
         free(prwlock);
         ret = 0;
     }
     return (ret);
 }
 
 static int
 init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
 {
     int ret;
 
     THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
 
-    if (*rwlock == NULL)
+    if (*rwlock == THR_RWLOCK_INITIALIZER)
         ret = rwlock_init(rwlock, NULL);
     else
         ret = 0;
 
     THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
 
     return (ret);
 }
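
Taken together, the destroy and init_static hunks above replace the old NULL convention with a two-sentinel lifecycle: destroy() now stores THR_RWLOCK_DESTROYED into the caller's pointer instead of NULL, and init_static() only auto-initializes the INITIALIZER sentinel. The practical effect is that a destroyed lock can no longer be mistaken for a statically initialized one and silently resurrected; later operations fail with EINVAL. A sketch (hypothetical helper; POSIX leaves use-after-destroy undefined, so EINVAL here is this implementation's behavior, not a portable guarantee):

    #include <assert.h>
    #include <errno.h>
    #include <pthread.h>

    void
    destroy_demo(pthread_rwlock_t *rw)
    {
        pthread_rwlock_destroy(rw);                   /* frees the lock, stores the sentinel */
        assert(pthread_rwlock_rdlock(rw) == EINVAL);  /* caught, not a use-after-free */
        assert(pthread_rwlock_destroy(rw) == EINVAL); /* double destroy caught too */
    }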
 
 int
 _pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
 {
     *rwlock = NULL;
     return (rwlock_init(rwlock, attr));
 }
 
 static int
 rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
 {
     struct pthread *curthread = _get_curthread();
     pthread_rwlock_t prwlock;
     struct timespec ts, ts2, *tsp;
     int flags;
     int ret;
 
-    if (__predict_false(rwlock == NULL))
-        return (EINVAL);
+    CHECK_AND_INIT_RWLOCK
 
-    prwlock = *rwlock;
-
-    /* check for static initialization */
-    if (__predict_false(prwlock == NULL)) {
-        if ((ret = init_static(curthread, rwlock)) != 0)
-            return (ret);
-
-        prwlock = *rwlock;
-    }
-
     if (curthread->rdlock_count) {
         /*
          * To avoid having to track all the rdlocks held by
          * a thread or all of the threads that hold a rdlock,
          * we keep a simple count of all the rdlocks held by
          * a thread.  If a thread holds any rdlocks it is
          * possible that it is attempting to take a recursive
          * rdlock.  If there are blocked writers and precedence
          * is given to them, then that would result in the thread
          * deadlocking.  So allowing a thread to take the rdlock
          * when it already has one or more rdlocks avoids the
          * deadlock.  I hope the reader can follow that logic ;-)
          */
         flags = URWLOCK_PREFER_READER;
     } else {
         flags = 0;
     }
 
     /*
      * POSIX said the validity of the abstimeout parameter need
      * not be checked if the lock can be immediately acquired.
      */
     ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
     if (ret == 0) {
         curthread->rdlock_count++;
         return (ret);
     }
 
     if (__predict_false(abstime &&
         (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
         return (EINVAL);
 
     for (;;) {
         if (abstime) {
             clock_gettime(CLOCK_REALTIME, &ts);
             TIMESPEC_SUB(&ts2, abstime, &ts);
             if (ts2.tv_sec < 0 ||
                 (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
                 return (ETIMEDOUT);
             tsp = &ts2;
         } else
             tsp = NULL;
 
         /* goto kernel and lock it */
         ret = __thr_rwlock_rdlock(&prwlock->lock, flags, tsp);
         if (ret != EINTR)
             break;
 
         /* if interrupted, try to lock it in userland again. */
         if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
             ret = 0;
             break;
         }
     }
     if (ret == 0)
         curthread->rdlock_count++;
     return (ret);
 }
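
The rdlock_count comment above is worth a concrete scenario. With writer preference, a recursive read attempt queues behind a blocked writer, and that writer is in turn waiting for the first read lock to be released; neither can proceed. Passing URWLOCK_PREFER_READER whenever the thread already holds read locks breaks the cycle. A sketch (hypothetical thread function):

    /* Thread A holds a read lock; thread B is blocked in
     * pthread_rwlock_wrlock().  Without reader preference, A's second
     * rdlock would queue behind B while B waits on A: deadlock. */
    #include <pthread.h>

    static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

    void *
    thread_a(void *arg)
    {
        pthread_rwlock_rdlock(&rw);  /* rdlock_count becomes 1 */
        /* ... thread B now blocks in pthread_rwlock_wrlock(&rw) ... */
        pthread_rwlock_rdlock(&rw);  /* recursive read, granted immediately */
        pthread_rwlock_unlock(&rw);
        pthread_rwlock_unlock(&rw);  /* B's write lock can now be granted */
        return (arg);
    }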
 
 int
 _pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
 {
     return (rwlock_rdlock_common(rwlock, NULL));
 }
 
 int
 _pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
     const struct timespec *abstime)
 {
     return (rwlock_rdlock_common(rwlock, abstime));
 }
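
Note that abstime is an absolute CLOCK_REALTIME deadline: rwlock_rdlock_common() re-derives a relative sleep from it with TIMESPEC_SUB on every pass through the retry loop, so the caller builds the deadline once. A usage sketch (hypothetical helper):

    #include <pthread.h>
    #include <time.h>

    int
    read_with_deadline(pthread_rwlock_t *rw)
    {
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 2;    /* give up about two seconds from now */
        /* returns 0, ETIMEDOUT, or EINVAL for an out-of-range tv_nsec
         * (unless the lock was immediately available, per the POSIX
         * note in the code above) */
        return (pthread_rwlock_timedrdlock(rw, &deadline));
    }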
 
 int
 _pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
 {
     struct pthread *curthread = _get_curthread();
     pthread_rwlock_t prwlock;
     int flags;
     int ret;
 
-    if (__predict_false(rwlock == NULL))
-        return (EINVAL);
+    CHECK_AND_INIT_RWLOCK
 
-    prwlock = *rwlock;
-
-    /* check for static initialization */
-    if (__predict_false(prwlock == NULL)) {
-        if ((ret = init_static(curthread, rwlock)) != 0)
-            return (ret);
-
-        prwlock = *rwlock;
-    }
-
     if (curthread->rdlock_count) {
         /*
          * To avoid having to track all the rdlocks held by
          * a thread or all of the threads that hold a rdlock,
          * we keep a simple count of all the rdlocks held by
          * a thread.  If a thread holds any rdlocks it is
          * possible that it is attempting to take a recursive
          * rdlock.  If there are blocked writers and precedence
          * is given to them, then that would result in the thread
          * deadlocking.  So allowing a thread to take the rdlock
          * when it already has one or more rdlocks avoids the
          * deadlock.  I hope the reader can follow that logic ;-)
          */
         flags = URWLOCK_PREFER_READER;
     } else {
         flags = 0;
     }
 
     ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
     if (ret == 0)
         curthread->rdlock_count++;
     return (ret);
 }
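
The try variant stops after the single userland attempt: no abstime, no kernel sleep, no EINTR loop. Contention surfaces to the caller as EBUSY. A sketch (hypothetical helper):

    #include <errno.h>
    #include <pthread.h>

    int
    try_read(pthread_rwlock_t *rw)
    {
        int err;

        err = pthread_rwlock_tryrdlock(rw);
        if (err == 0) {
            /* ... read the shared structure ... */
            pthread_rwlock_unlock(rw);
        }
        return (err);  /* EBUSY while a writer holds or is queued on the lock */
    }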
 
 int
 _pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
 {
     struct pthread *curthread = _get_curthread();
     pthread_rwlock_t prwlock;
     int ret;
 
-    if (__predict_false(rwlock == NULL))
-        return (EINVAL);
+    CHECK_AND_INIT_RWLOCK
 
-    prwlock = *rwlock;
-
-    /* check for static initialization */
-    if (__predict_false(prwlock == NULL)) {
-        if ((ret = init_static(curthread, rwlock)) != 0)
-            return (ret);
-
-        prwlock = *rwlock;
-    }
-
     ret = _thr_rwlock_trywrlock(&prwlock->lock);
     if (ret == 0)
         prwlock->owner = curthread;
     return (ret);
 }
 
 static int
 rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
 {
     struct pthread *curthread = _get_curthread();
     pthread_rwlock_t prwlock;
     struct timespec ts, ts2, *tsp;
     int ret;
 
-    if (__predict_false(rwlock == NULL))
-        return (EINVAL);
+    CHECK_AND_INIT_RWLOCK
 
-    prwlock = *rwlock;
-
-    /* check for static initialization */
-    if (__predict_false(prwlock == NULL)) {
-        if ((ret = init_static(curthread, rwlock)) != 0)
-            return (ret);
-
-        prwlock = *rwlock;
-    }
-
     /*
      * POSIX said the validity of the abstimeout parameter need
      * not be checked if the lock can be immediately acquired.
      */
     ret = _thr_rwlock_trywrlock(&prwlock->lock);
     if (ret == 0) {
         prwlock->owner = curthread;
         return (ret);
     }
 
     if (__predict_false(abstime &&
         (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
         return (EINVAL);
 
     for (;;) {
         if (abstime != NULL) {
             clock_gettime(CLOCK_REALTIME, &ts);
             TIMESPEC_SUB(&ts2, abstime, &ts);
             if (ts2.tv_sec < 0 ||
                 (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
                 return (ETIMEDOUT);
             tsp = &ts2;
         } else
             tsp = NULL;
 
         /* goto kernel and lock it */
         ret = __thr_rwlock_wrlock(&prwlock->lock, tsp);
         if (ret == 0) {
             prwlock->owner = curthread;
             break;
         }
 
         if (ret != EINTR)
             break;
 
         /* if interrupted, try to lock it in userland again. */
         if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
             ret = 0;
             prwlock->owner = curthread;
             break;
         }
     }
     return (ret);
 }
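
The write path mirrors the read path with two additions: a successful acquire records the owning thread in prwlock->owner (consumed by the EPERM check in unlock below), and an EINTR return from the kernel sleep is never surfaced to the caller; the loop retries a userland trylock and, failing that, sleeps again with a freshly recomputed relative timeout. Caller-side, the timed write lock looks just like the read example (hypothetical helper):

    #include <errno.h>
    #include <pthread.h>
    #include <time.h>

    int
    update_with_deadline(pthread_rwlock_t *rw)
    {
        struct timespec deadline;
        int err;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1;
        err = pthread_rwlock_timedwrlock(rw, &deadline);
        if (err == 0) {
            /* ... mutate the shared structure ... */
            pthread_rwlock_unlock(rw);
        }
        return (err);  /* ETIMEDOUT if the deadline passed first */
    }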
 
 int
 _pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
 {
     return (rwlock_wrlock_common (rwlock, NULL));
 }
 
 int
 _pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
     const struct timespec *abstime)
 {
     return (rwlock_wrlock_common (rwlock, abstime));
 }
 
 int
 _pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
 {
     struct pthread *curthread = _get_curthread();
     pthread_rwlock_t prwlock;
     int ret;
     int32_t state;
 
-    if (__predict_false(rwlock == NULL))
-        return (EINVAL);
-
     prwlock = *rwlock;
 
-    if (__predict_false(prwlock == NULL))
+    if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
         return (EINVAL);
 
     state = prwlock->lock.rw_state;
     if (state & URWLOCK_WRITE_OWNER) {
         if (__predict_false(prwlock->owner != curthread))
             return (EPERM);
         prwlock->owner = NULL;
     }
 
     ret = _thr_rwlock_unlock(&prwlock->lock);
     if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
         curthread->rdlock_count--;
 
     return (ret);
 }
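
Unlock decides which mode it is releasing from the lock word itself: if URWLOCK_WRITE_OWNER is set, only the thread recorded in prwlock->owner may release it (EPERM otherwise); a read unlock instead decrements the per-thread rdlock_count that feeds the PREFER_READER heuristic above. A sketch of the ownership check as a caller would observe it (hypothetical thread function; POSIX leaves unlocking a lock you do not hold undefined, EPERM is simply what this implementation returns for the write case):

    #include <assert.h>
    #include <errno.h>
    #include <pthread.h>

    static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

    void *
    wrong_unlocker(void *arg)
    {
        /* some other thread holds the write lock on rw, not us */
        assert(pthread_rwlock_unlock(&rw) == EPERM);
        return (arg);
    }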