/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_rwlock.c 296162 2016-02-28 17:52:33Z kib $
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

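/*
 * Resolve the rwlock pointer and lazily initialize statically
 * allocated locks.  A lock word of THR_PSHARED_PTR redirects to the
 * process-shared off-page object; THR_RWLOCK_INITIALIZER triggers
 * one-time initialization via init_static(), and THR_RWLOCK_DESTROYED
 * yields EINVAL.
 */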
#define CHECK_AND_INIT_RWLOCK							\
	if (*rwlock == THR_PSHARED_PTR) {					\
		prwlock = __thr_pshared_offpage(rwlock, 0);			\
		if (prwlock == NULL)						\
			return (EINVAL);					\
	} else if (__predict_false((prwlock = (*rwlock)) <=			\
	    THR_RWLOCK_DESTROYED)) {						\
		if (prwlock == THR_RWLOCK_INITIALIZER) {			\
			int ret;						\
			ret = init_static(_get_curthread(), rwlock);		\
			if (ret)						\
				return (ret);					\
		} else if (prwlock == THR_RWLOCK_DESTROYED) {			\
			return (EINVAL);					\
		}								\
		prwlock = *rwlock;						\
	}

/*
 * Allocate and initialize a rwlock.  Process-private locks are
 * heap-allocated; process-shared locks live on a shared off-page so
 * that every process mapping the lock sees the same object.
 */
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;

	if (attr == NULL || *attr == NULL ||
	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
		prwlock = calloc(1, sizeof(struct pthread_rwlock));
		if (prwlock == NULL)
			return (ENOMEM);
		*rwlock = prwlock;
	} else {
		prwlock = __thr_pshared_offpage(rwlock, 1);
		if (prwlock == NULL)
			return (EFAULT);
		prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
		*rwlock = THR_PSHARED_PTR;
	}
	return (0);
}

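/*
 * Destroy a rwlock.  The lock word is poisoned with
 * THR_RWLOCK_DESTROYED so that later use of the stale handle is
 * caught; a never-locked static initializer needs no cleanup.
 */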
int
_pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else if (prwlock == THR_PSHARED_PTR) {
		*rwlock = THR_RWLOCK_DESTROYED;
		__thr_pshared_destroy(rwlock);
		ret = 0;
	} else {
		*rwlock = THR_RWLOCK_DESTROYED;
		free(prwlock);
		ret = 0;
	}
	return (ret);
}

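/*
 * Perform the one-time initialization of a lock created with
 * PTHREAD_RWLOCK_INITIALIZER.  The static lock serializes racing
 * initializers; the re-check under the lock handles the loser of
 * the race.
 */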
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

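/*
 * Initialize a rwlock with the given attributes.  The lock word is
 * cleared first so that rwlock_init() starts from a known state.
 */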
int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

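/*
 * Common read-lock path.  A NULL abstime means block indefinitely;
 * otherwise abstime is the absolute timeout passed to the kernel.
 */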
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	/*
	 * POSIX says the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* go to the kernel and lock it */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

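/*
 * Public read-lock entry points: pthread_rwlock_rdlock() blocks
 * without a timeout, pthread_rwlock_timedrdlock() until abstime.
 */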
int
_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

int
_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (curthread->rdlock_count) {
		/*
		 * If this thread already holds rdlocks, prefer readers
		 * so that a recursive rdlock cannot deadlock against
		 * blocked writers; see the comment in
		 * rwlock_rdlock_common() for the full reasoning.
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

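/*
 * Try to take the write lock without blocking.  On success the
 * owner TID is recorded so that unlock can enforce ownership.
 */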
int
_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	CHECK_AND_INIT_RWLOCK

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = TID(curthread);
	return (ret);
}

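/*
 * Common write-lock path.  A NULL abstime means block indefinitely;
 * on success the calling thread is recorded as the lock owner.
 */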
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	CHECK_AND_INIT_RWLOCK

	/*
	 * POSIX says the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = TID(curthread);
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* go to the kernel and lock it */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = TID(curthread);
			break;
		}

		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = TID(curthread);
			break;
		}
	}
	return (ret);
}

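/*
 * Public write-lock entry points: pthread_rwlock_wrlock() blocks
 * without a timeout, pthread_rwlock_timedwrlock() until abstime.
 */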
int
_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common(rwlock, abstime));
}

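/*
 * Release a read or write lock.  Write unlocks verify that the
 * caller owns the lock (EPERM otherwise); read unlocks decrement
 * the per-thread rdlock count used for recursion detection.
 */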
int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else {
		prwlock = *rwlock;
	}

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != TID(curthread)))
			return (EPERM);
		prwlock->owner = 0;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}