/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_rwlock.c 177850 2008-04-02 04:32:31Z davidxu $
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"
#include "thr_private.h"

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

/*
 * Prototypes
 */

static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
	pthread_rwlock_t prwlock;

	prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock));
	if (prwlock == NULL)
		return (ENOMEM);
	*rwlock = prwlock;
	return (0);
}

int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
	int ret;

	if (rwlock == NULL)
		ret = EINVAL;
	else {
		pthread_rwlock_t prwlock;

		prwlock = *rwlock;
		*rwlock = NULL;

		free(prwlock);
		ret = 0;
	}
	return (ret);
}

static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == NULL)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

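/*
 * Usage sketch (not part of libthr; identifiers below are hypothetical).
 * On FreeBSD, pthread_rwlock_t is a pointer and PTHREAD_RWLOCK_INITIALIZER
 * leaves it NULL, so the first lock operation takes the init_static()
 * path above and allocates the lock lazily.  Compiled out via #if 0;
 * illustration only.
 */
#if 0
static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

static void
reader(void)
{
	/* First caller triggers lazy allocation via init_static(). */
	pthread_rwlock_rdlock(&map_lock);
	/* ... read shared data ... */
	pthread_rwlock_unlock(&map_lock);
}
#endif
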
int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	struct timespec ts, ts2, *tsp;
	int flags;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 * (A compiled-out usage sketch follows this function.)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	/*
	 * POSIX says the validity of the abstimeout parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
		(abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0 ||
			    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
				return (ETIMEDOUT);
			tsp = &ts2;
		} else
			tsp = NULL;

		/* go to the kernel and lock it */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, tsp);
		if (ret != EINTR)
			break;

		/*
		 * If interrupted, retry the lock in userland first;
		 * POSIX does not permit this function to fail with EINTR.
		 */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			curthread->rdlock_count++;
			break;
		}
	}
	return (ret);
}

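/*
 * Usage sketch (not part of libthr; identifiers below are hypothetical).
 * This is the recursive-rdlock case the URWLOCK_PREFER_READER logic
 * above is protecting: without reader preference, a writer blocked
 * between the two rdlock calls would deadlock this thread.  Compiled
 * out via #if 0; illustration only.
 */
#if 0
static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

static void
lookup_twice(void)
{
	pthread_rwlock_rdlock(&tree_lock);	/* rdlock_count becomes 1 */
	/*
	 * Even if a writer blocks on tree_lock here, the second rdlock
	 * succeeds: the thread already holds a read lock, so it is
	 * granted reader preference rather than queued behind the writer.
	 */
	pthread_rwlock_rdlock(&tree_lock);	/* rdlock_count becomes 2 */
	pthread_rwlock_unlock(&tree_lock);
	pthread_rwlock_unlock(&tree_lock);
}
#endif
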
int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
	 const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

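/*
 * Usage sketch (not part of libthr; identifiers below are hypothetical).
 * abstime is an absolute CLOCK_REALTIME deadline, not a relative
 * interval; rwlock_rdlock_common() converts it to a relative timeout
 * for the kernel on each retry.  Compiled out via #if 0; illustration
 * only.
 */
#if 0
static int
rdlock_with_timeout(pthread_rwlock_t *lock, time_t seconds)
{
	struct timespec abstime;

	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += seconds;	/* deadline = now + seconds */

	/* Returns ETIMEDOUT if the deadline passes while blocked. */
	return (pthread_rwlock_timedrdlock(lock, &abstime));
}
#endif
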
int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	if (curthread->rdlock_count) {
		/*
		 * The thread already holds at least one rdlock, so grant
		 * it reader preference to avoid a recursive-rdlock
		 * deadlock; see the comment in rwlock_rdlock_common().
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = curthread;
	return (ret);
}

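/*
 * Usage sketch (not part of libthr; identifiers below are hypothetical).
 * The try variants return EBUSY instead of blocking, so a caller can
 * fall back to other work when the lock is contended.  Compiled out
 * via #if 0; illustration only.
 */
#if 0
static int
try_update(pthread_rwlock_t *lock)
{
	if (pthread_rwlock_trywrlock(lock) == EBUSY)
		return (0);		/* lock held; caller retries later */
	/* ... modify shared data ... */
	pthread_rwlock_unlock(lock);
	return (1);			/* update performed */
}
#endif
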
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	struct timespec ts, ts2, *tsp;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/*
	 * POSIX says the validity of the abstimeout parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = curthread;
		return (ret);
	}

	if (__predict_false(abstime &&
		(abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		if (abstime != NULL) {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			if (ts2.tv_sec < 0 ||
			    (ts2.tv_sec == 0 && ts2.tv_nsec <= 0))
				return (ETIMEDOUT);
			tsp = &ts2;
		} else
			tsp = NULL;

		/* go to the kernel and lock it */
		ret = __thr_rwlock_wrlock(&prwlock->lock, tsp);
		if (ret == 0) {
			prwlock->owner = curthread;
			break;
		}

		if (ret != EINTR)
			break;

		/*
		 * If interrupted, retry the lock in userland first;
		 * POSIX does not permit this function to fail with EINTR.
		 */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = curthread;
			break;
		}
	}
	return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common (rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common (rwlock, abstime));
}

int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	if (__predict_false(prwlock == NULL))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != curthread))
			return (EPERM);
		prwlock->owner = NULL;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}

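/*
 * Usage sketch (not part of libthr; identifiers below are hypothetical).
 * Only the thread recorded in prwlock->owner may release a write lock;
 * the ownership check above returns EPERM for any other thread.
 * Compiled out via #if 0; illustration only.
 */
#if 0
static pthread_rwlock_t data_lock = PTHREAD_RWLOCK_INITIALIZER;

static void
writer(void)
{
	pthread_rwlock_wrlock(&data_lock);	/* this thread becomes owner */
	/* ... modify shared data ... */
	pthread_rwlock_unlock(&data_lock);	/* owner check passes */
	/*
	 * Had another thread called pthread_rwlock_unlock() while we
	 * held the write lock, the owner check would have returned
	 * EPERM instead of corrupting the lock state.
	 */
}
#endif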