/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_rwlock.c 297701 2016-04-08 10:21:43Z kib $
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

_Static_assert(sizeof(struct pthread_rwlock) <= PAGE_SIZE,
    "pthread_rwlock is too large for off-page");

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

#define CHECK_AND_INIT_RWLOCK						\
	if (*rwlock == THR_PSHARED_PTR) {				\
		prwlock = __thr_pshared_offpage(rwlock, 0);		\
		if (prwlock == NULL)					\
			return (EINVAL);				\
	} else if (__predict_false((prwlock = (*rwlock)) <=		\
	    THR_RWLOCK_DESTROYED)) {					\
		if (prwlock == THR_RWLOCK_INITIALIZER) {		\
			int ret;					\
			ret = init_static(_get_curthread(), rwlock);	\
			if (ret)					\
				return (ret);				\
		} else if (prwlock == THR_RWLOCK_DESTROYED) {		\
			return (EINVAL);				\
		}							\
		prwlock = *rwlock;					\
	}
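
/*
 * Illustrative sketch (not part of libthr): the macro above lets a
 * statically initialized lock be materialized on first use, so code
 * like the following works without an explicit pthread_rwlock_init():
 *
 *	static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	pthread_rwlock_rdlock(&lock);	(init_static() runs here)
 *	pthread_rwlock_unlock(&lock);
 */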

static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;

	if (attr == NULL || *attr == NULL ||
	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
		prwlock = calloc(1, sizeof(struct pthread_rwlock));
		if (prwlock == NULL)
			return (ENOMEM);
		*rwlock = prwlock;
	} else {
		prwlock = __thr_pshared_offpage(rwlock, 1);
		if (prwlock == NULL)
			return (EFAULT);
		prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
		*rwlock = THR_PSHARED_PTR;
	}
	return (0);
}
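
/*
 * Sketch of the process-shared path above (assumed usage, not part of
 * this file): PTHREAD_PROCESS_SHARED routes allocation through the
 * shared off-page and leaves the marker THR_PSHARED_PTR in *rwlock.
 *
 *	pthread_rwlockattr_t attr;
 *	pthread_rwlock_t lock;
 *
 *	pthread_rwlockattr_init(&attr);
 *	pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_rwlock_init(&lock, &attr);	(*lock == THR_PSHARED_PTR)
 *	pthread_rwlockattr_destroy(&attr);
 */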

int
_pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else if (prwlock == THR_PSHARED_PTR) {
		*rwlock = THR_RWLOCK_DESTROYED;
		__thr_pshared_destroy(rwlock);
		ret = 0;
	} else {
		*rwlock = THR_RWLOCK_DESTROYED;
		free(prwlock);
		ret = 0;
	}
	return (ret);
}
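
/*
 * Lazy initialization for statically initialized locks.  The value is
 * re-checked under _rwlock_static_lock because several threads may
 * race here on first use; only one of them performs the allocation.
 */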
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	/*
	 * POSIX says the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* go to the kernel and lock it */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}
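
/*
 * Sketch of the deadlock the rdlock_count check above avoids (an
 * illustration, not part of this file), assuming writer preference:
 *
 *	pthread_rwlock_rdlock(&lock);	(thread A holds a read lock)
 *	(thread B now blocks in pthread_rwlock_wrlock(&lock))
 *	pthread_rwlock_rdlock(&lock);	(without URWLOCK_PREFER_READER,
 *					 A would queue behind B: deadlock)
 */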

int
_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

int
_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	CHECK_AND_INIT_RWLOCK

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	CHECK_AND_INIT_RWLOCK

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = TID(curthread);
	return (ret);
}

static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	CHECK_AND_INIT_RWLOCK

	/*
	 * POSIX says the validity of the abstime parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = TID(curthread);
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* go to the kernel and lock it */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = TID(curthread);
			break;
		}

		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = TID(curthread);
			break;
		}
	}
	return (ret);
}

int
_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common(rwlock, abstime));
}
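
/*
 * Sketch (assumed usage, not part of this file): abstime is an
 * absolute CLOCK_REALTIME deadline, e.g. roughly two seconds from now:
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 2;
 *	error = pthread_rwlock_timedwrlock(&lock, &ts);
 *	(error == ETIMEDOUT if the deadline passes while blocked)
 */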

int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else {
		prwlock = *rwlock;
	}

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != TID(curthread)))
			return (EPERM);
		prwlock->owner = 0;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}
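
/*
 * Sketch of the ownership check above (illustration only): a write
 * lock may only be released by the thread that acquired it.
 *
 *	pthread_rwlock_wrlock(&lock);
 *	pthread_rwlock_unlock(&lock);	(returns 0: owner matches)
 *	(the same unlock from another thread while the write lock is
 *	 held would return EPERM)
 */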