/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/lib/libthr/thread/thr_rwlock.c 338707 2018-09-17 02:51:08Z pfg $");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

_Static_assert(sizeof(struct pthread_rwlock) <= PAGE_SIZE,
    "pthread_rwlock is too large for off-page");

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

static int init_static(struct pthread *thread, pthread_rwlock_t *rwlock);
static int init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out);

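/*
 * Fast-path check used by the lock operations: hand back the underlying
 * lock object directly unless the handle is a process-shared pointer or
 * the lock has not yet been fully initialized, in which case the
 * out-of-line init_rwlock() slow path is taken.
 */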
static int __always_inline
check_and_init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	if (__predict_false(*rwlock == THR_PSHARED_PTR ||
	    *rwlock <= THR_RWLOCK_DESTROYED))
		return (init_rwlock(rwlock, rwlock_out));
	*rwlock_out = *rwlock;
	return (0);
}

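/*
 * Slow path of check_and_init_rwlock(): resolve the off-page object for
 * process-shared locks, perform first-use initialization of statically
 * initialized locks, and reject locks that have already been destroyed.
 */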
static int __noinline
init_rwlock(pthread_rwlock_t *rwlock, pthread_rwlock_t *rwlock_out)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else if ((prwlock = *rwlock) <= THR_RWLOCK_DESTROYED) {
		if (prwlock == THR_RWLOCK_INITIALIZER) {
			ret = init_static(_get_curthread(), rwlock);
			if (ret != 0)
				return (ret);
		} else if (prwlock == THR_RWLOCK_DESTROYED) {
			return (EINVAL);
		}
		prwlock = *rwlock;
	}
	*rwlock_out = prwlock;
	return (0);
}

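/*
 * Allocate and initialize the lock storage.  Process-private locks get a
 * zeroed pthread_rwlock structure from calloc(); process-shared locks
 * live in an off-page object and the user-visible handle is set to
 * THR_PSHARED_PTR.
 */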
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;

	if (attr == NULL || *attr == NULL ||
	    (*attr)->pshared == PTHREAD_PROCESS_PRIVATE) {
		prwlock = calloc(1, sizeof(struct pthread_rwlock));
		if (prwlock == NULL)
			return (ENOMEM);
		*rwlock = prwlock;
	} else {
		prwlock = __thr_pshared_offpage(rwlock, 1);
		if (prwlock == NULL)
			return (EFAULT);
		prwlock->lock.rw_flags |= USYNC_PROCESS_SHARED;
		*rwlock = THR_PSHARED_PTR;
	}
	return (0);
}

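/*
 * Release the storage backing the lock and mark the handle destroyed.
 * Destroying a statically initialized lock that was never used is a
 * no-op.
 */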
int
_pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	prwlock = *rwlock;
	if (prwlock == THR_RWLOCK_INITIALIZER)
		ret = 0;
	else if (prwlock == THR_RWLOCK_DESTROYED)
		ret = EINVAL;
	else if (prwlock == THR_PSHARED_PTR) {
		*rwlock = THR_RWLOCK_DESTROYED;
		__thr_pshared_destroy(rwlock);
		ret = 0;
	} else {
		*rwlock = THR_RWLOCK_DESTROYED;
		free(prwlock);
		ret = 0;
	}
	return (ret);
}

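/*
 * First-use initialization of a statically initialized rwlock,
 * serialized by _rwlock_static_lock so that concurrent callers do not
 * both allocate the lock.
 */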
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == THR_RWLOCK_INITIALIZER)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

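/*
 * Discard any previous value of the handle and set up a new lock with
 * the given attributes.
 */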
int
_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{

	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

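/*
 * Common read-lock path for rdlock and timedrdlock: try to acquire the
 * lock in userland first and only enter the kernel to sleep when that
 * fails, retrying the userland acquisition if the sleep is interrupted.
 */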
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	/*
	 * POSIX says the validity of the abstime parameter need not be
	 * checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* go to the kernel and wait for the lock */
		ret = __thr_rwlock_rdlock(&prwlock->lock, flags, abstime);
		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_tryrdlock(&prwlock->lock, flags) == 0) {
			ret = 0;
			break;
		}
	}
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

int
_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

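/*
 * Non-blocking read lock: same reader-preference handling as the
 * blocking path, but the kernel is never entered to wait.
 */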
int
_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int flags;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	if (curthread->rdlock_count) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		flags = URWLOCK_PREFER_READER;
	} else {
		flags = 0;
	}

	ret = _thr_rwlock_tryrdlock(&prwlock->lock, flags);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

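/*
 * Non-blocking write lock; on success the owning thread is recorded so
 * that unlock can verify ownership.
 */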
int
_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = TID(curthread);
	return (ret);
}

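/*
 * Common write-lock path for wrlock and timedwrlock: try the userland
 * acquisition first, then sleep in the kernel, retrying in userland if
 * the sleep is interrupted.  The owner is recorded on success.
 */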
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	ret = check_and_init_rwlock(rwlock, &prwlock);
	if (ret != 0)
		return (ret);

	/*
	 * POSIX says the validity of the abstime parameter need not be
	 * checked if the lock can be immediately acquired.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0) {
		prwlock->owner = TID(curthread);
		return (ret);
	}

	if (__predict_false(abstime &&
	    (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	for (;;) {
		/* go to the kernel and wait for the lock */
		ret = __thr_rwlock_wrlock(&prwlock->lock, abstime);
		if (ret == 0) {
			prwlock->owner = TID(curthread);
			break;
		}

		if (ret != EINTR)
			break;

		/* if interrupted, try to lock it in userland again. */
		if (_thr_rwlock_trywrlock(&prwlock->lock) == 0) {
			ret = 0;
			prwlock->owner = TID(curthread);
			break;
		}
	}
	return (ret);
}

int
_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock(pthread_rwlock_t * __restrict rwlock,
    const struct timespec * __restrict abstime)
{
	return (rwlock_wrlock_common(rwlock, abstime));
}

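/*
 * Release a read or write lock.  A write lock may only be released by
 * its owner; releasing a read lock also drops the per-thread
 * rdlock_count used for the reader-preference decision.
 */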
int
_pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (*rwlock == THR_PSHARED_PTR) {
		prwlock = __thr_pshared_offpage(rwlock, 0);
		if (prwlock == NULL)
			return (EINVAL);
	} else {
		prwlock = *rwlock;
	}

	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != TID(curthread)))
			return (EPERM);
		prwlock->owner = 0;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}