/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

/* maximum number of times a read lock may be obtained */
#define	MAX_READ_LOCKS		(INT_MAX - 1)
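
/*
 * State encoding used throughout this file (a sketch of the invariants
 * visible in the code below): state > 0 means the lock is held by that
 * many readers, state == -1 means it is held by a single writer, and
 * state == 0 means it is unlocked.  blocked_writers counts the threads
 * currently waiting in rwlock_wrlock_common().
 */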

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

/*
 * Prototypes
 */
static int init_static(pthread_rwlock_t *rwlock);

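/*
 * Lazily initialize a rwlock whose storage is still NULL (the
 * statically initialized case noted by the callers below).  The
 * global _rwlock_static_lock serializes concurrent first uses so
 * that only one thread performs the allocation.
 */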
static int
init_static(pthread_rwlock_t *rwlock)
{
	struct pthread *thread = _get_curthread();
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == NULL)
		ret = _pthread_rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
	return (ret);
}

int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
	int ret;

	if (rwlock == NULL)
		ret = EINVAL;
	else {
		pthread_rwlock_t prwlock;

		prwlock = *rwlock;

		_pthread_mutex_destroy(&prwlock->lock);
		_pthread_cond_destroy(&prwlock->read_signal);
		_pthread_cond_destroy(&prwlock->write_signal);
		free(prwlock);

		*rwlock = NULL;

		ret = 0;
	}
	return (ret);
}

int
_pthread_rwlock_init (pthread_rwlock_t *rwlock,
    const pthread_rwlockattr_t *attr __unused)
{
	pthread_rwlock_t prwlock;
	int ret;

	/* allocate rwlock object */
	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));

	if (prwlock == NULL)
		return (ENOMEM);

	/* initialize the lock */
	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
		free(prwlock);
	else {
		/* initialize the read condition signal */
		ret = _pthread_cond_init(&prwlock->read_signal, NULL);

		if (ret != 0) {
			_pthread_mutex_destroy(&prwlock->lock);
			free(prwlock);
		} else {
			/* initialize the write condition signal */
			ret = _pthread_cond_init(&prwlock->write_signal, NULL);

			if (ret != 0) {
				_pthread_cond_destroy(&prwlock->read_signal);
				_pthread_mutex_destroy(&prwlock->lock);
				free(prwlock);
			} else {
				/* success */
				prwlock->state = 0;
				prwlock->blocked_writers = 0;

				*rwlock = prwlock;
			}
		}
	}

	return (ret);
}

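/*
 * Common code for pthread_rwlock_rdlock() and
 * pthread_rwlock_timedrdlock(); a NULL abstime means wait without a
 * timeout.
 */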
static int
rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	pthread_rwlock_t prwlock;
	struct pthread *curthread;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	/* check lock count */
	if (prwlock->state == MAX_READ_LOCKS) {
		_thr_mutex_unlock(&prwlock->lock);
		return (EAGAIN);
	}

	curthread = _get_curthread();
	if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
		;	/* nothing needed */
	} else {
		/* give writers priority over readers */
		while (prwlock->blocked_writers || prwlock->state < 0) {
			if (abstime)
				ret = _pthread_cond_timedwait
				    (&prwlock->read_signal,
				    &prwlock->lock, abstime);
			else
				ret = _thr_cond_wait(&prwlock->read_signal,
				    &prwlock->lock);
			if (ret != 0) {
				/* can't do a whole lot if this fails */
				_thr_mutex_unlock(&prwlock->lock);
				return (ret);
			}
		}
	}

	curthread->rdlock_count++;
	prwlock->state++; /* indicate we are locked for reading */

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	_thr_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

__strong_reference(_pthread_rwlock_rdlock, _thr_rwlock_rdlock);

int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	curthread = _get_curthread();
	if (prwlock->state == MAX_READ_LOCKS)
		ret = EAGAIN;
	else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
		/* see comment for pthread_rwlock_rdlock() */
		curthread->rdlock_count++;
		prwlock->state++;
	}
	/* give writers priority over readers */
	else if (prwlock->blocked_writers || prwlock->state < 0)
		ret = EBUSY;
	else {
		curthread->rdlock_count++;
		prwlock->state++; /* indicate we are locked for reading */
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	if (prwlock->state != 0)
		ret = EBUSY;
	else
		/* indicate we are locked for writing */
		prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

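/*
 * Release a read or write lock.  The last reader out signals one
 * blocked writer, if any; a writer releasing the lock wakes one
 * blocked writer if there is one, otherwise it wakes all waiting
 * readers.
 */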
int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	if (prwlock == NULL)
		return (EINVAL);

	/* grab the monitor lock */
	if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	curthread = _get_curthread();
	if (prwlock->state > 0) {
		curthread->rdlock_count--;
		prwlock->state--;
		if (prwlock->state == 0 && prwlock->blocked_writers)
			ret = _thr_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		prwlock->state = 0;

		if (prwlock->blocked_writers)
			ret = _thr_cond_signal(&prwlock->write_signal);
		else
			ret = _thr_cond_broadcast(&prwlock->read_signal);
	} else
		ret = EINVAL;

	/* see the comment on this in pthread_rwlock_rdlock */
	_thr_mutex_unlock(&prwlock->lock);

	return (ret);
}

__strong_reference(_pthread_rwlock_unlock, _thr_rwlock_unlock);

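/*
 * Common code for pthread_rwlock_wrlock() and
 * pthread_rwlock_timedwrlock(); a NULL abstime means wait without a
 * timeout.  The blocked_writers count taken while waiting is what
 * lets incoming readers yield to waiting writers.
 */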
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (prwlock == NULL) {
		if ((ret = init_static(rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/* grab the monitor lock */
	if ((ret = _thr_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	while (prwlock->state != 0) {
		prwlock->blocked_writers++;

		if (abstime != NULL)
			ret = _pthread_cond_timedwait(&prwlock->write_signal,
			    &prwlock->lock, abstime);
		else
			ret = _thr_cond_wait(&prwlock->write_signal,
			    &prwlock->lock);
		if (ret != 0) {
			prwlock->blocked_writers--;
			_thr_mutex_unlock(&prwlock->lock);
			return (ret);
		}

		prwlock->blocked_writers--;
	}

	/* indicate we are locked for writing */
	prwlock->state = -1;

	/* see the comment on this in pthread_rwlock_rdlock */
	_thr_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common (rwlock, NULL));
}
__strong_reference(_pthread_rwlock_wrlock, _thr_rwlock_wrlock);

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common (rwlock, abstime));
}