thr_rwlock.c revision 132890
/*-
 * Copyright (c) 1998 Alex Nash
 * Copyright (c) 2004 Michael Telahun Makonnen
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_rwlock.c 132890 2004-07-30 17:13:00Z mtm $
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include <pthread.h>
#include "thr_private.h"

/* maximum number of times a read lock may be obtained */
#define	MAX_READ_LOCKS		(INT_MAX - 1)
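
/*
 * Note on the lock state (a reading of the code below, not a formal
 * contract): 'state' > 0 is the number of read locks currently held,
 * 'state' == -1 means the lock is held for writing by a single thread,
 * and 'state' == 0 means the lock is free.  'blocked_writers' counts
 * the writers currently sleeping on 'write_signal'.
 */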

/*
 * For distinguishing operations on read and write locks.
 */
enum rwlock_type {RWT_READ, RWT_WRITE};

/* Support for statically initialized read-write locks. */
static struct umtx init_lock = UMTX_INITIALIZER;

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);

static int	insert_rwlock(struct pthread_rwlock *, enum rwlock_type);
static int	rwlock_init_static(struct pthread_rwlock **rwlock);
static int	rwlock_rdlock_common(pthread_rwlock_t *, int,
		    const struct timespec *);
static int	rwlock_wrlock_common(pthread_rwlock_t *, int,
		    const struct timespec *);

int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
	pthread_rwlock_t prwlock;

	if (rwlock == NULL || *rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	if (prwlock->state != 0)
		return (EBUSY);

	pthread_mutex_destroy(&prwlock->lock);
	pthread_cond_destroy(&prwlock->read_signal);
	pthread_cond_destroy(&prwlock->write_signal);
	free(prwlock);

	*rwlock = NULL;

	return (0);
}

int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t	prwlock;
	int			ret;

	/* allocate rwlock object */
	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));

	if (prwlock == NULL) {
		ret = ENOMEM;
		goto out;
	}

	/* initialize the lock */
	if ((ret = pthread_mutex_init(&prwlock->lock, NULL)) != 0)
		goto out;

	/* initialize the read condition signal */
	if ((ret = pthread_cond_init(&prwlock->read_signal, NULL)) != 0)
		goto out_readcond;

	/* initialize the write condition signal */
	if ((ret = pthread_cond_init(&prwlock->write_signal, NULL)) != 0)
		goto out_writecond;

	/* success */
	prwlock->state		 = 0;
	prwlock->blocked_writers = 0;

	*rwlock = prwlock;
	return (0);

out_writecond:
	pthread_cond_destroy(&prwlock->read_signal);
out_readcond:
	pthread_mutex_destroy(&prwlock->lock);
out:
	if (prwlock != NULL)
		free(prwlock);
	return (ret);
}
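
/*
 * Illustrative only (not part of the library, not compiled): the expected
 * dynamic-initialization life cycle of a lock managed by the functions
 * above.  The error values shown are the ones returned by this file.
 *
 *	pthread_rwlock_t lock;
 *	int error;
 *
 *	error = pthread_rwlock_init(&lock, NULL);	// e.g. ENOMEM on failure
 *	if (error == 0) {
 *		// ... rdlock/wrlock/unlock the lock ...
 *		error = pthread_rwlock_destroy(&lock);	// EBUSY if still held
 *	}
 */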

/*
 * If nonblocking is zero this function will block until the lock can be
 * acquired.  If it is non-zero the function returns EBUSY immediately
 * instead of blocking.
 */
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, int nonblocking,
    const struct timespec *timeout)
{
	struct rwlock_held	*rh;
	pthread_rwlock_t	prwlock;
	int			ret;

	rh = NULL;
	if (rwlock == NULL)
		return (EINVAL);

	/*
	 * Check for validity of the timeout parameter.
	 */
	if (timeout != NULL &&
	    (timeout->tv_nsec < 0 || timeout->tv_nsec >= 1000000000))
		return (EINVAL);

	if ((ret = rwlock_init_static(rwlock)) != 0)
		return (ret);
	prwlock = *rwlock;

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	/* check lock count */
	if (prwlock->state == MAX_READ_LOCKS) {
		pthread_mutex_unlock(&prwlock->lock);
		return (EAGAIN);
	}

	/* give writers priority over readers */
	while (prwlock->blocked_writers || prwlock->state < 0) {
		if (nonblocking) {
			pthread_mutex_unlock(&prwlock->lock);
			return (EBUSY);
		}

		/*
		 * If this thread already holds the lock for writing
		 * we have a deadlock situation.
		 */
		if (curthread->rwlockList != NULL && prwlock->state < 0) {
			LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
				if (rh->rh_rwlock == prwlock &&
				    rh->rh_wrcount > 0) {
					pthread_mutex_unlock(&prwlock->lock);
					return (EDEADLK);
				}
			}
		}
		if (timeout == NULL)
			ret = pthread_cond_wait(&prwlock->read_signal,
			    &prwlock->lock);
		else
			ret = pthread_cond_timedwait(&prwlock->read_signal,
			    &prwlock->lock, timeout);

		if (ret != 0 && ret != EINTR) {
			/* can't do a whole lot if this fails */
			pthread_mutex_unlock(&prwlock->lock);
			return (ret);
		}
	}

	++prwlock->state; /* indicate we are locked for reading */
	ret = insert_rwlock(prwlock, RWT_READ);
	if (ret != 0) {
		pthread_mutex_unlock(&prwlock->lock);
		return (ret);
	}

	/*
	 * Something is really wrong if this call fails.  Returning an
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	pthread_mutex_unlock(&prwlock->lock);

	return (0);
}
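
/*
 * Illustrative only (not compiled): the self-deadlock that the check in
 * rwlock_rdlock_common() above reports.  A thread that already holds the
 * lock for writing and then requests a read lock on it gets EDEADLK back
 * instead of blocking on itself ('lock' is any initialized rwlock).
 *
 *	pthread_rwlock_wrlock(&lock);
 *	error = pthread_rwlock_rdlock(&lock);	// returns EDEADLK
 */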

int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, 0, NULL));
}

int
_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
    const struct timespec *timeout)
{
	return (rwlock_rdlock_common(rwlock, 0, timeout));
}
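
/*
 * Illustrative only (not compiled): the timeout handed to the timed
 * variants is passed straight through to pthread_cond_timedwait(), so it
 * is an absolute time on the realtime clock, not a relative interval.
 * A caller wanting to wait roughly one second for a previously
 * initialized lock might do:
 *
 *	struct timespec abstime;
 *
 *	clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 1;
 *	error = pthread_rwlock_timedrdlock(&lock, &abstime);
 *	// error is ETIMEDOUT if the lock was not obtained in time
 */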

int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, 1, NULL));
}

int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	struct rwlock_held	*rh;
	pthread_rwlock_t	prwlock;
	int			ret;

	rh = NULL;
	if (rwlock == NULL || *rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	if (curthread->rwlockList != NULL) {
		LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
			if (rh->rh_rwlock == prwlock)
				break;
		}
	}
	if (rh == NULL) {
		ret = EPERM;
		goto out;
	}
	if (prwlock->state > 0) {
		PTHREAD_ASSERT(rh->rh_wrcount == 0,
		    "write count on a readlock should be zero!");
		rh->rh_rdcount--;
		if (--prwlock->state == 0 && prwlock->blocked_writers)
			ret = pthread_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		PTHREAD_ASSERT(rh->rh_rdcount == 0,
		    "read count on a writelock should be zero!");
		rh->rh_wrcount--;
		prwlock->state = 0;
		if (prwlock->blocked_writers)
			ret = pthread_cond_signal(&prwlock->write_signal);
		else
			ret = pthread_cond_broadcast(&prwlock->read_signal);
	} else {
		/*
		 * No thread holds this lock. We should never get here.
		 */
		PTHREAD_ASSERT(0, "state=0 on read-write lock held by thread");
		ret = EPERM;
		goto out;
	}
	if (rh->rh_wrcount == 0 && rh->rh_rdcount == 0) {
		LIST_REMOVE(rh, rh_link);
		free(rh);
	}

out:
	/* see the comment on this in rwlock_rdlock_common */
	pthread_mutex_unlock(&prwlock->lock);

	return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, 0, NULL));
}

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *timeout)
{
	return (rwlock_wrlock_common(rwlock, 0, timeout));
}

int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common(rwlock, 1, NULL));
}
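
/*
 * Illustrative only (not compiled): the try variants never sleep.  When
 * the lock cannot be acquired immediately, the common routines return
 * EBUSY rather than blocking, so callers are expected to handle that
 * case themselves, e.g.:
 *
 *	if ((error = pthread_rwlock_trywrlock(&lock)) == EBUSY) {
 *		// do something else and retry later
 *	}
 */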

/*
 * If nonblocking is zero this function will block until the lock can be
 * acquired.  If it is non-zero the function returns EBUSY immediately
 * instead of blocking.
 */
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, int nonblocking,
    const struct timespec *timeout)
{
	struct rwlock_held	*rh;
	pthread_rwlock_t	prwlock;
	int			ret;

	rh = NULL;
	if (rwlock == NULL)
		return (EINVAL);

	/*
	 * Check the timeout value for validity.
	 */
	if (timeout != NULL &&
	    (timeout->tv_nsec < 0 || timeout->tv_nsec >= 1000000000))
		return (EINVAL);

	if ((ret = rwlock_init_static(rwlock)) != 0)
		return (ret);
	prwlock = *rwlock;

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	while (prwlock->state != 0) {
		if (nonblocking) {
			pthread_mutex_unlock(&prwlock->lock);
			return (EBUSY);
		}

		/*
		 * If this thread already holds the lock for reading
		 * or writing we have a deadlock situation.
		 */
		if (curthread->rwlockList != NULL) {
			LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
				if (rh->rh_rwlock == prwlock) {
					PTHREAD_ASSERT((rh->rh_rdcount > 0 ||
					    rh->rh_wrcount > 0),
					    "Invalid 0 R/RW count!");
					pthread_mutex_unlock(&prwlock->lock);
					return (EDEADLK);
				}
			}
		}

		++prwlock->blocked_writers;

		if (timeout == NULL)
			ret = pthread_cond_wait(&prwlock->write_signal,
			    &prwlock->lock);
		else
			ret = pthread_cond_timedwait(&prwlock->write_signal,
			    &prwlock->lock, timeout);

		if (ret != 0 && ret != EINTR) {
			--prwlock->blocked_writers;
			pthread_mutex_unlock(&prwlock->lock);
			return (ret);
		}

		--prwlock->blocked_writers;
	}

	/* indicate we are locked for writing */
	prwlock->state = -1;
	ret = insert_rwlock(prwlock, RWT_WRITE);
	if (ret != 0) {
		pthread_mutex_unlock(&prwlock->lock);
		return (ret);
	}

	/* see the comment on this in rwlock_rdlock_common */
	pthread_mutex_unlock(&prwlock->lock);

	return (0);
}

static int
insert_rwlock(struct pthread_rwlock *prwlock, enum rwlock_type rwt)
{
	struct rwlock_held *rh;

	/*
	 * Initialize the rwlock list in the thread. Although this function
	 * may be called for many read-write locks, the initialization
	 * of the head happens only once during the lifetime of
	 * the thread.
	 */
	if (curthread->rwlockList == NULL) {
		curthread->rwlockList =
		    (struct rwlock_listhead *)malloc(sizeof(struct rwlock_listhead));
		if (curthread->rwlockList == NULL) {
			return (ENOMEM);
		}
		LIST_INIT(curthread->rwlockList);
	}

	LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
		if (rh->rh_rwlock == prwlock) {
			if (rwt == RWT_READ)
				rh->rh_rdcount++;
			else if (rwt == RWT_WRITE)
				rh->rh_wrcount++;
			return (0);
		}
	}

	/*
	 * This is the first time we're holding this lock,
	 * so create a new entry.
	 */
	rh = (struct rwlock_held *)malloc(sizeof(struct rwlock_held));
	if (rh == NULL)
		return (ENOMEM);
	rh->rh_rwlock = prwlock;
	rh->rh_rdcount = 0;
	rh->rh_wrcount = 0;
	if (rwt == RWT_READ)
		rh->rh_rdcount = 1;
	else if (rwt == RWT_WRITE)
		rh->rh_wrcount = 1;
	LIST_INSERT_HEAD(curthread->rwlockList, rh, rh_link);
	return (0);
}

/*
 * There are consumers of rwlocks, including our own libc, that depend on
 * PTHREAD_RWLOCK_INITIALIZER to do for rwlocks what the similarly named
 * symbol does for statically initialized mutexes.  That symbol was
 * dropped in The Open Group Base Specifications Issue 6 and does not
 * exist in IEEE Std 1003.1, 2003, but it should still be supported for
 * backwards compatibility.
 */
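
/*
 * Illustrative only (not compiled; the names are invented for the
 * example): what the static-initializer support enables.  A file-scope
 * lock can be declared without calling pthread_rwlock_init(); the first
 * lock operation notices the PTHREAD_RWLOCK_INITIALIZER value and
 * performs the real initialization via rwlock_init_static() below.
 *
 *	static pthread_rwlock_t maplock = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	void
 *	lookup(void)
 *	{
 *		pthread_rwlock_rdlock(&maplock);
 *		// ... read the shared data ...
 *		pthread_rwlock_unlock(&maplock);
 *	}
 */
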
static int
rwlock_init_static(struct pthread_rwlock **rwlock)
{
	int error;

	error = 0;
	UMTX_LOCK(&init_lock);
	if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
		error = _pthread_rwlock_init(rwlock, NULL);
	UMTX_UNLOCK(&init_lock);
	return (error);
}