thr_rwlock.c revision 177770
/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_rwlock.c 177770 2008-03-31 02:55:49Z davidxu $
 */
28
29#include <errno.h>
30#include <limits.h>
31#include <stdlib.h>
32
33#include "namespace.h"
34#include <pthread.h>
35#include "un-namespace.h"
36#include "thr_private.h"
37
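/*
 * The lock state is a single 32-bit word: three flag bits in the high
 * bits and a count of active readers in the remaining 29 bits.
 *
 *	bit 31	RWLOCK_WRITE_OWNER	a writer owns the lock
 *	bit 30	RWLOCK_WRITE_WAITERS	at least one writer is blocked
 *	bit 29	RWLOCK_READ_WAITERS	at least one reader is blocked
 *	bits 0-28			number of read locks held
 */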
#define RWLOCK_WRITE_OWNER	0x80000000U
#define RWLOCK_WRITE_WAITERS	0x40000000U
#define RWLOCK_READ_WAITERS	0x20000000U
#define RWLOCK_MAX_READERS	0x1fffffffU
#define RWLOCK_READER_COUNT(c)	((c) & RWLOCK_MAX_READERS)

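/*
 * Export the public pthread_rwlock_* names as weak aliases for the
 * _pthread_rwlock_* implementations below.
 */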
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

/*
 * Allocate and initialize a rwlock object; the attribute argument is
 * currently unused.  Returns 0 on success or an errno value on failure.
 */
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
	pthread_rwlock_t prwlock;
	int ret;

	/* allocate rwlock object */
	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));

	if (prwlock == NULL)
		return (ENOMEM);

	/* initialize the lock */
	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
		free(prwlock);
	else {
		/* initialize the read condition signal */
		ret = _pthread_cond_init(&prwlock->read_signal, NULL);

		if (ret != 0) {
			_pthread_mutex_destroy(&prwlock->lock);
			free(prwlock);
		} else {
			/* initialize the write condition signal */
			ret = _pthread_cond_init(&prwlock->write_signal, NULL);

			if (ret != 0) {
				_pthread_cond_destroy(&prwlock->read_signal);
				_pthread_mutex_destroy(&prwlock->lock);
				free(prwlock);
			} else {
				/* success */
				prwlock->state = 0;
				prwlock->blocked_readers = 0;
				prwlock->blocked_writers = 0;
				prwlock->owner = NULL;
				*rwlock = prwlock;
			}
		}
	}

	return (ret);
}

int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
	int ret;

	if (rwlock == NULL)
		ret = EINVAL;
	else {
		pthread_rwlock_t prwlock;

		prwlock = *rwlock;
		*rwlock = NULL;

		_pthread_mutex_destroy(&prwlock->lock);
		_pthread_cond_destroy(&prwlock->read_signal);
		_pthread_cond_destroy(&prwlock->write_signal);
		free(prwlock);

		ret = 0;
	}
	return (ret);
}

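/*
 * Lazily initialize a statically-initialized rwlock (one whose
 * pthread_rwlock_t is still NULL) on first use; _rwlock_static_lock
 * serializes racing initializers.
 */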
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == NULL)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}

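/*
 * Userland fast path for read locking: atomically bump the reader
 * count unless a writer owns the lock (or, when the caller does not
 * already hold read locks, when writers are waiting).
 */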
static inline int
rwlock_tryrdlock(struct pthread_rwlock *prwlock, int prefer_reader)
{
	int32_t state;
	int32_t wrflags;

	if (prefer_reader)
		wrflags = RWLOCK_WRITE_OWNER;
	else
		wrflags = RWLOCK_WRITE_OWNER | RWLOCK_WRITE_WAITERS;
	state = prwlock->state;
	while (!(state & wrflags)) {
		if (RWLOCK_READER_COUNT(state) == RWLOCK_MAX_READERS)
			return (EAGAIN);
		if (atomic_cmpset_acq_32(&prwlock->state, state, state + 1))
			return (0);
		CPU_SPINWAIT;
		state = prwlock->state;
	}

	return (EBUSY);
}

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	const int prefer_read = curthread->rdlock_count > 0;
	pthread_rwlock_t prwlock;
	int ret, wrflags, old;
	int32_t state;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/*
	 * POSIX says the validity of the abstimeout parameter need
	 * not be checked if the lock can be immediately acquired.
	 */
	ret = rwlock_tryrdlock(prwlock, prefer_read);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}

	if (__predict_false(abstime &&
		(abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	if (prefer_read) {
		/*
		 * Rather than track every rdlock held by a thread (or
		 * every thread holding a rdlock), we keep a simple
		 * per-thread count of rdlocks held.  A thread that
		 * already holds rdlocks may be taking one recursively;
		 * if blocked writers were given precedence over it, the
		 * thread would deadlock against itself.  Ignoring
		 * RWLOCK_WRITE_WAITERS in that case avoids the deadlock.
		 */
		wrflags = RWLOCK_WRITE_OWNER;
	} else
		wrflags = RWLOCK_WRITE_OWNER | RWLOCK_WRITE_WAITERS;

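	/*
	 * Slow path: set the READ_WAITERS bit and sleep on read_signal,
	 * then retry the fast path.  blocked_readers counts the threads
	 * sleeping here; the last one to wake clears the READ_WAITERS
	 * bit.
	 */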
	/* reset to zero */
	ret = 0;
	for (;;) {
		_pthread_mutex_lock(&prwlock->lock);
		state = prwlock->state;
		/* set read contention bit */
		while ((state & wrflags) && !(state & RWLOCK_READ_WAITERS)) {
			if (atomic_cmpset_acq_32(&prwlock->state, state, state | RWLOCK_READ_WAITERS))
				break;
			CPU_SPINWAIT;
			state = prwlock->state;
		}

		atomic_add_32(&prwlock->blocked_readers, 1);
		if (state & wrflags) {
			ret = _pthread_cond_wait_unlocked(&prwlock->read_signal, &prwlock->lock, abstime);
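			/*
			 * The wakeup side releases the mutex, so only
			 * the last blocked reader retakes it here in
			 * order to clear the READ_WAITERS bit below.
			 */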
			old = atomic_fetchadd_32(&prwlock->blocked_readers, -1);
			if (old == 1)
				_pthread_mutex_lock(&prwlock->lock);
			else
				goto try_it;
		} else {
			atomic_subtract_32(&prwlock->blocked_readers, 1);
		}

		if (prwlock->blocked_readers == 0)
			atomic_clear_32(&prwlock->state, RWLOCK_READ_WAITERS);
		_pthread_mutex_unlock(&prwlock->lock);

try_it:
		/* try to lock it again. */
		if (rwlock_tryrdlock(prwlock, prefer_read) == 0) {
			curthread->rdlock_count++;
			ret = 0;
			break;
		}

		if (ret)
			break;
	}
	return (ret);
}

int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
	 const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}

int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	ret = rwlock_tryrdlock(prwlock, curthread->rdlock_count > 0);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

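/*
 * Userland fast path for write locking: atomically set WRITE_OWNER,
 * but only while no writer owns the lock and the reader count is zero.
 */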
static inline int
rwlock_trywrlock(struct pthread_rwlock *prwlock)
{
	int32_t state;

	state = prwlock->state;
	while (!(state & RWLOCK_WRITE_OWNER) && RWLOCK_READER_COUNT(state) == 0) {
		if (atomic_cmpset_acq_32(&prwlock->state, state, state | RWLOCK_WRITE_OWNER))
			return (0);
		CPU_SPINWAIT;
		state = prwlock->state;
	}
	return (EBUSY);
}

int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	ret = rwlock_trywrlock(prwlock);
	if (ret == 0)
		prwlock->owner = curthread;
	return (ret);
}

static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/*
	 * POSIX says the validity of the abstimeout parameter need
	 * not be checked if the lock can be immediately acquired.
	 */

	/* try to lock it in userland */
	ret = rwlock_trywrlock(prwlock);
	if (ret == 0) {
		prwlock->owner = curthread;
		return (ret);
	}

	if (__predict_false(abstime &&
		(abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	/* reset to zero */
	ret = 0;

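	/*
	 * Slow path: set the WRITE_WAITERS bit and sleep on write_signal
	 * until there is neither a write owner nor any readers, then
	 * retry the fast path.  blocked_writers is protected by the
	 * mutex; the last waiter clears the WRITE_WAITERS bit.
	 */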
	for (;;) {
		_pthread_mutex_lock(&prwlock->lock);
		state = prwlock->state;
		while (((state & RWLOCK_WRITE_OWNER) || RWLOCK_READER_COUNT(state) != 0) &&
			(state & RWLOCK_WRITE_WAITERS) == 0) {
			if (atomic_cmpset_acq_32(&prwlock->state, state, state | RWLOCK_WRITE_WAITERS))
				break;
			CPU_SPINWAIT;
			state = prwlock->state;
		}

		prwlock->blocked_writers++;

		while ((state & RWLOCK_WRITE_OWNER) || RWLOCK_READER_COUNT(state) != 0) {
			if (abstime == NULL)
				ret = _pthread_cond_wait(&prwlock->write_signal, &prwlock->lock);
			else
				ret = _pthread_cond_timedwait(&prwlock->write_signal, &prwlock->lock, abstime);

			if (ret)
				break;
			state = prwlock->state;
		}

		prwlock->blocked_writers--;
		if (prwlock->blocked_writers == 0)
			atomic_clear_32(&prwlock->state, RWLOCK_WRITE_WAITERS);
		_pthread_mutex_unlock(&prwlock->lock);

		if (rwlock_trywrlock(prwlock) == 0) {
			prwlock->owner = curthread;
			ret = 0;
			break;
		}

		if (ret)
			break;
	}
	return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common (rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common (rwlock, abstime));
}

int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int32_t state;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	if (__predict_false(prwlock == NULL))
		return (EINVAL);

	state = prwlock->state;

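	/*
	 * Release in userland: a writer clears WRITE_OWNER, a reader
	 * drops the reader count by one.  Unlocking a rwlock that is
	 * not held fails with EPERM.
	 */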
	if (state & RWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != curthread))
			return (EPERM);
		prwlock->owner = NULL;
		while (!atomic_cmpset_rel_32(&prwlock->state, state, state & ~RWLOCK_WRITE_OWNER)) {
			CPU_SPINWAIT;
			state = prwlock->state;
		}
	} else if (RWLOCK_READER_COUNT(state) != 0) {
		while (!atomic_cmpset_rel_32(&prwlock->state, state, state - 1)) {
			CPU_SPINWAIT;
			state = prwlock->state;
			if (RWLOCK_READER_COUNT(state) == 0)
				return (EPERM);
		}
		curthread->rdlock_count--;
	} else {
		return (EPERM);
	}

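	/*
	 * Wake any sleeping threads, giving preference to blocked
	 * writers over blocked readers.
	 */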
	if (state & RWLOCK_WRITE_WAITERS) {
		_pthread_mutex_lock(&prwlock->lock);
		_pthread_cond_broadcast_unlock(&prwlock->write_signal, &prwlock->lock, 0);
	} else if (state & RWLOCK_READ_WAITERS) {
		_pthread_mutex_lock(&prwlock->lock);
		_pthread_cond_broadcast_unlock(&prwlock->read_signal, &prwlock->lock, 1);
	}
	return (0);
}
501