/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_rwlock.c 177823 2008-04-01 06:23:08Z davidxu $
 */

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

#define RWLOCK_WRITE_OWNER	0x80000000U
#define RWLOCK_WRITE_WAITERS	0x40000000U
#define RWLOCK_READ_WAITERS	0x20000000U
#define RWLOCK_MAX_READERS	0x1fffffffU
#define RWLOCK_READER_COUNT(c)	((c) & RWLOCK_MAX_READERS)
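
/*
 * The lock state is a single 32-bit word: the three high bits record a
 * write owner and the presence of waiting writers/readers, and the low
 * 29 bits count the readers currently holding the lock (at most
 * RWLOCK_MAX_READERS).
 */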

__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);

/*
 * Internal helpers
 */

static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused)
{
	pthread_rwlock_t prwlock;
	int ret;

	/* allocate rwlock object */
	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));

	if (prwlock == NULL)
		return (ENOMEM);

	/* initialize the lock */
	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
		free(prwlock);
	else {
		/* initialize the read condition signal */
		ret = _pthread_cond_init(&prwlock->read_signal, NULL);

		if (ret != 0) {
			_pthread_mutex_destroy(&prwlock->lock);
			free(prwlock);
		} else {
			/* initialize the write condition signal */
			ret = _pthread_cond_init(&prwlock->write_signal, NULL);

			if (ret != 0) {
				_pthread_cond_destroy(&prwlock->read_signal);
				_pthread_mutex_destroy(&prwlock->lock);
				free(prwlock);
			} else {
				/* success */
				prwlock->state = 0;
				prwlock->blocked_readers = 0;
				prwlock->blocked_writers = 0;
				prwlock->owner = NULL;
				*rwlock = prwlock;
			}
		}
	}

	return (ret);
}

int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
	int ret;

	if (rwlock == NULL)
		ret = EINVAL;
	else {
		pthread_rwlock_t prwlock;

		prwlock = *rwlock;
		*rwlock = NULL;

		_pthread_mutex_destroy(&prwlock->lock);
		_pthread_cond_destroy(&prwlock->read_signal);
		_pthread_cond_destroy(&prwlock->write_signal);
		free(prwlock);

		ret = 0;
	}
	return (ret);
}

static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);

	if (*rwlock == NULL)
		ret = rwlock_init(rwlock, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);

	return (ret);
}

int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	*rwlock = NULL;
	return (rwlock_init(rwlock, attr));
}
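
/*
 * Illustrative only (not part of this file): the usual lifetime of a
 * lock built by this implementation, using the standard pthread calls
 * that the weak references above export.
 *
 *	pthread_rwlock_t lock;
 *
 *	pthread_rwlock_init(&lock, NULL);
 *	pthread_rwlock_rdlock(&lock);
 *	...read shared data...
 *	pthread_rwlock_unlock(&lock);
 *	pthread_rwlock_destroy(&lock);
 */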
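/*
 * Userland fast path for readers: CAS the reader count up while none
 * of the writer bits in wrflags are set.  With prefer_reader set, the
 * RWLOCK_WRITE_WAITERS bit is ignored so that a thread already holding
 * rdlocks is not queued behind writers (see rwlock_rdlock_common).
 */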
static inline int
rwlock_tryrdlock(struct pthread_rwlock *prwlock, int prefer_reader)
{
	int32_t state;
	int32_t wrflags;

	if (prefer_reader)
		wrflags = RWLOCK_WRITE_OWNER;
	else
		wrflags = RWLOCK_WRITE_OWNER | RWLOCK_WRITE_WAITERS;
	state = prwlock->state;
	while (!(state & wrflags)) {
		if (__predict_false(RWLOCK_READER_COUNT(state) == RWLOCK_MAX_READERS))
			return (EAGAIN);
		if (atomic_cmpset_acq_32(&prwlock->state, state, state + 1))
			return (0);
		CPU_SPINWAIT;
		state = prwlock->state;
	}

	return (EBUSY);
}

static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	const int prefer_read = curthread->rdlock_count > 0;
	pthread_rwlock_t prwlock;
	int ret, wrflags, old;
	int32_t state;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/*
	 * POSIX says the validity of the abstime parameter need not be
	 * checked if the lock can be acquired immediately.
	 */
	ret = rwlock_tryrdlock(prwlock, prefer_read);
	if (ret == 0) {
		curthread->rdlock_count++;
		return (ret);
	}
	if (__predict_false(ret == EAGAIN))
		return (ret);

	if (__predict_false(abstime &&
		(abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	if (prefer_read) {
		/*
		 * To avoid having to track all the rdlocks held by
		 * a thread or all of the threads that hold a rdlock,
		 * we keep a simple count of all the rdlocks held by
		 * a thread.  If a thread holds any rdlocks it is
		 * possible that it is attempting to take a recursive
		 * rdlock.  If there are blocked writers and precedence
		 * is given to them, then that would result in the thread
		 * deadlocking.  So allowing a thread to take the rdlock
		 * when it already has one or more rdlocks avoids the
		 * deadlock.  I hope the reader can follow that logic ;-)
		 */
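		/*
		 * Concretely (an illustrative scenario): thread A holds
		 * a rdlock, writer W blocks and sets
		 * RWLOCK_WRITE_WAITERS, then A calls rdlock again.  If A
		 * deferred to W here, A would wait on W while W waits on
		 * A's first rdlock.
		 */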

		wrflags = RWLOCK_WRITE_OWNER;
	} else
		wrflags = RWLOCK_WRITE_OWNER | RWLOCK_WRITE_WAITERS;

	/* reset to zero */
	ret = 0;
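
	/*
	 * Slow path: under prwlock->lock, advertise this thread as a
	 * blocked reader, re-check the state, and sleep on read_signal
	 * until an unlock wakes it; then retry the userland fast path.
	 */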
	for (;;) {
		_pthread_mutex_lock(&prwlock->lock);
		state = prwlock->state;
		/* set read contention bit */
		while ((state & wrflags) && !(state & RWLOCK_READ_WAITERS)) {
			if (atomic_cmpset_acq_32(&prwlock->state, state, state | RWLOCK_READ_WAITERS))
				break;
			CPU_SPINWAIT;
			state = prwlock->state;
		}

		atomic_add_32(&prwlock->blocked_readers, 1);
		if (state & wrflags) {
			ret = _pthread_cond_wait_unlocked(&prwlock->read_signal, &prwlock->lock, abstime);
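			/*
			 * Only the last waking reader re-takes the mutex,
			 * so RWLOCK_READ_WAITERS is cleared exactly once;
			 * every other reader retries immediately.
			 */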
			old = atomic_fetchadd_32(&prwlock->blocked_readers, -1);
			if (old == 1)
				_pthread_mutex_lock(&prwlock->lock);
			else
				goto try_it;
		} else {
			atomic_subtract_32(&prwlock->blocked_readers, 1);
		}

		if (prwlock->blocked_readers == 0)
			atomic_clear_32(&prwlock->state, RWLOCK_READ_WAITERS);
		_pthread_mutex_unlock(&prwlock->lock);

try_it:
		/* try to lock it again. */
		if (rwlock_tryrdlock(prwlock, prefer_read) == 0) {
			curthread->rdlock_count++;
			ret = 0;
			break;
		}

		if (ret)
			break;
	}
	return (ret);
}

int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_rdlock_common(rwlock, NULL));
}

int
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
	 const struct timespec *abstime)
{
	return (rwlock_rdlock_common(rwlock, abstime));
}
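
/*
 * Illustrative only: per POSIX the timeout is an absolute
 * CLOCK_REALTIME time, so a caller that wants to give up after roughly
 * one second would do:
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 1;
 *	error = pthread_rwlock_timedrdlock(&lock, &ts);
 */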

int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	ret = rwlock_tryrdlock(prwlock, curthread->rdlock_count > 0);
	if (ret == 0)
		curthread->rdlock_count++;
	return (ret);
}

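/*
 * Userland fast path for writers: a single CAS from "no owner and no
 * readers" to RWLOCK_WRITE_OWNER; any other state means EBUSY.
 */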
static inline int
rwlock_trywrlock(struct pthread_rwlock *prwlock)
{
	int32_t state;

	state = prwlock->state;
	while (!(state & RWLOCK_WRITE_OWNER) && RWLOCK_READER_COUNT(state) == 0) {
		if (atomic_cmpset_acq_32(&prwlock->state, state, state | RWLOCK_WRITE_OWNER))
			return (0);
		CPU_SPINWAIT;
		state = prwlock->state;
	}
	return (EBUSY);
}

int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	ret = rwlock_trywrlock(prwlock);
	if (ret == 0)
		prwlock->owner = curthread;
	return (ret);
}

static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int ret;
	int32_t state;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	/* check for static initialization */
	if (__predict_false(prwlock == NULL)) {
		if ((ret = init_static(curthread, rwlock)) != 0)
			return (ret);

		prwlock = *rwlock;
	}

	/*
	 * POSIX says the validity of the abstime parameter need not be
	 * checked if the lock can be acquired immediately.
	 */

	/* try to lock it in userland */
	ret = rwlock_trywrlock(prwlock);
	if (ret == 0) {
		prwlock->owner = curthread;
		return (ret);
	}

	if (__predict_false(abstime &&
		(abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0)))
		return (EINVAL);

	/* reset to zero */
	ret = 0;

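	/*
	 * Slow path: set RWLOCK_WRITE_WAITERS under the mutex, sleep on
	 * write_signal while the lock remains held, then retry the
	 * userland fast path.
	 */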
	for (;;) {
		_pthread_mutex_lock(&prwlock->lock);
		state = prwlock->state;
		while (((state & RWLOCK_WRITE_OWNER) || RWLOCK_READER_COUNT(state) != 0) &&
			(state & RWLOCK_WRITE_WAITERS) == 0) {
			if (atomic_cmpset_acq_32(&prwlock->state, state, state | RWLOCK_WRITE_WAITERS))
				break;
			CPU_SPINWAIT;
			state = prwlock->state;
		}

		prwlock->blocked_writers++;

		while ((state & RWLOCK_WRITE_OWNER) || RWLOCK_READER_COUNT(state) != 0) {
			if (abstime == NULL)
				ret = _pthread_cond_wait(&prwlock->write_signal, &prwlock->lock);
			else
				ret = _pthread_cond_timedwait(&prwlock->write_signal, &prwlock->lock, abstime);

			if (ret)
				break;
			state = prwlock->state;
		}

		prwlock->blocked_writers--;
		if (prwlock->blocked_writers == 0)
			atomic_clear_32(&prwlock->state, RWLOCK_WRITE_WAITERS);
		_pthread_mutex_unlock(&prwlock->lock);

		if (rwlock_trywrlock(prwlock) == 0) {
			prwlock->owner = curthread;
			ret = 0;
			break;
		}

		if (ret)
			break;
	}
	return (ret);
}

int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
	return (rwlock_wrlock_common (rwlock, NULL));
}

int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
    const struct timespec *abstime)
{
	return (rwlock_wrlock_common (rwlock, abstime));
}

int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	int32_t state;

	if (__predict_false(rwlock == NULL))
		return (EINVAL);

	prwlock = *rwlock;

	if (__predict_false(prwlock == NULL))
		return (EINVAL);

	state = prwlock->state;

	if (state & RWLOCK_WRITE_OWNER) {
		if (__predict_false(prwlock->owner != curthread))
			return (EPERM);
		prwlock->owner = NULL;
		while (!atomic_cmpset_rel_32(&prwlock->state, state, state & ~RWLOCK_WRITE_OWNER)) {
			CPU_SPINWAIT;
			state = prwlock->state;
		}
	} else if (RWLOCK_READER_COUNT(state) != 0) {
		while (!atomic_cmpset_rel_32(&prwlock->state, state, state - 1)) {
			CPU_SPINWAIT;
			state = prwlock->state;
			if (RWLOCK_READER_COUNT(state) == 0)
				return (EPERM);
		}
		curthread->rdlock_count--;
	} else {
		return (EPERM);
	}

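	/*
	 * Wake-up policy: prefer a single waiting writer; only when no
	 * writer is queued are all blocked readers broadcast.  The #else
	 * branch below is an unused alternative built on
	 * _pthread_cond_broadcast_unlock.
	 */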
#if 1
	if (state & RWLOCK_WRITE_WAITERS) {
		_pthread_mutex_lock(&prwlock->lock);
		_pthread_cond_signal(&prwlock->write_signal);
		_pthread_mutex_unlock(&prwlock->lock);
	} else if (state & RWLOCK_READ_WAITERS) {
		_pthread_mutex_lock(&prwlock->lock);
		_pthread_cond_broadcast(&prwlock->read_signal);
		_pthread_mutex_unlock(&prwlock->lock);
	}
#else

	if (state & RWLOCK_WRITE_WAITERS) {
		_pthread_mutex_lock(&prwlock->lock);
		_pthread_cond_broadcast_unlock(&prwlock->write_signal, &prwlock->lock, 0);
	} else if (state & RWLOCK_READ_WAITERS) {
		_pthread_mutex_lock(&prwlock->lock);
		_pthread_cond_broadcast_unlock(&prwlock->read_signal, &prwlock->lock, 1);
	}
#endif
	return (0);
}
504