1/*- 2 * Copyright (c) 1998 Alex Nash 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright --- 9 unchanged lines hidden (view full) --- 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * |
26 * $FreeBSD: head/lib/libthr/thread/thr_rwlock.c 213241 2010-09-28 04:57:56Z davidxu $ |
27 */ 28 29#include <errno.h> 30#include <limits.h> 31#include <stdlib.h> 32 33#include "namespace.h" 34#include <pthread.h> --- 5 unchanged lines hidden (view full) --- 40__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock); 41__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock); 42__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock); 43__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock); 44__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock); 45__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock); 46__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock); 47 |
/*
 * Resolve *rwlock into the local `prwlock`, handling the two sentinel
 * values: a statically initialized lock (PTHREAD_RWLOCK_INITIALIZER)
 * is lazily materialized via init_static(), and a destroyed lock makes
 * the enclosing function return EINVAL.  Expects `rwlock` and
 * `prwlock` to be in scope; `return`s from the caller on failure.
 * NOTE(review): the `<=` compare assumes both sentinels are below any
 * real heap address — consistent with the unlock path's check.
 */
#define CHECK_AND_INIT_RWLOCK							\
	if (__predict_false((prwlock = (*rwlock)) <= THR_RWLOCK_DESTROYED)) {	\
		if (prwlock == THR_RWLOCK_INITIALIZER) {			\
			int ret;						\
			ret = init_static(_get_curthread(), rwlock);		\
			if (ret)						\
				return (ret);					\
		} else if (prwlock == THR_RWLOCK_DESTROYED) {			\
			return (EINVAL);					\
		}								\
		prwlock = *rwlock;						\
	}
61/* 62 * Prototypes 63 */ 64 65static int 66rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr __unused) 67{ 68 pthread_rwlock_t prwlock; 69 70 prwlock = (pthread_rwlock_t)calloc(1, sizeof(struct pthread_rwlock)); 71 if (prwlock == NULL) 72 return (ENOMEM); 73 *rwlock = prwlock; 74 return (0); 75} 76 77int 78_pthread_rwlock_destroy (pthread_rwlock_t *rwlock) 79{ |
80 pthread_rwlock_t prwlock; |
81 int ret; 82 |
83 prwlock = *rwlock; 84 if (prwlock == THR_RWLOCK_INITIALIZER) 85 ret = 0; 86 else if (prwlock == THR_RWLOCK_DESTROYED) |
87 ret = EINVAL; 88 else { |
89 *rwlock = THR_RWLOCK_DESTROYED; |
90 |
91 free(prwlock); 92 ret = 0; 93 } 94 return (ret); 95} 96 97static int 98init_static(struct pthread *thread, pthread_rwlock_t *rwlock) 99{ 100 int ret; 101 102 THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock); 103 |
	/*
	 * Re-check under the lock: another thread may have completed the
	 * lazy initialization while we were waiting to acquire it.
	 */
	if (*rwlock == THR_RWLOCK_INITIALIZER)
105 ret = rwlock_init(rwlock, NULL); 106 else 107 ret = 0; 108 109 THR_LOCK_RELEASE(thread, &_rwlock_static_lock); 110 111 return (ret); 112} --- 9 unchanged lines hidden (view full) --- 122rwlock_rdlock_common(pthread_rwlock_t *rwlock, const struct timespec *abstime) 123{ 124 struct pthread *curthread = _get_curthread(); 125 pthread_rwlock_t prwlock; 126 struct timespec ts, ts2, *tsp; 127 int flags; 128 int ret; 129 |
130 CHECK_AND_INIT_RWLOCK |
131 |
132 if (curthread->rdlock_count) { 133 /* 134 * To avoid having to track all the rdlocks held by 135 * a thread or all of the threads that hold a rdlock, 136 * we keep a simple count of all the rdlocks held by 137 * a thread. If a thread holds any rdlocks it is 138 * possible that it is attempting to take a recursive 139 * rdlock. If there are blocked writers and precedence --- 64 unchanged lines hidden (view full) --- 204int 205_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) 206{ 207 struct pthread *curthread = _get_curthread(); 208 pthread_rwlock_t prwlock; 209 int flags; 210 int ret; 211 |
212 CHECK_AND_INIT_RWLOCK |
213 |
214 if (curthread->rdlock_count) { 215 /* 216 * To avoid having to track all the rdlocks held by 217 * a thread or all of the threads that hold a rdlock, 218 * we keep a simple count of all the rdlocks held by 219 * a thread. If a thread holds any rdlocks it is 220 * possible that it is attempting to take a recursive 221 * rdlock. If there are blocked writers and precedence --- 15 unchanged lines hidden (view full) --- 237 238int 239_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) 240{ 241 struct pthread *curthread = _get_curthread(); 242 pthread_rwlock_t prwlock; 243 int ret; 244 |
	CHECK_AND_INIT_RWLOCK

	/*
	 * Take the write lock only if it is immediately available; on
	 * success record this thread as owner so unlock can enforce the
	 * EPERM check.
	 */
	ret = _thr_rwlock_trywrlock(&prwlock->lock);
	if (ret == 0)
		prwlock->owner = curthread;
	return (ret);
}

/*
 * Common backend for the blocking write-lock paths.  Presumably
 * abstime == NULL means wait forever and a non-NULL abstime gives the
 * timedwrlock deadline — TODO confirm against the hidden remainder of
 * this function.
 */
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	pthread_rwlock_t prwlock;
	struct timespec ts, ts2, *tsp;
	int ret;

	CHECK_AND_INIT_RWLOCK

263 /* 264 * POSIX said the validity of the abstimeout parameter need 265 * not be checked if the lock can be immediately acquired. 266 */ 267 ret = _thr_rwlock_trywrlock(&prwlock->lock); 268 if (ret == 0) { 269 prwlock->owner = curthread; 270 return (ret); --- 50 unchanged lines hidden (view full) --- 321int 322_pthread_rwlock_unlock (pthread_rwlock_t *rwlock) 323{ 324 struct pthread *curthread = _get_curthread(); 325 pthread_rwlock_t prwlock; 326 int ret; 327 int32_t state; 328 |
	prwlock = *rwlock;

	/*
	 * A lock that is still in its static-initializer state or has
	 * been destroyed was never acquired; nothing to unlock.
	 * (Sentinels compare below any real allocation, matching
	 * CHECK_AND_INIT_RWLOCK.)
	 */
	if (__predict_false(prwlock <= THR_RWLOCK_DESTROYED))
		return (EINVAL);

	/* Snapshot state before releasing; needed for bookkeeping below. */
	state = prwlock->lock.rw_state;
	if (state & URWLOCK_WRITE_OWNER) {
		/* Write-locked: only the owning thread may release it. */
		if (__predict_false(prwlock->owner != curthread))
			return (EPERM);
		prwlock->owner = NULL;
	}

	ret = _thr_rwlock_unlock(&prwlock->lock);
	/* A read lock was released: drop this thread's rdlock count. */
	if (ret == 0 && (state & URWLOCK_WRITE_OWNER) == 0)
		curthread->rdlock_count--;

	return (ret);
}