/* thr_rwlock.c — FreeBSD libthr read-write lock implementation, revision 124718 */
1/*- 2 * Copyright (c) 1998 Alex Nash 3 * Copyright (c) 2004 Michael Telahun Makonnen 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 * 27 * $FreeBSD: head/lib/libthr/thread/thr_rwlock.c 124718 2004-01-19 14:51:45Z mtm $ 28 */ 29 30#include <errno.h> 31#include <limits.h> 32#include <stdlib.h> 33 34#include <pthread.h> 35#include "thr_private.h" 36 37/* maximum number of times a read lock may be obtained */ 38#define MAX_READ_LOCKS (INT_MAX - 1) 39 40/* 41 * For distinguishing operations on read and write locks. 
42 */ 43enum rwlock_type {RWT_READ, RWT_WRITE}; 44 45__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy); 46__weak_reference(_pthread_rwlock_init, pthread_rwlock_init); 47__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock); 48__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock); 49__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock); 50__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock); 51__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock); 52__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock); 53__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock); 54 55static int insert_rwlock(struct pthread_rwlock *, enum rwlock_type); 56static int rwlock_rdlock_common(pthread_rwlock_t *, int, 57 const struct timespec *); 58static int rwlock_wrlock_common(pthread_rwlock_t *, int, 59 const struct timespec *); 60 61int 62_pthread_rwlock_destroy (pthread_rwlock_t *rwlock) 63{ 64 pthread_rwlock_t prwlock; 65 66 if (rwlock == NULL || *rwlock == NULL) 67 return (EINVAL); 68 69 prwlock = *rwlock; 70 71 if (prwlock->state != 0) 72 return (EBUSY); 73 74 pthread_mutex_destroy(&prwlock->lock); 75 pthread_cond_destroy(&prwlock->read_signal); 76 pthread_cond_destroy(&prwlock->write_signal); 77 free(prwlock); 78 79 *rwlock = NULL; 80 81 return (0); 82} 83 84int 85_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr) 86{ 87 pthread_rwlock_t prwlock; 88 int ret; 89 90 /* allocate rwlock object */ 91 prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock)); 92 93 if (prwlock == NULL) { 94 ret = ENOMEM; 95 goto out; 96 } 97 98 /* initialize the lock */ 99 if ((ret = pthread_mutex_init(&prwlock->lock, NULL)) != 0) 100 goto out; 101 102 /* initialize the read condition signal */ 103 if ((ret = pthread_cond_init(&prwlock->read_signal, NULL)) != 0) 104 goto out_readcond; 105 106 /* initialize the write condition signal */ 107 if ((ret = 
pthread_cond_init(&prwlock->write_signal, NULL)) != 0) 108 goto out_writecond; 109 110 /* success */ 111 prwlock->state = 0; 112 prwlock->blocked_writers = 0; 113 114 *rwlock = prwlock; 115 return (0); 116 117out_writecond: 118 pthread_cond_destroy(&prwlock->read_signal); 119out_readcond: 120 pthread_mutex_destroy(&prwlock->lock); 121out: 122 if (prwlock != NULL) 123 free(prwlock); 124 return(ret); 125} 126 127/* 128 * If nonblocking is 0 this function will wait on the lock. If 129 * it is greater than 0 it will return immediately with EBUSY. 130 */ 131static int 132rwlock_rdlock_common(pthread_rwlock_t *rwlock, int nonblocking, 133 const struct timespec *timeout) 134{ 135 struct rwlock_held *rh; 136 pthread_rwlock_t prwlock; 137 int ret; 138 139 rh = NULL; 140 if (rwlock == NULL || *rwlock == NULL) 141 return(EINVAL); 142 143 /* 144 * Check for validity of the timeout parameter. 145 */ 146 if (timeout != NULL && 147 (timeout->tv_nsec < 0 || timeout->tv_nsec >= 1000000000)) 148 return (EINVAL); 149 150 prwlock = *rwlock; 151 152 /* grab the monitor lock */ 153 if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0) 154 return(ret); 155 156 /* check lock count */ 157 if (prwlock->state == MAX_READ_LOCKS) { 158 pthread_mutex_unlock(&prwlock->lock); 159 return (EAGAIN); 160 } 161 162 /* give writers priority over readers */ 163 while (prwlock->blocked_writers || prwlock->state < 0) { 164 if (nonblocking) { 165 pthread_mutex_unlock(&prwlock->lock); 166 return (EBUSY); 167 } 168 169 /* 170 * If this lock is already held for writing we have 171 * a deadlock situation. 
172 */ 173 if (curthread->rwlockList != NULL && prwlock->state < 0) { 174 LIST_FOREACH(rh, curthread->rwlockList, rh_link) { 175 if (rh->rh_rwlock == prwlock && 176 rh->rh_wrcount > 0) { 177 pthread_mutex_unlock(&prwlock->lock); 178 return (EDEADLK); 179 } 180 } 181 } 182 if (timeout == NULL) 183 ret = pthread_cond_wait(&prwlock->read_signal, 184 &prwlock->lock); 185 else 186 ret = pthread_cond_timedwait(&prwlock->read_signal, 187 &prwlock->lock, timeout); 188 189 if (ret != 0 && ret != EINTR) { 190 /* can't do a whole lot if this fails */ 191 pthread_mutex_unlock(&prwlock->lock); 192 return(ret); 193 } 194 } 195 196 ++prwlock->state; /* indicate we are locked for reading */ 197 ret = insert_rwlock(prwlock, RWT_READ); 198 if (ret != 0) { 199 pthread_mutex_unlock(&prwlock->lock); 200 return (ret); 201 } 202 203 /* 204 * Something is really wrong if this call fails. Returning 205 * error won't do because we've already obtained the read 206 * lock. Decrementing 'state' is no good because we probably 207 * don't have the monitor lock. 
208 */ 209 pthread_mutex_unlock(&prwlock->lock); 210 211 return(0); 212} 213 214int 215_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock) 216{ 217 return (rwlock_rdlock_common(rwlock, 0, NULL)); 218} 219 220int 221_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, 222 const struct timespec *timeout) 223{ 224 return (rwlock_rdlock_common(rwlock, 0, timeout)); 225} 226 227int 228_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock) 229{ 230 return (rwlock_rdlock_common(rwlock, 1, NULL)); 231} 232 233int 234_pthread_rwlock_unlock (pthread_rwlock_t *rwlock) 235{ 236 struct rwlock_held *rh; 237 pthread_rwlock_t prwlock; 238 int ret; 239 240 rh = NULL; 241 if (rwlock == NULL || *rwlock == NULL) 242 return(EINVAL); 243 244 prwlock = *rwlock; 245 246 /* grab the monitor lock */ 247 if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0) 248 return(ret); 249 250 if (curthread->rwlockList != NULL) { 251 LIST_FOREACH(rh, curthread->rwlockList, rh_link) { 252 if (rh->rh_rwlock == prwlock) 253 break; 254 } 255 } 256 if (rh == NULL) { 257 ret = EPERM; 258 goto out; 259 } 260 if (prwlock->state > 0) { 261 rh->rh_rdcount--; 262 if (rh->rh_rdcount == 0) { 263 LIST_REMOVE(rh, rh_link); 264 free(rh); 265 } 266 if (--prwlock->state == 0 && prwlock->blocked_writers) 267 ret = pthread_cond_signal(&prwlock->write_signal); 268 } else if (prwlock->state < 0) { 269 rh->rh_wrcount--; 270 if (rh->rh_wrcount == 0) { 271 LIST_REMOVE(rh, rh_link); 272 free(rh); 273 } 274 prwlock->state = 0; 275 if (prwlock->blocked_writers) 276 ret = pthread_cond_signal(&prwlock->write_signal); 277 else 278 ret = pthread_cond_broadcast(&prwlock->read_signal); 279 } else { 280 /* 281 * No thread holds this lock. We should never get here. 
282 */ 283 PTHREAD_ASSERT(0, "state=0 on read-write lock held by thread"); 284 ret = EPERM; 285 goto out; 286 } 287 288out: 289 /* see the comment on this in rwlock_rdlock_common */ 290 pthread_mutex_unlock(&prwlock->lock); 291 292 return(ret); 293} 294 295int 296_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock) 297{ 298 return (rwlock_wrlock_common(rwlock, 0, NULL)); 299} 300 301int 302_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock, 303 const struct timespec *timeout) 304{ 305 return (rwlock_wrlock_common(rwlock, 0, timeout)); 306} 307 308int 309_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock) 310{ 311 return (rwlock_wrlock_common(rwlock, 1, NULL)); 312} 313 314/* 315 * If nonblocking is 0 this function will wait on the lock. If 316 * it is greater than 0 it will return immediately with EBUSY. 317 */ 318static int 319rwlock_wrlock_common(pthread_rwlock_t *rwlock, int nonblocking, 320 const struct timespec *timeout) 321{ 322 struct rwlock_held *rh; 323 pthread_rwlock_t prwlock; 324 int ret; 325 326 rh = NULL; 327 if (rwlock == NULL || *rwlock == NULL) 328 return(EINVAL); 329 330 /* 331 * Check the timeout value for validity. 332 */ 333 if (timeout != NULL && 334 (timeout->tv_nsec < 0 || timeout->tv_nsec >= 1000000000)) 335 return (EINVAL); 336 337 prwlock = *rwlock; 338 339 /* grab the monitor lock */ 340 if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0) 341 return(ret); 342 343 while (prwlock->state != 0) { 344 if (nonblocking) { 345 pthread_mutex_unlock(&prwlock->lock); 346 return (EBUSY); 347 } 348 349 /* 350 * If this thread already holds the lock for reading 351 * or writing we have a deadlock situation. 
352 */ 353 if (curthread->rwlockList != NULL) { 354 LIST_FOREACH(rh, curthread->rwlockList, rh_link) { 355 if (rh->rh_rwlock == prwlock && 356 (rh->rh_rdcount > 0 || rh->rh_wrcount > 0)) { 357 pthread_mutex_unlock(&prwlock->lock); 358 return (EDEADLK); 359 break; 360 } 361 } 362 } 363 364 ++prwlock->blocked_writers; 365 366 if (timeout == NULL) 367 ret = pthread_cond_wait(&prwlock->write_signal, 368 &prwlock->lock); 369 else 370 ret = pthread_cond_timedwait(&prwlock->write_signal, 371 &prwlock->lock, timeout); 372 373 if (ret != 0 && ret != EINTR) { 374 --prwlock->blocked_writers; 375 pthread_mutex_unlock(&prwlock->lock); 376 return(ret); 377 } 378 379 --prwlock->blocked_writers; 380 } 381 382 /* indicate we are locked for writing */ 383 prwlock->state = -1; 384 ret = insert_rwlock(prwlock, RWT_WRITE); 385 if (ret != 0) { 386 pthread_mutex_unlock(&prwlock->lock); 387 return (ret); 388 } 389 390 /* see the comment on this in pthread_rwlock_rdlock */ 391 pthread_mutex_unlock(&prwlock->lock); 392 393 return(0); 394} 395 396static int 397insert_rwlock(struct pthread_rwlock *prwlock, enum rwlock_type rwt) 398{ 399 struct rwlock_held *rh; 400 401 /* 402 * Initialize the rwlock list in the thread. Although this function 403 * may be called for many read-write locks, the initialization 404 * of the the head happens only once during the lifetime of 405 * the thread. 406 */ 407 if (curthread->rwlockList == NULL) { 408 curthread->rwlockList = 409 (struct rwlock_listhead *)malloc(sizeof(struct rwlock_listhead)); 410 if (curthread->rwlockList == NULL) { 411 return (ENOMEM); 412 } 413 LIST_INIT(curthread->rwlockList); 414 } 415 416 LIST_FOREACH(rh, curthread->rwlockList, rh_link) { 417 if (rh->rh_rwlock == prwlock) { 418 if (rwt == RWT_READ) 419 rh->rh_rdcount++; 420 else if (rwt == RWT_WRITE) 421 rh->rh_wrcount++; 422 return (0); 423 } 424 } 425 426 /* 427 * This is the first time we're holding this lock, 428 * create a new entry. 
429 */ 430 rh = (struct rwlock_held *)malloc(sizeof(struct rwlock_held)); 431 if (rh == NULL) 432 return (ENOMEM); 433 rh->rh_rwlock = prwlock; 434 rh->rh_rdcount = 0; 435 rh->rh_wrcount = 0; 436 if (rwt == RWT_READ) 437 rh->rh_rdcount = 1; 438 else if (rwt == RWT_WRITE) 439 rh->rh_wrcount = 1; 440 LIST_INSERT_HEAD(curthread->rwlockList, rh, rh_link); 441 return (0); 442} 443