/* thr_umtx.c revision 232209 */
1/* 2 * Copyright (c) 2005 David Xu <davidxu@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 * 26 * $FreeBSD: head/lib/libthr/thread/thr_umtx.c 232209 2012-02-27 13:38:52Z davidxu $ 27 * 28 */ 29 30#include "thr_private.h" 31#include "thr_umtx.h" 32 33#ifndef HAS__UMTX_OP_ERR 34int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2) 35{ 36 if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1) 37 return (errno); 38 return (0); 39} 40#endif 41 42void 43_thr_umutex_init(struct umutex *mtx) 44{ 45 static struct umutex default_mtx = DEFAULT_UMUTEX; 46 47 *mtx = default_mtx; 48} 49 50void 51_thr_urwlock_init(struct urwlock *rwl) 52{ 53 static struct urwlock default_rwl = DEFAULT_URWLOCK; 54 *rwl = default_rwl; 55} 56 57int 58__thr_umutex_lock(struct umutex *mtx, uint32_t id) 59{ 60 uint32_t owner; 61 62 if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { 63 for (;;) { 64 /* wait in kernel */ 65 _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0); 66 67 owner = mtx->m_owner; 68 if ((owner & ~UMUTEX_CONTESTED) == 0 && 69 atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner)) 70 return (0); 71 } 72 } 73 74 return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0); 75} 76 77#define SPINLOOPS 1000 78 79int 80__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id) 81{ 82 uint32_t owner; 83 84 if (!_thr_is_smp) 85 return __thr_umutex_lock(mtx, id); 86 87 if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { 88 for (;;) { 89 int count = SPINLOOPS; 90 while (count--) { 91 owner = mtx->m_owner; 92 if ((owner & ~UMUTEX_CONTESTED) == 0) { 93 if (atomic_cmpset_acq_32( 94 &mtx->m_owner, 95 owner, id|owner)) { 96 return (0); 97 } 98 } 99 CPU_SPINWAIT; 100 } 101 102 /* wait in kernel */ 103 _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0); 104 } 105 } 106 107 return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0); 108} 109 110int 111__thr_umutex_timedlock(struct umutex *mtx, uint32_t id, 112 const struct timespec *abstime) 113{ 114 struct _umtx_time *tm_p, timeout; 115 size_t tm_size; 116 uint32_t owner; 117 int ret; 118 119 if 
(abstime == NULL) { 120 tm_p = NULL; 121 tm_size = 0; 122 } else { 123 timeout._clockid = CLOCK_REALTIME; 124 timeout._flags = UMTX_ABSTIME; 125 timeout._timeout = *abstime; 126 tm_p = &timeout; 127 tm_size = sizeof(timeout); 128 } 129 130 for (;;) { 131 if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { 132 133 /* wait in kernel */ 134 ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 135 (void *)tm_size, __DECONST(void *, tm_p)); 136 137 /* now try to lock it */ 138 owner = mtx->m_owner; 139 if ((owner & ~UMUTEX_CONTESTED) == 0 && 140 atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner)) 141 return (0); 142 } else { 143 ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 144 (void *)tm_size, __DECONST(void *, tm_p)); 145 if (ret == 0) 146 break; 147 } 148 if (ret == ETIMEDOUT) 149 break; 150 } 151 return (ret); 152} 153 154int 155__thr_umutex_unlock(struct umutex *mtx, uint32_t id) 156{ 157#ifndef __ia64__ 158 /* XXX this logic has a race-condition on ia64. */ 159 if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) { 160 atomic_cmpset_rel_32(&mtx->m_owner, id | UMUTEX_CONTESTED, UMUTEX_CONTESTED); 161 return _umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE, 0, 0, 0); 162 } 163#endif /* __ia64__ */ 164 return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0); 165} 166 167int 168__thr_umutex_trylock(struct umutex *mtx) 169{ 170 return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0); 171} 172 173int 174__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling, 175 uint32_t *oldceiling) 176{ 177 return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0); 178} 179 180int 181_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout) 182{ 183 if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && 184 timeout->tv_nsec <= 0))) 185 return (ETIMEDOUT); 186 return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0, 187 __DECONST(void*, timeout)); 188} 189 190int 191_thr_umtx_wait_uint(volatile u_int 
*mtx, u_int id, const struct timespec *timeout, int shared) 192{ 193 if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && 194 timeout->tv_nsec <= 0))) 195 return (ETIMEDOUT); 196 return _umtx_op_err(__DEVOLATILE(void *, mtx), 197 shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0, 198 __DECONST(void*, timeout)); 199} 200 201int 202_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid, 203 const struct timespec *abstime, int shared) 204{ 205 struct _umtx_time *tm_p, timeout; 206 size_t tm_size; 207 208 if (abstime == NULL) { 209 tm_p = NULL; 210 tm_size = 0; 211 } else { 212 timeout._clockid = CLOCK_REALTIME; 213 timeout._flags = UMTX_ABSTIME; 214 timeout._timeout = *abstime; 215 tm_p = &timeout; 216 tm_size = sizeof(timeout); 217 } 218 219 return _umtx_op_err(__DEVOLATILE(void *, mtx), 220 shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 221 (void *)tm_size, __DECONST(void *, tm_p)); 222} 223 224int 225_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared) 226{ 227 return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? 
UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, 228 nr_wakeup, 0, 0); 229} 230 231void 232_thr_ucond_init(struct ucond *cv) 233{ 234 bzero(cv, sizeof(struct ucond)); 235} 236 237int 238_thr_ucond_wait(struct ucond *cv, struct umutex *m, 239 const struct timespec *timeout, int flags) 240{ 241 if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 && 242 timeout->tv_nsec <= 0))) { 243 struct pthread *curthread = _get_curthread(); 244 _thr_umutex_unlock(m, TID(curthread)); 245 return (ETIMEDOUT); 246 } 247 return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, 248 m, __DECONST(void*, timeout)); 249} 250 251int 252_thr_ucond_signal(struct ucond *cv) 253{ 254 if (!cv->c_has_waiters) 255 return (0); 256 return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL); 257} 258 259int 260_thr_ucond_broadcast(struct ucond *cv) 261{ 262 if (!cv->c_has_waiters) 263 return (0); 264 return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL); 265} 266 267int 268__thr_rwlock_rdlock(struct urwlock *rwlock, int flags, 269 const struct timespec *tsp) 270{ 271 struct _umtx_time timeout, *tm_p; 272 size_t tm_size; 273 274 if (tsp == NULL) { 275 tm_p = NULL; 276 tm_size = 0; 277 } else { 278 timeout._timeout = *tsp; 279 timeout._flags = UMTX_ABSTIME; 280 timeout._clockid = CLOCK_REALTIME; 281 tm_p = &timeout; 282 tm_size = sizeof(timeout); 283 } 284 return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, (void *)tm_size, tm_p); 285} 286 287int 288__thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp) 289{ 290 struct _umtx_time timeout, *tm_p; 291 size_t tm_size; 292 293 if (tsp == NULL) { 294 tm_p = NULL; 295 tm_size = 0; 296 } else { 297 timeout._timeout = *tsp; 298 timeout._flags = UMTX_ABSTIME; 299 timeout._clockid = CLOCK_REALTIME; 300 tm_p = &timeout; 301 tm_size = sizeof(timeout); 302 } 303 return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size, tm_p); 304} 305 306int 307__thr_rwlock_unlock(struct urwlock *rwlock) 308{ 309 return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 
0, NULL, NULL); 310} 311 312void 313_thr_rwl_rdlock(struct urwlock *rwlock) 314{ 315 int ret; 316 317 for (;;) { 318 if (_thr_rwlock_tryrdlock(rwlock, URWLOCK_PREFER_READER) == 0) 319 return; 320 ret = __thr_rwlock_rdlock(rwlock, URWLOCK_PREFER_READER, NULL); 321 if (ret == 0) 322 return; 323 if (ret != EINTR) 324 PANIC("rdlock error"); 325 } 326} 327 328void 329_thr_rwl_wrlock(struct urwlock *rwlock) 330{ 331 int ret; 332 333 for (;;) { 334 if (_thr_rwlock_trywrlock(rwlock) == 0) 335 return; 336 ret = __thr_rwlock_wrlock(rwlock, NULL); 337 if (ret == 0) 338 return; 339 if (ret != EINTR) 340 PANIC("wrlock error"); 341 } 342} 343 344void 345_thr_rwl_unlock(struct urwlock *rwlock) 346{ 347 if (_thr_rwlock_unlock(rwlock)) 348 PANIC("unlock error"); 349} 350