/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libthr/thread/thr_mutex.c 213241 2010-09-28 04:57:56Z davidxu $
 */

#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include <pthread_np.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m)		do {			\
	(m)->m_qe.tqe_prev = NULL;				\
	(m)->m_qe.tqe_next = NULL;				\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {			\
	if (__predict_false((m)->m_qe.tqe_prev == NULL))	\
		PANIC("mutex is not on list");			\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {			\
	if (__predict_false((m)->m_qe.tqe_prev != NULL ||	\
	    (m)->m_qe.tqe_next != NULL))			\
		PANIC("mutex is on list");			\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#endif

/*
 * For adaptive mutexes, how many times to spin doing trylock2
 * before entering the kernel to block.
 */
#define MUTEX_ADAPTIVE_SPINS	2000

/*
 * Prototypes
 */
int	__pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *mutex_attr);
int	__pthread_mutex_trylock(pthread_mutex_t *mutex);
int	__pthread_mutex_lock(pthread_mutex_t *mutex);
int	__pthread_mutex_timedlock(pthread_mutex_t *mutex,
		const struct timespec *abstime);
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
		void *(calloc_cb)(size_t, size_t));
int	_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
int	_pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
int	_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
int	__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);

static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);
static int	mutex_lock_sleep(struct pthread *, pthread_mutex_t,
			const struct timespec *);

__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
__weak_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_lock, _pthread_mutex_lock);
__weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__weak_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__weak_reference(_pthread_mutex_unlock, pthread_mutex_unlock);

__weak_reference(_pthread_mutex_getprioceiling, pthread_mutex_getprioceiling);
__weak_reference(_pthread_mutex_setprioceiling, pthread_mutex_setprioceiling);

__weak_reference(__pthread_mutex_setspinloops_np, pthread_mutex_setspinloops_np);
__strong_reference(__pthread_mutex_setspinloops_np, _pthread_mutex_setspinloops_np);
__weak_reference(_pthread_mutex_getspinloops_np, pthread_mutex_getspinloops_np);

__weak_reference(__pthread_mutex_setyieldloops_np, pthread_mutex_setyieldloops_np);
__strong_reference(__pthread_mutex_setyieldloops_np, _pthread_mutex_setyieldloops_np);
__weak_reference(_pthread_mutex_getyieldloops_np, pthread_mutex_getyieldloops_np);
__weak_reference(_pthread_mutex_isowned_np, pthread_mutex_isowned_np);

static int
mutex_init(pthread_mutex_t *mutex,
    const struct pthread_mutex_attr *mutex_attr,
    void *(calloc_cb)(size_t, size_t))
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}
	if ((pmutex = (pthread_mutex_t)
		calloc_cb(1, sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	pmutex->m_type = attr->m_type;
	pmutex->m_owner = NULL;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	pmutex->m_spinloops = 0;
	pmutex->m_yieldloops = 0;
	MUTEX_INIT_LINK(pmutex);
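	/*
	 * Map the requested protocol onto the kernel umutex flags.  A
	 * priority-protected mutex starts with its lock word in the
	 * contested state, so acquisitions fall through to the kernel,
	 * which applies the priority ceiling.
	 */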
	switch (attr->m_protocol) {
	case PTHREAD_PRIO_NONE:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = 0;
		break;
	case PTHREAD_PRIO_INHERIT:
		pmutex->m_lock.m_owner = UMUTEX_UNOWNED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_INHERIT;
		break;
	case PTHREAD_PRIO_PROTECT:
		pmutex->m_lock.m_owner = UMUTEX_CONTESTED;
		pmutex->m_lock.m_flags = UMUTEX_PRIO_PROTECT;
		pmutex->m_lock.m_ceilings[0] = attr->m_ceiling;
		break;
	}

	if (pmutex->m_type == PTHREAD_MUTEX_ADAPTIVE_NP) {
		pmutex->m_spinloops =
		    _thr_spinloops ? _thr_spinloops : MUTEX_ADAPTIVE_SPINS;
		pmutex->m_yieldloops = _thr_yieldloops;
	}

	*mutex = pmutex;
	return (0);
}

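/*
 * Lazily initialize a statically-initialized mutex on first use.  The
 * global lock serializes racing first lockers; a mutex that another
 * thread has already initialized is left untouched.
 */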
static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == THR_MUTEX_INITIALIZER)
		ret = mutex_init(mutex, &_pthread_mutexattr_default, calloc);
	else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
		ret = mutex_init(mutex,
		    &_pthread_mutexattr_adaptive_default, calloc);
	else
		ret = 0;
	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static void
set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
	struct pthread_mutex *m2;

	/*
	 * Set the saved ceiling from the last priority-protected mutex
	 * still on the thread's list, or to -1 if the list is empty.
	 */
	m2 = TAILQ_LAST(&curthread->pp_mutexq, mutex_queue);
	if (m2 != NULL)
		m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
	else
		m->m_lock.m_ceilings[1] = -1;
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr ? *mutex_attr : NULL, calloc);
}

/* This function is used internally by malloc. */
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	static const struct pthread_mutex_attr attr = {
		.m_type = PTHREAD_MUTEX_NORMAL,
		.m_protocol = PTHREAD_PRIO_NONE,
		.m_ceiling = 0
	};
	int ret;

	ret = mutex_init(mutex, &attr, calloc_cb);
	if (ret == 0)
		(*mutex)->m_private = 1;
	return (ret);
}

void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	/*
	 * Fix mutex ownership for the child process.  Note that
	 * process-shared mutexes should not be inherited: their owner
	 * is the forking thread, which lives in the parent process, so
	 * they would have to be removed from the owned mutex list.
	 * Process-shared mutexes are not currently supported, so this
	 * is not yet a concern.
	 */

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread);
	TAILQ_FOREACH(m, &curthread->pp_mutexq, m_qe)
		m->m_lock.m_owner = TID(curthread) | UMUTEX_CONTESTED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	pthread_mutex_t m;
	uint32_t id;
	int ret = 0;

	m = *mutex;
	if (m < THR_MUTEX_DESTROYED) {
		/* Destroying a still-static initializer is a no-op. */
		ret = 0;
	} else if (m == THR_MUTEX_DESTROYED) {
		ret = EINVAL;
	} else {
		id = TID(curthread);

		/*
		 * Try to lock the mutex structure; we only need to
		 * try once.  If that fails, the mutex is in use.
		 */
		ret = _thr_umutex_trylock(&m->m_lock, id);
		if (ret)
			return (ret);
		/*
		 * Check the mutex's other fields to see if it is still
		 * in use.  This matters mostly for priority mutex types,
		 * or when condition variables reference the mutex.
		 */
		if (m->m_owner != NULL || m->m_refcount != 0) {
			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);
			ret = EBUSY;
		} else {
			*mutex = THR_MUTEX_DESTROYED;

			if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
				set_inherited_priority(curthread, m);
			_thr_umutex_unlock(&m->m_lock, id);

			MUTEX_ASSERT_NOT_OWNED(m);
			free(m);
		}
	}

	return (ret);
}

#define ENQUEUE_MUTEX(curthread, m)					\
	do {								\
		(m)->m_owner = curthread;				\
		/* Add to the list of owned mutexes: */			\
		MUTEX_ASSERT_NOT_OWNED((m));				\
		if (((m)->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)	\
			TAILQ_INSERT_TAIL(&curthread->mutexq, (m), m_qe);\
		else							\
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, (m), m_qe);\
	} while (0)

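/*
 * Reject a destroyed mutex and lazily initialize a static one.  The
 * macro expects "pthread_mutex_t *mutex" and "struct pthread_mutex *m"
 * to be in scope at the point of use; on success m holds the usable
 * mutex, while a destroyed mutex makes the caller return EINVAL.
 */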
#define CHECK_AND_INIT_MUTEX						\
	if (__predict_false((m = *mutex) <= THR_MUTEX_DESTROYED)) {	\
		if (m == THR_MUTEX_DESTROYED)				\
			return (EINVAL);				\
		int ret;						\
		ret = init_static(_get_curthread(), mutex);		\
		if (ret)						\
			return (ret);					\
		m = *mutex;						\
	}

static int
mutex_trylock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m = *mutex;
	uint32_t id;
	int ret;

	id = TID(curthread);
	if (m->m_private)
		THR_CRITICAL_ENTER(curthread);
	ret = _thr_umutex_trylock(&m->m_lock, id);
	if (__predict_true(ret == 0)) {
		ENQUEUE_MUTEX(curthread, m);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */
	if (ret && m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	return (mutex_trylock_common(mutex));
}
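
/*
 * Slow path for mutex_lock_common(): handles a lock request on a
 * mutex the caller already owns, spins and yields for adaptive
 * mutexes, and finally sleeps in the kernel.  Priority-inherit and
 * priority-protect mutexes go straight to the kernel.
 */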
static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)
{
	uint32_t id, owner;
	int count;
	int ret;

	if (m->m_owner == curthread)
		return mutex_self_lock(m, abstime);

	id = TID(curthread);
	/*
	 * For adaptive mutexes, spin for a bit in the expectation that,
	 * if the application requests this mutex type, the lock is
	 * likely to be released quickly; spinning is then faster than
	 * entering the kernel.
	 */
	if (__predict_false(
		(m->m_lock.m_flags &
		 (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
		goto sleep_in_kernel;

	if (!_thr_is_smp)
		goto yield_loop;

	count = m->m_spinloops;
	while (count--) {
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			/* Unowned; grab it, preserving the contested bit. */
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
		CPU_SPINWAIT;
	}

yield_loop:
	count = m->m_yieldloops;
	while (count--) {
		_sched_yield();
		owner = m->m_lock.m_owner;
		if ((owner & ~UMUTEX_CONTESTED) == 0) {
			if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
				ret = 0;
				goto done;
			}
		}
	}

sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock, id);
	} else if (__predict_false(
		abstime->tv_nsec < 0 ||
		abstime->tv_nsec >= 1000000000)) {
		ret = EINVAL;
	} else {
		ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
	}
done:
	if (ret == 0)
		ENQUEUE_MUTEX(curthread, m);

	return (ret);
}
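
/*
 * Common lock path: a userland trylock fast path first, then the
 * sleep path above.  A private mutex keeps the thread in a critical
 * section while the lock is held.
 */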
static inline int
mutex_lock_common(struct pthread_mutex *m,
    const struct timespec *abstime)
{
	struct pthread *curthread = _get_curthread();
	int ret;

	if (m->m_private)
		THR_CRITICAL_ENTER(curthread);
	if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
		ENQUEUE_MUTEX(curthread, m);
		ret = 0;
	} else {
		ret = mutex_lock_sleep(curthread, m, abstime);
	}
	if (ret && m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (ret);
}

int
__pthread_mutex_lock(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abstime)
{
	struct pthread_mutex *m;

	_thr_check_init();

	CHECK_AND_INIT_MUTEX

	return (mutex_lock_common(m, abstime));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	return (mutex_unlock_common(m));
}

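/*
 * Relock a mutex after a condition wait: drop the reference the
 * condition variable held on the mutex and restore the recursion
 * count saved by _mutex_cv_unlock().
 */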
int
_mutex_cv_lock(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	ret = mutex_lock_common(m, NULL);
	if (ret == 0) {
		m->m_refcount--;
		m->m_count += count;
	}
	return (ret);
}

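/*
 * Handle a trylock by the thread that already owns the mutex: only a
 * recursive mutex can be acquired again, and its recursion count is
 * guarded against overflow.
 */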
static int
mutex_self_trylock(struct pthread_mutex *m)
{
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

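/*
 * Handle a lock request by the thread that already owns the mutex.
 * Error-checking and adaptive mutexes fail with EDEADLK, or with
 * ETIMEDOUT after sleeping until the given absolute timeout; normal
 * mutexes deliberately deadlock; recursive mutexes bump their count.
 */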
static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_ADAPTIVE_NP:
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			    abstime->tv_nsec >= 1000000000) {
				ret = EINVAL;
			} else {
				clock_gettime(CLOCK_REALTIME, &ts1);
				TIMESPEC_SUB(&ts2, abstime, &ts1);
				__sys_nanosleep(&ts2, NULL);
				ret = ETIMEDOUT;
			}
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;
	uint32_t id;

	m = *mutex;
	if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
		if (m == THR_MUTEX_DESTROYED)
			return (EINVAL);
		/* A still-static initializer cannot be owned. */
		return (EPERM);
	}

	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	id = TID(curthread);
	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))
			TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		else {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			set_inherited_priority(curthread, m);
		}
		MUTEX_INIT_LINK(m);
		_thr_umutex_unlock(&m->m_lock, id);
	}
	if (m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (0);
}

/*
 * Unlock a mutex on behalf of a condition variable that is about to
 * block the caller: the lock is released, but a reference and the
 * saved recursion count are kept so _mutex_cv_lock() can reacquire
 * the mutex afterwards.
 */
int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m;

	m = *mutex;
	/*
	 * Check if the running thread is not the owner of the mutex.
	 */
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	/*
	 * Clear the count in case this is a recursive mutex.
	 */
	*count = m->m_count;
	m->m_refcount++;
	m->m_count = 0;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	if (__predict_true((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0))
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	else {
		TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
		set_inherited_priority(curthread, m);
	}
	MUTEX_INIT_LINK(m);
	_thr_umutex_unlock(&m->m_lock, TID(curthread));

	if (m->m_private)
		THR_CRITICAL_LEAVE(curthread);
	return (0);
}

int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
    int *prioceiling)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		ret = EINVAL;
	else {
		*prioceiling = m->m_lock.m_ceilings[0];
		ret = 0;
	}

	return (ret);
}

int
_pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
    int ceiling, int *old_ceiling)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_mutex *m, *m1, *m2;
	int ret;

	m = *mutex;
	if ((m <= THR_MUTEX_DESTROYED) ||
	    (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
		return (EINVAL);

	ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
	if (ret != 0)
		return (ret);

	if (m->m_owner == curthread) {
		MUTEX_ASSERT_IS_OWNED(m);
		m1 = TAILQ_PREV(m, mutex_queue, m_qe);
		m2 = TAILQ_NEXT(m, m_qe);
		/*
		 * If the new ceiling leaves the mutex out of order in
		 * the thread's priority-protected mutex queue, reinsert
		 * it at the position that keeps the queue sorted by
		 * ceiling.
		 */
		if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
		    (m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
			TAILQ_REMOVE(&curthread->pp_mutexq, m, m_qe);
			TAILQ_FOREACH(m2, &curthread->pp_mutexq, m_qe) {
				if (m2->m_lock.m_ceilings[0] > (u_int)ceiling) {
					TAILQ_INSERT_BEFORE(m2, m, m_qe);
					return (0);
				}
			}
			TAILQ_INSERT_TAIL(&curthread->pp_mutexq, m, m_qe);
		}
	}
	return (0);
}

int
_pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_spinloops;
	return (0);
}

int
__pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	m->m_spinloops = count;
	return (0);
}

int
_pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	*count = m->m_yieldloops;
	return (0);
}

int
__pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
{
	struct pthread_mutex *m;

	CHECK_AND_INIT_MUTEX

	m->m_yieldloops = count;
	return (0);
}

int
_pthread_mutex_isowned_np(pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;

	m = *mutex;
	if (m <= THR_MUTEX_DESTROYED)
		return (0);
	return (m->m_owner == _get_curthread());
}