/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD: head/sys/sys/mutex.h 228424 2011-12-11 21:02:01Z avg $
 */

#ifndef _SYS_MUTEX_H_
#define	_SYS_MUTEX_H_

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>

/*
 * Mutex types and options passed to mtx_init().  MTX_QUIET and MTX_DUPOK
 * can also be passed in.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000004	/* Option: lock allowed to recurse */
#define	MTX_NOWITNESS	0x00000008	/* Don't do any witness checking. */
#define	MTX_NOPROFILE	0x00000020	/* Don't profile this lock */

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define	MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */
#define	MTX_DUPOK	LOP_DUPOK	/* Don't log a duplicate acquire */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type.  None of
 * this, with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
#define	MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_UNOWNED)

/*
 * Value stored in mutex->mtx_lock to denote a destroyed mutex.
 */
#define	MTX_DESTROYED	(MTX_CONTESTED | MTX_UNOWNED)
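
/*
 * Example initialization and teardown using the flags above
 * (illustrative only; the variables and lock names are hypothetical):
 *
 *	struct mtx sc_mtx, intr_mtx;
 *
 *	mtx_init(&sc_mtx, "example softc lock", NULL, MTX_DEF);
 *	mtx_init(&intr_mtx, "example intr lock", NULL, MTX_SPIN | MTX_RECURSE);
 *	...
 *	mtx_destroy(&intr_mtx);
 *	mtx_destroy(&sc_mtx);
 */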

/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
 *	 and LOCK_LINE.  These functions should not be called directly by any
 *	 code using the API.  Their macros cover their functionality.
 *	 Functions with a `_' suffix are the entrypoint for the common
 *	 KPI covering both compat shims and the fast path case.  These can be
 *	 used by consumers willing to pass options, file, and line
 *	 information in an option-independent way.
 *
 * [See below for descriptions]
 *
 */
void	mtx_init(struct mtx *m, const char *name, const char *type, int opts);
void	mtx_destroy(struct mtx *m);
void	mtx_sysinit(void *arg);
int	mtx_trylock_flags_(struct mtx *m, int opts, const char *file,
	    int line);
void	mutex_init(void);
void	_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts,
	    const char *file, int line);
void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
#ifdef SMP
void	_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts,
	    const char *file, int line);
#endif
void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
	    int line);
void	_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
	    int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	_mtx_assert(const struct mtx *m, int what, const char *file, int line);
#endif
void	thread_lock_flags_(struct thread *, int, const char *, int);

#define	thread_lock(tdp)						\
	thread_lock_flags_((tdp), 0, __FILE__, __LINE__)
#define	thread_lock_flags(tdp, opt)					\
	thread_lock_flags_((tdp), (opt), __FILE__, __LINE__)
#define	thread_unlock(tdp)						\
	mtx_unlock_spin((tdp)->td_lock)

#define	mtx_recurse	lock_object.lo_data

/* Very simple operations on mtx_lock. */

/* Try to obtain mtx_lock once. */
#define	_mtx_obtain_lock(mp, tid)					\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))

/* Try to release mtx_lock if it is unrecursed and uncontested. */
#define	_mtx_release_lock(mp, tid)					\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)

/* Release mtx_lock quickly, assuming we own it. */
#define	_mtx_release_lock_quick(mp)					\
	atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)
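
/*
 * A minimal sketch of the uncontested fast path built on these
 * primitives (illustrative only; the real logic lives in __mtx_lock()
 * below and in kern_mutex.c).  mtx_lock holds MTX_UNOWNED while the
 * mutex is free and the owning thread's pointer once it is held, so a
 * single compare-and-set acquires or releases an uncontested lock:
 *
 *	uintptr_t tid = (uintptr_t)curthread;
 *
 *	if (!_mtx_obtain_lock(mp, tid))
 *		_mtx_lock_sleep(mp, tid, opts, file, line);
 *	...
 *	if (!_mtx_release_lock(mp, tid))
 *		_mtx_unlock_sleep(mp, opts, file, line);
 */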

/*
 * Full lock operations that are suitable to be inlined in non-debug
 * kernels.  If the lock cannot be acquired or released trivially then
 * the work is deferred to another function.
 */

/* Lock a normal mutex. */
#define	__mtx_lock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	if (!_mtx_obtain_lock((mp), _tid))				\
		_mtx_lock_sleep((mp), _tid, (opts), (file), (line));	\
	else								\
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, \
		    mp, 0, 0, (file), (line));				\
} while (0)

/*
 * Lock a spin mutex.  For spinlocks, we handle recursion inline (it
 * turns out that function calls can be significantly expensive on
 * some architectures).  Since spin locks are not _too_ common,
 * inlining this code is not too big a deal.
 */
#ifdef SMP
#define	__mtx_lock_spin(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	spinlock_enter();						\
	if (!_mtx_obtain_lock((mp), _tid)) {				\
		if ((mp)->mtx_lock == _tid)				\
			(mp)->mtx_recurse++;				\
		else							\
			_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
	} else								\
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, \
		    mp, 0, 0, (file), (line));				\
} while (0)
#else /* SMP */
#define	__mtx_lock_spin(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock == _tid)					\
		(mp)->mtx_recurse++;					\
	else {								\
		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
		(mp)->mtx_lock = _tid;					\
	}								\
} while (0)
#endif /* SMP */

/* Unlock a normal mutex. */
#define	__mtx_unlock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	if (!_mtx_release_lock((mp), _tid))				\
		_mtx_unlock_sleep((mp), (opts), (file), (line));	\
} while (0)

/*
 * Unlock a spin mutex.  For spinlocks, we can handle everything
 * inline, as it's pretty simple and a function call would be too
 * expensive (at least on some architectures).  Since spin locks are
 * not _too_ common, inlining this code is not too big a deal.
 *
 * Since we always perform a spinlock_enter() when attempting to acquire a
 * spin lock, we need to always perform a matching spinlock_exit() when
 * releasing a spin lock.  This includes the recursion cases.
 */
#ifdef SMP
#define	__mtx_unlock_spin(mp) do {					\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_SPIN_UNLOCK_RELEASE, \
		    mp);						\
		_mtx_release_lock_quick((mp));				\
	}								\
	spinlock_exit();						\
} while (0)
#else /* SMP */
#define	__mtx_unlock_spin(mp) do {					\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_LOCK(LS_MTX_SPIN_UNLOCK_RELEASE, \
		    mp);						\
		(mp)->mtx_lock = MTX_UNOWNED;				\
	}								\
	spinlock_exit();						\
} while (0)
#endif /* SMP */

/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 *     and pass option flags `opts' to the "hard" function, if required.
 *     With these routines, it is possible to pass flags such as MTX_QUIET
 *     to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 *     it cannot.  Rather, it returns 0 on failure and non-zero on success.
 *     It does NOT handle recursion as we assume that if a caller is properly
 *     using this part of the interface, they will know that the lock in
 *     question is _not_ recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
 *     relevant option flags `opts.'
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'.
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
#define	mtx_lock(m)		mtx_lock_flags((m), 0)
#define	mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define	mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define	mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define	mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)
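
/*
 * Example usage of the exported interface (illustrative only; "sc"
 * and its members are hypothetical).  mtx_trylock() returns non-zero
 * on success, in which case the caller owns the lock and must unlock:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	sc->sc_count++;
 *	mtx_unlock(&sc->sc_mtx);
 *
 *	if (mtx_trylock(&sc->sc_mtx) != 0) {
 *		sc->sc_count++;
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 */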

struct mtx_pool;

struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts);
void	mtx_pool_destroy(struct mtx_pool **poolp);
struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr);
struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
#define	mtx_pool_lock(pool, ptr)					\
	mtx_lock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_lock_spin(pool, ptr)					\
	mtx_lock_spin(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock(pool, ptr)					\
	mtx_unlock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock_spin(pool, ptr)					\
	mtx_unlock_spin(mtx_pool_find((pool), (ptr)))

/*
 * mtxpool_lockbuilder is a pool of sleep locks that is not witness
 * checked and should only be used for building higher level locks.
 *
 * mtxpool_sleep is a general purpose pool of sleep mutexes.
 */
extern struct mtx_pool *mtxpool_lockbuilder;
extern struct mtx_pool *mtxpool_sleep;

#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
#endif
#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
#define	mtx_lock_flags_(m, opts, file, line)				\
	_mtx_lock_flags((m), (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	_mtx_unlock_flags((m), (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	_mtx_lock_spin_flags((m), (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	_mtx_unlock_spin_flags((m), (opts), (file), (line))
#else	/* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
#define	mtx_lock_flags_(m, opts, file, line)				\
	__mtx_lock((m), curthread, (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	__mtx_unlock((m), curthread, (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	__mtx_lock_spin((m), curthread, (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	__mtx_unlock_spin((m))
#endif	/* LOCK_DEBUG > 0 || MUTEX_NOINLINE */

#ifdef INVARIANTS
#define	mtx_assert_(m, what, file, line)				\
	_mtx_assert((m), (what), (file), (line))

#define	GIANT_REQUIRED	mtx_assert_(&Giant, MA_OWNED, __FILE__, __LINE__)

#else	/* INVARIANTS */
#define	mtx_assert_(m, what, file, line)	(void)0
#define	GIANT_REQUIRED
#endif	/* INVARIANTS */

#define	mtx_lock_flags(m, opts)						\
	mtx_lock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_flags(m, opts)					\
	mtx_unlock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_lock_spin_flags(m, opts)					\
	mtx_lock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_spin_flags(m, opts)					\
	mtx_unlock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_flags(m, opts)					\
	mtx_trylock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_assert(m, what)						\
	mtx_assert_((m), (what), __FILE__, __LINE__)

#define	mtx_sleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (timo))
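
/*
 * A minimal wait-loop sketch using mtx_sleep() (illustrative only;
 * "sc", its fields, the "excwai" wmesg, and the PRIBIO priority are
 * assumptions for the example).  mtx_sleep() releases the mutex while
 * the thread sleeps and reacquires it before returning:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (sc->sc_busy != 0)
 *		mtx_sleep(sc, &sc->sc_mtx, PRIBIO, "excwai", 0);
 *	sc->sc_busy = 1;
 *	mtx_unlock(&sc->sc_mtx);
 *
 * The waking side clears sc_busy and calls wakeup(sc) with the same
 * mutex held.
 */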

#define	mtx_initialized(m)	lock_initalized(&(m)->lock_object)

#define	mtx_owned(m)	(((m)->mtx_lock & ~MTX_FLAGMASK) == (uintptr_t)curthread)

#define	mtx_recursed(m)	((m)->mtx_recurse != 0)

#define	mtx_name(m)	((m)->lock_object.lo_name)

/*
 * Global locks.
 */
extern struct mtx Giant;
extern struct mtx blocked_lock;

/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace a bare return with one that exits Giant and then returns.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT().
 * The #ifndef is to allow lint-like tools to redefine DROP_GIANT.
 */
#ifndef DROP_GIANT
#define	DROP_GIANT()							\
do {									\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		for (_giantcnt = 0; mtx_owned(&Giant) &&		\
		    !SCHEDULER_STOPPED(); _giantcnt++)			\
			mtx_unlock(&Giant);				\
	}

#define	PICKUP_GIANT()							\
	PARTIAL_PICKUP_GIANT();						\
} while (0)

#define	PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	if (_giantcnt > 0) {						\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}
#endif

#define	UGAR(rval) do {							\
	int _val = (rval);						\
	mtx_unlock(&Giant);						\
	return (_val);							\
} while (0)

struct mtx_args {
	struct mtx	*ma_mtx;
	const char	*ma_desc;
	int		 ma_opts;
};

#define	MTX_SYSINIT(name, mtx, desc, opts)				\
	static struct mtx_args name##_args = {				\
		(mtx),							\
		(desc),							\
		(opts)							\
	};								\
	SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    mtx_sysinit, &name##_args);					\
	SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    mtx_destroy, (mtx))

/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined for INVARIANT_SUPPORT infrastructure
 * support as _mtx_assert() itself uses them and the latter implies that
 * _mtx_assert() must build.
 */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	MA_OWNED	LA_XLOCKED
#define	MA_NOTOWNED	LA_UNLOCKED
#define	MA_RECURSED	LA_RECURSED
#define	MA_NOTRECURSED	LA_NOTRECURSED
#endif

/*
 * Common lock type names.
 */
#define	MTX_NETWORK_LOCK	"network driver"

#endif	/* _KERNEL */
#endif	/* _SYS_MUTEX_H_ */