mutex.h revision 144637
/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD: head/sys/sys/mutex.h 144637 2005-04-04 21:53:56Z jhb $
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#ifndef LOCORE
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#endif /* _KERNEL */
#endif /* !LOCORE */

#include <machine/mutex.h>

#ifdef _KERNEL

/*
 * Mutex types and options passed to mtx_init().  MTX_QUIET can also be
 * passed in.
 */
#define MTX_DEF         0x00000000      /* DEFAULT (sleep) lock */
#define MTX_SPIN        0x00000001      /* Spin lock (disables interrupts) */
#define MTX_RECURSE     0x00000004      /* Option: lock allowed to recurse */
#define MTX_NOWITNESS   0x00000008      /* Don't do any witness checking. */
#define MTX_DUPOK       0x00000020      /* Don't log a duplicate acquire */

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define MTX_QUIET       LOP_QUIET       /* Don't log a mutex event */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type.  None of
 * this, with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define MTX_RECURSED    0x00000001      /* lock recursed (for MTX_DEF only) */
#define MTX_CONTESTED   0x00000002      /* lock contested (for MTX_DEF only) */
#define MTX_UNOWNED     0x00000004      /* Cookie for free mutex */
#define MTX_FLAGMASK    ~(MTX_RECURSED | MTX_CONTESTED)

#endif /* _KERNEL */

#ifndef LOCORE

/*
 * XXX: Friendly reminder to fix things in MP code that is presently being
 * XXX: worked on.
 */
#define mp_fixme(string)

#ifdef _KERNEL
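
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * typical lifecycle of a MTX_DEF mutex built from the types and options
 * above.  The softc structure and the "foo" names are hypothetical.
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		int		sc_count;
 *	};
 *
 *	static void
 *	foo_attach(struct foo_softc *sc)
 *	{
 *		mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
 *	}
 *
 *	static void
 *	foo_bump(struct foo_softc *sc)
 *	{
 *		mtx_lock(&sc->sc_mtx);
 *		sc->sc_count++;
 *		mtx_unlock(&sc->sc_mtx);
 *	}
 *
 *	static void
 *	foo_detach(struct foo_softc *sc)
 *	{
 *		mtx_destroy(&sc->sc_mtx);
 *	}
 */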

/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
 *	 and LOCK_LINE.  These functions should not be called directly by any
 *	 code using the API.  Their macros cover their functionality.
 *
 * [See below for descriptions]
 *
 */
void	mtx_init(struct mtx *m, const char *name, const char *type, int opts);
void	mtx_destroy(struct mtx *m);
void	mtx_sysinit(void *arg);
void	mutex_init(void);
void	_mtx_lock_sleep(struct mtx *m, struct thread *td, int opts,
	    const char *file, int line);
void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
#ifdef SMP
void	_mtx_lock_spin(struct mtx *m, struct thread *td, int opts,
	    const char *file, int line);
#endif
void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
int	_mtx_trylock(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
	    int line);
void	_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
	    int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
#endif

/*
 * We define our machine-independent (unoptimized) mutex micro-operations
 * here, if they are not already defined in the machine-dependent mutex.h.
 */

/* Actually obtain mtx_lock */
#ifndef _obtain_lock
#define _obtain_lock(mp, tid) \
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif

/* Actually release mtx_lock */
#ifndef _release_lock
#define _release_lock(mp, tid) \
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif

/* Actually release mtx_lock quickly, assuming we own it. */
#ifndef _release_lock_quick
#define _release_lock_quick(mp) \
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif

/*
 * Obtain a sleep lock inline, or call the "hard" function if we can't get it
 * easy.
 */
#ifndef _get_sleep_lock
#define _get_sleep_lock(mp, tid, opts, file, line) do { \
	struct thread *_tid = (tid); \
 \
	if (!_obtain_lock((mp), _tid)) \
		_mtx_lock_sleep((mp), _tid, (opts), (file), (line)); \
} while (0)
#endif
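
/*
 * Illustrative model (a sketch, not part of the original header): the lock
 * word protocol implemented by _obtain_lock() and _release_lock() above,
 * restated as a small standalone C11 program.  A free mutex holds the
 * MTX_UNOWNED cookie; an owned mutex holds the owning thread pointer, so a
 * single compare-and-swap both acquires the lock and records the owner.
 * The acquire/release orderings mirror atomic_cmpset_acq_ptr() and
 * atomic_store_rel_ptr().  The names below (UNOWNED, which mirrors
 * MTX_UNOWNED, lock_word, try_acquire, release) are hypothetical.
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	#define UNOWNED ((uintptr_t)0x4)
 *
 *	static _Atomic uintptr_t lock_word = UNOWNED;
 *
 *	static int
 *	try_acquire(uintptr_t tid)
 *	{
 *		uintptr_t expected = UNOWNED;
 *
 *		return (atomic_compare_exchange_strong_explicit(&lock_word,
 *		    &expected, tid, memory_order_acquire,
 *		    memory_order_relaxed));
 *	}
 *
 *	static void
 *	release(void)
 *	{
 *		atomic_store_explicit(&lock_word, UNOWNED,
 *		    memory_order_release);
 *	}
 */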

/*
 * Obtain a spin lock inline, or call the "hard" function if we can't get it
 * easy.  For spinlocks, we handle recursion inline (function calls can be
 * quite expensive on some architectures).  Since spin locks are not _too_
 * common, inlining this code is not too big a deal.
 */
#ifndef _get_spin_lock
#ifdef SMP
#define _get_spin_lock(mp, tid, opts, file, line) do { \
	struct thread *_tid = (tid); \
 \
	spinlock_enter(); \
	if (!_obtain_lock((mp), _tid)) { \
		if ((mp)->mtx_lock == (uintptr_t)_tid) \
			(mp)->mtx_recurse++; \
		else \
			_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
	} \
} while (0)
#else /* SMP */
#define _get_spin_lock(mp, tid, opts, file, line) do { \
	struct thread *_tid = (tid); \
 \
	spinlock_enter(); \
	if ((mp)->mtx_lock == (uintptr_t)_tid) \
		(mp)->mtx_recurse++; \
	else { \
		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
		(mp)->mtx_lock = (uintptr_t)_tid; \
	} \
} while (0)
#endif /* SMP */
#endif

/*
 * Release a sleep lock inline, or call the "hard" function if we can't do it
 * easy.
 */
#ifndef _rel_sleep_lock
#define _rel_sleep_lock(mp, tid, opts, file, line) do { \
	if (!_release_lock((mp), (tid))) \
		_mtx_unlock_sleep((mp), (opts), (file), (line)); \
} while (0)
#endif

/*
 * For spinlocks, we can handle everything inline, as it's pretty simple and
 * a function call would be too expensive (at least on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 *
 * Since we always perform a spinlock_enter() when attempting to acquire a
 * spin lock, we need to always perform a matching spinlock_exit() when
 * releasing a spin lock.  This includes the recursion cases.
 */
#ifndef _rel_spin_lock
#ifdef SMP
#define _rel_spin_lock(mp) do { \
	if (mtx_recursed((mp))) \
		(mp)->mtx_recurse--; \
	else \
		_release_lock_quick((mp)); \
	spinlock_exit(); \
} while (0)
#else /* SMP */
#define _rel_spin_lock(mp) do { \
	if (mtx_recursed((mp))) \
		(mp)->mtx_recurse--; \
	else \
		(mp)->mtx_lock = MTX_UNOWNED; \
	spinlock_exit(); \
} while (0)
#endif /* SMP */
#endif
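
/*
 * Walk-through (illustrative, not part of the original header): because
 * _get_spin_lock() performs a spinlock_enter() on every acquire, including
 * recursive ones, _rel_spin_lock() above performs exactly one matching
 * spinlock_exit() per release.  For a spin mutex initialized with
 * MTX_SPIN | MTX_RECURSE, a recursed sequence using the mtx_lock_spin()
 * and mtx_unlock_spin() macros defined below therefore balances as:
 *
 *	mtx_lock_spin(&m);	spinlock_enter(), lock word taken
 *	mtx_lock_spin(&m);	spinlock_enter(), mtx_recurse becomes 1
 *	mtx_unlock_spin(&m);	mtx_recurse becomes 0, spinlock_exit()
 *	mtx_unlock_spin(&m);	lock word released, spinlock_exit()
 */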

/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 *     and pass option flags `opts' to the "hard" function, if required.
 *     With these routines, it is possible to pass flags such as MTX_QUIET
 *     to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 *     it cannot.  Rather, it returns 0 on failure and non-zero on success.
 *     It does NOT handle recursion as we assume that if a caller is properly
 *     using this part of the interface, he will know that the lock in
 *     question is _not_ recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but
 *     accepts relevant option flags `opts'.
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'.
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
#define mtx_lock(m)		mtx_lock_flags((m), 0)
#define mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)

struct mtx_pool;

struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts);
void mtx_pool_destroy(struct mtx_pool **poolp);
struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr);
struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
#define mtx_pool_lock(pool, ptr) \
	mtx_lock(mtx_pool_find((pool), (ptr)))
#define mtx_pool_lock_spin(pool, ptr) \
	mtx_lock_spin(mtx_pool_find((pool), (ptr)))
#define mtx_pool_unlock(pool, ptr) \
	mtx_unlock(mtx_pool_find((pool), (ptr)))
#define mtx_pool_unlock_spin(pool, ptr) \
	mtx_unlock_spin(mtx_pool_find((pool), (ptr)))

/*
 * mtxpool_lockbuilder is a pool of sleep locks that is not witness
 * checked and should only be used for building higher level locks.
 *
 * mtxpool_sleep is a general purpose pool of sleep mutexes.
 */
extern struct mtx_pool *mtxpool_lockbuilder;
extern struct mtx_pool *mtxpool_sleep;

#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
#endif
#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
#define mtx_lock_flags(m, opts) \
	_mtx_lock_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define mtx_unlock_flags(m, opts) \
	_mtx_unlock_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define mtx_lock_spin_flags(m, opts) \
	_mtx_lock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define mtx_unlock_spin_flags(m, opts) \
	_mtx_unlock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#else /* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
#define mtx_lock_flags(m, opts) \
	_get_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define mtx_unlock_flags(m, opts) \
	_rel_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define mtx_lock_spin_flags(m, opts) \
	_get_spin_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define mtx_unlock_spin_flags(m, opts) \
	_rel_spin_lock((m))
#endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */

#define mtx_trylock_flags(m, opts) \
	_mtx_trylock((m), (opts), LOCK_FILE, LOCK_LINE)

#define mtx_initialized(m) ((m)->mtx_object.lo_flags & LO_INITIALIZED)

#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curthread)

#define mtx_recursed(m) ((m)->mtx_recurse != 0)

#define mtx_name(m) ((m)->mtx_object.lo_name)

/*
 * Global locks.
 */
extern struct mtx sched_lock;
extern struct mtx Giant;

/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace a return statement with a release of Giant followed by the
 * return.
 *
 * Note that DROP_GIANT() needs to be paired with PICKUP_GIANT().
 * The #ifndef is to allow lint-like tools to redefine DROP_GIANT.
 */
#ifndef DROP_GIANT
#define DROP_GIANT() \
do { \
	int _giantcnt; \
	WITNESS_SAVE_DECL(Giant); \
 \
	if (mtx_owned(&Giant)) \
		WITNESS_SAVE(&Giant.mtx_object, Giant); \
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \
		mtx_unlock(&Giant)

#define PICKUP_GIANT() \
	mtx_assert(&Giant, MA_NOTOWNED); \
	while (_giantcnt--) \
		mtx_lock(&Giant); \
	if (mtx_owned(&Giant)) \
		WITNESS_RESTORE(&Giant.mtx_object, Giant); \
} while (0)

#define PARTIAL_PICKUP_GIANT() \
	mtx_assert(&Giant, MA_NOTOWNED); \
	while (_giantcnt--) \
		mtx_lock(&Giant); \
	if (mtx_owned(&Giant)) \
		WITNESS_RESTORE(&Giant.mtx_object, Giant)
#endif
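
/*
 * Usage sketch (illustrative, not part of the original header): DROP_GIANT()
 * opens a compound statement and unlocks Giant as many times as the current
 * thread holds it, remembering the count in a local variable; PICKUP_GIANT()
 * re-locks Giant that many times and closes the statement, so the two macros
 * must appear in the same lexical scope.  The function called in between is
 * hypothetical, standing in for work that should run without Giant held:
 *
 *	DROP_GIANT();
 *	foo_mpsafe_work(sc);
 *	PICKUP_GIANT();
 */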

/*
 * Network MPSAFE temporary workarounds.  When debug_mpsafenet
 * is 1 the network stack is assumed to operate without Giant on the
 * input path and protocols that require Giant must collect it
 * on entry.  When it is 0, Giant is grabbed in the network interface
 * ISRs and in the netisr path, so there is no need for protocols to
 * grab the Giant lock themselves.
 *
 * This mechanism is intended as temporary until everything of
 * importance is properly locked.  Note: the semantics of
 * NET_{LOCK,UNLOCK}_GIANT() are not the same as DROP_GIANT()
 * and PICKUP_GIANT(), as they are plain mutex operations
 * without a recursion counter.
 */
extern int debug_mpsafenet;		/* defined in net/netisr.c */
#define NET_LOCK_GIANT() do { \
	if (!debug_mpsafenet) \
		mtx_lock(&Giant); \
} while (0)
#define NET_UNLOCK_GIANT() do { \
	if (!debug_mpsafenet) \
		mtx_unlock(&Giant); \
} while (0)
#define NET_ASSERT_GIANT() do { \
	if (!debug_mpsafenet) \
		mtx_assert(&Giant, MA_OWNED); \
} while (0)
#define NET_CALLOUT_MPSAFE (debug_mpsafenet ? CALLOUT_MPSAFE : 0)

/* Unlock Giant and return the given value. */
#define UGAR(rval) do { \
	int _val = (rval); \
	mtx_unlock(&Giant); \
	return (_val); \
} while (0)

struct mtx_args {
	struct mtx	*ma_mtx;
	const char	*ma_desc;
	int		 ma_opts;
};

#define MTX_SYSINIT(name, mtx, desc, opts) \
	static struct mtx_args name##_args = { \
		(mtx), \
		(desc), \
		(opts) \
	}; \
	SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
	    mtx_sysinit, &name##_args)

/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined when INVARIANT_SUPPORT is in effect as
 * well, since _mtx_assert() itself uses them and INVARIANT_SUPPORT implies
 * that _mtx_assert() must build.
 */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define MA_OWNED	0x01
#define MA_NOTOWNED	0x02
#define MA_RECURSED	0x04
#define MA_NOTRECURSED	0x08
#endif

#ifdef INVARIANTS
#define mtx_assert(m, what) \
	_mtx_assert((m), (what), __FILE__, __LINE__)

#define GIANT_REQUIRED	mtx_assert(&Giant, MA_OWNED)

#else /* INVARIANTS */
#define mtx_assert(m, what)
#define GIANT_REQUIRED
#endif /* INVARIANTS */

/*
 * Common lock type names.
 */
#define MTX_NETWORK_LOCK	"network driver"

#endif /* _KERNEL */
#endif /* !LOCORE */
#endif /* _SYS_MUTEX_H_ */
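
/*
 * Usage sketch (illustrative, not part of the original header): a statically
 * declared mutex initialized at boot via MTX_SYSINIT(), with an INVARIANTS
 * ownership check via mtx_assert().  The "foo" names are hypothetical.
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo, &foo_mtx, "foo global lock", MTX_DEF);
 *
 *	static void
 *	foo_modify(void)
 *	{
 *		mtx_lock(&foo_mtx);
 *		mtx_assert(&foo_mtx, MA_OWNED);
 *		mtx_unlock(&foo_mtx);
 *	}
 */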