/* mutex.h revision 72393 */
1/*- 2 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 3. Berkeley Software Design Inc's name may not be used to endorse or 13 * promote products derived from this software without specific prior 14 * written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD: head/sys/sys/mutex.h 72393 2001-02-12 03:15:43Z bmilekic $
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#ifndef LOCORE
#include <sys/queue.h>

#ifdef _KERNEL
#include <sys/ktr.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/globals.h>
#endif	/* _KERNEL */
#endif	/* !LOCORE */

#include <machine/mutex.h>

#ifdef _KERNEL

/*
 * Mutex types and options stored in mutex->mtx_flags
 */
#define MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define MTX_RECURSE	0x00000002	/* Option: lock allowed to recurse */

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 *
 * XXX: The only reason we make these bits not interfere with the above "types
 *	and options" bits is because we have to pass both to the witness
 *	routines right now; if/when we clean up the witness interface to
 *	not check for mutex type from the passed in flag, but rather from
 *	the mutex lock's mtx_flags field, then we can change these values to
 *	0x1, 0x2, ...
 */
#define MTX_NOSWITCH	0x00000004	/* Do not switch on release */
#define MTX_QUIET	0x00000008	/* Don't log a mutex event */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this,
 * with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
/* Mask that strips the state bits above, leaving the owner pointer. */
#define MTX_FLAGMASK	~(MTX_RECURSED | MTX_CONTESTED)

#endif	/* _KERNEL */

#ifndef LOCORE

struct mtx_debug;

/*
 * Sleep/spin mutex.
 *
 * mtx_lock doubles as the owner pointer and, for sleep locks, the
 * MTX_RECURSED/MTX_CONTESTED state bits; MTX_UNOWNED marks a free lock.
 */
struct mtx {
	volatile uintptr_t mtx_lock;	/* owner (and state for sleep locks) */
	volatile u_int	mtx_recurse;	/* number of recursive holds */
	u_int		mtx_saveintr;	/* saved flags (for spin locks) */
	int		mtx_flags;	/* flags passed to mtx_init() */
	const char	*mtx_description; /* name shown in KTR traces */
	TAILQ_HEAD(, proc) mtx_blocked;	/* threads blocked on this lock */
	LIST_ENTRY(mtx)	mtx_contested;	/* list of all contested locks */
	struct mtx	*mtx_next;	/* all existing locks */
	struct mtx	*mtx_prev;	/* in system... */
	struct mtx_debug *mtx_debug;	/* debugging information... */
};

/*
 * XXX: Friendly reminder to fix things in MP code that is presently being
 * XXX: worked on.
 */
#define mp_fixme(string)

#ifdef _KERNEL

/*
 * Strings for KTR_LOCK tracing (defined in the mutex implementation).
 */
extern char	STR_mtx_lock_slp[];
extern char	STR_mtx_lock_spn[];
extern char	STR_mtx_unlock_slp[];
extern char	STR_mtx_unlock_spn[];

/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp __FILE__
 *	 and __LINE__. These functions should not be called directly by any
 *	 code using the IPI. Their macros cover their functionality.
 *
 * [See below for descriptions]
 *
 */
void	mtx_init(struct mtx *m, const char *description, int opts);
void	mtx_destroy(struct mtx *m);
void	_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr,
	    const char *file, int line);
void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
int	_mtx_trylock(struct mtx *m, int opts, const char *file, int line);

/*
 * We define our machine-independent (unoptimized) mutex micro-operations
 * here, if they are not already defined in the machine-dependent mutex.h
 */

/*
 * Actually obtain mtx_lock: a compare-and-set (with acquire semantics)
 * from the MTX_UNOWNED cookie to the owning thread pointer.  Non-zero on
 * success, zero if the lock was not free.
 */
#ifndef _obtain_lock
#define _obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif

/*
 * Actually release mtx_lock: compare-and-set (release semantics) from our
 * thread pointer back to MTX_UNOWNED.  Fails (returns zero) if state bits
 * such as MTX_CONTESTED are set, i.e. the "hard" path is needed.
 */
#ifndef _release_lock
#define _release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif

/* Actually release mtx_lock quickly, assuming we own it. */
#ifndef _release_lock_quick
#define _release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif

/*
 * Obtain a sleep lock inline, or call the "hard" function if we can't get it
 * easy.
 */
#ifndef _get_sleep_lock
#define _get_sleep_lock(mp, tid, opts) do {				\
	if (!_obtain_lock((mp), (tid)))					\
		_mtx_lock_sleep((mp), (opts), __FILE__, __LINE__);	\
} while (0)
#endif

/*
 * Obtain a spin lock inline, or call the "hard" function if we can't get it
 * easy. For spinlocks, we handle recursion inline (it turns out that function
 * calls can be significantly expensive on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 */
#ifndef _get_spin_lock
/*
 * Interrupt state is saved and interrupts disabled BEFORE the acquire
 * attempt; on a fresh acquisition the saved state is stashed in
 * mtx_saveintr (restored later by _rel_spin_lock), on recursion it is
 * simply dropped, and on contention it is handed to _mtx_lock_spin.
 */
#define _get_spin_lock(mp, tid, opts) do {				\
	u_int _mtx_intr = save_intr();					\
	disable_intr();							\
	if (!_obtain_lock((mp), (tid))) {				\
		if ((mp)->mtx_lock == (uintptr_t)(tid))			\
			(mp)->mtx_recurse++;				\
		else							\
			_mtx_lock_spin((mp), (opts), _mtx_intr,		\
			    __FILE__, __LINE__);			\
	} else								\
		(mp)->mtx_saveintr = _mtx_intr;				\
} while (0)
#endif

/*
 * Release a sleep lock inline, or call the "hard" function if we can't do it
 * easy.
 */
#ifndef _rel_sleep_lock
#define _rel_sleep_lock(mp, tid, opts) do {				\
	if (!_release_lock((mp), (tid)))				\
		_mtx_unlock_sleep((mp), (opts), __FILE__, __LINE__);	\
} while (0)
#endif

/*
 * For spinlocks, we can handle everything inline, as it's pretty simple and
 * a function call would be too expensive (at least on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 *
 * Interrupts are restored only when the outermost hold is released; a
 * recursive release just decrements the recursion count.
 */
#ifndef _rel_spin_lock
#define _rel_spin_lock(mp) do {						\
	u_int _mtx_intr = (mp)->mtx_saveintr;				\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		_release_lock_quick((mp));				\
		restore_intr(_mtx_intr);				\
	}								\
} while (0)
#endif

/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) locks mutex `m'
 *	and passes option flags `opts' to the "hard" function, if required.
 *	With these routines, it is possible to pass flags such as MTX_QUIET
 *	and/or MTX_NOSWITCH to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 *	it cannot. Rather, it returns 0 on failure and non-zero on success.
 *	It does NOT handle recursion as we assume that if a caller is properly
 *	using this part of the interface, he will know that the lock in question
 *	is _not_ recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
 *	relevant option flags `opts.'
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
#define mtx_lock(m) do {						\
	MPASS(curproc != NULL);						\
	_get_sleep_lock((m), curproc, 0);				\
	CTR5(KTR_LOCK, STR_mtx_lock_slp, (m)->mtx_description, (m),	\
	    (m)->mtx_recurse, __FILE__, __LINE__);			\
	WITNESS_ENTER((m), (m)->mtx_flags, __FILE__, __LINE__);	\
} while (0)

#define mtx_lock_spin(m) do {						\
	MPASS(curproc != NULL);						\
	_get_spin_lock((m), curproc, 0);				\
	CTR5(KTR_LOCK, STR_mtx_lock_spn, (m)->mtx_description, (m),	\
	    (m)->mtx_recurse, __FILE__, __LINE__);			\
	WITNESS_ENTER((m), (m)->mtx_flags, __FILE__, __LINE__);	\
} while (0)

/* On unlock, WITNESS bookkeeping happens before the lock is released. */
#define mtx_unlock(m) do {						\
	MPASS(curproc != NULL);						\
	WITNESS_EXIT((m), (m)->mtx_flags, __FILE__, __LINE__);		\
	mtx_assert((m), MA_OWNED);					\
	_rel_sleep_lock((m), curproc, 0);				\
	CTR5(KTR_LOCK, STR_mtx_unlock_slp, (m)->mtx_description, (m),	\
	    (m)->mtx_recurse, __FILE__, __LINE__);			\
} while (0)

#define mtx_unlock_spin(m) do {						\
	MPASS(curproc != NULL);						\
	WITNESS_EXIT((m), (m)->mtx_flags, __FILE__, __LINE__);		\
	mtx_assert((m), MA_OWNED);					\
	_rel_spin_lock((m));						\
	CTR5(KTR_LOCK, STR_mtx_unlock_spn, (m)->mtx_description, (m),	\
	    (m)->mtx_recurse, __FILE__, __LINE__);			\
} while (0)

/* Flags variants: `opts' suppresses tracing (MTX_QUIET) and is also
 * forwarded to the hard functions and to witness. */
#define mtx_lock_flags(m, opts) do {					\
	MPASS(curproc != NULL);						\
	_get_sleep_lock((m), curproc, (opts));				\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, STR_mtx_lock_slp,			\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
	WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__,	\
	    __LINE__);							\
} while (0)

#define mtx_lock_spin_flags(m, opts) do {				\
	MPASS(curproc != NULL);						\
	_get_spin_lock((m), curproc, (opts));				\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, STR_mtx_lock_spn,			\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
	WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__,	\
	    __LINE__);							\
} while (0)

#define mtx_unlock_flags(m, opts) do {					\
	MPASS(curproc != NULL);						\
	WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__,		\
	    __LINE__);							\
	mtx_assert((m), MA_OWNED);					\
	_rel_sleep_lock((m), curproc, (opts));				\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, STR_mtx_unlock_slp,			\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
} while (0)

/*
 * The MTX_SPIN unlock case is all inlined, so we handle the MTX_QUIET
 * flag right in the macro. Not a problem as if we don't have KTR_LOCK, this
 * check will be optimized out.
 */
#define mtx_unlock_spin_flags(m, opts) do {				\
	MPASS(curproc != NULL);						\
	WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__,		\
	    __LINE__);							\
	mtx_assert((m), MA_OWNED);					\
	_rel_spin_lock((m));						\
	if (((opts) & MTX_QUIET) == 0)					\
		CTR5(KTR_LOCK, STR_mtx_unlock_spn,			\
		    (m)->mtx_description, (m), (m)->mtx_recurse,	\
		    __FILE__, __LINE__);				\
} while (0)

#define mtx_trylock(m)							\
	_mtx_trylock((m), 0, __FILE__, __LINE__)

#define mtx_trylock_flags(m, opts)					\
	_mtx_trylock((m), (opts), __FILE__, __LINE__)

/* Owner test: mask off the state bits, compare against curproc. */
#define mtx_owned(m)	(((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curproc)

#define mtx_recursed(m)	((m)->mtx_recurse != 0)

/*
 * Global locks.
 */
extern struct mtx	sched_lock;
extern struct mtx	Giant;

/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace return with an exit Giant and return.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT()
 *
 * NOTE: DROP_GIANT*() deliberately opens a `do {' scope (declaring
 * _giantcnt and the witness save state) that is only closed by the
 * `} while (0)' in the matching PICKUP_GIANT(); the two must be used as
 * a bracketed pair within the same function.
 */
#define DROP_GIANT_NOSWITCH()						\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock_flags(&Giant, MTX_NOSWITCH)

/* As above, but releases without MTX_NOSWITCH (switching allowed). */
#define DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant, Giant);				\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock(&Giant)

/* Re-acquire Giant as many times as DROP_GIANT*() released it,
 * and close the scope DROP_GIANT*() opened. */
#define PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant);				\
} while (0)

/* Like PICKUP_GIANT(), but leaves the DROP_GIANT*() scope open. */
#define PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant, Giant)

/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 */
#ifdef INVARIANTS
#define MA_OWNED	0x01
#define MA_NOTOWNED	0x02
#define MA_RECURSED	0x04
#define MA_NOTRECURSED	0x08

void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
#define mtx_assert(m, what)						\
	_mtx_assert((m), (what), __FILE__, __LINE__)

#else	/* INVARIANTS */
#define mtx_assert(m, what)
#endif	/* INVARIANTS */

/*
 * The MUTEX_DEBUG-enabled MPASS*() extra sanity-check macros.
413 */ 414#ifdef MUTEX_DEBUG 415#define MPASS(ex) \ 416 if (!(ex)) \ 417 panic("Assertion %s failed at %s:%d", #ex, __FILE__, \ 418 __LINE__) 419 420#define MPASS2(ex, what) \ 421 if (!(ex)) \ 422 panic("Assertion %s failed at %s:%d", what, __FILE__, \ 423 __LINE__) 424 425#define MPASS3(ex, file, line) \ 426 if (!(ex)) \ 427 panic("Assertion %s failed at %s:%d", #ex, file, line) 428 429#define MPASS4(ex, what, file, line) \ 430 if (!(ex)) \ 431 panic("Assertion %s failed at %s:%d", what, file, line) 432 433#else /* MUTEX_DEBUG */ 434#define MPASS(ex) 435#define MPASS2(ex, what) 436#define MPASS3(ex, file, line) 437#define MPASS4(ex, what, file, line) 438#endif /* MUTEX_DEBUG */ 439 440/* 441 * Exported WITNESS-enabled functions and corresponding wrapper macros. 442 */ 443#ifdef WITNESS 444void witness_save(struct mtx *, const char **, int *); 445void witness_restore(struct mtx *, const char *, int); 446void witness_enter(struct mtx *, int, const char *, int); 447void witness_try_enter(struct mtx *, int, const char *, int); 448void witness_exit(struct mtx *, int, const char *, int); 449int witness_list(struct proc *); 450int witness_sleep(int, struct mtx *, const char *, int); 451 452#define WITNESS_ENTER(m, t, f, l) \ 453 witness_enter((m), (t), (f), (l)) 454 455#define WITNESS_EXIT(m, t, f, l) \ 456 witness_exit((m), (t), (f), (l)) 457 458#define WITNESS_SLEEP(check, m) \ 459 witness_sleep(check, (m), __FILE__, __LINE__) 460 461#define WITNESS_SAVE_DECL(n) \ 462 const char * __CONCAT(n, __wf); \ 463 int __CONCAT(n, __wl) 464 465#define WITNESS_SAVE(m, n) \ 466 witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)) 467 468#define WITNESS_RESTORE(m, n) \ 469 witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)) 470 471#else /* WITNESS */ 472#define witness_enter(m, t, f, l) 473#define witness_tryenter(m, t, f, l) 474#define witness_exit(m, t, f, l) 475#define witness_list(p) 476#define witness_sleep(c, m, f, l) 477 478#define WITNESS_ENTER(m, t, f, l) 
479#define WITNESS_EXIT(m, t, f, l) 480#define WITNESS_SLEEP(check, m) 481#define WITNESS_SAVE_DECL(n) 482#define WITNESS_SAVE(m, n) 483#define WITNESS_RESTORE(m, n) 484#endif /* WITNESS */ 485 486#endif /* _KERNEL */ 487#endif /* !LOCORE */ 488#endif /* _SYS_MUTEX_H_ */ 489