mutex.h revision 330897
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD: stable/11/sys/sys/mutex.h 330897 2018-03-14 03:19:51Z eadler $
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef _KERNEL
#include <sys/pcpu.h>
#include <sys/lock_profile.h>
#include <sys/lockstat.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>

/*
 * Mutex types and options passed to mtx_init().  MTX_QUIET and MTX_DUPOK
 * can also be passed in.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000004	/* Option: lock allowed to recurse */
#define	MTX_NOWITNESS	0x00000008	/* Don't do any witness checking. */
#define	MTX_NOPROFILE	0x00000020	/* Don't profile this lock */
#define	MTX_NEW		0x00000040	/* Don't check for double-init */

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define	MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */
#define	MTX_DUPOK	LOP_DUPOK	/* Don't log a duplicate acquire */
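
/*
 * Usage sketch (illustrative only; the "foo" softc and its fields are
 * hypothetical, not part of this KPI).  A consumer typically embeds a
 * sleep mutex in its own structure, initializes it once, and destroys
 * it on teardown:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		int		sc_busy;
 *	};
 *
 *	mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
 *	...
 *	mtx_destroy(&sc->sc_mtx);
 *
 * Passing MTX_DEF | MTX_RECURSE instead would allow the owner to acquire
 * the lock recursively.
 */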

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type.  None of
 * this, with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_UNOWNED	0x00000000	/* Cookie for free mutex */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_DESTROYED	0x00000004	/* lock destroyed */
#define	MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_DESTROYED)

/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
 *	 and LOCK_LINE macros and to hide the lock cookie crunching from the
 *	 consumers.  These functions should not be called directly by any
 *	 code using the API; their macros cover their functionality.
 *	 Functions with a `_' suffix are the entry points for the common
 *	 KPI covering both compat shims and the fast path case.  These can
 *	 be used by consumers willing to pass options, file and line
 *	 information, in an option-independent way.
 *
 * [See below for descriptions]
 *
 */
void	_mtx_init(volatile uintptr_t *c, const char *name, const char *type,
	    int opts);
void	_mtx_destroy(volatile uintptr_t *c);
void	mtx_sysinit(void *arg);
int	_mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF);
int	_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	mutex_init(void);
#if LOCK_DEBUG > 0
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
void	__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
#else
void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v);
void	__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v);
#endif

#ifdef SMP
#if LOCK_DEBUG > 0
void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
	    const char *file, int line);
#else
void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v);
#endif
#endif
void	__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void	__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
int	__mtx_trylock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
void	__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void	__mtx_assert(const volatile uintptr_t *c, int what, const char *file,
	    int line);
#endif
void	thread_lock_flags_(struct thread *, int, const char *, int);
#if LOCK_DEBUG > 0
void	_thread_lock(struct thread *td, int opts, const char *file, int line);
#else
void	_thread_lock(struct thread *);
#endif

#if defined(LOCK_PROFILING) || defined(KLD_MODULE)
#define	thread_lock(tdp)						\
	thread_lock_flags_((tdp), 0, __FILE__, __LINE__)
#elif LOCK_DEBUG > 0
#define	thread_lock(tdp)						\
	_thread_lock((tdp), 0, __FILE__, __LINE__)
#else
#define	thread_lock(tdp)						\
	_thread_lock((tdp))
#endif

#if LOCK_DEBUG > 0
#define	thread_lock_flags(tdp, opt)					\
	thread_lock_flags_((tdp), (opt), __FILE__, __LINE__)
#else
#define	thread_lock_flags(tdp, opt)					\
	_thread_lock(tdp)
#endif

#define	thread_unlock(tdp)						\
	mtx_unlock_spin((tdp)->td_lock)
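
/*
 * Usage sketch (illustrative): a thread's scheduling state is protected by
 * whichever spin lock its td_lock member currently points at, so that state
 * is always accessed through these wrappers rather than by naming a mutex
 * directly:
 *
 *	thread_lock(td);
 *	... examine or update td's scheduler-visible state ...
 *	thread_unlock(td);
 */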

/*
 * Top-level macros to provide the lock cookie once the actual mtx is passed.
 * They will also prevent passing a malformed object to the mtx KPI by
 * failing compilation, as the mtx_lock reserved member will not be found.
 */
#define	mtx_init(m, n, t, o)						\
	_mtx_init(&(m)->mtx_lock, n, t, o)
#define	mtx_destroy(m)							\
	_mtx_destroy(&(m)->mtx_lock)
#define	mtx_trylock_flags_(m, o, f, l)					\
	_mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
#if LOCK_DEBUG > 0
#define	_mtx_lock_sleep(m, v, o, f, l)					\
	__mtx_lock_sleep(&(m)->mtx_lock, v, o, f, l)
#define	_mtx_unlock_sleep(m, v, o, f, l)				\
	__mtx_unlock_sleep(&(m)->mtx_lock, v, o, f, l)
#else
#define	_mtx_lock_sleep(m, v, o, f, l)					\
	__mtx_lock_sleep(&(m)->mtx_lock, v)
#define	_mtx_unlock_sleep(m, v, o, f, l)				\
	__mtx_unlock_sleep(&(m)->mtx_lock, v)
#endif
#ifdef SMP
#if LOCK_DEBUG > 0
#define	_mtx_lock_spin(m, v, o, f, l)					\
	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, o, f, l)
#else
#define	_mtx_lock_spin(m, v, o, f, l)					\
	_mtx_lock_spin_cookie(&(m)->mtx_lock, v)
#endif
#endif
#define	_mtx_lock_flags(m, o, f, l)					\
	__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_unlock_flags(m, o, f, l)					\
	__mtx_unlock_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_lock_spin_flags(m, o, f, l)				\
	__mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_trylock_spin_flags(m, o, f, l)				\
	__mtx_trylock_spin_flags(&(m)->mtx_lock, o, f, l)
#define	_mtx_unlock_spin_flags(m, o, f, l)				\
	__mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l)
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	_mtx_assert(m, w, f, l)						\
	__mtx_assert(&(m)->mtx_lock, w, f, l)
#endif

#define	mtx_recurse	lock_object.lo_data

/* Very simple operations on mtx_lock. */

/* Try to obtain mtx_lock once. */
#define	_mtx_obtain_lock(mp, tid)					\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))

#define	_mtx_obtain_lock_fetch(mp, vp, tid)				\
	atomic_fcmpset_acq_ptr(&(mp)->mtx_lock, vp, (tid))

/* Try to release mtx_lock if it is unrecursed and uncontested. */
#define	_mtx_release_lock(mp, tid)					\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)

/* Release mtx_lock quickly, assuming we own it. */
#define	_mtx_release_lock_quick(mp)					\
	atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)

#define	_mtx_release_lock_fetch(mp, vp)					\
	atomic_fcmpset_rel_ptr(&(mp)->mtx_lock, (vp), MTX_UNOWNED)

/*
 * Full lock operations that are suitable to be inlined in non-debug
 * kernels.  If the lock cannot be acquired or released trivially then
 * the work is deferred to another function.
 */

/* Lock a normal mutex. */
#define	__mtx_lock(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = MTX_UNOWNED;					\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) ||\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
		_mtx_lock_sleep((mp), _v, (opts), (file), (line));	\
} while (0)
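
/*
 * The fast path above, spelled out (a sketch only, not a definition used
 * elsewhere): atomic_fcmpset_acq_ptr() tries to swap MTX_UNOWNED for the
 * owning thread pointer and, on failure, writes the lock word it observed
 * back through its second argument, so the slow path can start from that
 * observed value without re-reading mtx_lock:
 *
 *	uintptr_t v = MTX_UNOWNED;
 *	if (!atomic_fcmpset_acq_ptr(&m->mtx_lock, &v, (uintptr_t)curthread))
 *		_mtx_lock_sleep(m, v, 0, __FILE__, __LINE__);
 */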

/*
 * Lock a spin mutex.  For spinlocks, we handle recursion inline (it
 * turns out that function calls can be significantly expensive on
 * some architectures).  Since spin locks are not _too_ common,
 * inlining this code is not too big a deal.
 */
#ifdef SMP
#define	__mtx_lock_spin(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
	uintptr_t _v = MTX_UNOWNED;					\
									\
	spinlock_enter();						\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) ||	\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid)))			\
		_mtx_lock_spin((mp), _v, (opts), (file), (line));	\
} while (0)
#define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int _ret;							\
									\
	spinlock_enter();						\
	if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\
		spinlock_exit();					\
		_ret = 0;						\
	} else {							\
		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire,	\
		    mp, 0, 0, file, line);				\
		_ret = 1;						\
	}								\
	_ret;								\
})
#else /* SMP */
#define	__mtx_lock_spin(mp, tid, opts, file, line) do {			\
	uintptr_t _tid = (uintptr_t)(tid);				\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock == _tid)					\
		(mp)->mtx_recurse++;					\
	else {								\
		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
		(mp)->mtx_lock = _tid;					\
	}								\
} while (0)
#define	__mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({	\
	uintptr_t _tid = (uintptr_t)(tid);				\
	int _ret;							\
									\
	spinlock_enter();						\
	if ((mp)->mtx_lock != MTX_UNOWNED) {				\
		spinlock_exit();					\
		_ret = 0;						\
	} else {							\
		(mp)->mtx_lock = _tid;					\
		_ret = 1;						\
	}								\
	_ret;								\
})
#endif /* SMP */

/* Unlock a normal mutex. */
#define	__mtx_unlock(mp, tid, opts, file, line) do {			\
	uintptr_t _v = (uintptr_t)(tid);				\
									\
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__release) ||\
	    !_mtx_release_lock_fetch((mp), &_v)))			\
		_mtx_unlock_sleep((mp), _v, (opts), (file), (line));	\
} while (0)

/*
 * Unlock a spin mutex.  For spinlocks, we can handle everything
 * inline, as it's pretty simple and a function call would be too
 * expensive (at least on some architectures).  Since spin locks are
 * not _too_ common, inlining this code is not too big a deal.
 *
 * Since we always perform a spinlock_enter() when attempting to acquire a
 * spin lock, we need to always perform a matching spinlock_exit() when
 * releasing a spin lock.  This includes the recursion cases.
 */
#ifdef SMP
#define	__mtx_unlock_spin(mp) do {					\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp);	\
		_mtx_release_lock_quick((mp));				\
	}								\
	spinlock_exit();						\
} while (0)
#else /* SMP */
#define	__mtx_unlock_spin(mp) do {					\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp);	\
		(mp)->mtx_lock = MTX_UNOWNED;				\
	}								\
	spinlock_exit();						\
} while (0)
#endif /* SMP */
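
/*
 * Spin mutex usage sketch (illustrative; "foo_intr_mtx" is a hypothetical
 * mutex initialized with MTX_SPIN).  Holding a spin mutex keeps interrupts
 * disabled on the local CPU via the spinlock_enter()/spinlock_exit() pairing
 * above, so critical sections must stay short and must never sleep:
 *
 *	mtx_lock_spin(&foo_intr_mtx);
 *	... touch state shared with interrupt context ...
 *	mtx_unlock_spin(&foo_intr_mtx);
 */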

/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 *     and pass option flags `opts' to the "hard" function, if required.
 *     With these routines, it is possible to pass flags such as MTX_QUIET
 *     to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if
 *     it cannot.  Rather, it returns 0 on failure and non-zero on success.
 *     It does NOT handle recursion; we assume that a caller properly using
 *     this part of the interface will know that the lock in question is
 *     _not_ recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts
 *     relevant option flags `opts.'
 *
 * mtx_trylock_spin(m) attempts to acquire MTX_SPIN mutex `m' but doesn't
 *     spin if it cannot.  Rather, it returns 0 on failure and non-zero on
 *     success.  It always returns failure for recursed lock attempts.
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'.
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
#define	mtx_lock(m)		mtx_lock_flags((m), 0)
#define	mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define	mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define	mtx_trylock_spin(m)	mtx_trylock_spin_flags((m), 0)
#define	mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define	mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)

struct mtx_pool;

struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts);
void mtx_pool_destroy(struct mtx_pool **poolp);
struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr);
struct mtx *mtx_pool_alloc(struct mtx_pool *pool);
#define	mtx_pool_lock(pool, ptr)					\
	mtx_lock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_lock_spin(pool, ptr)					\
	mtx_lock_spin(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock(pool, ptr)					\
	mtx_unlock(mtx_pool_find((pool), (ptr)))
#define	mtx_pool_unlock_spin(pool, ptr)					\
	mtx_unlock_spin(mtx_pool_find((pool), (ptr)))

/*
 * mtxpool_sleep is a general purpose pool of sleep mutexes.
 */
extern struct mtx_pool *mtxpool_sleep;
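
/*
 * Pool mutex usage sketch (illustrative; "obj" is hypothetical).  Pool
 * mutexes suit objects too small or too numerous to embed a lock of their
 * own; mtx_pool_find() hashes the object's address to one of the pool's
 * mutexes, so unrelated objects may share a lock:
 *
 *	mtx_pool_lock(mtxpool_sleep, obj);
 *	... short critical section protecting obj ...
 *	mtx_pool_unlock(mtxpool_sleep, obj);
 */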

#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
#endif
#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
#define	mtx_lock_flags_(m, opts, file, line)				\
	_mtx_lock_flags((m), (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	_mtx_unlock_flags((m), (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	_mtx_lock_spin_flags((m), (opts), (file), (line))
#define	mtx_trylock_spin_flags_(m, opts, file, line)			\
	_mtx_trylock_spin_flags((m), (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	_mtx_unlock_spin_flags((m), (opts), (file), (line))
#else	/* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */
#define	mtx_lock_flags_(m, opts, file, line)				\
	__mtx_lock((m), curthread, (opts), (file), (line))
#define	mtx_unlock_flags_(m, opts, file, line)				\
	__mtx_unlock((m), curthread, (opts), (file), (line))
#define	mtx_lock_spin_flags_(m, opts, file, line)			\
	__mtx_lock_spin((m), curthread, (opts), (file), (line))
#define	mtx_trylock_spin_flags_(m, opts, file, line)			\
	__mtx_trylock_spin((m), curthread, (opts), (file), (line))
#define	mtx_unlock_spin_flags_(m, opts, file, line)			\
	__mtx_unlock_spin((m))
#endif	/* LOCK_DEBUG > 0 || MUTEX_NOINLINE */

#ifdef INVARIANTS
#define	mtx_assert_(m, what, file, line)				\
	_mtx_assert((m), (what), (file), (line))

#define	GIANT_REQUIRED	mtx_assert_(&Giant, MA_OWNED, __FILE__, __LINE__)

#else	/* INVARIANTS */
#define	mtx_assert_(m, what, file, line)	(void)0
#define	GIANT_REQUIRED
#endif	/* INVARIANTS */

#define	mtx_lock_flags(m, opts)						\
	mtx_lock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_flags(m, opts)					\
	mtx_unlock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_lock_spin_flags(m, opts)					\
	mtx_lock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_unlock_spin_flags(m, opts)					\
	mtx_unlock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_flags(m, opts)					\
	mtx_trylock_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_trylock_spin_flags(m, opts)					\
	mtx_trylock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE)
#define	mtx_assert(m, what)						\
	mtx_assert_((m), (what), __FILE__, __LINE__)

#define	mtx_sleep(chan, mtx, pri, wmesg, timo)				\
	_sleep((chan), &(mtx)->lock_object, (pri), (wmesg),		\
	    tick_sbt * (timo), 0, C_HARDCLOCK)
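
/*
 * mtx_sleep() usage sketch (illustrative; sc, sc_mtx and sc_busy are
 * hypothetical).  The mutex is dropped while asleep and re-acquired before
 * mtx_sleep() returns, so the awaited condition must be re-tested in a loop:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (sc->sc_busy != 0)
 *		mtx_sleep(sc, &sc->sc_mtx, PRIBIO, "foowait", 0);
 *	sc->sc_busy = 1;
 *	mtx_unlock(&sc->sc_mtx);
 */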

#define	MTX_READ_VALUE(m)	((m)->mtx_lock)

#define	mtx_initialized(m)	lock_initialized(&(m)->lock_object)

#define	lv_mtx_owner(v)	((struct thread *)((v) & ~MTX_FLAGMASK))

#define	mtx_owner(m)	lv_mtx_owner(MTX_READ_VALUE(m))

#define	mtx_owned(m)	(mtx_owner(m) == curthread)

#define	mtx_recursed(m)	((m)->mtx_recurse != 0)

#define	mtx_name(m)	((m)->lock_object.lo_name)

/*
 * Global locks.
 */
extern struct mtx Giant;
extern struct mtx blocked_lock;

/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace a plain return with one that also drops Giant first.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT().
 * The #ifndef is to allow lint-like tools to redefine DROP_GIANT.
 */
#ifndef DROP_GIANT
#define	DROP_GIANT()							\
do {									\
	int _giantcnt = 0;						\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		for (_giantcnt = 0; mtx_owned(&Giant) &&		\
		    !SCHEDULER_STOPPED(); _giantcnt++)			\
			mtx_unlock(&Giant);				\
	}

#define	PICKUP_GIANT()							\
	PARTIAL_PICKUP_GIANT();						\
} while (0)

#define	PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	if (_giantcnt > 0) {						\
		while (_giantcnt--)					\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}
#endif

struct mtx_args {
	void		*ma_mtx;
	const char	*ma_desc;
	int		 ma_opts;
};

#define	MTX_SYSINIT(name, mtx, desc, opts)				\
	static struct mtx_args name##_args = {				\
		(mtx),							\
		(desc),							\
		(opts)							\
	};								\
	SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    mtx_sysinit, &name##_args);					\
	SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE,	\
	    _mtx_destroy, __DEVOLATILE(void *, &(mtx)->mtx_lock))

/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined for the INVARIANT_SUPPORT infrastructure
 * because _mtx_assert() itself uses them, and INVARIANT_SUPPORT implies
 * that _mtx_assert() must build.
 */
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define	MA_OWNED	LA_XLOCKED
#define	MA_NOTOWNED	LA_UNLOCKED
#define	MA_RECURSED	LA_RECURSED
#define	MA_NOTRECURSED	LA_NOTRECURSED
#endif

/*
 * Common lock type names.
 */
#define	MTX_NETWORK_LOCK	"network driver"

#endif /* _KERNEL */
#endif /* _SYS_MUTEX_H_ */
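
/*
 * MTX_SYSINIT usage sketch (illustrative; the "foo" names are hypothetical).
 * For a file-scope mutex that must be usable before any explicit
 * initialization code runs, MTX_SYSINIT above arranges for mtx_init() to
 * run during boot and for the mutex to be destroyed on teardown:
 *
 *	static struct mtx foo_mtx;
 *	MTX_SYSINIT(foo_mtx, &foo_mtx, "foo global", MTX_DEF);
 */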