/* mutex.h — FreeBSD SVN revision 121049 (extraction artifact, kept as a comment) */
1218885Sdim/*- 2218885Sdim * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved. 3218885Sdim * 4218885Sdim * Redistribution and use in source and binary forms, with or without 5218885Sdim * modification, are permitted provided that the following conditions 6218885Sdim * are met: 7218885Sdim * 1. Redistributions of source code must retain the above copyright 8218885Sdim * notice, this list of conditions and the following disclaimer. 9218885Sdim * 2. Redistributions in binary form must reproduce the above copyright 10218885Sdim * notice, this list of conditions and the following disclaimer in the 11218885Sdim * documentation and/or other materials provided with the distribution. 12218885Sdim * 3. Berkeley Software Design Inc's name may not be used to endorse or 13218885Sdim * promote products derived from this software without specific prior 14218885Sdim * written permission. 15218885Sdim * 16218885Sdim * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND 17218885Sdim * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18218885Sdim * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19218885Sdim * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE 20218885Sdim * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21218885Sdim * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22226633Sdim * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23226633Sdim * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24226633Sdim * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25218885Sdim * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26218885Sdim * SUCH DAMAGE. 
27263508Sdim * 28263508Sdim * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $ 29218885Sdim * $FreeBSD: head/sys/sys/mutex.h 121049 2003-10-12 21:02:55Z jeff $ 30263508Sdim */ 31263508Sdim 32218885Sdim#ifndef _SYS_MUTEX_H_ 33234353Sdim#define _SYS_MUTEX_H_ 34218885Sdim 35218885Sdim#ifndef LOCORE 36263508Sdim#include <sys/queue.h> 37218885Sdim#include <sys/_lock.h> 38218885Sdim#include <sys/_mutex.h> 39218885Sdim 40218885Sdim#ifdef _KERNEL 41218885Sdim#include <sys/pcpu.h> 42263508Sdim#include <machine/atomic.h> 43263508Sdim#include <machine/cpufunc.h> 44263508Sdim#endif /* _KERNEL_ */ 45263508Sdim#endif /* !LOCORE */ 46263508Sdim 47263508Sdim#include <machine/mutex.h> 48263508Sdim 49263508Sdim#ifdef _KERNEL 50218885Sdim 51263508Sdim/* 52218885Sdim * Mutex types and options passed to mtx_init(). MTX_QUIET can also be 53218885Sdim * passed in. 54234353Sdim */ 55234353Sdim#define MTX_DEF 0x00000000 /* DEFAULT (sleep) lock */ 56234353Sdim#define MTX_SPIN 0x00000001 /* Spin lock (disables interrupts) */ 57234353Sdim#define MTX_RECURSE 0x00000004 /* Option: lock allowed to recurse */ 58218885Sdim#define MTX_NOWITNESS 0x00000008 /* Don't do any witness checking. */ 59234353Sdim#define MTX_DUPOK 0x00000020 /* Don't log a duplicate acquire */ 60234353Sdim 61218885Sdim/* 62234353Sdim * Option flags passed to certain lock/unlock routines, through the use 63234353Sdim * of corresponding mtx_{lock,unlock}_flags() interface macros. 64218885Sdim */ 65234353Sdim#define MTX_QUIET LOP_QUIET /* Don't log a mutex event */ 66234353Sdim 67234353Sdim/* 68234353Sdim * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this, 69234353Sdim * with the exception of MTX_UNOWNED, applies to spin locks. 
70234353Sdim */ 71218885Sdim#define MTX_RECURSED 0x00000001 /* lock recursed (for MTX_DEF only) */ 72218885Sdim#define MTX_CONTESTED 0x00000002 /* lock contested (for MTX_DEF only) */ 73234353Sdim#define MTX_UNOWNED 0x00000004 /* Cookie for free mutex */ 74234353Sdim#define MTX_FLAGMASK ~(MTX_RECURSED | MTX_CONTESTED) 75234353Sdim 76234353Sdim#endif /* _KERNEL */ 77218885Sdim 78218885Sdim#ifndef LOCORE 79234353Sdim 80234353Sdim/* 81234353Sdim * XXX: Friendly reminder to fix things in MP code that is presently being 82234353Sdim * XXX: worked on. 83218885Sdim */ 84218885Sdim#define mp_fixme(string) 85234353Sdim 86234353Sdim#ifdef _KERNEL 87263508Sdim 88234353Sdim/* 89234353Sdim * Prototypes 90234353Sdim * 91234353Sdim * NOTE: Functions prepended with `_' (underscore) are exported to other parts 92234353Sdim * of the kernel via macros, thus allowing us to use the cpp LOCK_FILE 93234353Sdim * and LOCK_LINE. These functions should not be called directly by any 94218885Sdim * code using the API. Their macros cover their functionality. 
95218885Sdim * 96234353Sdim * [See below for descriptions] 97234353Sdim * 98218885Sdim */ 99234353Sdimvoid mtx_init(struct mtx *m, const char *name, const char *type, int opts); 100234353Sdimvoid mtx_destroy(struct mtx *m); 101234353Sdimvoid mtx_sysinit(void *arg); 102218885Sdimvoid mutex_init(void); 103234353Sdimvoid _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line); 104234353Sdimvoid _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line); 105218885Sdimvoid _mtx_lock_spin(struct mtx *m, int opts, const char *file, int line); 106218885Sdimvoid _mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line); 107234353Sdimint _mtx_trylock(struct mtx *m, int opts, const char *file, int line); 108234353Sdimvoid _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line); 109218885Sdimvoid _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line); 110234353Sdimvoid _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, 111218885Sdim int line); 112234353Sdimvoid _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, 113234353Sdim int line); 114234353Sdim#ifdef INVARIANT_SUPPORT 115234353Sdimvoid _mtx_assert(struct mtx *m, int what, const char *file, int line); 116234353Sdim#endif 117218885Sdim 118234353Sdim/* 119234353Sdim * We define our machine-independent (unoptimized) mutex micro-operations 120234353Sdim * here, if they are not already defined in the machine-dependent mutex.h 121234353Sdim */ 122234353Sdim 123218885Sdim/* Actually obtain mtx_lock */ 124218885Sdim#ifndef _obtain_lock 125234353Sdim#define _obtain_lock(mp, tid) \ 126234353Sdim atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid)) 127234353Sdim#endif 128218885Sdim 129234353Sdim/* Actually release mtx_lock */ 130234353Sdim#ifndef _release_lock 131218885Sdim#define _release_lock(mp, tid) \ 132234353Sdim atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED) 133218885Sdim#endif 134234353Sdim 
135234353Sdim/* Actually release mtx_lock quickly, assuming we own it. */ 136234353Sdim#ifndef _release_lock_quick 137218885Sdim#define _release_lock_quick(mp) \ 138218885Sdim atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED) 139218885Sdim#endif 140234353Sdim 141218885Sdim/* 142234353Sdim * Obtain a sleep lock inline, or call the "hard" function if we can't get it 143234353Sdim * easy. 144234353Sdim */ 145234353Sdim#ifndef _get_sleep_lock 146234353Sdim#define _get_sleep_lock(mp, tid, opts, file, line) do { \ 147234353Sdim if (!_obtain_lock((mp), (tid))) \ 148218885Sdim _mtx_lock_sleep((mp), (opts), (file), (line)); \ 149218885Sdim} while (0) 150218885Sdim#endif 151218885Sdim 152218885Sdim/* 153218885Sdim * Obtain a spin lock inline, or call the "hard" function if we can't get it 154218885Sdim * easy. For spinlocks, we handle recursion inline (it turns out that function 155218885Sdim * calls can be significantly expensive on some architectures). 156218885Sdim * Since spin locks are not _too_ common, inlining this code is not too big 157218885Sdim * a deal. 158218885Sdim */ 159263508Sdim#ifndef _get_spin_lock 160263508Sdim#define _get_spin_lock(mp, tid, opts, file, line) do { \ 161263508Sdim critical_enter(); \ 162263508Sdim if (!_obtain_lock((mp), (tid))) { \ 163263508Sdim if ((mp)->mtx_lock == (uintptr_t)(tid)) \ 164263508Sdim (mp)->mtx_recurse++; \ 165263508Sdim else \ 166263508Sdim _mtx_lock_spin((mp), (opts), (file), (line)); \ 167263508Sdim } \ 168218885Sdim} while (0) 169#endif 170 171/* 172 * Release a sleep lock inline, or call the "hard" function if we can't do it 173 * easy. 
174 */ 175#ifndef _rel_sleep_lock 176#define _rel_sleep_lock(mp, tid, opts, file, line) do { \ 177 if (!_release_lock((mp), (tid))) \ 178 _mtx_unlock_sleep((mp), (opts), (file), (line)); \ 179} while (0) 180#endif 181 182/* 183 * For spinlocks, we can handle everything inline, as it's pretty simple and 184 * a function call would be too expensive (at least on some architectures). 185 * Since spin locks are not _too_ common, inlining this code is not too big 186 * a deal. 187 * 188 * Since we always perform a critical_enter() when attempting to acquire a 189 * spin lock, we need to always perform a matching critical_exit() when 190 * releasing a spin lock. This includes the recursion cases. 191 */ 192#ifndef _rel_spin_lock 193#define _rel_spin_lock(mp) do { \ 194 if (mtx_recursed((mp))) \ 195 (mp)->mtx_recurse--; \ 196 else \ 197 _release_lock_quick((mp)); \ 198 critical_exit(); \ 199} while (0) 200#endif 201 202/* 203 * Exported lock manipulation interface. 204 * 205 * mtx_lock(m) locks MTX_DEF mutex `m' 206 * 207 * mtx_lock_spin(m) locks MTX_SPIN mutex `m' 208 * 209 * mtx_unlock(m) unlocks MTX_DEF mutex `m' 210 * 211 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m' 212 * 213 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) locks mutex `m' 214 * and passes option flags `opts' to the "hard" function, if required. 215 * With these routines, it is possible to pass flags such as MTX_QUIET 216 * to the appropriate lock manipulation routines. 217 * 218 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if 219 * it cannot. Rather, it returns 0 on failure and non-zero on success. 220 * It does NOT handle recursion as we assume that if a caller is properly 221 * using this part of the interface, he will know that the lock in question 222 * is _not_ recursed. 223 * 224 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts 225 * relevant option flags `opts.' 
226 * 227 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized. 228 * 229 * mtx_owned(m) returns non-zero if the current thread owns the lock `m' 230 * 231 * mtx_ownedby(m, td) returns non-zero if the specified thread owns the lock `m' 232 * 233 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed. 234 */ 235#define mtx_lock(m) mtx_lock_flags((m), 0) 236#define mtx_lock_spin(m) mtx_lock_spin_flags((m), 0) 237#define mtx_trylock(m) mtx_trylock_flags((m), 0) 238#define mtx_unlock(m) mtx_unlock_flags((m), 0) 239#define mtx_unlock_spin(m) mtx_unlock_spin_flags((m), 0) 240 241struct mtx_pool; 242 243struct mtx_pool *mtx_pool_create(const char *mtx_name, int pool_size, int opts); 244void mtx_pool_destroy(struct mtx_pool **poolp); 245struct mtx *mtx_pool_find(struct mtx_pool *pool, void *ptr); 246struct mtx *mtx_pool_alloc(struct mtx_pool *pool); 247#define mtx_pool_lock(pool, ptr) \ 248 mtx_lock(mtx_pool_find((pool), (ptr))) 249#define mtx_pool_lock_spin(pool, ptr) \ 250 mtx_lock_spin(mtx_pool_find((pool), (ptr))) 251#define mtx_pool_unlock(pool, ptr) \ 252 mtx_unlock(mtx_pool_find((pool), (ptr))) 253#define mtx_pool_unlock_spin(pool, ptr) \ 254 mtx_unlock_spin(mtx_pool_find((pool), (ptr))) 255 256/* 257 * mtxpool_lockbuilder is a pool of sleep locks that is not witness 258 * checked and should only be used for building higher level locks. 259 * 260 * mtxpool_sleep is a general purpose pool of sleep mutexes. 
261 */ 262extern struct mtx_pool *mtxpool_lockbuilder; 263extern struct mtx_pool *mtxpool_sleep; 264 265#ifndef LOCK_DEBUG 266#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h> 267#endif 268#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE) 269#define mtx_lock_flags(m, opts) \ 270 _mtx_lock_flags((m), (opts), LOCK_FILE, LOCK_LINE) 271#define mtx_unlock_flags(m, opts) \ 272 _mtx_unlock_flags((m), (opts), LOCK_FILE, LOCK_LINE) 273#define mtx_lock_spin_flags(m, opts) \ 274 _mtx_lock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE) 275#define mtx_unlock_spin_flags(m, opts) \ 276 _mtx_unlock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE) 277#else /* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */ 278#define mtx_lock_flags(m, opts) \ 279 _get_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE) 280#define mtx_unlock_flags(m, opts) \ 281 _rel_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE) 282#ifndef SMPnotyet 283#define mtx_lock_spin_flags(m, opts) \ 284 _get_spin_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE) 285#define mtx_unlock_spin_flags(m, opts) \ 286 _rel_spin_lock((m)) 287#else /* SMP */ 288#define mtx_lock_spin_flags(m, opts) critical_enter() 289#define mtx_unlock_spin_flags(m, opts) critical_exit() 290#endif /* SMP */ 291#endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */ 292 293#define mtx_trylock_flags(m, opts) \ 294 _mtx_trylock((m), (opts), LOCK_FILE, LOCK_LINE) 295 296#define mtx_initialized(m) ((m)->mtx_object.lo_flags & LO_INITIALIZED) 297 298#define mtx_owned(m) (mtx_ownedby((m), curthread)) 299 300#define mtx_ownedby(m, td) \ 301 (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)(td)) 302 303#define mtx_recursed(m) ((m)->mtx_recurse != 0) 304 305#define mtx_name(m) ((m)->mtx_object.lo_name) 306 307/* 308 * Global locks. 309 */ 310extern struct mtx sched_lock; 311extern struct mtx Giant; 312 313/* 314 * Giant lock manipulation and clean exit macros. 315 * Used to replace return with an exit Giant and return. 
316 * 317 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT() 318 * The #ifndef is to allow lint-like tools to redefine DROP_GIANT. 319 */ 320#ifndef DROP_GIANT 321#define DROP_GIANT() \ 322do { \ 323 int _giantcnt; \ 324 WITNESS_SAVE_DECL(Giant); \ 325 \ 326 if (mtx_owned(&Giant)) \ 327 WITNESS_SAVE(&Giant.mtx_object, Giant); \ 328 for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \ 329 mtx_unlock(&Giant) 330 331#define PICKUP_GIANT() \ 332 mtx_assert(&Giant, MA_NOTOWNED); \ 333 while (_giantcnt--) \ 334 mtx_lock(&Giant); \ 335 if (mtx_owned(&Giant)) \ 336 WITNESS_RESTORE(&Giant.mtx_object, Giant); \ 337} while (0) 338 339#define PARTIAL_PICKUP_GIANT() \ 340 mtx_assert(&Giant, MA_NOTOWNED); \ 341 while (_giantcnt--) \ 342 mtx_lock(&Giant); \ 343 if (mtx_owned(&Giant)) \ 344 WITNESS_RESTORE(&Giant.mtx_object, Giant) 345#endif 346 347#define UGAR(rval) do { \ 348 int _val = (rval); \ 349 mtx_unlock(&Giant); \ 350 return (_val); \ 351} while (0) 352 353struct mtx_args { 354 struct mtx *ma_mtx; 355 const char *ma_desc; 356 int ma_opts; 357}; 358 359#define MTX_SYSINIT(name, mtx, desc, opts) \ 360 static struct mtx_args name##_args = { \ 361 (mtx), \ 362 (desc), \ 363 (opts) \ 364 }; \ 365 SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ 366 mtx_sysinit, &name##_args) 367 368/* 369 * The INVARIANTS-enabled mtx_assert() functionality. 370 * 371 * The constants need to be defined for INVARIANT_SUPPORT infrastructure 372 * support as _mtx_assert() itself uses them and the latter implies that 373 * _mtx_assert() must build. 
374 */ 375#ifdef INVARIANT_SUPPORT 376#define MA_OWNED 0x01 377#define MA_NOTOWNED 0x02 378#define MA_RECURSED 0x04 379#define MA_NOTRECURSED 0x08 380#endif /* INVARIANT_SUPPORT */ 381 382#ifdef INVARIANTS 383#define mtx_assert(m, what) \ 384 _mtx_assert((m), (what), __FILE__, __LINE__) 385 386#define GIANT_REQUIRED mtx_assert(&Giant, MA_OWNED) 387 388#else /* INVARIANTS */ 389#define mtx_assert(m, what) 390#define GIANT_REQUIRED 391#endif /* INVARIANTS */ 392 393/* 394 * Common lock type names. 395 */ 396#define MTX_NETWORK_LOCK "network driver" 397 398#endif /* _KERNEL */ 399#endif /* !LOCORE */ 400#endif /* _SYS_MUTEX_H_ */ 401