mutex.h revision 85564
/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD: head/sys/sys/mutex.h 85564 2001-10-26 20:48:04Z dillon $
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#ifndef LOCORE
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef _KERNEL
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/globals.h>
#endif /* _KERNEL */
#endif /* !LOCORE */

#include <machine/mutex.h>

#ifdef _KERNEL

/*
 * Mutex types and options passed to mtx_init(). MTX_QUIET can also be
 * passed in.
 */
#define MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define MTX_RECURSE	0x00000004	/* Option: lock allowed to recurse */
#define MTX_NOWITNESS	0x00000008	/* Don't do any witness checking. */
#define MTX_SLEEPABLE	0x00000010	/* We can sleep with this lock. */

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of the corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define MTX_NOSWITCH	LOP_NOSWITCH	/* Do not switch on release */
#define MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of
 * this, with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
#define MTX_FLAGMASK	~(MTX_RECURSED | MTX_CONTESTED)
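
/*
 * Illustrative sketch (not part of the API): for a MTX_DEF mutex, the
 * mtx_lock word holds either MTX_UNOWNED or the owning thread pointer,
 * with MTX_RECURSED/MTX_CONTESTED ORed into its low bits.  An ownership
 * test therefore masks the flag bits off first, which is exactly what
 * the mtx_owned() macro below does:
 *
 *	if (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curthread)
 *		... the current thread owns the lock ...
 */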

#endif	/* _KERNEL */

#ifndef LOCORE

/*
 * XXX: Friendly reminder to fix things in MP code that is presently being
 * XXX: worked on.
 */
#define mp_fixme(string)

#ifdef _KERNEL

/*
 * Prototypes
 *
 * NOTE: Functions prefixed with `_' (underscore) are exported to other parts
 *	of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
 *	and LOCK_LINE. These functions should not be called directly by any
 *	code using the API; their macros cover their functionality.
 *
 * [See below for descriptions]
 */
void	mtx_init(struct mtx *m, const char *description, int opts);
void	mtx_destroy(struct mtx *m);
void	_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit,
	    const char *file, int line);
void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
int	_mtx_trylock(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
	    int line);
void	_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
	    int line);
#ifdef INVARIANT_SUPPORT
void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
#endif
int	mtx_lock_giant(int sysctlvar);
void	mtx_unlock_giant(int s);
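
/*
 * Illustrative sketch (an assumption drawn from the prototypes above, not
 * a definitive recipe): mtx_lock_giant() conditionally acquires Giant
 * based on a sysctl variable such as kern_giant_file (declared below) and
 * returns a state cookie that must be handed back to mtx_unlock_giant():
 *
 *	int s;
 *
 *	s = mtx_lock_giant(kern_giant_file);
 *	... code that may still need Giant ...
 *	mtx_unlock_giant(s);
 */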

/*
 * We define our machine-independent (unoptimized) mutex micro-operations
 * here, if they are not already defined in the machine-dependent mutex.h.
 */

/* Actually obtain mtx_lock */
#ifndef _obtain_lock
#define _obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif

/* Actually release mtx_lock */
#ifndef _release_lock
#define _release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif

/* Actually release mtx_lock quickly, assuming we own it. */
#ifndef _release_lock_quick
#define _release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif

/*
 * Obtain a sleep lock inline, or call the "hard" function if we can't get
 * it easily.
 */
#ifndef _get_sleep_lock
#define _get_sleep_lock(mp, tid, opts, file, line) do {			\
	if (!_obtain_lock((mp), (tid)))					\
		_mtx_lock_sleep((mp), (opts), (file), (line));		\
} while (0)
#endif

/*
 * Obtain a spin lock inline, or call the "hard" function if we can't get
 * it easily.  For spin locks, we handle recursion inline (it turns out
 * that function calls can be significantly expensive on some
 * architectures).  Since spin locks are not _too_ common, inlining this
 * code is not too big a deal.
 */
#ifndef _get_spin_lock
#define _get_spin_lock(mp, tid, opts, file, line) do {			\
	critical_t _mtx_crit;						\
	_mtx_crit = critical_enter();					\
	if (!_obtain_lock((mp), (tid))) {				\
		if ((mp)->mtx_lock == (uintptr_t)(tid))			\
			(mp)->mtx_recurse++;				\
		else							\
			_mtx_lock_spin((mp), (opts), _mtx_crit, (file),	\
			    (line));					\
	} else								\
		(mp)->mtx_savecrit = _mtx_crit;				\
} while (0)
#endif

/*
 * Release a sleep lock inline, or call the "hard" function if we can't do
 * it easily.
 */
#ifndef _rel_sleep_lock
#define _rel_sleep_lock(mp, tid, opts, file, line) do {			\
	if (!_release_lock((mp), (tid)))				\
		_mtx_unlock_sleep((mp), (opts), (file), (line));	\
} while (0)
#endif

/*
 * For spin locks, we can handle everything inline, as it's pretty simple
 * and a function call would be too expensive (at least on some
 * architectures).  Since spin locks are not _too_ common, inlining this
 * code is not too big a deal.
 */
#ifndef _rel_spin_lock
#define _rel_spin_lock(mp) do {						\
	critical_t _mtx_crit = (mp)->mtx_savecrit;			\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		_release_lock_quick((mp));				\
		critical_exit(_mtx_crit);				\
	}								\
} while (0)
#endif

/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 *     and pass the option flags `opts' to the "hard" function, if required.
 *     With these routines, it is possible to pass flags such as MTX_QUIET
 *     and/or MTX_NOSWITCH to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep
 *     if it cannot.  Rather, it returns 0 on failure and non-zero on
 *     success.  It does NOT handle recursion: we assume that a caller
 *     properly using this part of the interface knows that the lock in
 *     question is _not_ recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but
 *     accepts the relevant option flags `opts'.
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'.
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
#define mtx_lock(m)		mtx_lock_flags((m), 0)
#define mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)

#ifndef LOCK_DEBUG
#error LOCK_DEBUG not defined, include <sys/lock.h> before <sys/mutex.h>
#endif
#if LOCK_DEBUG > 0
#define mtx_lock_flags(m, opts)						\
	_mtx_lock_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define mtx_unlock_flags(m, opts)					\
	_mtx_unlock_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define mtx_lock_spin_flags(m, opts)					\
	_mtx_lock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#define mtx_unlock_spin_flags(m, opts)					\
	_mtx_unlock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE)
#else
#define mtx_lock_flags(m, opts)						\
	_get_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define mtx_unlock_flags(m, opts)					\
	_rel_sleep_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define mtx_lock_spin_flags(m, opts)					\
	_get_spin_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE)
#define mtx_unlock_spin_flags(m, opts)					\
	_rel_spin_lock((m))
#endif

#define mtx_trylock_flags(m, opts)					\
	_mtx_trylock((m), (opts), LOCK_FILE, LOCK_LINE)

#define mtx_initialized(m)	((m)->mtx_object.lo_flags & LO_INITIALIZED)

#define mtx_owned(m)	(((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curthread)

#define mtx_recursed(m)	((m)->mtx_recurse != 0)
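
/*
 * Illustrative sketch of the exported interface above.  The subsystem
 * lock `foo_mtx' and its name string are hypothetical examples, not part
 * of this header:
 *
 *	struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo lock", MTX_DEF);
 *	mtx_lock(&foo_mtx);
 *	mtx_assert(&foo_mtx, MA_OWNED);		(INVARIANTS only, see below)
 *	... access data protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 *	if (mtx_trylock(&foo_mtx) != 0) {
 *		... got the lock without sleeping ...
 *		mtx_unlock(&foo_mtx);
 *	}
 *	mtx_destroy(&foo_mtx);
 */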

/*
 * Global locks.
 */
extern struct mtx	sched_lock;
extern struct mtx	Giant;

/*
 * Giant lock sysctl variables used by other modules.
 */
extern int kern_giant_proc;
extern int kern_giant_file;

/*
 * Giant lock manipulation and clean exit macros.
 * UGAR() is used in place of a plain return so that Giant is released
 * before returning.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT().
 */
#define DROP_GIANT_NOSWITCH()						\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant.mtx_object, Giant);			\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock_flags(&Giant, MTX_NOSWITCH)

#define DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant.mtx_object, Giant);			\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock(&Giant)

#define PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant.mtx_object, Giant);		\
} while (0)

#define PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant.mtx_object, Giant)

#define UGAR(rval) do {							\
	int _val = (rval);						\
	mtx_unlock(&Giant);						\
	return (_val);							\
} while (0)
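
/*
 * Illustrative sketch of the DROP_GIANT()/PICKUP_GIANT() pairing (the
 * surrounding code is hypothetical).  DROP_GIANT() opens a block that
 * PICKUP_GIANT() closes, so the two must appear in the same scope:
 *
 *	DROP_GIANT();
 *	... run without Giant held, e.g. code that may sleep ...
 *	PICKUP_GIANT();
 */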

/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined whenever INVARIANT_SUPPORT is defined,
 * since _mtx_assert() itself uses them and must be able to build.
 */
#ifdef INVARIANT_SUPPORT
#define MA_OWNED	0x01
#define MA_NOTOWNED	0x02
#define MA_RECURSED	0x04
#define MA_NOTRECURSED	0x08
#endif /* INVARIANT_SUPPORT */

#ifdef INVARIANTS
#define mtx_assert(m, what)						\
	_mtx_assert((m), (what), __FILE__, __LINE__)

#define GIANT_REQUIRED	mtx_assert(&Giant, MA_OWNED)

#else	/* INVARIANTS */
#define mtx_assert(m, what)
#define GIANT_REQUIRED
#endif	/* INVARIANTS */

#endif	/* _KERNEL */
#endif	/* !LOCORE */
#endif	/* _SYS_MUTEX_H_ */