/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
 * $FreeBSD: head/sys/sys/mutex.h 83590 2001-09-17 19:31:26Z jhb $
 */

#ifndef _SYS_MUTEX_H_
#define	_SYS_MUTEX_H_

#ifndef LOCORE
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#ifdef _KERNEL
#include <sys/ktr.h>
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/globals.h>
#endif	/* _KERNEL */
#endif	/* !LOCORE */

#include <machine/mutex.h>

#ifdef _KERNEL

/*
 * Mutex types and options passed to mtx_init().  MTX_QUIET can also be
 * passed in.
 */
#define	MTX_DEF		0x00000000	/* DEFAULT (sleep) lock */
#define	MTX_SPIN	0x00000001	/* Spin lock (disables interrupts) */
#define	MTX_RECURSE	0x00000004	/* Option: lock allowed to recurse */
#define	MTX_NOWITNESS	0x00000008	/* Don't do any witness checking. */
#define	MTX_SLEEPABLE	0x00000010	/* We can sleep with this lock. */

/*
 * Option flags passed to certain lock/unlock routines, through the use
 * of corresponding mtx_{lock,unlock}_flags() interface macros.
 */
#define	MTX_NOSWITCH	LOP_NOSWITCH	/* Do not switch on release */
#define	MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type.  None of
 * this, with the exception of MTX_UNOWNED, applies to spin locks.
 */
#define	MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define	MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
#define	MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
#define	MTX_FLAGMASK	~(MTX_RECURSED | MTX_CONTESTED)
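
/*
 * For illustration only (this sketch is not part of the interface): for a
 * MTX_DEF mutex, mtx_lock holds either the MTX_UNOWNED cookie or the
 * owning thread pointer, with MTX_RECURSED and MTX_CONTESTED encoded in
 * the low bits.  A hypothetical consumer would thus recover the owner with:
 *
 *	struct thread *owner;
 *
 *	owner = (struct thread *)((m)->mtx_lock & MTX_FLAGMASK);
 *
 * which is the same masking that mtx_owned() performs below.
 */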

#endif	/* _KERNEL */

#ifndef LOCORE

/*
 * XXX: Friendly reminder to fix things in MP code that is presently being
 * XXX: worked on.
 */
#define	mp_fixme(string)

#ifdef _KERNEL

/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp __FILE__
 *	 and __LINE__.  These functions should not be called directly by any
 *	 code using the API; their macros cover their functionality.
 *
 * [See below for descriptions]
 */
void	mtx_init(struct mtx *m, const char *description, int opts);
void	mtx_destroy(struct mtx *m);
void	_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin(struct mtx *m, int opts, critical_t mtx_crit,
	    const char *file, int line);
void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
int	_mtx_trylock(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
void	_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
	    int line);
void	_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
	    int line);
#ifdef INVARIANT_SUPPORT
void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
#endif

/*
 * We define our machine-independent (unoptimized) mutex micro-operations
 * here, if they are not already defined in the machine-dependent mutex.h.
 */

/* Actually obtain mtx_lock */
#ifndef _obtain_lock
#define	_obtain_lock(mp, tid)						\
	atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid))
#endif

/* Actually release mtx_lock */
#ifndef _release_lock
#define	_release_lock(mp, tid)						\
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED)
#endif

/* Actually release mtx_lock quickly, assuming we own it. */
#ifndef _release_lock_quick
#define	_release_lock_quick(mp)						\
	atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED)
#endif

/*
 * Obtain a sleep lock inline, or call the "hard" function if we can't get
 * it easily.
 */
#ifndef _get_sleep_lock
#define	_get_sleep_lock(mp, tid, opts, file, line) do {			\
	if (!_obtain_lock((mp), (tid)))					\
		_mtx_lock_sleep((mp), (opts), (file), (line));		\
} while (0)
#endif

/*
 * Obtain a spin lock inline, or call the "hard" function if we can't get
 * it easily.  For spin locks, we handle recursion inline (it turns out
 * that function calls can be significantly expensive on some architectures).
 * Since spin locks are not _too_ common, inlining this code is not too big
 * a deal.
 */
#ifndef _get_spin_lock
#define	_get_spin_lock(mp, tid, opts, file, line) do {			\
	critical_t _mtx_crit;						\
	_mtx_crit = critical_enter();					\
	if (!_obtain_lock((mp), (tid))) {				\
		if ((mp)->mtx_lock == (uintptr_t)(tid))			\
			(mp)->mtx_recurse++;				\
		else							\
			_mtx_lock_spin((mp), (opts), _mtx_crit, (file),	\
			    (line));					\
	} else								\
		(mp)->mtx_savecrit = _mtx_crit;				\
} while (0)
#endif
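
/*
 * For illustration (hypothetical expansion, not generated code): with the
 * micro-operations above, the uncontested sleep-lock fast path reduces to
 * a single compare-and-set of the lock word from MTX_UNOWNED to the
 * acquiring thread, falling back to the "hard" function on failure:
 *
 *	if (!atomic_cmpset_acq_ptr(&(m)->mtx_lock, (void *)MTX_UNOWNED,
 *	    curthread))
 *		_mtx_lock_sleep(m, opts, __FILE__, __LINE__);
 */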

/*
 * Release a sleep lock inline, or call the "hard" function if we can't do
 * it easily.
 */
#ifndef _rel_sleep_lock
#define	_rel_sleep_lock(mp, tid, opts, file, line) do {			\
	if (!_release_lock((mp), (tid)))				\
		_mtx_unlock_sleep((mp), (opts), (file), (line));	\
} while (0)
#endif

/*
 * For spin locks, we can handle everything inline, as it's pretty simple
 * and a function call would be too expensive (at least on some
 * architectures).  Since spin locks are not _too_ common, inlining this
 * code is not too big a deal.
 */
#ifndef _rel_spin_lock
#define	_rel_spin_lock(mp) do {						\
	critical_t _mtx_crit = (mp)->mtx_savecrit;			\
	if (mtx_recursed((mp)))						\
		(mp)->mtx_recurse--;					\
	else {								\
		_release_lock_quick((mp));				\
		critical_exit(_mtx_crit);				\
	}								\
} while (0)
#endif

/*
 * Exported lock manipulation interface.
 *
 * mtx_lock(m) locks MTX_DEF mutex `m'
 *
 * mtx_lock_spin(m) locks MTX_SPIN mutex `m'
 *
 * mtx_unlock(m) unlocks MTX_DEF mutex `m'
 *
 * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m'
 *
 * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) lock mutex `m'
 * and pass option flags `opts' to the "hard" function, if required.
 * With these routines, it is possible to pass flags such as MTX_QUIET
 * and/or MTX_NOSWITCH to the appropriate lock manipulation routines.
 *
 * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep
 * if it cannot.  Rather, it returns 0 on failure and non-zero on success.
 * It does NOT handle recursion, as we assume that a caller properly using
 * this part of the interface will know that the lock in question is
 * _not_ recursed.
 *
 * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but
 * accepts relevant option flags `opts'.
 *
 * mtx_initialized(m) returns non-zero if the lock `m' has been initialized.
 *
 * mtx_owned(m) returns non-zero if the current thread owns the lock `m'.
 *
 * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed.
 */
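
/*
 * Example (hypothetical lock and names, for illustration only): typical
 * use of the interface described above:
 *
 *	struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx, "foo lock", MTX_DEF);
 *	...
 *	mtx_lock(&foo_mtx);
 *	... modify state protected by foo_mtx ...
 *	mtx_unlock(&foo_mtx);
 *	...
 *	mtx_destroy(&foo_mtx);
 *
 * and a non-blocking attempt with mtx_trylock():
 *
 *	if (mtx_trylock(&foo_mtx) != 0) {
 *		... got the lock, do work ...
 *		mtx_unlock(&foo_mtx);
 *	}
 */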
#define	mtx_lock(m)		mtx_lock_flags((m), 0)
#define	mtx_lock_spin(m)	mtx_lock_spin_flags((m), 0)
#define	mtx_trylock(m)		mtx_trylock_flags((m), 0)
#define	mtx_unlock(m)		mtx_unlock_flags((m), 0)
#define	mtx_unlock_spin(m)	mtx_unlock_spin_flags((m), 0)

#ifdef LOCK_DEBUG
#define	mtx_lock_flags(m, opts)						\
	_mtx_lock_flags((m), (opts), __FILE__, __LINE__)
#define	mtx_unlock_flags(m, opts)					\
	_mtx_unlock_flags((m), (opts), __FILE__, __LINE__)
#define	mtx_lock_spin_flags(m, opts)					\
	_mtx_lock_spin_flags((m), (opts), __FILE__, __LINE__)
#define	mtx_unlock_spin_flags(m, opts)					\
	_mtx_unlock_spin_flags((m), (opts), __FILE__, __LINE__)
#else
#define	mtx_lock_flags(m, opts)						\
	__mtx_lock_flags((m), (opts), __FILE__, __LINE__)
#define	mtx_unlock_flags(m, opts)					\
	__mtx_unlock_flags((m), (opts), __FILE__, __LINE__)
#define	mtx_lock_spin_flags(m, opts)					\
	__mtx_lock_spin_flags((m), (opts), __FILE__, __LINE__)
#define	mtx_unlock_spin_flags(m, opts)					\
	__mtx_unlock_spin_flags((m), (opts), __FILE__, __LINE__)
#endif

#define	__mtx_lock_flags(m, opts, file, line) do {			\
	MPASS(curthread != NULL);					\
	KASSERT(((opts) & MTX_NOSWITCH) == 0,				\
	    ("MTX_NOSWITCH used at %s:%d", (file), (line)));		\
	_get_sleep_lock((m), curthread, (opts), (file), (line));	\
	LOCK_LOG_LOCK("LOCK", &(m)->mtx_object, opts, m->mtx_recurse,	\
	    (file), (line));						\
	WITNESS_LOCK(&(m)->mtx_object, (opts) | LOP_EXCLUSIVE, (file),	\
	    (line));							\
} while (0)

#define	__mtx_lock_spin_flags(m, opts, file, line) do {			\
	MPASS(curthread != NULL);					\
	_get_spin_lock((m), curthread, (opts), (file), (line));		\
	LOCK_LOG_LOCK("LOCK", &(m)->mtx_object, opts, m->mtx_recurse,	\
	    (file), (line));						\
	WITNESS_LOCK(&(m)->mtx_object, (opts) | LOP_EXCLUSIVE, (file),	\
	    (line));							\
} while (0)

#define	__mtx_unlock_flags(m, opts, file, line) do {			\
	MPASS(curthread != NULL);					\
	mtx_assert((m), MA_OWNED);					\
	WITNESS_UNLOCK(&(m)->mtx_object, (opts) | LOP_EXCLUSIVE,	\
	    (file), (line));						\
	LOCK_LOG_LOCK("UNLOCK", &(m)->mtx_object, (opts),		\
	    (m)->mtx_recurse, (file), (line));				\
	_rel_sleep_lock((m), curthread, (opts), (file), (line));	\
} while (0)

#define	__mtx_unlock_spin_flags(m, opts, file, line) do {		\
	MPASS(curthread != NULL);					\
	mtx_assert((m), MA_OWNED);					\
	WITNESS_UNLOCK(&(m)->mtx_object, (opts) | LOP_EXCLUSIVE,	\
	    (file), (line));						\
	LOCK_LOG_LOCK("UNLOCK", &(m)->mtx_object, (opts),		\
	    (m)->mtx_recurse, (file), (line));				\
	_rel_spin_lock((m));						\
} while (0)

#define	mtx_trylock_flags(m, opts)					\
	_mtx_trylock((m), (opts), __FILE__, __LINE__)

#define	mtx_initialized(m)	((m)->mtx_object.lo_flags & LO_INITIALIZED)

#define	mtx_owned(m)	(((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)curthread)

#define	mtx_recursed(m)	((m)->mtx_recurse != 0)
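
/*
 * For illustration (hypothetical lock name): a subsystem can sanity-check
 * ownership by hand with the macros above, e.g.:
 *
 *	KASSERT(mtx_owned(&foo_mtx), ("foo_mtx not held"));
 *
 * The mtx_assert() interface below serves the same purpose under
 * INVARIANTS.
 */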

/*
 * Global locks.
 */
extern struct mtx	sched_lock;
extern struct mtx	Giant;

/*
 * Giant lock manipulation and clean exit macros.
 * Used to replace return with an exit Giant and return.
 *
 * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT().
 */
#define	DROP_GIANT_NOSWITCH()						\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant.mtx_object, Giant);			\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock_flags(&Giant, MTX_NOSWITCH)

#define	DROP_GIANT()							\
do {									\
	int _giantcnt;							\
	WITNESS_SAVE_DECL(Giant);					\
									\
	if (mtx_owned(&Giant))						\
		WITNESS_SAVE(&Giant.mtx_object, Giant);			\
	for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++)		\
		mtx_unlock(&Giant)

#define	PICKUP_GIANT()							\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant.mtx_object, Giant);		\
} while (0)

#define	PARTIAL_PICKUP_GIANT()						\
	mtx_assert(&Giant, MA_NOTOWNED);				\
	while (_giantcnt--)						\
		mtx_lock(&Giant);					\
	if (mtx_owned(&Giant))						\
		WITNESS_RESTORE(&Giant.mtx_object, Giant)

#define	UGAR(rval) do {							\
	int _val = (rval);						\
	mtx_unlock(&Giant);						\
	return (_val);							\
} while (0)

/*
 * The INVARIANTS-enabled mtx_assert() functionality.
 *
 * The constants need to be defined under INVARIANT_SUPPORT, since
 * _mtx_assert() itself uses them and INVARIANT_SUPPORT implies that
 * _mtx_assert() must build.
 */
#ifdef INVARIANT_SUPPORT
#define	MA_OWNED	0x01
#define	MA_NOTOWNED	0x02
#define	MA_RECURSED	0x04
#define	MA_NOTRECURSED	0x08
#endif	/* INVARIANT_SUPPORT */

#ifdef INVARIANTS
#define	mtx_assert(m, what)						\
	_mtx_assert((m), (what), __FILE__, __LINE__)

#define	GIANT_REQUIRED	mtx_assert(&Giant, MA_OWNED)

#else	/* INVARIANTS */
#define	mtx_assert(m, what)
#define	GIANT_REQUIRED
#endif	/* INVARIANTS */

#endif	/* _KERNEL */
#endif	/* !LOCORE */
#endif	/* _SYS_MUTEX_H_ */