diff: sys/sys/mutex.h, stable/11 r315394 -> r327413
/*-
 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.

--- 12 unchanged lines hidden ---

 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex.h,v 2.7.2.35 2000/04/27 03:10:26 cp Exp $
- * $FreeBSD: stable/11/sys/sys/mutex.h 315394 2017-03-16 08:29:09Z mjg $
+ * $FreeBSD: stable/11/sys/sys/mutex.h 327413 2017-12-31 05:06:35Z mjg $
 */

#ifndef _SYS_MUTEX_H_
#define _SYS_MUTEX_H_

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

--- 22 unchanged lines hidden ---

 */
#define MTX_QUIET	LOP_QUIET	/* Don't log a mutex event */
#define MTX_DUPOK	LOP_DUPOK	/* Don't log a duplicate acquire */

/*
 * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this,
 * with the exception of MTX_UNOWNED, applies to spin locks.
 */
+#define MTX_UNOWNED	0x00000000	/* Cookie for free mutex */
#define MTX_RECURSED	0x00000001	/* lock recursed (for MTX_DEF only) */
#define MTX_CONTESTED	0x00000002	/* lock contested (for MTX_DEF only) */
-#define MTX_UNOWNED	0x00000004	/* Cookie for free mutex */
-#define MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_UNOWNED)
+#define MTX_DESTROYED	0x00000004	/* lock destroyed */
+#define MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_DESTROYED)

-/*
- * Value stored in mutex->mtx_lock to denote a destroyed mutex.
- */
-#define MTX_DESTROYED	(MTX_CONTESTED | MTX_UNOWNED)
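Review note: MTX_UNOWNED moves from 0x00000004 to 0x00000000, so a free mutex is now an all-zero lock word, which is exactly the value the fcmpset-based fast paths below seed as their expected value. MTX_DESTROYED becomes a bit of its own rather than the old MTX_CONTESTED | MTX_UNOWNED combination. A hedged, self-contained sketch of the lock-word layout this implies (illustrative owner value, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define MTX_UNOWNED	0x00000000
#define MTX_RECURSED	0x00000001
#define MTX_CONTESTED	0x00000002
#define MTX_DESTROYED	0x00000004
#define MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_DESTROYED)

int
main(void)
{
	/*
	 * The lock word packs the owner's struct thread pointer with the
	 * flag bits; thread structures are aligned well past 8 bytes, so
	 * the low bits are free for flags.
	 */
	uintptr_t owner = (uintptr_t)0xdeadbe00;	/* hypothetical curthread */
	uintptr_t lockword = owner | MTX_CONTESTED;

	printf("owner: %#lx\n", (unsigned long)(lockword & ~(uintptr_t)MTX_FLAGMASK));
	printf("flags: %#lx\n", (unsigned long)(lockword & MTX_FLAGMASK));
	printf("free:  %d\n", lockword == MTX_UNOWNED);	/* "free" is all-zero */
	return (0);
}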
/*
 * Prototypes
 *
 * NOTE: Functions prepended with `_' (underscore) are exported to other parts
 *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
 *	 and LOCK_LINE, or for hiding the lock cookie crunching from the
 *	 consumers. These functions should not be called directly by any
 *	 code using the API; their macros cover their functionality.
 *	 Functions with a `_' suffix are the entry point for the common
 *	 KPI covering both compat shims and the fast path case. These can be
 *	 used by consumers willing to pass options, file and line
 *	 information, in an option-independent way.
 *
 * [See below for descriptions]
 *
 */
void _mtx_init(volatile uintptr_t *c, const char *name, const char *type,
	    int opts);
void _mtx_destroy(volatile uintptr_t *c);
void mtx_sysinit(void *arg);
+int _mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF);
int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void mutex_init(void);
#if LOCK_DEBUG > 0
-void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
-	    int opts, const char *file, int line);
-void __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file,
-	    int line);
+void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
+	    const char *file, int line);
+void __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
+	    const char *file, int line);
#else
-void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid);
-void __mtx_unlock_sleep(volatile uintptr_t *c);
+void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v);
+void __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v);
#endif

#ifdef SMP
-void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
-	    int opts, const char *file, int line);
+#if LOCK_DEBUG > 0
+void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, int opts,
+	    const char *file, int line);
+#else
+void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v);
+#endif
#endif
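Review note: all of these slow-path entry points lose the uintptr_t tid argument, presumably because the callee can recompute curthread itself, which drops one argument from every inlined call site. __mtx_unlock_sleep() instead gains the lock value v observed by the failed atomic, and the file/line flavors are now compiled only under LOCK_DEBUG. A hedged sketch of the call-site effect (stub types; the real curthread is a per-CPU kernel symbol):

#include <stdint.h>

struct thread { int t_dummy; };
static struct thread thread0;

/* Stand-in for the kernel's curthread. */
static struct thread *
get_curthread(void)
{
	return (&thread0);
}

/* Old shape: every call site materialized tid as an argument. */
static void
lock_sleep_old(volatile uintptr_t *c, uintptr_t v, uintptr_t tid)
{
	(void)c; (void)v; (void)tid;
}

/* New shape: the slow path recomputes tid once, locally. */
static void
lock_sleep_new(volatile uintptr_t *c, uintptr_t v)
{
	uintptr_t tid = (uintptr_t)get_curthread();

	(void)c; (void)v; (void)tid;
	/* ...adaptive spin / turnstile logic would follow... */
}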
void __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
void __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
	    int line);
int __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
void __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
	    const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void __mtx_assert(const volatile uintptr_t *c, int what, const char *file,
	    int line);
#endif
void thread_lock_flags_(struct thread *, int, const char *, int);
+#if LOCK_DEBUG > 0
+void _thread_lock(struct thread *td, int opts, const char *file, int line);
+#else
+void _thread_lock(struct thread *);
+#endif

+#if defined(LOCK_PROFILING) || defined(KLD_MODULE)
#define thread_lock(tdp) \
	thread_lock_flags_((tdp), 0, __FILE__, __LINE__)
+#elif LOCK_DEBUG > 0
+#define thread_lock(tdp) \
+	_thread_lock((tdp), 0, __FILE__, __LINE__)
+#else
+#define thread_lock(tdp) \
+	_thread_lock((tdp))
+#endif
+
+#if LOCK_DEBUG > 0
#define thread_lock_flags(tdp, opt) \
	thread_lock_flags_((tdp), (opt), __FILE__, __LINE__)
+#else
+#define thread_lock_flags(tdp, opt) \
+	_thread_lock(tdp)
+#endif
#define thread_unlock(tdp) \
	mtx_unlock_spin((tdp)->td_lock)
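Review note: thread_lock() now dispatches three ways: with LOCK_PROFILING or for KLD modules it keeps the old thread_lock_flags_() path (so module binaries do not depend on the kernel's LOCK_DEBUG setting), with LOCK_DEBUG it calls _thread_lock() with file/line, and otherwise it calls the bare one-argument _thread_lock(). Consumer usage is unchanged; a hedged kernel-context sketch:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

/*
 * Hedged example: td_lock is a pointer the scheduler can re-point at a
 * different spin mutex, which is why thread_lock() is more than a plain
 * mtx_lock_spin() on a fixed lock.
 */
static void
poke_thread(struct thread *td)
{

	thread_lock(td);
	/* td->td_lock (a spin mutex) is held; scheduler state is stable. */
	thread_unlock(td);
}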
/*
 * Top-level macros to provide lock cookie once the actual mtx is passed.
 * They will also prevent passing a malformed object to the mtx KPI by
 * failing compilation as the mtx_lock reserved member will not be found.
 */
#define mtx_init(m, n, t, o) \
	_mtx_init(&(m)->mtx_lock, n, t, o)
#define mtx_destroy(m) \
	_mtx_destroy(&(m)->mtx_lock)
#define mtx_trylock_flags_(m, o, f, l) \
	_mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
#if LOCK_DEBUG > 0
-#define _mtx_lock_sleep(m, v, t, o, f, l) \
-	__mtx_lock_sleep(&(m)->mtx_lock, v, t, o, f, l)
-#define _mtx_unlock_sleep(m, o, f, l) \
-	__mtx_unlock_sleep(&(m)->mtx_lock, o, f, l)
+#define _mtx_lock_sleep(m, v, o, f, l) \
+	__mtx_lock_sleep(&(m)->mtx_lock, v, o, f, l)
+#define _mtx_unlock_sleep(m, v, o, f, l) \
+	__mtx_unlock_sleep(&(m)->mtx_lock, v, o, f, l)
#else
-#define _mtx_lock_sleep(m, v, t, o, f, l) \
-	__mtx_lock_sleep(&(m)->mtx_lock, v, t)
-#define _mtx_unlock_sleep(m, o, f, l) \
-	__mtx_unlock_sleep(&(m)->mtx_lock)
+#define _mtx_lock_sleep(m, v, o, f, l) \
+	__mtx_lock_sleep(&(m)->mtx_lock, v)
+#define _mtx_unlock_sleep(m, v, o, f, l) \
+	__mtx_unlock_sleep(&(m)->mtx_lock, v)
#endif
#ifdef SMP
-#define _mtx_lock_spin(m, v, t, o, f, l) \
-	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, t, o, f, l)
+#if LOCK_DEBUG > 0
+#define _mtx_lock_spin(m, v, o, f, l) \
+	_mtx_lock_spin_cookie(&(m)->mtx_lock, v, o, f, l)
+#else
+#define _mtx_lock_spin(m, v, o, f, l) \
+	_mtx_lock_spin_cookie(&(m)->mtx_lock, v)
+#endif
#endif
#define _mtx_lock_flags(m, o, f, l) \
	__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
#define _mtx_unlock_flags(m, o, f, l) \
	__mtx_unlock_flags(&(m)->mtx_lock, o, f, l)
#define _mtx_lock_spin_flags(m, o, f, l) \
	__mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l)
#define _mtx_trylock_spin_flags(m, o, f, l) \
	__mtx_trylock_spin_flags(&(m)->mtx_lock, o, f, l)

--- 18 unchanged lines hidden ---

/* Try to release mtx_lock if it is unrecursed and uncontested. */
#define _mtx_release_lock(mp, tid) \
	atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)

/* Release mtx_lock quickly, assuming we own it. */
#define _mtx_release_lock_quick(mp) \
	atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)
+#define _mtx_release_lock_fetch(mp, vp) \
+	atomic_fcmpset_rel_ptr(&(mp)->mtx_lock, (vp), MTX_UNOWNED)
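Review note: new primitive. Unlike atomic_cmpset_rel_ptr(), the fcmpset variant writes the observed value back through vp when the compare fails, so the caller learns why the release failed without issuing a second load. A self-contained C11 analogue (stand-in names; the kernel uses machine/atomic.h, not <stdatomic.h>):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define MTX_UNOWNED	0x00000000

/* Analogue of atomic_fcmpset_rel_ptr(): release semantics on success,
 * and *expect refreshed with the observed value on failure. */
static bool
fcmpset_rel(_Atomic uintptr_t *p, uintptr_t *expect, uintptr_t newval)
{
	return (atomic_compare_exchange_strong_explicit(p, expect, newval,
	    memory_order_release, memory_order_relaxed));
}

static bool
release_sketch(_Atomic uintptr_t *lockp, uintptr_t tid)
{
	uintptr_t v = tid;	/* expected: owned by us, no flags set */

	if (fcmpset_rel(lockp, &v, MTX_UNOWNED))
		return (true);	/* common case: one atomic op */
	/* v now holds e.g. tid | MTX_CONTESTED; hand it to the slow path. */
	return (false);
}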
/*
 * Full lock operations that are suitable to be inlined in non-debug
 * kernels. If the lock cannot be acquired or released trivially then
 * the work is deferred to another function.
 */

/* Lock a normal mutex. */
#define __mtx_lock(mp, tid, opts, file, line) do { \
	uintptr_t _tid = (uintptr_t)(tid); \
	uintptr_t _v = MTX_UNOWNED; \
 \
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) ||\
	    !_mtx_obtain_lock_fetch((mp), &_v, _tid))) \
-		_mtx_lock_sleep((mp), _v, _tid, (opts), (file), (line));\
+		_mtx_lock_sleep((mp), _v, (opts), (file), (line)); \
} while (0)
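Review note: the slow-path call drops _tid (matching the prototype change above), and the LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) term keeps all probe bookkeeping out of the inline body: with probes enabled the macro simply takes the slow path, which fires them. A minimal control-flow sketch (stub predicates, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

#define MTX_UNOWNED	0x00000000

static bool probes_enabled;	/* stand-in for LOCKSTAT_PROFILE_ENABLED() */

/* Stand-in for _mtx_obtain_lock_fetch(); always "succeeds" here. */
static bool
try_acquire(uintptr_t *vp, uintptr_t tid)
{
	(void)vp; (void)tid;
	return (true);
}

static void
lock_fast_path(uintptr_t tid)
{
	uintptr_t v = MTX_UNOWNED;

	/*
	 * Either term diverts to the slow path: probes enabled (the slow
	 * path does the lockstat accounting) or a failed fcmpset (v then
	 * holds the observed lock word for the slow path to act on).
	 */
	if (probes_enabled || !try_acquire(&v, tid)) {
		/* _mtx_lock_sleep(mp, v, opts, file, line) equivalent */
	}
}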
/*
 * Lock a spin mutex. For spinlocks, we handle recursion inline (it
 * turns out that function calls can be significantly expensive on
 * some architectures). Since spin locks are not _too_ common,
 * inlining this code is not too big a deal.
 */
#ifdef SMP
#define __mtx_lock_spin(mp, tid, opts, file, line) do { \
	uintptr_t _tid = (uintptr_t)(tid); \
	uintptr_t _v = MTX_UNOWNED; \
 \
	spinlock_enter(); \
-	if (!_mtx_obtain_lock_fetch((mp), &_v, _tid)) \
-		_mtx_lock_spin((mp), _v, _tid, (opts), (file), (line)); \
-	else \
-		LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, \
-		    mp, 0, 0, file, line); \
+	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) || \
+	    !_mtx_obtain_lock_fetch((mp), &_v, _tid))) \
+		_mtx_lock_spin((mp), _v, (opts), (file), (line)); \
} while (0)
#define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \
	uintptr_t _tid = (uintptr_t)(tid); \
	int _ret; \
 \
	spinlock_enter(); \
	if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\
		spinlock_exit(); \

--- 30 unchanged lines hidden ---

		_ret = 1; \
	} \
	_ret; \
})
#endif /* SMP */
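Review note: unchanged in this diff, but note the form: __mtx_trylock_spin() is a GNU statement expression (__extension__ ({ ... })), so the block yields _ret as a value and the macro composes in conditionals, with spinlock_exit() already undone on failure. A hedged consumer-side sketch via the public wrapper:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx intr_mtx;	/* assumed initialized with MTX_SPIN */

static int
try_poke(void)
{

	if (mtx_trylock_spin(&intr_mtx) == 0)
		return (EBUSY);	/* lock busy; spinlock state already restored */
	/* ...short critical section, interrupts disabled on this CPU... */
	mtx_unlock_spin(&intr_mtx);
	return (0);
}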
/* Unlock a normal mutex. */
#define __mtx_unlock(mp, tid, opts, file, line) do { \
-	uintptr_t _tid = (uintptr_t)(tid); \
+	uintptr_t _v = (uintptr_t)(tid); \
 \
	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__release) ||\
-	    !_mtx_release_lock((mp), _tid))) \
-		_mtx_unlock_sleep((mp), (opts), (file), (line)); \
+	    !_mtx_release_lock_fetch((mp), &_v))) \
+		_mtx_unlock_sleep((mp), _v, (opts), (file), (line)); \
} while (0)
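Review note: the release path now mirrors the acquire path: _v starts as the expected owner tid and, when _mtx_release_lock_fetch() fails, comes back holding the real lock word, which __mtx_unlock_sleep() receives as its new v argument. That lets the slow path distinguish recursion from contention without re-reading the lock. A hedged sketch of that classification (flag values from this header; logic simplified, not the kernel's):

#include <stdint.h>

#define MTX_RECURSED	0x00000001
#define MTX_CONTESTED	0x00000002
#define MTX_DESTROYED	0x00000004
#define MTX_FLAGMASK	(MTX_RECURSED | MTX_CONTESTED | MTX_DESTROYED)

/* What an unlock slow path can decide from v alone (simplified). */
static int
classify_failed_release(uintptr_t v, uintptr_t tid)
{
	if ((v & ~(uintptr_t)MTX_FLAGMASK) != tid)
		return (-1);	/* not ours: caller error */
	if ((v & MTX_RECURSED) != 0)
		return (1);	/* drop one recursion level, keep the lock */
	if ((v & MTX_CONTESTED) != 0)
		return (2);	/* waiters queued: wake via turnstile */
	return (0);		/* lost a race; retry the fcmpset */
}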
/*
 * Unlock a spin mutex. For spinlocks, we can handle everything
 * inline, as it's pretty simple and a function call would be too
 * expensive (at least on some architectures). Since spin locks are
 * not _too_ common, inlining this code is not too big a deal.
 *

--- 236 unchanged lines hidden ---