--- kern_mutex.c (revision 165265)
+++ kern_mutex.c (revision 167012)
/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.

--- 20 unchanged lines hidden ---

 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Machine independent bits of mutex implementation.
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 165265 2006-12-16 02:37:58Z kmacy $");
+__FBSDID("$FreeBSD: head/sys/kern/kern_mutex.c 167012 2007-02-26 08:26:44Z kmacy $");

#include "opt_adaptive_mutexes.h"
#include "opt_ddb.h"
#include "opt_global.h"
#include "opt_mutex_wake_all.h"
#include "opt_sched.h"

#include <sys/param.h>

--- 86 unchanged lines hidden ---


/*
 * Function versions of the inlined __mtx_* macros.  These are used by
 * modules and can also be called from assembly language if needed.
 */
void
_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
{
-        uint64_t waittime;

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_lock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);

-        lock_profile_waitstart(&waittime);
        _get_sleep_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
        curthread->td_locks++;
-        lock_profile_obtain_lock_success(&m->mtx_object, waittime, file, line);
}

void
_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,

--- 10 unchanged lines hidden ---

        lock_profile_release_lock(&m->mtx_object);
        _rel_sleep_lock(m, curthread, opts, file, line);
}

void
_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

-        uint64_t waittime;
-
        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_spin,
            ("mtx_lock_spin() of sleep mutex %s @ %s:%d",
            m->mtx_object.lo_name, file, line));
        WITNESS_CHECKORDER(&m->mtx_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
            file, line);
-        lock_profile_waitstart(&waittime);
        _get_spin_lock(m, curthread, opts, file, line);
        LOCK_LOG_LOCK("LOCK", &m->mtx_object, opts, m->mtx_recurse, file,
            line);
        WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE, file, line);
-        lock_profile_obtain_lock_success(&m->mtx_object, waittime, file, line);
}

void
_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
{

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,

--- 12 unchanged lines hidden ---

/*
 * The important part of mtx_trylock{,_flags}()
 * Tries to acquire lock `m.'  If this function is called on a mutex that
 * is already owned, it will recursively acquire the lock.
 */
int
_mtx_trylock(struct mtx *m, int opts, const char *file, int line)
{
-        int rval;
+        int rval, contested = 0;
        uint64_t waittime = 0;

        MPASS(curthread != NULL);
        KASSERT(m->mtx_lock != MTX_DESTROYED,
            ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
        KASSERT(LOCK_CLASS(&m->mtx_object) == &lock_class_mtx_sleep,
            ("mtx_trylock() of spin mutex %s @ %s:%d", m->mtx_object.lo_name,
            file, line));

        if (mtx_owned(m) && (m->mtx_object.lo_flags & LO_RECURSABLE) != 0) {
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                rval = 1;
        } else
                rval = _obtain_lock(m, (uintptr_t)curthread);

        LOCK_LOG_TRY("LOCK", &m->mtx_object, opts, rval, file, line);
        if (rval) {
                WITNESS_LOCK(&m->mtx_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
                    file, line);
                curthread->td_locks++;
-                lock_profile_obtain_lock_success(&m->mtx_object, waittime, file, line);
+                if (m->mtx_recurse == 0)
+                        lock_profile_obtain_lock_success(&m->mtx_object, contested,
+                            waittime, file, line);

        }

        return (rval);
}

/*
 * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.

--- 7 unchanged lines hidden ---

{
#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
        volatile struct thread *owner;
#endif
#ifdef KTR
        int cont_logged = 0;
#endif
        uintptr_t v;
-        int contested = 0;

        if (mtx_owned(m)) {
                KASSERT((m->mtx_object.lo_flags & LO_RECURSABLE) != 0,
                    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
                    m->mtx_object.lo_name, file, line));
                m->mtx_recurse++;
                atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
                if (LOCK_LOG_TEST(&m->mtx_object, opts))
                        CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
                return;
        }

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR4(KTR_LOCK,
                    "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d",
                    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);

        while (!_obtain_lock(m, tid)) {
-                lock_profile_obtain_lock_failed(&m->mtx_object, &contested);
                turnstile_lock(&m->mtx_object);
                v = m->mtx_lock;

                /*
                 * Check if the lock has been released while spinning for
                 * the turnstile chain lock.
                 */
                if (v == MTX_UNOWNED) {

--- 74 unchanged lines hidden ---

        }
#ifdef KTR
        if (cont_logged) {
                CTR4(KTR_CONTENTION,
                    "contention end: %s acquired by %p at %s:%d",
                    m->mtx_object.lo_name, (void *)tid, file, line);
        }
#endif
-#ifdef LOCK_PROFILING
-        m->mtx_object.lo_profile_obj.lpo_contest_holding = 0;
-        if (contested)
-                m->mtx_object.lo_profile_obj.lpo_contest_locking++;
-#endif
        return;
}

#ifdef SMP
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
+        int i = 0;
        struct thread *td;
-        int contested = 0, i = 0;

        if (LOCK_LOG_TEST(&m->mtx_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

        while (!_obtain_lock(m, tid)) {
-                lock_profile_obtain_lock_failed(&m->mtx_object, &contested);

                /* Give interrupts a chance while we spin. */
                spinlock_exit();
                while (m->mtx_lock != MTX_UNOWNED) {
                        if (i++ < 10000000) {
                                cpu_spinwait();
                                continue;
                        }

--- 352 unchanged lines hidden ---
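
The comment above _mtx_lock_flags() notes that these function versions of the __mtx_* macros exist so that kernel modules can call them, and the comment above _mtx_trylock() notes that a trylock on an already-owned recursable mutex acquires it recursively. The following is a minimal sketch of how a module might exercise that interface through the public mtx(9) wrappers; example_mtx, example_count and the example_* functions are invented names for illustration and are not part of this file.

/*
 * Hypothetical module-side usage of the mutex interface implemented above
 * (sketch only; all "example_*" names are invented for illustration).
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_mtx;
static int example_count;

static void
example_init(void)
{
        /* MTX_DEF creates an ordinary (sleep) mutex; MTX_RECURSE permits
         * the recursive acquisition path seen in _mtx_trylock(). */
        mtx_init(&example_mtx, "example", NULL, MTX_DEF | MTX_RECURSE);
}

static void
example_poll(void)
{
        /* mtx_trylock() returns non-zero when the lock was obtained,
         * including the recursive case on a recursable mutex. */
        if (mtx_trylock(&example_mtx)) {
                example_count++;
                mtx_unlock(&example_mtx);
        }
}

static void
example_fini(void)
{
        mtx_lock(&example_mtx);
        example_count = 0;
        mtx_unlock(&example_mtx);
        mtx_destroy(&example_mtx);
}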
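
The KASSERTs in _mtx_lock_flags() and _mtx_lock_spin_flags() enforce that sleep and spin mutexes are only taken through their matching entry points (lock_class_mtx_sleep versus lock_class_mtx_spin). A small sketch of the spin-mutex side of that pairing follows; example_spin_mtx, example_events and example_hw_event() are invented names, not part of this file.

/*
 * Hypothetical spin-mutex usage matching the lock_class_mtx_spin
 * assertions above (sketch only; names invented for illustration).
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_spin_mtx;
static int example_events;

static void
example_spin_init(void)
{
        /* MTX_SPIN creates a spin mutex; it must be acquired with
         * mtx_lock_spin()/mtx_unlock_spin(), never mtx_lock(). */
        mtx_init(&example_spin_mtx, "example spin", NULL, MTX_SPIN);
}

static void
example_hw_event(void)
{
        /* Spin mutexes block interrupts while held, so keep the
         * critical section short. */
        mtx_lock_spin(&example_spin_mtx);
        example_events++;
        mtx_unlock_spin(&example_spin_mtx);
}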