Deleted Added
subr_witness.c (72344) subr_witness.c (72376)
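Context for the hunks below: the added lines reach priority fields through a p_pri member (p->p_pri.pri_level, p->p_pri.pri_native) where the deleted lines used p->p_priority and p->p_nativepri, and the MAXPRI bound becomes PRI_MAX. A minimal sketch of the layout those accesses assume follows; only pri_level, pri_native, and the p_pri member itself are confirmed by this diff, while the member types and the other fields shown are illustrative assumptions.

	/*
	 * Hypothetical sketch of the priority sub-structure referenced by the
	 * added lines.  Only pri_level and pri_native appear in this diff;
	 * the u_char type and the remaining members are assumptions.
	 */
	struct priority {
		u_char	pri_class;	/* scheduling class (assumed) */
		u_char	pri_level;	/* current priority level */
		u_char	pri_native;	/* level saved before priority propagation */
		u_char	pri_user;	/* user-mode priority (assumed) */
	};

	struct proc {
		/* ... */
		struct priority	p_pri;	/* replaces p_priority and p_nativepri */
		/* ... */
	};

With that layout, the old SET_PRIO(p, pri) body (p)->p_priority = (pri) becomes (p)->p_pri.pri_level = (pri), which matches the SET_PRIO change at line 109 below.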
1/*-
2 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.

--- 13 unchanged lines hidden ---

22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
29 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
30 * $FreeBSD: head/sys/kern/subr_witness.c 72344 2001-02-11 02:54:16Z bmilekic $
30 * $FreeBSD: head/sys/kern/subr_witness.c 72376 2001-02-12 00:20:08Z jake $
31 */
32
33/*
34 * Machine independent bits of mutex implementation and implementation of
35 * `witness' structure & related debugging routines.
36 */
37
38/*

--- 62 unchanged lines hidden ---

101 * Internal utility macros.
102 */
103#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED)
104
105#define mtx_owner(m) (mtx_unowned((m)) ? NULL \
106 : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK))
107
108#define RETIP(x) *(((uintptr_t *)(&x)) - 1)
109#define SET_PRIO(p, pri) (p)->p_priority = (pri)
109#define SET_PRIO(p, pri) (p)->p_pri.pri_level = (pri)
110
111/*
112 * Early WITNESS-enabled declarations.
113 */
114#ifdef WITNESS
115
116/*
117 * Internal WITNESS routines which must be prototyped early.

--- 57 unchanged lines hidden ---

175 *
176 * NOTE: Prototypes for witness routines are placed at the bottom of the file.
177 */
178static void propagate_priority(struct proc *);
179
180static void
181propagate_priority(struct proc *p)
182{
183 int pri = p->p_priority;
183 int pri = p->p_pri.pri_level;
184 struct mtx *m = p->p_blocked;
185
186 mtx_assert(&sched_lock, MA_OWNED);
187 for (;;) {
188 struct proc *p1;
189
190 p = mtx_owner(m);
191

--- 4 unchanged lines hidden ---

196 * next acquires the mutex.
197 */
198 MPASS(m->mtx_lock == MTX_CONTESTED);
199 return;
200 }
201
202 MPASS(p->p_magic == P_MAGIC);
203 KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex"));
204 if (p->p_priority <= pri)
204 if (p->p_pri.pri_level <= pri)
205 return;
206
207 /*
208 * Bump this process' priority.
209 */
210 SET_PRIO(p, pri);
211
212 /*
213 * If lock holder is actually running, just bump priority.
214 */
215#ifdef SMP
216 /*
217 * For SMP, we can check the p_oncpu field to see if we are
218 * running.
219 */
220 if (p->p_oncpu != 0xff) {
221 MPASS(p->p_stat == SRUN || p->p_stat == SZOMB);
222 return;
223 }
215 if (p->p_oncpu != 0xff) {
216 MPASS(p->p_stat == SRUN || p->p_stat == SZOMB);
217 return;
218 }
224#else
219
225 /*
220 /*
226 * For UP, we check to see if p is curproc (this shouldn't
227 * ever happen however as it would mean we are in a deadlock.)
228 */
229 if (p == curproc) {
230 panic("Deadlock detected");
231 return;
232 }
233#endif
234 /*
235 * If on run queue move to new run queue, and
236 * quit.
237 */
238 if (p->p_stat == SRUN) {
221 * If on run queue move to new run queue, and
222 * quit.
223 */
224 if (p->p_stat == SRUN) {
239 printf("XXX: moving proc %d(%s) to a new run queue\n",
240 p->p_pid, p->p_comm);
241 MPASS(p->p_blocked == NULL);
242 remrunqueue(p);
243 setrunqueue(p);
244 return;
245 }
246
247 /*
248 * If we aren't blocked on a mutex, we should be.

--- 4 unchanged lines hidden ---

253 m->mtx_description));
254
255 /*
256 * Pick up the mutex that p is blocked on.
257 */
258 m = p->p_blocked;
259 MPASS(m != NULL);
260
261 printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid,
262 p->p_comm, m->mtx_description);
263
264 /*
265 * Check if the proc needs to be moved up on
266 * the blocked chain
267 */
268 if (p == TAILQ_FIRST(&m->mtx_blocked)) {
269 printf("XXX: process at head of run queue\n");
270 continue;
271 }
272
273 p1 = TAILQ_PREV(p, rq, p_procq);
274 if (p1->p_priority <= pri) {
275 printf(
276 "XXX: previous process %d(%s) has higher priority\n",
277 p->p_pid, p->p_comm);
253 p1 = TAILQ_PREV(p, procqueue, p_procq);
254 if (p1->p_pri.pri_level <= pri) {
278 continue;
279 }
280
281 /*
282 * Remove proc from blocked chain and determine where
283 * it should be moved up to. Since we know that p1 has
284 * a lower priority than p, we know that at least one
285 * process in the chain has a lower priority and that
286 * p1 will thus not be NULL after the loop.
287 */
288 TAILQ_REMOVE(&m->mtx_blocked, p, p_procq);
289 TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) {
290 MPASS(p1->p_magic == P_MAGIC);
291 if (p1->p_priority > pri)
268 if (p1->p_pri.pri_level > pri)
292 break;
293 }
294
295 MPASS(p1 != NULL);
296 TAILQ_INSERT_BEFORE(p1, p, p_procq);
297 CTR4(KTR_LOCK,
298 "propagate_priority: p %p moved before %p on [%p] %s",
299 p, p1, m, m->mtx_description);

--- 66 unchanged lines hidden ---

366 * Save our priority. Even though p_nativepri is protected by
367 * sched_lock, we don't obtain it here as it can be expensive.
368 * Since this is the only place p_nativepri is set, and since two
369 * CPUs will not be executing the same process concurrently, we know
370 * that no other CPU is going to be messing with this. Also,
371 * p_nativepri is only read when we are blocked on a mutex, so that
372 * can't be happening right now either.
373 */
374 p->p_nativepri = p->p_priority;
351 p->p_pri.pri_native = p->p_pri.pri_level;
375
376 while (!_obtain_lock(m, p)) {
377 uintptr_t v;
378 struct proc *p1;
379
380 mtx_lock_spin(&sched_lock);
381 /*
382 * Check if the lock has been released while spinning for

--- 8 unchanged lines hidden ---

391 * The mutex was marked contested on release. This means that
392 * there are processes blocked on it.
393 */
394 if (v == MTX_CONTESTED) {
395 p1 = TAILQ_FIRST(&m->mtx_blocked);
396 MPASS(p1 != NULL);
397 m->mtx_lock = (uintptr_t)p | MTX_CONTESTED;
398
399 if (p1->p_priority < p->p_priority)
400 SET_PRIO(p, p1->p_priority);
376 if (p1->p_pri.pri_level < p->p_pri.pri_level)
377 SET_PRIO(p, p1->p_pri.pri_level);
401 mtx_unlock_spin(&sched_lock);
402 return;
403 }
404
405 /*
406 * If the mutex isn't already contested and a failure occurs
407 * setting the contested bit, the mutex was either released
408 * or the state of the MTX_RECURSED bit changed.

--- 32 unchanged lines hidden (view full) ---

441 * Put us on the list of threads blocked on this mutex.
442 */
443 if (TAILQ_EMPTY(&m->mtx_blocked)) {
444 p1 = (struct proc *)(m->mtx_lock & MTX_FLAGMASK);
445 LIST_INSERT_HEAD(&p1->p_contested, m, mtx_contested);
446 TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
447 } else {
448 TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq)
449 if (p1->p_priority > p->p_priority)
426 if (p1->p_pri.pri_level > p->p_pri.pri_level)
450 break;
451 if (p1)
452 TAILQ_INSERT_BEFORE(p1, p, p_procq);
453 else
454 TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq);
455 }
456
457 /*
458 * Save who we're blocked on.
459 */
460 p->p_blocked = m;
461 p->p_mtxname = m->mtx_description;
462 p->p_stat = SMTX;
463#if 0
464 propagate_priority(p);
440 propagate_priority(p);
465#endif
466
467 if ((opts & MTX_QUIET) == 0)
468 CTR3(KTR_LOCK,
469 "_mtx_lock_sleep: p %p blocked on [%p] %s", p, m,
470 m->mtx_description);
471
472 mi_switch();
473

--- 86 unchanged lines hidden ---

560 if (TAILQ_EMPTY(&m->mtx_blocked)) {
561 LIST_REMOVE(m, mtx_contested);
562 _release_lock_quick(m);
563 if ((opts & MTX_QUIET) == 0)
564 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m);
565 } else
566 atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED);
567
568 pri = MAXPRI;
543 pri = PRI_MAX;
569 LIST_FOREACH(m1, &p->p_contested, mtx_contested) {
570 int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority;
545 int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_pri.pri_level;
571 if (cp < pri)
572 pri = cp;
573 }
574
575 if (pri > p->p_nativepri)
576 pri = p->p_nativepri;
550 if (pri > p->p_pri.pri_native)
551 pri = p->p_pri.pri_native;
577 SET_PRIO(p, pri);
578
579 if ((opts & MTX_QUIET) == 0)
580 CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p",
581 m, p1);
582
583 p1->p_blocked = NULL;
584 p1->p_mtxname = NULL;
585 p1->p_stat = SRUN;
586 setrunqueue(p1);
587
588 if ((opts & MTX_NOSWITCH) == 0 && p1->p_priority < pri) {
563 if ((opts & MTX_NOSWITCH) == 0 && p1->p_pri.pri_level < pri) {
589#ifdef notyet
590 if (p->p_flag & (P_ITHD | P_SITHD)) {
591 ithd_t *it = (ithd_t *)p;
592
593 if (it->it_interrupted) {
594 if ((opts & MTX_QUIET) == 0)
595 CTR2(KTR_LOCK,
596 "_mtx_unlock_sleep: 0x%x interrupted 0x%x",

--- 1109 unchanged lines hidden ---