subr_witness.c: r207922 (deleted lines) vs. r207929 (added lines)
1/*-
2 * Copyright (c) 2008 Isilon Systems, Inc.
3 * Copyright (c) 2008 Ilya Maykov <ivmaykov@gmail.com>
4 * Copyright (c) 1998 Berkeley Software Design, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 71 unchanged lines hidden ---

80 * you have two threads T1 and T2 and a sleepable lock X. Suppose that T1
81 * acquires X and blocks on Giant. Then suppose that T2 acquires Giant and
82 * blocks on X. When T2 blocks on X, T2 will release Giant allowing T1 to
83 * execute. Thus, acquiring Giant both before and after a sleepable lock
84 * will not result in a lock order reversal.
85 */
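
As a concrete illustration of the scenario described in the comment above, the sketch below shows the two acquisition orders that WITNESS tolerates. It is illustrative only and not part of subr_witness.c; "example_xlock" is a made-up sx(9) lock standing in for the sleepable lock X.

/* Illustrative sketch only -- not part of this file. */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>

static struct sx example_xlock;		/* the sleepable lock "X"; initialized
					   elsewhere with sx_init() */

/* Thread T1: acquires X first, then Giant. */
static void
example_t1(void)
{
	sx_xlock(&example_xlock);
	mtx_lock(&Giant);		/* may have to wait for T2 */
	/* ... */
	mtx_unlock(&Giant);
	sx_xunlock(&example_xlock);
}

/* Thread T2: acquires Giant first, then X. */
static void
example_t2(void)
{
	mtx_lock(&Giant);
	sx_xlock(&example_xlock);	/* sleeping here drops Giant, letting
					   T1 acquire it and make progress */
	/* ... */
	sx_xunlock(&example_xlock);
	mtx_unlock(&Giant);
}

Because blocking on the sleepable lock releases Giant, the two orders cannot deadlock against each other, which is why WITNESS does not flag them as a reversal.
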
86
87#include <sys/cdefs.h>
88__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 207922 2010-05-11 17:01:14Z attilio $");
88__FBSDID("$FreeBSD: head/sys/kern/subr_witness.c 207929 2010-05-11 18:24:22Z attilio $");
89
90#include "opt_ddb.h"
91#include "opt_hwpmc_hooks.h"
92#include "opt_stack.h"
93#include "opt_witness.h"
94
95#include <sys/param.h>
96#include <sys/bus.h>

--- 265 unchanged lines hidden ---

362static struct lock_list_entry *witness_lock_list_get(void);
363static int witness_lock_order_add(struct witness *parent,
364 struct witness *child);
365static int witness_lock_order_check(struct witness *parent,
366 struct witness *child);
367static struct witness_lock_order_data *witness_lock_order_get(
368 struct witness *parent,
369 struct witness *child);
370static void witness_list_lock(struct lock_instance *instance);
370static void witness_list_lock(struct lock_instance *instance,
371 int (*prnt)(const char *fmt, ...));
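
The prototype change above is the core of this revision: witness_list_lock() (and, further down, witness_list_locks() and witness_display_spinlock()) now take a printf-like function pointer, so the same listing code can emit output through printf() on the normal kernel paths or through db_printf() from the DDB debugger. The stand-alone sketch below shows the callback pattern; it is illustrative only, and list_one() is a made-up stand-in rather than kernel code.

/* Illustrative userland sketch of the printf-like callback pattern. */
#include <stdio.h>

/* stand-in for a witness listing routine */
static void
list_one(const char *name, int (*prnt)(const char *fmt, ...))
{
	prnt("exclusive sleep mutex %s locked\n", name);
}

int
main(void)
{
	list_one("Giant", printf);	/* console path: plain printf */
	/* the kernel's DDB path would pass db_printf here instead */
	return (0);
}
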
371static void witness_setflag(struct lock_object *lock, int flag, int set);
372
373#ifdef KDB
374#define witness_debugger(c) _witness_debugger(c, __func__)
375#else
376#define witness_debugger(c)
377#endif
378

--- 1213 unchanged lines hidden ---

1592 return;
1593 if (lle->ll_count != 0) {
1594 for (n = 0; lle != NULL; lle = lle->ll_next)
1595 for (i = lle->ll_count - 1; i >= 0; i--) {
1596 if (n == 0)
1597 printf("Thread %p exiting with the following locks held:\n",
1598 td);
1599 n++;
1600 witness_list_lock(&lle->ll_children[i]);
1601 witness_list_lock(&lle->ll_children[i], printf);
1601
1602 }
1603 panic("Thread %p cannot exit while holding sleeplocks\n", td);
1604 }
1605 witness_lock_list_free(lle);
1606}
1607
1608/*

--- 32 unchanged lines hidden ---

1641 vprintf(fmt, ap);
1642 va_end(ap);
1643 printf(" with the following");
1644 if (flags & WARN_SLEEPOK)
1645 printf(" non-sleepable");
1646 printf(" locks held:\n");
1647 }
1648 n++;
1649 witness_list_lock(lock1);
1650 witness_list_lock(lock1, printf);
1650 }
1651
1652 /*
1653 * Pin the thread in order to avoid problems with thread migration.
 1654 * Once all of the spin lock ownership checks have passed, the
 1655 * thread is on a safe path and can be unpinned.
1656 */
1657 sched_pin();
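
The pin above follows the usual pattern for inspecting per-CPU data from thread context: keeping the thread on its current CPU guarantees that PCPU_GET() keeps referring to the same CPU for the whole check. A minimal sketch of the pattern, using only the PCPU spin-lock list already referenced in this file (illustrative, not part of the function above):

/* Illustrative sketch of the pin/inspect/unpin pattern. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/sched.h>

static int
example_curthread_holds_spinlocks(void)
{
	int held;

	sched_pin();			/* no migration while we look */
	held = (PCPU_GET(spinlocks) != NULL);
	sched_unpin();
	return (held);
}
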

--- 14 unchanged lines hidden ---

1672
1673 va_start(ap, fmt);
1674 vprintf(fmt, ap);
1675 va_end(ap);
1676 printf(" with the following");
1677 if (flags & WARN_SLEEPOK)
1678 printf(" non-sleepable");
1679 printf(" locks held:\n");
1680 n += witness_list_locks(&lock_list);
1681 n += witness_list_locks(&lock_list, printf);
1681 } else
1682 sched_unpin();
1683 if (flags & WARN_PANIC && n)
1684 panic("%s", __func__);
1685 else
1686 witness_debugger(n);
1687 return (n);
1688}

--- 369 unchanged lines hidden ---

2058 instance = &lle->ll_children[i];
2059 if (instance->li_lock == lock)
2060 return (instance);
2061 }
2062 return (NULL);
2063}
2064
2065static void
2066witness_list_lock(struct lock_instance *instance)
2067witness_list_lock(struct lock_instance *instance,
2068 int (*prnt)(const char *fmt, ...))
2067{
2068 struct lock_object *lock;
2069
2070 lock = instance->li_lock;
2071 printf("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2073 prnt("%s %s %s", (instance->li_flags & LI_EXCLUSIVE) != 0 ?
2072 "exclusive" : "shared", LOCK_CLASS(lock)->lc_name, lock->lo_name);
2073 if (lock->lo_witness->w_name != lock->lo_name)
2074 printf(" (%s)", lock->lo_witness->w_name);
2075 printf(" r = %d (%p) locked @ %s:%d\n",
2076 prnt(" (%s)", lock->lo_witness->w_name);
2077 prnt(" r = %d (%p) locked @ %s:%d\n",
2076 instance->li_flags & LI_RECURSEMASK, lock, instance->li_file,
2077 instance->li_line);
2078}
2079
2080#ifdef DDB
2081static int
2082witness_thread_has_locks(struct thread *td)
2083{

--- 12 unchanged lines hidden ---

2096 if (witness_thread_has_locks(td))
2097 return (1);
2098 }
2099 return (0);
2100}
2101#endif
2102
2103int
2104witness_list_locks(struct lock_list_entry **lock_list)
2106witness_list_locks(struct lock_list_entry **lock_list,
2107 int (*prnt)(const char *fmt, ...))
2105{
2106 struct lock_list_entry *lle;
2107 int i, nheld;
2108
2109 nheld = 0;
2110 for (lle = *lock_list; lle != NULL; lle = lle->ll_next)
2111 for (i = lle->ll_count - 1; i >= 0; i--) {
2112 witness_list_lock(&lle->ll_children[i]);
2115 witness_list_lock(&lle->ll_children[i], prnt);
2113 nheld++;
2114 }
2115 return (nheld);
2116}
2117
2118/*
2119 * This is a bit risky at best. We call this function when we have timed
2120 * out acquiring a spin lock, and we assume that the other CPU is stuck
2121 * with this lock held. So, we go groveling around in the other CPU's
2122 * per-cpu data to try to find the lock instance for this spin lock to
2123 * see when it was last acquired.
2124 */
2125void
2126witness_display_spinlock(struct lock_object *lock, struct thread *owner)
2129witness_display_spinlock(struct lock_object *lock, struct thread *owner,
2130 int (*prnt)(const char *fmt, ...))
2127{
2128 struct lock_instance *instance;
2129 struct pcpu *pc;
2130
2131 if (owner->td_critnest == 0 || owner->td_oncpu == NOCPU)
2132 return;
2133 pc = pcpu_find(owner->td_oncpu);
2134 instance = find_instance(pc->pc_spinlocks, lock);
2135 if (instance != NULL)
2136 witness_list_lock(instance);
2140 witness_list_lock(instance, prnt);
2137}
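
With the new third argument, a caller outside of DDB simply passes printf. A hypothetical timeout path might look like the sketch below; example_spin_timeout() is not the actual in-tree caller, just an illustration of how the printf-like argument is supplied.

/* Hypothetical caller sketch -- not the real spin-lock timeout code. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/proc.h>

static void
example_spin_timeout(struct lock_object *lock, struct thread *owner)
{
	printf("spin lock %s held by thread %p for too long\n",
	    lock->lo_name, owner);
	witness_display_spinlock(lock, owner, printf);
}
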
2138
2139void
2140witness_save(struct lock_object *lock, const char **filep, int *linep)
2141{
2142 struct lock_list_entry *lock_list;
2143 struct lock_instance *instance;
2144 struct lock_class *class;

--- 156 unchanged lines hidden ---

2301{
2302
2303 KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
2304 KASSERT(kdb_active, ("%s: not in the debugger", __func__));
2305
2306 if (witness_watch < 1)
2307 return;
2308
2309 witness_list_locks(&td->td_sleeplocks);
2313 witness_list_locks(&td->td_sleeplocks, db_printf);
2310
2311 /*
2312 * We only handle spinlocks if td == curthread. This is somewhat broken
2313 * if td is currently executing on some other CPU and holds spin locks
2314 * as we won't display those locks. If we had a MI way of getting
 2315 * as we won't display those locks. If we had an MI way of getting
2316 * td->td_oncpu to get the list of spinlocks for this thread
2317 * and "fix" this.
2318 *
2319 * That still wouldn't really fix this unless we locked the scheduler
2320 * lock or stopped the other CPU to make sure it wasn't changing the
2321 * list out from under us. It is probably best to just not try to
 2322 * handle threads on other CPUs for now.
2323 */
2324 if (td == curthread && PCPU_GET(spinlocks) != NULL)
2325 witness_list_locks(PCPU_PTR(spinlocks));
2329 witness_list_locks(PCPU_PTR(spinlocks), db_printf);
2326}
2327
2328DB_SHOW_COMMAND(locks, db_witness_list)
2329{
2330 struct thread *td;
2331
2332 if (have_addr)
2333 td = db_lookup_thread(addr, TRUE);

--- 473 unchanged lines hidden ---