Lines Matching refs:this

12  * this code maps all the lock dependencies as they occur in a live kernel
20 * any deadlock at this point.
24 * locks (but of the same class as this lock), this code will detect it.
237 * At this point, if the passed hlock->class_idx is still garbage,
763 * If this indeed happens, let's pretend it does not hurt to continue
766 * to distinguish whether we are in this situation, if it just
819 * Is this the address of a static object:
974 pr_err("you didn't initialize this object before use?\n");
1408 static int add_lock_to_list(struct lock_class *this,
1422 entry->class = this;
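The add_lock_to_list() hits above (source lines 1408 and 1422) show the edge-insertion shape: allocate a list entry, point its ->class at the target class, and chain it onto a dependency list. A minimal userspace sketch of that shape, assuming invented names (lock_entry, add_lock_to_list_sketch) and plain malloc() in place of the kernel's static entry pool:

    #include <stdlib.h>

    struct lock_class_sk;                  /* opaque stand-in for lock_class */

    struct lock_entry {
        struct lock_class_sk *class;       /* the class this edge leads to */
        struct lock_entry *next;
    };

    static int add_lock_to_list_sketch(struct lock_class_sk *this,
                                       struct lock_entry **head)
    {
        struct lock_entry *entry = malloc(sizeof(*entry));

        if (!entry)
            return 0;
        entry->class = this;               /* as at source line 1422 */
        entry->next = *head;               /* prepend to the dependency list */
        *head = entry;
        return 1;
    }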
1741 * Step 1: check whether we have already finished with this one.
1743 * If we have visited all the dependencies from this @lock to
1746 * and visit all the dependencies in the list and mark this
1755 * Step 2: check whether the prev dependency and this one form a strong
1779 * Step 3: we haven't visited this and there is a strong
1780 * dependency path to this, so check with @match.
1781 * If @skip is provided and returns true, we skip this
1782 * lock (and any path this lock is in).
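The Step 1/2/3 comments above (source lines 1741-1782) describe the visit loop of lockdep's breadth-first search over the dependency graph. A loose, self-contained userspace sketch of that shape follows; dep_node, bfs_search and the fixed-size queue are illustrative names, not the kernel's, and the strong-dependency filtering of Step 2 is elided:

    #include <stdbool.h>
    #include <stddef.h>

    struct dep_node {
        const char *name;
        struct dep_node *deps[4];          /* outgoing dependency edges */
        int nr_deps;
        bool visited;                      /* Step 1: skip finished nodes */
    };

    typedef bool (*bfs_match_fn)(struct dep_node *node);
    typedef bool (*bfs_skip_fn)(struct dep_node *node);

    static struct dep_node *bfs_search(struct dep_node *root,
                                       bfs_match_fn match, bfs_skip_fn skip)
    {
        struct dep_node *queue[64];
        int head = 0, tail = 0;

        queue[tail++] = root;
        while (head < tail) {
            struct dep_node *node = queue[head++];
            int i;

            /* Step 1: already finished, its dependencies were queued */
            if (node->visited)
                continue;
            node->visited = true;

            /* Step 2 (strong-dependency check) elided in this sketch */

            /* Step 3: unvisited with a path to it: consult @skip/@match */
            if (skip && skip(node))
                continue;                  /* prune node and paths through it */
            if (match && match(node))
                return node;               /* found the target */

            for (i = 0; i < node->nr_deps && tail < 64; i++)
                queue[tail++] = node->deps[i];
        }
        return NULL;
    }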
1856 * Print a dependency chain entry (this is only done when a deadlock
1888 * an intermediate lock (middle_class) where this lock is
1985 * b) A -> .. -> B is -(*N)-> (nothing is stronger than this)
2024 static noinline void print_circular_bug(struct lock_list *this,
2037 this->trace = save_trace();
2038 if (!this->trace)
2053 printk("\nother info that might help us debug this:\n\n");
2080 static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
2085 __bfs_forwards(this, (void *)&count, noop_count, NULL, &target_entry);
2092 struct lock_list this;
2094 __bfs_init_root(&this, class);
2098 ret = __lockdep_count_forward_deps(&this);
2105 static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
2110 __bfs_backwards(this, (void *)&count, noop_count, NULL, &target_entry);
2118 struct lock_list this;
2120 __bfs_init_root(&this, class);
2124 ret = __lockdep_count_backward_deps(&this);
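Source lines 2080-2124 show the dependency-counting pattern: seed a root lock_list with __bfs_init_root(), then run the BFS with a callback that matches nothing and only bumps a counter (noop_count in the kernel). A hedged self-contained sketch of that idea, with invented names and a simplified graph type:

    #include <stdbool.h>

    struct dep_node {
        struct dep_node *deps[4];
        int nr_deps;
        bool visited;
    };

    /* In the spirit of __lockdep_count_forward_deps(): walk everything
     * reachable from @root, match nothing, count the nodes seen. */
    static unsigned long count_forward_deps(struct dep_node *root)
    {
        struct dep_node *queue[64];
        int head = 0, tail = 0;
        unsigned long count = 0;

        queue[tail++] = root;
        while (head < tail) {
            struct dep_node *node = queue[head++];
            int i;

            if (node->visited)
                continue;
            node->visited = true;
            count++;                       /* what noop_count() does per hit */

            for (i = 0; i < node->nr_deps && tail < 64; i++)
                queue[tail++] = node->deps[i];
        }
        return count;                      /* includes @root, a simplification */
    }

The backward variant (lines 2105-2124) is the same walk over the reverse edges.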
2230 * stands for whether A -> B only has -(S*)-> (in this case ->only_xr is true).
2381 * ->links_to is A. In this case, we can say the lock_list is
2385 * and ->links_to is B. In this case, we can say the lock_list is
2391 * 2) A "helper" lock_list is introduced during BFS, this lock_list doesn't
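Lines 2230 and 2381-2391 describe what a lock_list entry encodes. A hedged sketch of just those fields; the field names follow the kernel's struct lock_list as quoted above, everything else is trimmed:

    struct lock_class;                     /* opaque here */

    struct lock_list_sketch {
        struct lock_class *class;          /* forward edge A -> B: this is B */
        struct lock_class *links_to;       /* forward edge A -> B: this is A */
        int only_xr;                       /* set when A -> B only has -(S*)-> */
        struct lock_list_sketch *parent;   /* BFS "helper" use: path back */
    };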
2522 * an intermediate lock (middle_class) where this lock is
2587 pr_warn("\nand this task is already holding:\n");
2595 pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
2609 pr_warn("\nother info that might help us debug this:\n\n");
2707 * will be marked as LOCK_ENABLE_IRQ_HARDIRQ_READ, and this is a possible
2734 * exclusive. I.e., this is the opposite of exclusive_mask().
2784 * lock with a hardirq-unsafe lock - to achieve this we search
2795 struct lock_list this, that;
2802 bfs_init_rootb(&this, prev);
2804 ret = __bfs_backwards(&this, &usage_mask, usage_accumulate, usage_skip, NULL);
2847 ret = find_usage_backwards(&this, backward_mask, &target_entry);
2865 print_bad_irq_dependency(curr, &this, &that,
2912 * we achieve this by setting the initial node's ->only_xr to true in
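Source lines 2784-2912 outline check_irq_usage()'s two-pass shape: a backward walk from the prior lock accumulates which irq-usage bits exist behind it, then a forward search from the new lock looks for a conflicting usage ahead. A loose sketch of that accumulate-then-check flow; the usage bits, node layout and helper names are all invented, and the walks are simplified (the kernel uses a full BFS and handles revisits, this sketch assumes an acyclic graph):

    #include <stdbool.h>

    #define USAGE_HARDIRQ_SAFE    (1u << 0)   /* invented bit layout */
    #define USAGE_HARDIRQ_UNSAFE  (1u << 1)

    struct node {
        unsigned int usage_mask;
        struct node *prev_deps[4];         /* edges for the backward walk */
        int nr_prev;
        struct node *next_deps[4];         /* edges for the forward walk */
        int nr_next;
    };

    /* Pass 1: OR together the usage of everything behind @prev
     * (one level shown for brevity; the real walk is a BFS). */
    static unsigned int accumulate_backward_usage(struct node *prev)
    {
        unsigned int mask = prev->usage_mask;

        for (int i = 0; i < prev->nr_prev; i++)
            mask |= prev->prev_deps[i]->usage_mask;
        return mask;
    }

    /* Pass 2: does anything ahead of @next conflict with @mask?
     * An irq-safe lock behind connected to an irq-unsafe lock ahead
     * is exactly the bug reported at source line 2595. */
    static bool forward_usage_conflicts(struct node *next, unsigned int mask)
    {
        if ((mask & USAGE_HARDIRQ_SAFE) &&
            (next->usage_mask & USAGE_HARDIRQ_UNSAFE))
            return true;
        for (int i = 0; i < next->nr_next; i++)
            if (forward_usage_conflicts(next->next_deps[i], mask))
                return true;
        return false;
    }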
3007 pr_warn("\nother info that might help us debug this:\n");
3018 * (Note that this has to be done separately, because the graph cannot
3056 * We're holding the nest_lock, which serializes this lock's
3126 * create a circular dependency in the graph. (We do this by
3144 * (this may occur even though this is a new chain: consider
3224 * the end of this context's lock-chain - whichever comes first.
3241 * At least two relevant locks must exist for this
3260 * own direct dependencies already, so this
3466 * pointers. Make sure this bit isn't set in 'normal' class_idx usage.
3705 * disabled to make this an IRQ-safe lock.. for recursion reasons
3773 * add it and return 1 - in this case the new dependency chain is
3832 * the dependencies only if this is a new dependency chain.
3841 * - is irq-safe, if this lock is irq-unsafe
3842 * - is softirq-safe, if this lock is hardirq-unsafe
3861 * Add dependency only if this lock is not the head
3947 static int mark_lock(struct task_struct *curr, struct held_lock *this,
3968 print_usage_bug(struct task_struct *curr, struct held_lock *this,
3989 print_lock(this);
3992 print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
3995 pr_warn("\nother info that might help us debug this:\n");
3996 print_usage_bug_scenario(this);
4008 valid_state(struct task_struct *curr, struct held_lock *this,
4011 if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
4013 print_usage_bug(curr, this, bad_bit, new_bit);
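The valid_state() hits above (source lines 4008-4013) show the core test: a new usage bit is invalid if the class already carries the conflicting (bad) bit in its usage_mask. A minimal sketch of that check, assuming simplified names and plain stderr reporting in place of the kernel's full usage-bug splat:

    #include <stdbool.h>
    #include <stdio.h>

    struct class_state {
        unsigned long usage_mask;
        const char *name;
    };

    static bool valid_state_sketch(struct class_state *c,
                                   int new_bit, int bad_bit)
    {
        if (c->usage_mask & (1UL << bad_bit)) {
            fprintf(stderr, "inconsistent usage of %s: bit %d vs bad bit %d\n",
                    c->name, new_bit, bad_bit);
            return false;                  /* kernel calls print_usage_bug() */
        }
        return true;
    }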
4026 struct held_lock *this, int forwards,
4043 print_lock(this);
4045 pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
4047 pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
4051 pr_warn("\nother info that might help us debug this:\n");
4084 * Prove that in the forwards-direction subgraph starting at <this>
4088 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
4097 bfs_init_root(&root, this);
4109 this, 1, state_name(bit));
4112 this, 1, state_name(read_bit));
4119 * Prove that in the backwards-direction subgraph starting at <this>
4123 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
4132 bfs_init_rootb(&root, this);
4144 this, 0, state_name(bit));
4147 this, 0, state_name(read_bit));
4205 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
4213 * Validate that this particular lock does not have conflicting
4216 if (!valid_state(curr, this, new_bit, excl_bit))
4222 if (!read && !valid_state(curr, this, new_bit,
4236 if (!check_usage_backwards(curr, this, excl_bit))
4243 if (!check_usage_forwards(curr, this, excl_bit))
4247 if (state_verbose(new_bit, hlock_class(this)))
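Source lines 4084-4247 sketch out the irq-state marking flow: mark_lock_irq() first rejects conflicting states via valid_state(), then proves the graph clean with check_usage_backwards() for ENABLED bits and check_usage_forwards() for USED_IN bits. A hedged, compilable outline of that dispatch order; the stubs always pass, and "+1" merely stands in for the kernel's read-flavour bit offset:

    #include <stdbool.h>

    struct held { int dummy; };            /* stand-in for held_lock */

    /* Stubs for the real checks; trivially true here. */
    static bool valid_state_ok(struct held *h, int new_bit, int excl_bit)
    { (void)h; (void)new_bit; (void)excl_bit; return true; }
    static bool check_usage_forwards_ok(struct held *h, int excl_bit)
    { (void)h; (void)excl_bit; return true; }
    static bool check_usage_backwards_ok(struct held *h, int excl_bit)
    { (void)h; (void)excl_bit; return true; }

    static bool mark_lock_irq_sketch(struct held *this, int new_bit,
                                     int excl_bit, bool enabled_dir, bool read)
    {
        /* 1) the class must not already carry the conflicting bit */
        if (!valid_state_ok(this, new_bit, excl_bit))
            return false;

        /* 2) a write usage also conflicts with the read flavour */
        if (!read && !valid_state_ok(this, new_bit, excl_bit + 1))
            return false;

        /* 3) ENABLED bits search backwards for a USED_IN lock;
         *    USED_IN bits search forwards for an ENABLED lock. Either
         *    way proves no irq-safe -> irq-unsafe path exists. */
        if (enabled_dir)
            return check_usage_backwards_ok(this, excl_bit);
        return check_usage_forwards_ok(this, excl_bit);
    }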
4297 * this bit from being set before)
4328 * so this is racy by nature but losing one hit
4344 * See the fine text that goes along with this variable definition.
4397 * so this is racy by nature but losing one hit
4625 * hash key (this also prevents the checking and the
4637 static int mark_lock(struct task_struct *curr, struct held_lock *this,
4647 if (new_bit == LOCK_USED && this->read)
4656 if (likely(hlock_class(this)->usage_mask & new_mask))
4664 if (unlikely(hlock_class(this)->usage_mask & new_mask))
4667 if (!hlock_class(this)->usage_mask)
4670 hlock_class(this)->usage_mask |= new_mask;
4673 if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
4678 ret = mark_lock_irq(curr, this, new_bit);
4691 print_lock(this);
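The mark_lock() hits above (source lines 4637-4691) show the bit-update path: bail out fast if the usage bit is already set, otherwise record it once, save a stack trace, and hand irq-related bits to mark_lock_irq(). A simplified sketch of that path, with the graph lock, the LOCK_USED bookkeeping and the real save_trace() all abstracted away:

    #include <stdbool.h>

    struct class_state {
        unsigned long usage_mask;
        bool have_trace[64];               /* stands in for usage_traces[] */
    };

    static bool mark_lock_sketch(struct class_state *c, int new_bit)
    {
        unsigned long new_mask = 1UL << new_bit;

        if (c->usage_mask & new_mask)      /* likely: nothing new to record */
            return true;

        c->usage_mask |= new_mask;
        c->have_trace[new_bit] = true;     /* stands in for save_trace() */
        /* the kernel then runs mark_lock_irq() for irq-related bits */
        return true;
    }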
4743 pr_warn("other info that might help us debug this:\n");
4811 * Allow override for annotations -- this is typically
4870 * Can't be having no nameless bastards around this place!
4966 pr_warn("\nbut this task is not holding:\n");
4972 pr_warn("\nother info that might help us debug this:\n");
5066 * NULL like.. I bet this mushroom I ate was good!
5190 pr_warn("\nother info that might help us debug this:\n");
5400 * Remove the lock from the list of currently held locks - this gets
5417 * So we're all set to release this lock.. wait what lock? We don't
5463 * lock. In this case, we are done!
5520 * Grab 16bits of randomness; this is sufficient to not
5703 * 1. We force lockdep to think this way in selftests or
5784 * No actual critical section is created by the APIs annotated with this: these
5909 pr_warn("\nother info that might help us debug this:\n");
5927 * Whee, we contended on this lock, except it seems we're not
5969 * Yay, we acquired ownership of this lock we didn't try to
6077 * we found a match we can break out of this loop.
6127 * Remove all dependencies this lock is
6366 * Remove all classes this lock might have:
6550 * is destroyed or reinitialized - this code checks whether there is
6631 * Careful: only use this function if you are sure that
6681 pr_warn("\nother info that might help us debug this:\n\n");