Lines matching refs:prev (reference search over the kernel's lockdep code)

1448  * indicates that adding the <prev> -> <next> lock dependency will
1592 * For dependency @prev -> @next:
1594 * SR: @prev is shared reader (->read != 0) and @next is recursive reader
1596 * ER: @prev is exclusive locker (->read == 0) and @next is recursive reader
1597 * SN: @prev is shared reader and @next is non-recursive locker (->read != 2)
1598 * EN: @prev is exclusive locker and @next is non-recursive locker
1601 * bit0 is prev->read == 0
1615 __calc_dep_bit(struct held_lock *prev, struct held_lock *next)
1617 return (prev->read == 0) + ((next->read != 2) << 1);
1620 static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next)
1622 return 1U << __calc_dep_bit(prev, next);
1626 * calculate the dep_bit for backwards edges. We care about whether @prev is
1630 __calc_dep_bitb(struct held_lock *prev, struct held_lock *next)
1632 return (next->read != 2) + ((prev->read == 0) << 1);
1635 static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next)
1637 return 1U << __calc_dep_bitb(prev, next);
1657 * that <prev> -> @hlock and @hlock -> <whatever __bfs() found> is not -(*R)->
1713 * has -(*R)-> in the search, and if it does (prev only has -(*R)->), we
1755 * Step 2: check whether prev dependency and this form a strong
1758 if (lock->parent) { /* Parent exists, check prev dependency */
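
The "Step 2" fragments (source lines 1657-1758) filter the graph walk down to strong dependency paths: an edge that is only -(*R)-> must not be followed by a -(S*)-> edge. A rough standalone model of that per-step filtering, with DEP_*_MASK values assumed to follow the bit layout sketched above (names are illustrative, not the kernel's exact definitions):

#include <stdbool.h>
#include <stdio.h>

/* Masks for the four dependency kinds, following the bit layout sketched above. */
#define DEP_SR_MASK (1U << 0)   /* shared    -> recursive    : -(SR)-> */
#define DEP_ER_MASK (1U << 1)   /* exclusive -> recursive    : -(ER)-> */
#define DEP_SN_MASK (1U << 2)   /* shared    -> non-recursive: -(SN)-> */
#define DEP_EN_MASK (1U << 3)   /* exclusive -> non-recursive: -(EN)-> */

/*
 * Given the dep mask of the next edge and whether the path so far ends in
 * -(*R)-> only, decide whether the path stays strong; if it does, report
 * whether it now ends in -(*R)-> only.  Returns false when the edge must
 * be skipped (it would create a -(*R)-> -(S*)-> pair).
 */
static bool step_is_strong(unsigned int dep, bool prev_only_xr, bool *next_only_xr)
{
	/* -(*R)-> followed by -(S*)-> is not strong: drop the S* alternatives. */
	if (prev_only_xr)
		dep &= ~(DEP_SR_MASK | DEP_SN_MASK);

	if (!dep)
		return false;           /* nothing usable left, skip this edge */

	/* If only -(*R)-> edges remain, the next step must avoid -(S*)->. */
	*next_only_xr = !(dep & (DEP_SN_MASK | DEP_EN_MASK));
	return true;
}

int main(void)
{
	bool only_xr;

	/* Previous step ended in -(*R)-> and this edge is only -(SN)->: skipped. */
	printf("%d\n", step_is_strong(DEP_SN_MASK, true, &only_xr));   /* 0 */

	/* The same edge after a non-R step is usable and ends in a write. */
	printf("%d\n", step_is_strong(DEP_SN_MASK, false, &only_xr));  /* 1 */
	return 0;
}
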
2564 struct held_lock *prev,
2588 print_lock(prev);
2590 print_lock_name(prev, hlock_class(prev));
2611 hlock_class(prev), hlock_class(next));
2785 * the backwards-subgraph starting at <prev>, and the
2788 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
2802 bfs_init_rootb(&this, prev);
2867 prev, next,
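
The check_irq_usage fragments (source lines 2785-2867) rest on one observation: adding prev -> next glues everything backward-reachable from prev to everything forward-reachable from next, so no irq-safe class behind prev may meet an irq-unsafe class ahead of next. A toy standalone model of that compatibility test (the usage bit names and single-bit masks are made up for the example; the real check also covers softirqs and read/write variants):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative usage bits, loosely modelled on lockdep's usage masks. */
#define USED_IN_HARDIRQ (1U << 0)   /* some class in the set is hardirq-safe   */
#define ENABLED_HARDIRQ (1U << 1)   /* some class in the set is hardirq-unsafe */

/*
 * Reject the new dependency when a hardirq-safe class in the backward
 * subgraph of prev would become connected to a hardirq-unsafe class in
 * the forward subgraph of next.
 */
static bool irq_usage_compatible(unsigned int backward_mask_of_prev,
				 unsigned int forward_mask_of_next)
{
	return !((backward_mask_of_prev & USED_IN_HARDIRQ) &&
		 (forward_mask_of_next & ENABLED_HARDIRQ));
}

int main(void)
{
	/* Hardirq-safe behind prev meeting hardirq-unsafe ahead of next: rejected. */
	printf("%d\n", irq_usage_compatible(USED_IN_HARDIRQ, ENABLED_HARDIRQ));  /* 0 */
	printf("%d\n", irq_usage_compatible(0, ENABLED_HARDIRQ));                /* 1 */
	return 0;
}
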
2877 struct held_lock *prev, struct held_lock *next)
2913 * that case. And if <prev> is S, we set initial ->only_xr to false
2967 struct lock_class *prev = hlock_class(prv);
2973 __print_lock_name(prv, prev);
2983 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
2986 struct lock_class *class = hlock_class(prev);
3000 print_lock(prev);
3004 class->cmp_fn(prev->instance, next->instance));
3008 print_deadlock_scenario(next, prev);
3029 struct held_lock *prev;
3034 prev = curr->held_locks + i;
3036 if (prev->instance == next->nest_lock)
3037 nest = prev;
3039 if (hlock_class(prev) != hlock_class(next))
3046 if ((next->read == 2) && prev->read)
3049 class = hlock_class(prev);
3052 class->cmp_fn(prev->instance, next->instance) < 0)
3062 print_deadlock_bug(curr, prev, next);
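
The fragments from source lines 3029-3062 scan the currently held locks for a same-class acquisition that would self-deadlock, while remembering whether the declared nest_lock is held and allowing read-after-read recursion and cmp_fn-ordered nesting. A simplified standalone model of that scan (struct layout, helper names and return values are stand-ins, not the kernel API):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for held_lock (illustration only). */
struct held_lock {
	int class_idx;            /* which lock class this acquisition belongs to */
	int read;                 /* 0: write, 1: read, 2: recursive read */
	const void *instance;     /* the lock object itself */
	const void *nest_lock;    /* outer lock that serializes nesting, if any */
};

/* Optional per-class comparator: < 0 means "prev before next" is a valid order. */
typedef int (*cmp_fn_t)(const void *prev, const void *next);

/*
 * Return 1 if acquiring @next is safe, 2 if it is safe only because the
 * nest_lock is held, 0 if it looks like a same-class self-deadlock.
 */
static int would_self_deadlock(const struct held_lock *held, int depth,
			       const struct held_lock *next, cmp_fn_t cmp_fn)
{
	const struct held_lock *nest = NULL;

	for (int i = 0; i < depth; i++) {
		const struct held_lock *prev = &held[i];

		if (prev->instance == next->nest_lock)
			nest = prev;                    /* the nesting lock is held */

		if (prev->class_idx != next->class_idx)
			continue;                       /* different class: no recursion */

		if (next->read == 2 && prev->read)
			continue;                       /* read-after-read recursion is fine */

		if (cmp_fn && cmp_fn(prev->instance, next->instance) < 0)
			continue;                       /* comparator says the order is valid */

		return nest ? 2 : 0;                    /* nested under nest_lock, or a bug */
	}
	return 1;
}

int main(void)
{
	int a, b;
	struct held_lock held[] = { { .class_idx = 1, .read = 0, .instance = &a } };
	struct held_lock next  =   { .class_idx = 1, .read = 0, .instance = &b };

	printf("%d\n", would_self_deadlock(held, 1, &next, NULL));  /* 0: same class twice */
	return 0;
}
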
3072 * - would the adding of the <prev> -> <next> dependency create a
3075 * - does the new prev->next dependency connect any hardirq-safe lock
3076 * (in the full backwards-subgraph starting at <prev>) with any
3080 * - does the new prev->next dependency connect any softirq-safe lock
3081 * (in the full backwards-subgraph starting at <prev>) with any
3091 check_prev_add(struct task_struct *curr, struct held_lock *prev,
3098 if (!hlock_class(prev)->key || !hlock_class(next)->key) {
3105 WARN_ONCE(!debug_locks_silent && !hlock_class(prev)->key,
3107 hlock_class(prev),
3108 hlock_class(prev)->name);
3116 if (prev->class_idx == next->class_idx) {
3117 struct lock_class *class = hlock_class(prev);
3120 class->cmp_fn(prev->instance, next->instance) < 0)
3125 * Prove that the new <prev> -> <next> dependency would not
3128 * and check whether we can reach <prev>.)
3134 ret = check_noncircular(next, prev, trace);
3138 if (!check_irq_usage(curr, prev, next))
3142 * Is the <prev> -> <next> dependency already present?
3149 list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
3153 entry->dep |= calc_dep(prev, next);
3166 * <prev>::locks_after contains <next> while
3167 * <next>::locks_before doesn't contain <prev>. In
3172 if (entry->class == hlock_class(prev)) {
3175 entry->dep |= calc_depb(prev, next);
3180 /* <prev> is not found in <next>::locks_before */
3186 * Is the <prev> -> <next> link redundant?
3188 ret = check_redundant(prev, next);
3204 ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
3205 &hlock_class(prev)->locks_after, distance,
3206 calc_dep(prev, next), *trace);
3211 ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
3213 calc_depb(prev, next), *trace);
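
Taken together, the check_prev_add fragments (source lines 3091-3213) show the order in which a new prev -> next dependency is vetted before being linked into both per-class lists: cycle check, irq-usage check, reuse of an existing edge, redundancy check, then the forward and backward list insertions. A compressed standalone outline of that order, with every helper stubbed out and the result collapsed to a boolean (none of these stub names are the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real lockdep helpers (illustration only). */
static bool reaches(int from, int to)        { return false; } /* forward walk: can @from reach @to? */
static bool irq_usage_ok(int prev, int next) { return true;  }
static bool edge_exists(int prev, int next)  { return false; }
static bool redundant(int prev, int next)    { return false; }
static void add_edge(int from, int to)       { printf("edge %d -> %d\n", from, to); }

/* Order of checks when a new prev -> next dependency is recorded. */
static bool record_dependency(int prev, int next)
{
	if (reaches(next, prev))        /* would close a cycle: deadlock */
		return false;
	if (!irq_usage_ok(prev, next))  /* irq-safe -> irq-unsafe connection */
		return false;
	if (edge_exists(prev, next))    /* already known: just strengthen its dep mask */
		return true;
	if (redundant(prev, next))      /* already covered by a stronger existing path */
		return true;
	add_edge(prev, next);           /* forward list: prev's locks_after */
	add_edge(next, prev);           /* backward list: next's locks_before */
	return true;
}

int main(void)
{
	return record_dependency(1, 2) ? 0 : 1;
}
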
3337 #define for_each_chain_block(bucket, prev, curr) \
3338 for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \
3340 (prev) = (curr), (curr) = chain_block_next(curr))
3384 int prev, curr;
3406 for_each_chain_block(0, prev, curr) {
3411 if (prev < 0)
3414 init_chain_block(prev, offset, 0, 0);
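
for_each_chain_block (source lines 3337-3414) iterates a free list of chain blocks stored inside an integer-indexed array, carrying both the previous and current index so the caller can unlink or splice in place (prev < 0 meaning curr is the bucket head). A toy standalone model of that iteration pattern (the array, macro and variable names are invented for the example):

#include <stdio.h>

/*
 * Toy model of a bucketed free list kept inside an int array: each free
 * slot stores the index of the next free slot, -1 terminates the list.
 */
#define NR_SLOTS 8
static int next_slot[NR_SLOTS];
static int bucket_head = 3;      /* example free list: 3 -> 5 -> 0 -> end */

#define for_each_free_block(prev, curr)			\
	for ((prev) = -1, (curr) = bucket_head;		\
	     (curr) >= 0;				\
	     (prev) = (curr), (curr) = next_slot[curr])

int main(void)
{
	int prev, curr;

	next_slot[3] = 5;
	next_slot[5] = 0;
	next_slot[0] = -1;

	/* Walk the list; tracking prev lets a caller unlink curr in place. */
	for_each_free_block(prev, curr)
		printf("prev=%d curr=%d\n", prev, curr);

	return 0;
}
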
4626 * adding of the dependency to 'prev'):
4788 struct held_lock *prev = curr->held_locks + depth;
4789 if (prev->irq_context != next->irq_context)
4797 struct held_lock *prev = curr->held_locks + depth;
4798 struct lock_class *class = hlock_class(prev);
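
The last two fragments (source lines 4788-4798) come from a walk over the held-lock stack: scan backwards until the irq_context differs from next's to find where the current context begins, then visit each lock held in that context and resolve its class. A minimal standalone model of just that scan, with the structures reduced to the two fields it touches (assumed layout, not the kernel's):

#include <stdio.h>

/* Reduced held_lock: only the fields this scan needs (illustrative). */
struct held_lock {
	int irq_context;   /* which irq context the lock was taken in */
	int class_idx;     /* stand-in for hlock_class(prev) */
};

/*
 * Find the first index belonging to the same irq context as @next,
 * then visit each lock held in that context.
 */
static void scan_current_context(const struct held_lock *held, int depth,
				 const struct held_lock *next)
{
	int i;

	for (i = depth - 1; i >= 0; i--) {
		const struct held_lock *prev = &held[i];

		if (prev->irq_context != next->irq_context)
			break;                       /* crossed into an older context */
	}
	i++;                                         /* first lock of the current context */

	for (; i < depth; i++) {
		const struct held_lock *prev = &held[i];

		printf("same-context lock, class %d\n", prev->class_idx);
	}
}

int main(void)
{
	struct held_lock held[] = {
		{ .irq_context = 0, .class_idx = 1 },   /* process context */
		{ .irq_context = 1, .class_idx = 2 },   /* taken in irq context */
		{ .irq_context = 1, .class_idx = 3 },
	};
	struct held_lock next = { .irq_context = 1, .class_idx = 4 };

	scan_current_context(held, 3, &next);   /* visits classes 2 and 3 */
	return 0;
}
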