Lines Matching refs:chain

418 #define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
419 #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
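
The two macros above (lines 418-419, from the kernel's lockdep in kernel/locking/lockdep.c) pick the hash bucket for a 64-bit chain key. A minimal user-space sketch of the same bucket selection, assuming CHAINHASH_BITS is 15 and that hash_long() reduces to the multiplicative hash_64() on 64-bit builds:

#include <stdint.h>
#include <stdio.h>

#define CHAINHASH_BITS   15                     /* assumed; set in lockdep_internals.h */
#define GOLDEN_RATIO_64  0x61C8864680B583EBull  /* hash_64() multiplier */

static unsigned long chainhash(uint64_t chain_key)
{
	/* Multiplicative hash: keep the top CHAINHASH_BITS bits of key * phi. */
	return (unsigned long)((chain_key * GOLDEN_RATIO_64) >> (64 - CHAINHASH_BITS));
}

int main(void)
{
	uint64_t key = 0xdeadbeefcafef00dull;

	/* chainhash_table + chainhash(key) would be the bucket head pointer. */
	printf("bucket index: %lu\n", chainhash(key));
	return 0;
}
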
1039 static bool check_lock_chain_key(struct lock_chain *chain)
1045 for (i = chain->base; i < chain->base + chain->depth; i++)
1051 if (chain->chain_key != chain_key) {
1052 printk(KERN_INFO "chain %lld: key %#llx <> %#llx\n",
1053 (unsigned long long)(chain - lock_chains),
1054 (unsigned long long)chain->chain_key,
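
check_lock_chain_key() above re-derives a chain's key from the hlock ids stored in chain_hlocks[] and compares it with the cached value. A standalone sketch of that loop, assuming INITIAL_CHAIN_KEY is -1 and modeling the mixing on the kernel's iterate_chain_key() (one __jhash_mix round from linux/jhash.h):

#include <stdbool.h>
#include <stdint.h>

#define INITIAL_CHAIN_KEY ((uint64_t)-1)	/* assumed initial key */

static inline uint32_t rol32(uint32_t w, unsigned int s)
{
	return (w << (s & 31)) | (w >> ((-s) & 31));
}

/* One jhash mixing round, folding one 16-bit hlock id into the key. */
static uint64_t iterate_chain_key(uint64_t key, uint32_t idx)
{
	uint32_t k0 = key, k1 = key >> 32;

	idx -= k1; idx ^= rol32(k1, 4);  k1 += k0;
	k0 -= idx; k0 ^= rol32(idx, 6);  idx += k1;
	k1 -= k0;  k1 ^= rol32(k0, 8);   k0 += idx;
	idx -= k1; idx ^= rol32(k1, 16); k1 += k0;
	k0 -= idx; k0 ^= rol32(idx, 19); idx += k1;
	k1 -= k0;  k1 ^= rol32(k0, 4);   k0 += idx;

	return k0 | (uint64_t)k1 << 32;
}

/* Re-derive the key from the chain's slice of stored hlock ids. */
static bool chain_key_matches(const uint16_t *chain_hlocks, int base, int depth,
			      uint64_t cached_key)
{
	uint64_t key = INITIAL_CHAIN_KEY;
	int i;

	for (i = base; i < base + depth; i++)
		key = iterate_chain_key(key, chain_hlocks[i]);

	return key == cached_key;	/* false => corrupted or stale chain */
}
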
1078 struct lock_chain *chain;
1107 hlist_for_each_entry_rcu(chain, head, entry) {
1108 if (!check_lock_chain_key(chain))
1856 * Print a dependency chain entry (this is only done when a deadlock
1887 * But if there is a chain instead, where the safe lock takes
1889 * not the same as the safe lock, then the lock chain is
1891 * to show a different CPU case for each link in the chain
1957 pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
2284 * have the observation for any lock chain L1 -> ... -> Ln, for any
2438 printk("lockdep:%s bad path found in chain graph\n", __func__);
2493 printk("lockdep:%s bad path found in chain graph\n", __func__);
2521 * But if there is a chain instead, where the safe lock takes
2523 * not the same as the safe lock, then the lock chain is
2525 * to show a different CPU case for each link in the chain
3069 * There was a chain-cache miss, and we are about to add a new dependency
3144 * (this may occur even though this is a new chain: consider
3147 * L2 added to its dependency list, due to the first chain.)
3224 * the end of this context's lock-chain - whichever comes first.
3304 * The first 2 chain_hlocks entries in the chain block in the bucket
3315 * the chain block size:
3317 * entry[2] - upper 16 bits of the chain block size
3318 * entry[3] - lower 16 bits of the chain block size
3335 * Iterate all the chain blocks in a bucket.
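
The comment fragment above (3304-3335) describes how a free chain block embeds its freelist metadata in the same u16 chain_hlocks[] entries that normally hold lock ids. A sketch of that encoding, with the CHAIN_BLK_FLAG/CHAIN_BLK_LIST_END values taken as assumptions mirroring the kernel's:

#include <stdint.h>

#define CHAIN_BLK_FLAG		(1U << 15)	/* bit 15: "not a class id" */
#define CHAIN_BLK_LIST_END	0xFFFFU		/* all-ones next => end of list */

static uint16_t chain_hlocks[1 << 16];		/* assumed pool size */

/* entry[0..1]: next-block offset; entry[2..3]: size, bucket-0 blocks only.
 * Passing next == -1 naturally stores 0xFFFF, i.e. the list terminator. */
static void init_chain_block(int offset, int next, int bucket, int size)
{
	chain_hlocks[offset]     = (next >> 16) | CHAIN_BLK_FLAG;
	chain_hlocks[offset + 1] = (uint16_t)next;

	if (size && !bucket) {
		chain_hlocks[offset + 2] = size >> 16;
		chain_hlocks[offset + 3] = (uint16_t)size;
	}
}

static int chain_block_next(int offset)
{
	int next = chain_hlocks[offset];

	if (next == (int)CHAIN_BLK_LIST_END)	/* end of the free list */
		return -1;

	next &= ~CHAIN_BLK_FLAG;
	return (next << 16) | chain_hlocks[offset + 1];
}

static int chain_block_size(int offset)
{
	return (chain_hlocks[offset + 2] << 16) | chain_hlocks[offset + 3];
}
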
3428 * returned, broken up and put back into the pool. So if a chain block of
3430 * queued up after the primordial chain block and never be used until the
3431 * hlock entries in the primordial chain block is almost used up. That
3433 * monitored by looking at the "large chain blocks" number in lockdep_stats.
3455 * Return offset of a chain block of the right size or -1 if not found.
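
The allocator whose contract is stated at 3455 (an offset or -1) draws from per-size free-list buckets, with bucket 0 holding the variable-sized large blocks that get split on allocation; the "large chain blocks" counter mentioned above tracks those. A condensed sketch of the allocation path, reusing init_chain_block()/chain_block_next()/chain_block_size() from the sketch above; the kernel's alloc_chain_hlocks() additionally falls back to splitting a larger fixed-size bucket, which is omitted here:

#define MAX_CHAIN_BUCKETS 16

/* Bucket heads; -1 means empty. Bucket 0 is the variable-size list. */
static int chain_block_buckets[MAX_CHAIN_BUCKETS] = {
	[0 ... MAX_CHAIN_BUCKETS - 1] = -1	/* GNU range initializer */
};

static int size_to_bucket(int size)
{
	return size > MAX_CHAIN_BUCKETS ? 0 : size - 1;
}

/* Push a free block onto its bucket, encoding the header in place. */
static void add_chain_block(int offset, int size)
{
	int bucket = size_to_bucket(size);

	if (size < 2)
		return;		/* too small for a freelist header: leaked */

	init_chain_block(offset, chain_block_buckets[bucket], bucket, size);
	chain_block_buckets[bucket] = offset;
}

/* Return the offset of a block of 'req' entries, or -1 if none fits. */
static int alloc_chain_hlocks_sketch(int req)
{
	int bucket, offset, prev = -1;

	req = req < 2 ? 2 : req;	/* must hold a header once freed */
	bucket = size_to_bucket(req);

	/* 1) Exact-size bucket: pop the head if the list is non-empty. */
	if (bucket && chain_block_buckets[bucket] != -1) {
		offset = chain_block_buckets[bucket];
		chain_block_buckets[bucket] = chain_block_next(offset);
		return offset;
	}

	/* 2) Bucket 0: first large-enough block; split off the remainder. */
	for (offset = chain_block_buckets[0]; offset != -1;
	     prev = offset, offset = chain_block_next(offset)) {
		int size = chain_block_size(offset);

		if (size < req)
			continue;

		if (prev == -1)		/* unlink from the bucket-0 list */
			chain_block_buckets[0] = chain_block_next(offset);
		else
			init_chain_block(prev, chain_block_next(offset), 0,
					 chain_block_size(prev));

		add_chain_block(offset + req, size - req);  /* return the tail */
		return offset;
	}
	return -1;
}
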
3528 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
3530 u16 chain_hlock = chain_hlocks[chain->base + i];
3537 * Returns the index of the first held_lock of the current chain
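
The comment at 3537 belongs to the helper that finds where the current chain begins: chains are split at irq-context boundaries, so the search walks back from the top of the held-lock stack until the context changes. A sketch with a stand-in struct (not the kernel's held_lock):

/* Stand-in for the kernel's held_lock; only the context tag matters here. */
struct held_lock_sketch {
	unsigned int irq_context;	/* hardirq/softirq/process tag */
};

/*
 * Walk back from the top of the stack while the irq context matches the
 * lock being acquired; the chain starts one past where the walk breaks.
 */
static int get_first_held_lock_sketch(const struct held_lock_sketch *held_locks,
				      int lockdep_depth, unsigned int irq_context)
{
	int i;

	for (i = lockdep_depth - 1; i >= 0; i--) {
		if (held_locks[i].irq_context != irq_context)
			break;
	}

	return i + 1;
}
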
3590 static void print_chain_keys_chain(struct lock_chain *chain)
3596 printk("depth: %u\n", chain->depth);
3597 for (i = 0; i < chain->depth; i++) {
3598 hlock_id = chain_hlocks[chain->base + i];
3608 struct lock_chain *chain)
3616 pr_warn("Hash chain already cached but the contents don't match!\n");
3621 pr_warn("Locks in cached chain:");
3622 print_chain_keys_chain(chain);
3630 * Checks whether the chain and the current held locks are consistent
3637 struct lock_chain *chain)
3644 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) {
3645 print_collision(curr, hlock, chain);
3649 for (j = 0; j < chain->depth - 1; j++, i++) {
3652 if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id)) {
3653 print_collision(curr, hlock, chain);
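
check_no_collision() above guards against hash collisions: a chain found by key must also match the current stack in depth and in every stored hlock id. A condensed sketch; here stack_ids is assumed to already hold the id of the lock being acquired as its last element, and 'first' is the result of the first-held-lock search sketched earlier:

#include <stdbool.h>
#include <stdint.h>

static bool chain_matches_stack(const uint16_t *chain_ids, int chain_depth,
				const uint16_t *stack_ids, int first, int depth)
{
	int i, j;

	if (chain_depth != depth - first)
		return false;		/* depth mismatch: collision */

	for (i = first, j = 0; j < chain_depth; i++, j++) {
		if (chain_ids[j] != stack_ids[i])
			return false;	/* stored id disagrees: collision */
	}

	return true;
}
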
3662 * Given an index that is >= -1, return the index of the next lock chain.
3663 * Return -2 if there is no next lock chain.
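
Those two lines (3662-3663) state the iterator contract used when dumping chains. A user-space sketch, with a plain bitmap scan standing in for the kernel's find_next_bit() over lock_chains_in_use; the table size is an assumption:

#include <limits.h>

#define NR_CHAINS 65536			/* assumed table size */
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long chains_in_use[NR_CHAINS / BITS_PER_LONG];

/* Accepts any index >= -1; returns the next in-use chain, or -2 if none. */
static long next_lockchain(long i)
{
	for (i++; i < NR_CHAINS; i++) {
		if (chains_in_use[i / BITS_PER_LONG] &
		    (1UL << (i % BITS_PER_LONG)))
			return i;	/* index of the next chain in use */
	}
	return -2;			/* no next lock chain */
}
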
3689 * Adds a dependency chain into chain hashtable. And must be called with
3700 struct lock_chain *chain;
3711 chain = alloc_lock_chain();
3712 if (!chain) {
3720 chain->chain_key = chain_key;
3721 chain->irq_context = hlock->irq_context;
3723 chain->depth = curr->lockdep_depth + 1 - i;
3729 j = alloc_chain_hlocks(chain->depth);
3739 chain->base = j;
3740 for (j = 0; j < chain->depth - 1; j++, i++) {
3743 chain_hlocks[chain->base + j] = lock_id;
3745 chain_hlocks[chain->base + j] = hlock_id(hlock);
3746 hlist_add_head_rcu(&chain->entry, hash_head);
3748 inc_chains(chain->irq_context);
3754 * Look up a dependency chain. Must be called with either the graph lock or
3760 struct lock_chain *chain;
3762 hlist_for_each_entry_rcu(chain, hash_head, entry) {
3763 if (READ_ONCE(chain->chain_key) == chain_key) {
3765 return chain;
3772 * If the key is not present yet in dependency chain cache then
3773 * add it and return 1 - in this case the new dependency chain is
3782 struct lock_chain *chain = lookup_chain_cache(chain_key);
3784 if (chain) {
3786 if (!check_no_collision(curr, hlock, chain))
3790 printk("\nhash chain already cached, key: "
3800 printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
3808 * We have to walk the chain again locked - to avoid duplicates:
3810 chain = lookup_chain_cache(chain_key);
3811 if (chain) {
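
Lines 3782-3811 show a classic optimistic pattern: probe the hash locklessly under RCU, and only on a miss take the graph lock and look again before inserting, so two racing CPUs cannot add the same chain twice. A self-contained model of just the control flow, with a mutex and a plain list standing in for the kernel's graph lock and RCU hlist (the lockless probe is only safe in the kernel because of RCU):

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct chain {
	uint64_t key;
	struct chain *next;
};

static struct chain *bucket_head;	/* one hash bucket */
static pthread_mutex_t graph_lock = PTHREAD_MUTEX_INITIALIZER;

static struct chain *lookup(uint64_t key)
{
	struct chain *c;

	for (c = bucket_head; c; c = c->next)
		if (c->key == key)
			return c;
	return NULL;
}

/* Returns 1 if the key is present afterwards (found or freshly added). */
static int lookup_or_add(uint64_t key)
{
	if (lookup(key))	/* optimistic probe: lockless (RCU) in the kernel */
		return 1;

	pthread_mutex_lock(&graph_lock);
	/* Walk the chain again locked - another thread may have added it. */
	if (!lookup(key)) {
		struct chain *c = malloc(sizeof(*c));

		if (!c) {
			pthread_mutex_unlock(&graph_lock);
			return 0;
		}
		c->key = key;
		c->next = bucket_head;
		bucket_head = c;	/* hlist_add_head_rcu() in the kernel */
	}
	pthread_mutex_unlock(&graph_lock);
	return 1;
}
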
3832 * the dependencies only if this is a new dependency chain.
3862 * of the chain, and if the new lock introduces no more
3909 * We got mighty confused, our chain keys don't match
4057 pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
5095 * Calculate the chain hash: it's the combined hash of all the
5096 * lock keys along the dependency chain. We save the hash value
5098 * after unlock. The chain hash is then used to cache dependency
5113 * How can we have a chain hash when we ain't got no keys?!
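
The comment at 5095-5098 describes an incremental scheme: the running chain key folds in each acquired lock, and each held lock remembers the key that preceded it, so releasing from the top restores it in O(1) instead of rehashing the whole stack. A sketch for that common pop-from-the-top case; the field names are stand-ins and iterate_chain_key() is the mixing function sketched earlier:

#include <stdint.h>

uint64_t iterate_chain_key(uint64_t key, uint32_t idx);	/* see earlier sketch */

struct held_lock_sk {
	uint16_t id;			/* hlock_id(): class index plus irq bits */
	uint64_t prev_chain_key;	/* key in effect before this acquisition */
};

static uint64_t curr_chain_key = (uint64_t)-1;	/* INITIAL_CHAIN_KEY */

static void on_acquire(struct held_lock_sk *hlock)
{
	/* Save the running key, then fold this lock into it. */
	hlock->prev_chain_key = curr_chain_key;
	curr_chain_key = iterate_chain_key(curr_chain_key, hlock->id);
}

static void on_release_top(struct held_lock_sk *hlock)
{
	/* Popping the top lock restores the saved key - no rehash needed. */
	curr_chain_key = hlock->prev_chain_key;
}
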
6064 /* Remove a class from a lock chain. Must be called with the graph lock held. */
6066 struct lock_chain *chain,
6072 for (i = chain->base; i < chain->base + chain->depth; i++) {
6076 * Each lock class occurs at most once in a lock chain so once
6081 /* Since the chain has not been modified, return. */
6085 free_chain_hlocks(chain->base, chain->depth);
6086 /* Overwrite the chain key for concurrent RCU readers. */
6087 WRITE_ONCE(chain->chain_key, INITIAL_CHAIN_KEY);
6088 dec_chains(chain->irq_context);
6094 hlist_del_rcu(&chain->entry);
6095 __set_bit(chain - lock_chains, pf->lock_chains_being_freed);
6104 struct lock_chain *chain;
6110 hlist_for_each_entry_rcu(chain, head, entry) {
6111 remove_class_from_lock_chain(pf, chain, class);
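
The removal path (6064-6111) leans on an ordering trick visible at line 6087: the chain key is poisoned with INITIAL_CHAIN_KEY via WRITE_ONCE() before the node is unlinked, so a concurrent reader doing READ_ONCE(chain->chain_key) == chain_key in lookup_chain_cache() simply sees a miss rather than a stale match. A self-contained C11-atomics model of that invalidate-before-unlink pattern; the kernel's RCU grace period, which defers the actual free, is not modeled:

#include <stdatomic.h>
#include <stdint.h>

#define INITIAL_CHAIN_KEY ((uint64_t)-1)

struct chain_node {
	_Atomic uint64_t chain_key;
	struct chain_node *next;
};

/* Reader: a poisoned but still-reachable node harmlessly fails the compare. */
static struct chain_node *lookup(struct chain_node *head, uint64_t key)
{
	struct chain_node *c;

	for (c = head; c; c = c->next)
		if (atomic_load_explicit(&c->chain_key,
					 memory_order_acquire) == key)
			return c;
	return NULL;
}

/* Writer, under the graph lock: poison the key before unlinking. */
static void invalidate_chain(struct chain_node *c)
{
	atomic_store_explicit(&c->chain_key, INITIAL_CHAIN_KEY,
			      memory_order_release);
	/* ...then hlist_del_rcu() and reclamation after a grace period. */
}
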