Lines matching defs:hlock (identifier cross-reference into kernel/locking/lockdep.c; each entry below is prefixed with its line number in that file)

221 static inline struct lock_class *hlock_class(struct held_lock *hlock)
223 unsigned int class_idx = hlock->class_idx;
225 /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
237 * At this point, if the passed hlock->class_idx is still garbage,
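
These three fragments are from hlock_class(), which must tolerate lockless callers (debug_show_all_locks() can race with a release). For context, a reconstruction of the whole function as it appears in mainline, assuming lockdep's internal lock_classes[] array and lock_classes_in_use bitmap:

    static inline struct lock_class *hlock_class(struct held_lock *hlock)
    {
        /* Copy the bitfield once into a plain local variable. */
        unsigned int class_idx = hlock->class_idx;

        /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfield */
        barrier();

        if (!test_bit(class_idx, lock_classes_in_use)) {
            /* Someone passed in garbage, give up. */
            DEBUG_LOCKS_WARN_ON(1);
            return NULL;
        }

        /*
         * At this point, if the passed hlock->class_idx is still garbage,
         * we just have to live with it
         */
        return lock_classes + class_idx;
    }
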
342 static void lock_release_holdtime(struct held_lock *hlock)
350 holdtime = lockstat_clock() - hlock->holdtime_stamp;
352 stats = get_lock_stats(hlock_class(hlock));
353 if (hlock->read)
359 static inline void lock_release_holdtime(struct held_lock *hlock)
426 static inline u16 hlock_id(struct held_lock *hlock)
430 return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
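
hlock_id() packs the class index and the read state into one u16 so chain_hlocks[] can store a held lock per entry. A self-contained demonstration of the encoding, assuming the mainline value MAX_LOCKDEP_KEYS_BITS == 13 (chain_hlock_class_idx() performs the masking shown in unpack_class_idx()):

    #include <assert.h>
    #include <stdint.h>

    #define MAX_LOCKDEP_KEYS_BITS 13
    #define MAX_LOCKDEP_KEYS (1U << MAX_LOCKDEP_KEYS_BITS)

    /* Pack class index and read state, mirroring hlock_id() above. */
    static uint16_t pack_hlock_id(unsigned int class_idx, unsigned int read)
    {
        return class_idx | (read << MAX_LOCKDEP_KEYS_BITS);
    }

    /* Recover the class index, as chain_hlock_class_idx() does. */
    static unsigned int unpack_class_idx(uint16_t id)
    {
        return id & (MAX_LOCKDEP_KEYS - 1);
    }

    int main(void)
    {
        uint16_t id = pack_hlock_id(42, 2);   /* class 42, recursive read */

        assert(unpack_class_idx(id) == 42);
        assert((id >> MAX_LOCKDEP_KEYS_BITS) == 2);
        return 0;
    }
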
712 static void __print_lock_name(struct held_lock *hlock, struct lock_class *class)
727 if (hlock && class->print_fn)
728 class->print_fn(hlock->instance);
732 static void print_lock_name(struct held_lock *hlock, struct lock_class *class)
739 __print_lock_name(hlock, class);
757 static void print_lock(struct held_lock *hlock)
761 * extra careful, the hlock might have been released and cleared.
764 * to print the lock unless the hlock class_idx does not point to a
769 struct lock_class *lock = hlock_class(hlock);
776 printk(KERN_CONT "%px", hlock->instance);
777 print_lock_name(hlock, lock);
778 printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
1653 * Initialize a lock_list entry @lock based on a lock acquisition @hlock as the
1656 * ->only_xr of the initial lock node is set to @hlock->read == 2, to make sure
1657 * that <prev> -> @hlock and @hlock -> <whatever __bfs() found> is not -(*R)->
1661 struct held_lock *hlock)
1663 __bfs_init_root(lock, hlock_class(hlock));
1664 lock->only_xr = (hlock->read == 2);
1670 * ->only_xr of the initial lock node is set to @hlock->read != 0, to make sure
1671 * that <next> -> @hlock and @hlock -> <whatever backwards BFS found> is not
1675 struct held_lock *hlock)
1677 __bfs_init_root(lock, hlock_class(hlock));
1678 lock->only_xr = (hlock->read != 0);
1990 struct held_lock *hlock = (struct held_lock *)data;
1992 return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
1993 (hlock->read == 2 || /* A -> B is -(*R)-> */
2017 struct held_lock *hlock = (struct held_lock *)data;
2019 return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
2020 (hlock->read == 0 || /* B -> A is -(E*)-> */
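
The root initializers at 1653-1678 record how the lock was acquired, and the __bfs() match callbacks at 1990 and 2017 (hlock_equal() and hlock_conflict() in mainline) consume that record. Each callback's closing condition is missing from this listing because it does not mention hlock; in mainline both returns end with a !entry->only_xr test, roughly:

    /* hlock_equal(), reconstructed from mainline (hedged): */
    return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
           (hlock->read == 2 ||                  /* A -> B is -(*R)-> */
            !entry->only_xr);                    /* A -> .. -> B is -(*N)-> */

    /* hlock_conflict(), reconstructed from mainline (hedged): */
    return hlock_class(hlock) == entry->class && /* Found A -> .. -> B */
           (hlock->read == 0 ||                  /* B -> A is -(E*)-> */
            !entry->only_xr);                    /* A -> .. -> B is -(*N)-> */
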
3231 struct held_lock *hlock;
3250 hlock = curr->held_locks + depth - 1;
3252 if (hlock->check) {
3253 int ret = check_prev_add(curr, hlock, next, distance, &trace);
3263 if (!hlock->trylock)
3431 * hlock entries in the primordial chain block are almost used up. That
3540 struct held_lock *hlock)
3547 if (hlock_curr->irq_context != hlock->irq_context)
3572 struct held_lock *hlock;
3580 hlock = curr->held_locks + i;
3581 chain_key = print_chain_key_iteration(hlock_id(hlock), chain_key);
3583 print_lock(hlock);
3636 struct held_lock *hlock,
3642 i = get_first_held_lock(curr, hlock);
3645 print_collision(curr, hlock, chain);
3653 print_collision(curr, hlock, chain);
3696 struct held_lock *hlock,
3721 chain->irq_context = hlock->irq_context;
3722 i = get_first_held_lock(curr, hlock);
3745 chain_hlocks[chain->base + j] = hlock_id(hlock);
3778 struct held_lock *hlock,
3781 struct lock_class *class = hlock_class(hlock);
3786 if (!check_no_collision(curr, hlock, chain))
3816 if (!add_chain_cache(curr, hlock, chain_key))
3823 struct held_lock *hlock,
3836 if (!hlock->trylock && hlock->check &&
3837 lookup_chain_cache_add(curr, hlock, chain_key)) {
3856 int ret = check_deadlock(curr, hlock);
3869 if (!check_prevs_add(curr, hlock))
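
validate_chain() (3823-3869) makes chain validation pay-once: lookup_chain_cache_add() returns true only the first time this exact sequence of held locks (the chain_key) is seen, so the deadlock and dependency checks run once per chain. A simplified sketch of the control flow, omitting the graph_lock error handling of the real function:

    if (!hlock->trylock && hlock->check &&
        lookup_chain_cache_add(curr, hlock, chain_key)) {
        /* New chain: validate it once while holding the graph lock. */
        int ret = check_deadlock(curr, hlock);

        if (!ret)
            return 0;
        if (!chain_head && ret != 2)
            if (!check_prevs_add(curr, hlock)) /* the expensive graph walk */
                return 0;
        graph_unlock();
    }
    /* Cached chain: the expensive checks were done on first sight. */
    return 1;
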
3884 struct held_lock *hlock,
3900 struct held_lock *hlock, *prev_hlock = NULL;
3905 hlock = curr->held_locks + i;
3906 if (chain_key != hlock->prev_chain_key) {
3915 (unsigned long long)hlock->prev_chain_key);
3920 * hlock->class_idx can't go beyond MAX_LOCKDEP_KEYS, but is
3923 if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
3927 hlock->irq_context))
3929 chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
3930 prev_hlock = hlock;
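
check_chain_key() (3900-3930) revalidates curr->curr_chain_key by refolding every held lock's hlock_id() into the key. A self-contained model of that accumulation, assuming mainline's jhash-based iterate_chain_key() and INITIAL_CHAIN_KEY == -1:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rol32(uint32_t w, unsigned int s)
    {
        return (w << s) | (w >> (32 - s));
    }

    /* jhash's __jhash_mix(), which iterate_chain_key() uses in mainline. */
    #define JHASH_MIX(a, b, c)                  \
    {                                           \
        a -= c;  a ^= rol32(c, 4);  c += b;     \
        b -= a;  b ^= rol32(a, 6);  a += c;     \
        c -= b;  c ^= rol32(b, 8);  b += a;     \
        a -= c;  a ^= rol32(c, 16); c += a;     \
        b -= a;  b ^= rol32(a, 19); a += b;     \
        c -= b;  c ^= rol32(b, 4);  b += c;     \
    }

    /* Fold one hlock_id() into the running 64-bit chain key. */
    static uint64_t iterate_chain_key(uint64_t key, uint32_t idx)
    {
        uint32_t k0 = key, k1 = key >> 32;

        JHASH_MIX(idx, k0, k1);
        return k0 | (uint64_t)k1 << 32;
    }

    int main(void)
    {
        /* Fake hlock_id()s: class 42 write, class 42 read, class 7. */
        uint16_t held[] = { 42, 42 | (1U << 13), 7 };
        uint64_t chain_key = (uint64_t)-1;    /* INITIAL_CHAIN_KEY */

        for (unsigned int i = 0; i < sizeof(held) / sizeof(held[0]); i++)
            chain_key = iterate_chain_key(chain_key, held[i]);

        printf("chain_key = %016llx\n", (unsigned long long)chain_key);
        return 0;
    }
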
4259 struct held_lock *hlock;
4264 hlock = curr->held_locks + i;
4266 if (hlock->read)
4271 if (!hlock->check)
4274 if (!mark_lock(curr, hlock, hlock_bit))
4543 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4552 if (!hlock->trylock) {
4553 if (hlock->read) {
4555 if (!mark_lock(curr, hlock,
4559 if (!mark_lock(curr, hlock,
4564 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
4567 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
4577 if (!hlock->hardirqs_off && !hlock->sync) {
4578 if (hlock->read) {
4579 if (!mark_lock(curr, hlock,
4583 if (!mark_lock(curr, hlock,
4587 if (!mark_lock(curr, hlock,
4591 if (!mark_lock(curr, hlock,
4599 if (!mark_lock(curr, hlock, LOCK_USED))
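
mark_usage() (4543-4599) turns the acquisition flags into usage bits; hlock->sync suppresses the ENABLED bits because lock_sync() creates no critical section. The decision table implied by the fragments above (a hedged reconstruction; the _READ variants are the branches taken when hlock->read is set):

    condition at acquire time         read == 0              read != 0
    --------------------------------  ---------------------  --------------------------
    !trylock, in hardirq context      LOCK_USED_IN_HARDIRQ   LOCK_USED_IN_HARDIRQ_READ
    !trylock, in softirq context      LOCK_USED_IN_SOFTIRQ   LOCK_USED_IN_SOFTIRQ_READ
    !hardirqs_off && !sync            LOCK_ENABLED_HARDIRQ   LOCK_ENABLED_HARDIRQ_READ
      ... and softirqs enabled        LOCK_ENABLED_SOFTIRQ   LOCK_ENABLED_SOFTIRQ_READ
    always, on success                LOCK_USED              LOCK_USED
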
4612 struct held_lock *hlock)
4628 if (prev_hlock->irq_context != hlock->irq_context)
4725 struct held_lock *hlock)
4741 print_lock(hlock);
4829 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
4840 struct held_lock *hlock)
4950 struct held_lock *hlock)
4964 print_lock(hlock);
4967 pr_warn("%s\n", hlock->nest_lock->name);
4996 struct held_lock *hlock;
5045 hlock = curr->held_locks + depth - 1;
5046 if (hlock->class_idx == class_idx && nest_lock) {
5050 if (!hlock->references)
5051 hlock->references++;
5053 hlock->references += references;
5056 if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
5063 hlock = curr->held_locks + depth;
5070 hlock->class_idx = class_idx;
5071 hlock->acquire_ip = ip;
5072 hlock->instance = lock;
5073 hlock->nest_lock = nest_lock;
5074 hlock->irq_context = task_irq_context(curr);
5075 hlock->trylock = trylock;
5076 hlock->read = read;
5077 hlock->check = check;
5078 hlock->sync = !!sync;
5079 hlock->hardirqs_off = !!hardirqs_off;
5080 hlock->references = references;
5082 hlock->waittime_stamp = 0;
5083 hlock->holdtime_stamp = lockstat_clock();
5085 hlock->pin_count = pin_count;
5087 if (check_wait_context(curr, hlock))
5091 if (!mark_usage(curr, hlock, check))
5120 hlock->prev_chain_key = chain_key;
5121 if (separate_irq_context(curr, hlock)) {
5125 chain_key = iterate_chain_key(chain_key, hlock_id(hlock));
5128 print_lock_nested_lock_not_held(curr, hlock);
5133 WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
5134 WARN_ON_ONCE(!hlock_class(hlock)->key);
5137 if (!validate_chain(curr, hlock, chain_head, chain_key))
5141 if (hlock->sync)
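
Lines 5045-5056 fold repeated acquisitions: re-acquiring the class already on top of the held-lock stack under a nest_lock bumps hlock->references instead of consuming one of the MAX_LOCK_DEPTH (48 in mainline) slots. A toy model of that bookkeeping, using hypothetical types rather than kernel code:

    #include <assert.h>
    #include <stddef.h>

    #define MAX_LOCK_DEPTH 48   /* mainline value */

    struct toy_hlock {
        unsigned int class_idx;
        unsigned int references;
    };

    static struct toy_hlock stack[MAX_LOCK_DEPTH];
    static unsigned int depth;

    /* Top-of-stack re-acquire folds into ->references, no new slot. */
    static struct toy_hlock *toy_acquire(unsigned int class_idx, int nested)
    {
        struct toy_hlock *top = depth ? &stack[depth - 1] : NULL;

        if (top && top->class_idx == class_idx && nested) {
            if (!top->references)   /* first fold: count the original hold */
                top->references++;
            top->references++;
            return top;
        }

        assert(depth < MAX_LOCK_DEPTH);
        top = &stack[depth++];
        top->class_idx = class_idx;
        top->references = 0;
        return top;
    }

    int main(void)
    {
        toy_acquire(3, 0);          /* first hold: one real slot */
        toy_acquire(3, 1);          /* nested re-acquire: no new slot */
        toy_acquire(3, 1);
        assert(depth == 1 && stack[0].references == 3);
        return 0;
    }
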
5197 static noinstr int match_held_lock(const struct held_lock *hlock,
5200 if (hlock->instance == lock)
5203 if (hlock->references) {
5223 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
5226 if (hlock->class_idx == class - lock_classes)
5238 struct held_lock *ret, *hlock, *prev_hlock;
5242 hlock = curr->held_locks + i;
5243 ret = hlock;
5244 if (match_held_lock(hlock, lock))
5248 for (i--, prev_hlock = hlock--;
5250 i--, prev_hlock = hlock--) {
5254 if (prev_hlock->irq_context != hlock->irq_context) {
5258 if (match_held_lock(hlock, lock)) {
5259 ret = hlock;
5272 struct held_lock *hlock;
5278 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
5279 switch (__lock_acquire(hlock->instance,
5280 hlock_class(hlock)->subclass,
5281 hlock->trylock,
5282 hlock->read, hlock->check,
5283 hlock->hardirqs_off,
5284 hlock->nest_lock, hlock->acquire_ip,
5285 hlock->references, hlock->pin_count, 0)) {
5308 struct held_lock *hlock;
5323 hlock = find_held_lock(curr, lock, depth, &i);
5324 if (!hlock) {
5334 hlock->class_idx = class - lock_classes;
5337 curr->curr_chain_key = hlock->prev_chain_key;
5355 struct held_lock *hlock;
5369 hlock = find_held_lock(curr, lock, depth, &i);
5370 if (!hlock) {
5376 curr->curr_chain_key = hlock->prev_chain_key;
5378 WARN(hlock->read, "downgrading a read lock");
5379 hlock->read = 1;
5380 hlock->acquire_ip = ip;
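
__lock_downgrade() (5369-5380) rewrites the held write lock into a read lock in place, which is what downgrade_write() relies on. A usage sketch (the rwsem name is hypothetical; downgrade_write() really does call lock_downgrade()):

    static DECLARE_RWSEM(example_sem);        /* hypothetical example */

    static void writer_then_reader(void)
    {
        down_write(&example_sem);
        /* ... modify the protected data ... */
        downgrade_write(&example_sem);        /* hlock->read becomes 1 */
        /* ... read-only tail; new readers may now enter ... */
        up_read(&example_sem);
    }
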
5409 struct held_lock *hlock;
5429 hlock = find_held_lock(curr, lock, depth, &i);
5430 if (!hlock) {
5435 if (hlock->instance == lock)
5436 lock_release_holdtime(hlock);
5438 WARN(hlock->pin_count, "releasing a pinned lock\n");
5440 if (hlock->references) {
5441 hlock->references--;
5442 if (hlock->references) {
5453 * We have the right lock to unlock, 'hlock' points to it.
5459 curr->curr_chain_key = hlock->prev_chain_key;
5493 struct held_lock *hlock = curr->held_locks + i;
5495 if (match_held_lock(hlock, lock)) {
5496 if (read == -1 || !!hlock->read == read)
5516 struct held_lock *hlock = curr->held_locks + i;
5518 if (match_held_lock(hlock, lock)) {
5525 hlock->pin_count += cookie.val;
5543 struct held_lock *hlock = curr->held_locks + i;
5545 if (match_held_lock(hlock, lock)) {
5546 hlock->pin_count += cookie.val;
5563 struct held_lock *hlock = curr->held_locks + i;
5565 if (match_held_lock(hlock, lock)) {
5566 if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
5569 hlock->pin_count -= cookie.val;
5571 if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
5572 hlock->pin_count = 0;
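
Lines 5516-5572 implement lock pinning: __lock_pin_lock() adds a random 16-bit cookie value to pin_count so only a caller holding that cookie can unpin, and __lock_release() warns when a pinned lock is dropped (line 5438). A usage sketch in the style of the scheduler's runqueue pinning (the rq name is hypothetical):

    struct pin_cookie cookie;

    raw_spin_lock(&rq->lock);                 /* hypothetical lock */
    cookie = lockdep_pin_lock(&rq->lock);     /* pin_count += cookie.val */

    /*
     * Code that must not drop rq->lock, even temporarily: a stray
     * unlock here would trigger "releasing a pinned lock".
     */

    lockdep_unpin_lock(&rq->lock, cookie);    /* pin_count -= cookie.val */
    raw_spin_unlock(&rq->lock);
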
5664 static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock, int subclass)
5678 if (!hlock->read)
5684 hlock->class_idx = class - lock_classes;
5686 print_usage_bug(current, hlock, LOCK_USED, LOCK_USAGE_STATES);
5733 struct held_lock hlock;
5735 hlock.acquire_ip = ip;
5736 hlock.instance = lock;
5737 hlock.nest_lock = nest_lock;
5738 hlock.irq_context = 2; // XXX
5739 hlock.trylock = trylock;
5740 hlock.read = read;
5741 hlock.check = check;
5742 hlock.hardirqs_off = true;
5743 hlock.references = 0;
5745 verify_lock_unused(lock, &hlock, subclass);
5920 struct held_lock *hlock;
5933 hlock = find_held_lock(curr, lock, depth, &i);
5934 if (!hlock) {
5939 if (hlock->instance != lock)
5942 hlock->waittime_stamp = lockstat_clock();
5944 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
5945 contending_point = lock_point(hlock_class(hlock)->contending_point,
5948 stats = get_lock_stats(hlock_class(hlock));
5954 stats->bounces[bounce_contended + !!hlock->read]++;
5961 struct held_lock *hlock;
5975 hlock = find_held_lock(curr, lock, depth, &i);
5976 if (!hlock) {
5981 if (hlock->instance != lock)
5985 if (hlock->waittime_stamp) {
5987 waittime = now - hlock->waittime_stamp;
5988 hlock->holdtime_stamp = now;
5991 stats = get_lock_stats(hlock_class(hlock));
5993 if (hlock->read)
5999 stats->bounces[bounce_acquired + !!hlock->read]++;
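
For CONFIG_LOCK_STAT, __lock_contended() (5920-5954) stamps hlock->waittime_stamp when the fast path fails, and __lock_acquired() (5961-5999) converts the stamp into a wait time and restarts holdtime_stamp, so both the wait and the subsequent hold are accounted. A sketch of how a sleeping-lock slowpath pairs the two hooks (the helper names are hypothetical; mutexes and rwsems do the equivalent internally):

    if (!try_fast_path(lock)) {
        lock_contended(&lock->dep_map, _RET_IP_); /* stamp waittime_stamp */
        wait_until_granted(lock);                 /* block */
    }
    lock_acquired(&lock->dep_map, _RET_IP_);      /* waittime = now - stamp;
                                                     holdtime_stamp = now */
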
6520 const void *mem_to, struct held_lock *hlock)
6534 print_lock(hlock);
6556 struct held_lock *hlock;
6565 hlock = curr->held_locks + i;
6567 if (not_in_range(mem_from, mem_len, hlock->instance,
6568 sizeof(*hlock->instance)))
6571 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
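
debug_check_no_locks_freed() (6556-6571) scans the held-lock stack and reports any lock instance that lives inside the memory range being freed; not_in_range() is a plain interval-overlap test. A self-contained sketch of that test:

    #include <assert.h>
    #include <stdint.h>

    /* Non-overlap test, mirroring not_in_range() in lockdep.c. */
    static int not_in_range(uintptr_t mem_from, unsigned long mem_len,
                            uintptr_t lock_from, unsigned long lock_len)
    {
        return lock_from + lock_len <= mem_from ||
               mem_from + mem_len <= lock_from;
    }

    int main(void)
    {
        /* A lock at [100, 116) inside a freed range [50, 150): a bug. */
        assert(!not_in_range(50, 100, 100, 16));
        /* A lock at [200, 216) outside the freed range: fine. */
        assert(not_in_range(50, 100, 200, 16));
        return 0;
    }
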