Lines Matching defs:hash_lock

11 * A hash_lock controls and coordinates writing, index access, and dedupe among groups of data_vios
15 * index query is needed for each hash_lock, instead of one for every data_vio.
20 * more fine-grained locking for the hash_lock structures.
22 * A hash_lock acts like a state machine perhaps more than as a lock. Other than the starting and
49 * lock, releasing the hash_lock itself back to the hash zone (BYPASSING).
92 * When a hash_lock needs to query the index, it attempts to acquire an unused dedupe_context from
94 * hash_lock's agent, added to the list of pending contexts, and then sent to the index. The
113 * If a hash_lock needs a dedupe context, and the available list is empty, the timed_out list will
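
When a hash_lock needs to query the index (the comment fragments just above), its agent borrows a dedupe_context from the zone: take one from the available list, or fall back to the timed_out list when that is empty. Below is a minimal user-space sketch of that fallback, assuming simplified singly linked lists in place of the kernel's struct list_head; the names (dedupe_context_sketch, fully_answered, acquire_context_sketch) are illustrative, not the driver's API.

#include <stdbool.h>
#include <stddef.h>

struct dedupe_context_sketch {
	struct dedupe_context_sketch *next;
	bool fully_answered;	/* the stale index query has finished or been abandoned */
};

struct hash_zone_sketch {
	struct dedupe_context_sketch *available;	/* unused contexts */
	struct dedupe_context_sketch *timed_out;	/* contexts whose queries timed out */
};

/*
 * Try the available list first; if it is empty, reclaim the head of the
 * timed_out list, but only if its outstanding query has fully resolved.
 * A NULL return means no context can be had, and the caller would have
 * to proceed without index advice.
 */
static struct dedupe_context_sketch *
acquire_context_sketch(struct hash_zone_sketch *zone)
{
	struct dedupe_context_sketch *context = zone->available;

	if (context != NULL) {
		zone->available = context->next;
		return context;
	}

	context = zone->timed_out;
	if (context != NULL && context->fully_answered) {
		zone->timed_out = context->next;
		return context;
	}

	return NULL;
}
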
223 struct hash_lock {
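
The match above shows only the opening brace of the structure. From the member accesses elsewhere in this listing (duplicate_lock, duplicate, agent, pool_node) and the start_*() handlers that drive the state machine, its rough shape can be inferred as below. This is a reconstruction for orientation only, not the driver's layout; the kernel and VDO types are replaced with opaque or placeholder stand-ins so the fragment compiles on its own, and INITIALIZING is assumed since only BYPASSING appears by name in the visible comment fragments.

/* Opaque stand-ins for the kernel/VDO types referenced by the matches. */
struct data_vio;
struct pbn_lock;
struct list_node_sketch { struct list_node_sketch *next, *prev; };

/*
 * States inferred from the start_querying/locking/verifying/deduping/
 * writing/updating/unlocking/bypassing handlers matched in this listing.
 */
enum hash_lock_state_sketch {
	HLS_INITIALIZING,
	HLS_QUERYING,
	HLS_WRITING,
	HLS_UPDATING,
	HLS_DEDUPING,
	HLS_VERIFYING,
	HLS_LOCKING,
	HLS_UNLOCKING,
	HLS_BYPASSING,
};

struct hash_lock_sketch {
	enum hash_lock_state_sketch state;	/* which asynchronous step is in flight */
	struct list_node_sketch pool_node;	/* links an idle lock into the zone's pool */
	struct data_vio *agent;			/* the data_vio acting on behalf of all waiters */
	struct pbn_lock *duplicate_lock;	/* read lock on the verified duplicate block */
	/* plus the duplicate location, the waiter queue behind
	 * dequeue_lock_waiter(), and whatever reference counting the
	 * set_hash_lock() path maintains */
};
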
347 static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *lock)
358 * the hash_lock the data_vio holds (if there is one).
365 if (data_vio->hash_lock == NULL)
368 return data_vio->hash_lock->duplicate_lock;
372 * hash_lock_key() - Return hash_lock's record name as a hash code.
377 static inline u64 hash_lock_key(struct hash_lock *lock)
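
hash_lock_key() is matched only by its doc line and signature: it turns the lock's record name into a u64 map key. One plausible sketch, assuming the key is simply the leading bytes of the name and that a record name is a fixed 16-byte value (both assumptions, not taken from the source):

#include <stdint.h>
#include <string.h>

/* A record name is an opaque content hash; 16 bytes is illustrative. */
struct record_name_sketch {
	unsigned char name[16];
};

/* Fold the first 8 bytes of the record name into a key for the zone's lock map. */
static inline uint64_t hash_lock_key_sketch(const struct record_name_sketch *hash)
{
	uint64_t key;

	memcpy(&key, hash->name, sizeof(key));
	return key;
}

Since the record name is itself already a strong hash of the block contents, any stable projection of it distributes well enough to serve as a map key.
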
405 VDO_ASSERT_LOG_ONLY(data_vio == data_vio->hash_lock->agent,
412 * @hash_lock: The hash lock to update.
415 static void set_duplicate_lock(struct hash_lock *hash_lock, struct pbn_lock *pbn_lock)
417 VDO_ASSERT_LOG_ONLY((hash_lock->duplicate_lock == NULL),
420 hash_lock->duplicate_lock = pbn_lock;
429 static inline struct data_vio *dequeue_lock_waiter(struct hash_lock *lock)
441 static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)
443 struct hash_lock *old_lock = data_vio->hash_lock;
467 data_vio->hash_lock = NULL;
480 data_vio->hash_lock = new_lock;
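
set_hash_lock() appears here only as its signature plus the two assignments that clear or install data_vio->hash_lock. One plausible reading of that pattern is a reference-counted pointer swap; the reference_count field below is an assumption standing in for whatever bookkeeping the real function does on the old and new locks.

#include <stddef.h>

struct hash_lock_sketch {
	unsigned int reference_count;	/* data_vios currently pointing at this lock */
};

struct data_vio_sketch {
	struct hash_lock_sketch *hash_lock;
};

/* Drop the reference to the old lock (if any), then take one on the new lock (if any). */
static void set_hash_lock_sketch(struct data_vio_sketch *data_vio,
				 struct hash_lock_sketch *new_lock)
{
	struct hash_lock_sketch *old_lock = data_vio->hash_lock;

	if (old_lock != NULL) {
		old_lock->reference_count--;
		data_vio->hash_lock = NULL;
	}

	if (new_lock != NULL) {
		new_lock->reference_count++;
		data_vio->hash_lock = new_lock;
	}
}
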
485 static void start_deduping(struct hash_lock *lock, struct data_vio *agent,
487 static void start_locking(struct hash_lock *lock, struct data_vio *agent);
488 static void start_writing(struct hash_lock *lock, struct data_vio *agent);
528 static struct data_vio *retire_lock_agent(struct hash_lock *lock)
545 static void wait_on_hash_lock(struct hash_lock *lock, struct data_vio *data_vio)
586 static void start_bypassing(struct hash_lock *lock, struct data_vio *agent)
594 struct hash_lock *lock = data_vio->hash_lock;
637 struct hash_lock *lock = agent->hash_lock;
693 struct hash_lock *lock = agent->hash_lock;
715 static void start_unlocking(struct hash_lock *lock, struct data_vio *agent)
749 struct hash_lock *lock = agent->hash_lock;
795 static void start_updating(struct hash_lock *lock, struct data_vio *agent)
818 static void finish_deduping(struct hash_lock *lock, struct data_vio *data_vio)
869 struct hash_lock *replace_lock,
870 struct hash_lock **lock_ptr)
872 struct hash_lock *lock, *new_lock;
884 new_lock = list_entry(zone->lock_pool.prev, struct hash_lock, pool_node);
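
The matches from return_hash_lock_to_pool(), the list_entry() grab on zone->lock_pool just above, and the vdo_allocate() of the "hash_lock array" further down together imply a simple per-zone free pool: a fixed array of locks is allocated up front, idle locks are strung on a list through pool_node, acquisition pops one off, and returning a lock scrubs it and pushes it back. The sketch below reproduces that lifecycle with a plain LIFO free list in ordinary C; LOCK_POOL_CAPACITY_SKETCH and every other name here are illustrative.

#include <stdlib.h>
#include <string.h>

#define LOCK_POOL_CAPACITY_SKETCH 4096

struct hash_lock_sketch {
	struct hash_lock_sketch *pool_next;	/* free-list link, meaningful only while pooled */
	/* ... the lock state proper would live here ... */
};

struct hash_zone_sketch {
	struct hash_lock_sketch *lock_array;	/* backing storage for the pool */
	struct hash_lock_sketch *lock_pool;	/* LIFO of currently unused locks */
};

static int make_lock_pool(struct hash_zone_sketch *zone)
{
	size_t i;

	zone->lock_array = calloc(LOCK_POOL_CAPACITY_SKETCH, sizeof(*zone->lock_array));
	if (zone->lock_array == NULL)
		return -1;

	for (i = 0; i < LOCK_POOL_CAPACITY_SKETCH; i++) {
		zone->lock_array[i].pool_next = zone->lock_pool;
		zone->lock_pool = &zone->lock_array[i];
	}
	return 0;
}

static struct hash_lock_sketch *pool_acquire(struct hash_zone_sketch *zone)
{
	struct hash_lock_sketch *lock = zone->lock_pool;

	if (lock != NULL)
		zone->lock_pool = lock->pool_next;
	return lock;	/* NULL means every lock in the zone is in use */
}

static void pool_return(struct hash_zone_sketch *zone, struct hash_lock_sketch *lock)
{
	memset(lock, 0, sizeof(*lock));	/* a returned lock is scrubbed before reuse */
	lock->pool_next = zone->lock_pool;
	zone->lock_pool = lock;
}

Pre-allocating the pool keeps lock acquisition allocation-free on the I/O path and caps how many distinct hash values a zone can be working on at once.
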
931 struct hash_lock *new_lock = context;
945 static void fork_hash_lock(struct hash_lock *old_lock, struct data_vio *new_agent)
947 struct hash_lock *new_lock;
983 static void launch_dedupe(struct hash_lock *lock, struct data_vio *data_vio,
1008 static void start_deduping(struct hash_lock *lock, struct data_vio *agent,
1080 struct hash_lock *lock = agent->hash_lock;
1195 static void start_verifying(struct hash_lock *lock, struct data_vio *agent)
1229 struct hash_lock *lock = agent->hash_lock;
1401 set_duplicate_lock(agent->hash_lock, lock);
1416 static void start_locking(struct hash_lock *lock, struct data_vio *agent)
1446 static void finish_writing(struct hash_lock *lock, struct data_vio *agent)
1510 static struct data_vio *select_writing_agent(struct hash_lock *lock)
1559 static void start_writing(struct hash_lock *lock, struct data_vio *agent)
1669 struct hash_lock *lock = agent->hash_lock;
1706 static void start_querying(struct hash_lock *lock, struct data_vio *data_vio)
1717 * report_bogus_lock_state() - Complain that a data_vio has entered a hash_lock that is in an
1723 static void report_bogus_lock_state(struct hash_lock *lock, struct data_vio *data_vio)
1744 struct hash_lock *lock = data_vio->hash_lock;
1789 static bool is_hash_collision(struct hash_lock *lock, struct data_vio *candidate)
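
is_hash_collision() is matched only by its signature. Every data_vio that reaches a given lock already carries the same record name, so a true collision (same name, different contents) can only be caught by comparing block data; the sketch below captures that idea with an assumed 4 KB block size and assumed field names, and flattens the arguments to two data_vios rather than a lock plus a candidate.

#include <stdbool.h>
#include <string.h>

#define BLOCK_SIZE_SKETCH 4096

struct data_vio_sketch {
	unsigned char data[BLOCK_SIZE_SKETCH];
};

/*
 * Two blocks with the same record name but different contents are a real
 * hash collision, so the candidate must not be allowed to share the lock.
 */
static bool is_hash_collision_sketch(const struct data_vio_sketch *holder,
				     const struct data_vio_sketch *candidate)
{
	return memcmp(holder->data, candidate->data, BLOCK_SIZE_SKETCH) != 0;
}
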
1815 result = VDO_ASSERT(data_vio->hash_lock == NULL,
1841 struct hash_lock *lock;
1862 * a hash_lock as the compressed write depends on the hash_lock to manage the
1915 struct hash_lock *lock = data_vio->hash_lock;
1930 struct hash_lock *removed;
1963 struct hash_lock *hash_lock = data_vio->hash_lock;
1973 hash_lock->duplicate = data_vio->new_mapped;
1980 hash_lock->duplicate_lock = vdo_forget(allocation->lock);
2012 data_vio->hash_lock->duplicate = data_vio->new_mapped;
2013 set_duplicate_lock(data_vio->hash_lock, pbn_lock);
2016 * Claim a reference for this data_vio. Necessary since another hash_lock might start
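
The fragments above show the lock adopting a PBN lock as its duplicate_lock, once straight from the agent's allocation via vdo_forget() and once via set_duplicate_lock() after a read lock is obtained. The sketch below illustrates only the ownership hand-off itself; take_pbn_lock() is a hypothetical stand-in for vdo_forget(), and the NULL check mirrors the assertion in set_duplicate_lock() that an existing duplicate_lock is never overwritten.

#include <stddef.h>

struct pbn_lock;

struct hash_lock_sketch {
	struct pbn_lock *duplicate_lock;
};

/* Move a pointer out of *slot, leaving NULL behind (a vdo_forget()-style move). */
static struct pbn_lock *take_pbn_lock(struct pbn_lock **slot)
{
	struct pbn_lock *lock = *slot;

	*slot = NULL;
	return lock;
}

static void adopt_duplicate_lock(struct hash_lock_sketch *hash_lock,
				 struct pbn_lock **holder)
{
	/* Only one owner at a time: never overwrite an existing duplicate_lock. */
	if (hash_lock->duplicate_lock == NULL)
		hash_lock->duplicate_lock = take_pbn_lock(holder);
}
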
2366 result = vdo_allocate(LOCK_POOL_CAPACITY, struct hash_lock, "hash_lock array",
2695 get_hash_zone_statistics(&zones->zones[zone], &stats->hash_lock);
2735 * dump_hash_lock() - Dump a compact description of hash_lock to the log if the lock is not on the
2739 static void dump_hash_lock(const struct hash_lock *lock)