Lines Matching defs:cblock

746  * This assumes the cblock hasn't already been allocated.
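The comment at 746 documents a precondition of the entry allocator's take-this-exact-index path: the caller must only request a cblock that is still free. A minimal userspace sketch of that pattern, assuming a contiguous entry array (struct entry, struct entry_alloc and alloc_particular_entry() mirror names in the surrounding matches; the field layout is an assumption):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct entry {
        bool allocated;
        bool dirty;
        uint32_t level;
        /* ... policy bookkeeping elided ... */
    };

    struct entry_alloc {
        struct entry *begin;       /* base of a contiguous entry array */
        unsigned nr_allocated;
    };

    /* Claim the entry at index i. Per the comment at 746, the caller
     * guarantees this cblock is not already allocated. */
    static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
    {
        struct entry *e = ea->begin + i;

        assert(!e->allocated);     /* the kernel would BUG_ON() here */
        e->allocated = true;
        ea->nr_allocated++;
        return e;
    }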
1198 work.cblock = infer_cblock(mq, e);
1229 work.cblock = infer_cblock(mq, e);
1261 * We allocate the entry now to reserve the cblock. If the
1269 work.cblock = infer_cblock(mq, e);
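All three hits at 1198, 1229 and 1269 fill in a background-work request the same way: the entry does not store its own cblock; infer_cblock() recomputes it from the entry's position in the allocator's array. A sketch of that inference, reusing the struct entry / struct entry_alloc layout from the sketch above (to_cblock()/from_cblock() are dm-cache's type-safety wrappers, reduced here to plain integers, and the real infer_cblock() takes the smq_policy rather than the allocator directly):

    typedef uint32_t dm_cblock_t;   /* the kernel wraps this for type safety */

    static inline dm_cblock_t to_cblock(uint32_t b)   { return b; }
    static inline uint32_t from_cblock(dm_cblock_t b) { return b; }

    /* Entries are laid out contiguously, so index == cblock. */
    static unsigned get_index(struct entry_alloc *ea, struct entry *e)
    {
        return (unsigned)(e - ea->begin);
    }

    static dm_cblock_t infer_cblock(struct entry_alloc *cache_alloc, struct entry *e)
    {
        return to_cblock(get_index(cache_alloc, e));
    }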
1376 static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
1390 *cblock = infer_cblock(mq, e);
1411 static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
1420 r = __lookup(mq, oblock, cblock,
1429 dm_oblock_t oblock, dm_cblock_t *cblock,
1439 r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
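1376-1439 show a common kernel layering: __lookup() does the real work (probe the oblock hash table, requeue the entry on a hit, optionally queue background work) and must run under the policy lock, so smq_lookup() and smq_lookup_with_work() are thin locked wrappers around it. A userspace model of the pattern, with a mutex standing in for the kernel's spin_lock_irqsave() and the lookup body stubbed out:

    #include <errno.h>
    #include <pthread.h>
    #include <stdint.h>

    struct smq_policy {
        pthread_mutex_t lock;       /* a spinlock in the kernel */
        /* ... hash table, queues, allocators ... */
    };

    /* Core lookup; the caller must hold mq->lock. */
    static int __lookup(struct smq_policy *mq, uint64_t oblock, uint32_t *cblock)
    {
        (void)mq; (void)oblock; (void)cblock;
        /* h_lookup() the oblock; on a hit, requeue the entry and
         * report its cblock via infer_cblock(). Stubbed as a miss. */
        return -ENOENT;
    }

    static int smq_lookup(struct smq_policy *mq, uint64_t oblock, uint32_t *cblock)
    {
        int r;

        pthread_mutex_lock(&mq->lock);
        r = __lookup(mq, oblock, cblock);
        pthread_mutex_unlock(&mq->lock);
        return r;
    }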
1467 * case of promotion free the entry for the destination cblock.
1474 from_cblock(work->cblock));
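1467/1474 are from the work-completion path: the entry is found again via work->cblock, and the truncated comment at 1467 notes that an aborted promotion must free the entry, i.e. release the cblock reserved when the work was queued (the comment at 1261). A hedged sketch of just that branch, reusing the entry-array sketch above (the policy_work layout here is an assumption beyond the fields this listing shows):

    struct policy_work {
        int op;                 /* e.g. a POLICY_PROMOTE-style opcode */
        uint64_t oblock;
        dm_cblock_t cblock;
    };

    static void complete_promotion(struct entry_alloc *cache_alloc,
                                   struct policy_work *work, bool success)
    {
        struct entry *e = cache_alloc->begin + from_cblock(work->cblock);

        if (!success) {
            /* Promotion aborted: drop the reservation taken when the
             * work was queued, so the cblock is not leaked. */
            e->allocated = false;
            cache_alloc->nr_allocated--;
        }
        /* On success the entry would instead be hashed under its new
         * oblock and pushed onto the appropriate queue. */
    }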
1528 static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
1530 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
1541 static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
1547 __smq_set_clear_dirty(mq, cblock, true);
1551 static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
1557 __smq_set_clear_dirty(mq, cblock, false);
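The dirty-tracking hits (1528-1557) repeat the wrapper pattern: smq_set_dirty() and smq_clear_dirty() both funnel into __smq_set_clear_dirty() under the lock, and the helper's job is to move the entry between the clean and dirty multiqueues when its state changes. A simplified model (the kernel also special-cases entries with pending background work, elided here; del_queue()/push_queue() are stand-ins for the real queue ops):

    static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
    {
        return ea->begin + index;
    }

    /* Stand-ins for the multiqueue ops. */
    static void del_queue(struct entry *e)  { (void)e; /* unlink from current queue */ }
    static void push_queue(struct entry *e) { (void)e; /* insert by e->dirty/e->level */ }

    static void set_clear_dirty(struct entry_alloc *cache_alloc,
                                dm_cblock_t cblock, bool set)
    {
        struct entry *e = get_entry(cache_alloc, from_cblock(cblock));

        del_queue(e);       /* leave whichever queue it is on ... */
        e->dirty = set;
        push_queue(e);      /* ... and rejoin based on the new state */
    }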
1561 static unsigned int random_level(dm_cblock_t cblock)
1563 return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
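1561-1563 give new entries a pseudo-random starting level so a freshly populated cache doesn't pile everything onto one level of the multiqueue. hash_32(v, 9) keeps the top 9 bits of a golden-ratio multiplicative hash, and the & (NR_CACHE_LEVELS - 1) mask folds that into the level range (NR_CACHE_LEVELS is a power of two, 64 in this policy, so the mask is 63). A self-contained model with a userspace stand-in for the kernel's hash_32():

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CACHE_LEVELS 64u
    #define GOLDEN_RATIO_32 0x61C88647u   /* as in linux/hash.h */

    /* Userspace stand-in for the kernel's hash_32(). */
    static uint32_t hash_32(uint32_t val, unsigned bits)
    {
        return (val * GOLDEN_RATIO_32) >> (32 - bits);
    }

    static uint32_t random_level(uint32_t cblock)
    {
        return hash_32(cblock, 9) & (NR_CACHE_LEVELS - 1);
    }

    int main(void)
    {
        for (uint32_t cb = 0; cb < 8; cb++)
            printf("cblock %u -> level %u\n", cb, random_level(cb));
        return 0;
    }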
1567 dm_oblock_t oblock, dm_cblock_t cblock,
1573 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
1576 e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
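1567-1576 come from the mapping-reload path used when the cache target restarts: the entry for exactly this cblock is claimed with alloc_particular_entry() (hence the precondition at 746), and its level is restored from the persisted hint when one is valid, clamped into range, falling back to random_level() otherwise. A sketch of the level decision, reusing the helpers above:

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    static void load_mapping(struct entry_alloc *cache_alloc, uint32_t cblock,
                             bool dirty, uint32_t hint, bool hint_valid)
    {
        struct entry *e = alloc_particular_entry(cache_alloc, cblock);

        e->dirty = dirty;
        /* A stale or out-of-range hint is clamped; with no hint, fall
         * back to a hashed pseudo-random level (line 1576). */
        e->level = hint_valid ? MIN(hint, NR_CACHE_LEVELS - 1)
                              : random_level(cblock);
    }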
1588 static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
1591 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
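1588-1591 is the teardown direction: find the entry by cblock and, in the full function body, unhook it from its queue and the oblock hash table before handing it back to the allocator. A hedged sketch (the error return for an unallocated cblock is an assumption; this listing truncates the body):

    #include <errno.h>

    static int invalidate_mapping(struct entry_alloc *cache_alloc, uint32_t cblock)
    {
        struct entry *e = get_entry(cache_alloc, cblock);

        if (!e->allocated)
            return -ENODATA;        /* nothing mapped at this cblock */

        /* In the kernel: unhook via del_queue() and h_remove() first. */
        e->allocated = false;
        cache_alloc->nr_allocated--;
        return 0;
    }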
1603 static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
1606 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
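1603-1606 close the loop with 1567-1576: when dm-cache writes its metadata it asks the policy for a per-cblock hint to persist, and SMQ's hint is simply the entry's current queue level, which smq_load_mapping() later feeds back in to restore the multiqueue ordering. Minimal sketch, reusing get_entry() from above:

    /* The persisted hint is just the entry's multiqueue level. */
    static uint32_t get_hint(struct entry_alloc *cache_alloc, uint32_t cblock)
    {
        struct entry *e = get_entry(cache_alloc, cblock);

        return e->level;    /* only valid for an allocated entry */
    }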