Lines matching defs:zone (dm-zoned device-mapper target, drivers/md/dm-zoned-target.c)

21 	struct dm_zone		*zone;
86 struct dm_zone *zone = bioctx->zone;
88 if (zone) {
91 dmz_is_seq(zone))
92 set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
93 dmz_deactivate_zone(zone);
116 static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
122 struct dmz_dev *dev = zone->dev;
134 dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
144 if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
145 zone->wp_block += nr_blocks;
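
The fragments above (file lines 116-145) are from dmz_submit_bio(): the clone BIO's start sector is the zone's start sector plus the chunk-relative block converted to sectors, and a write to a sequential zone advances that zone's write pointer. A minimal userspace sketch of the bookkeeping follows; struct zone_model, remap_sector() and account_write() are invented for the sketch (not the kernel's types), and the 8-sectors-per-block ratio assumes dm-zoned's fixed 4 KiB block over 512-byte sectors.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTORS_PER_BLOCK 8u    /* assumption: 4 KiB blocks over 512 B sectors */

struct zone_model {
        uint64_t start_sect;    /* first device sector of the zone */
        uint64_t wp_block;      /* next writable block of a sequential zone */
        bool is_seq;            /* sequential-write-required zone */
};

/* Chunk-relative block -> absolute device sector (models the
 * dmz_start_sect() + dmz_blk2sect() arithmetic at file line 134). */
static uint64_t remap_sector(const struct zone_model *z, uint64_t chunk_block)
{
        return z->start_sect + chunk_block * SECTORS_PER_BLOCK;
}

/* A write to a sequential zone advances its write pointer (file line 145). */
static void account_write(struct zone_model *z, unsigned int nr_blocks)
{
        if (z->is_seq)
                z->wp_block += nr_blocks;
}

int main(void)
{
        struct zone_model z = { .start_sect = 524288, .wp_block = 100, .is_seq = true };

        printf("write lands at sector %llu\n",
               (unsigned long long)remap_sector(&z, z.wp_block));
        account_write(&z, 16);
        printf("write pointer now at block %llu\n", (unsigned long long)z.wp_block);
        return 0;
}
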
169 static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
180 if (!zone) {
185 DMDEBUG("(%s): READ chunk %llu -> %s zone %u, block %llu, %u blocks",
188 (dmz_is_rnd(zone) ? "RND" :
189 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
190 zone->id,
194 bzone = zone->bzone;
197 if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
198 chunk_block < zone->wp_block) {
199 /* Test block validity in the data zone */
200 ret = dmz_block_valid(zmd, zone, chunk_block);
204 /* Read data zone blocks */
206 rzone = zone;
211 * No valid blocks found in the data zone.
212 * Check the buffer zone, if there is one.
219 /* Read buffer zone blocks */
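
File lines 169-219 are the core of dmz_handle_read(): block validity is probed first in the data zone (always for random and cache zones, but only below the write pointer for sequential zones), the buffer zone is checked only on a miss, and a block valid nowhere is simply zero-filled. A simplified sketch of that selection follows; struct zone_model, block_valid() and read_source() are names invented for the sketch, and the 64-bit bitmap is a toy stand-in for the per-zone validity bitmaps consulted by dmz_block_valid().

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum zone_kind { ZONE_RND, ZONE_CACHE, ZONE_SEQ };

struct zone_model {
        enum zone_kind kind;
        uint64_t wp_block;
        uint64_t valid;                 /* toy validity bitmap, one bit per block */
        struct zone_model *bzone;       /* optional buffer zone, may be NULL */
};

/* Toy stand-in for dmz_block_valid(): is this chunk block valid in @z? */
static bool block_valid(const struct zone_model *z, uint64_t block)
{
        return block < 64 && (z->valid >> block) & 1;
}

/* Pick the zone a read of @block is served from; NULL means zero-fill. */
static const struct zone_model *read_source(const struct zone_model *zone,
                                            uint64_t block)
{
        /* Data zone first: random/cache zones are fully readable,
         * sequential zones only below the write pointer. */
        if ((zone->kind != ZONE_SEQ || block < zone->wp_block) &&
            block_valid(zone, block))
                return zone;

        /* No valid block in the data zone: check the buffer zone, if any. */
        if (zone->bzone && block_valid(zone->bzone, block))
                return zone->bzone;

        return NULL;
}

int main(void)
{
        struct zone_model buf = { .kind = ZONE_RND, .valid = 1ULL << 5 };
        struct zone_model seq = { .kind = ZONE_SEQ, .wp_block = 4,
                                  .valid = 1ULL << 2, .bzone = &buf };

        printf("block 2 read from %s\n", read_source(&seq, 2) == &seq ? "data" : "buffer");
        printf("block 5 read from %s\n", read_source(&seq, 5) == &buf ? "buffer" : "data");
        return 0;
}
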
245 * Write blocks directly in a data zone, at the write pointer.
246 * If a buffer zone is assigned, invalidate the blocks written
250 struct dm_zone *zone, struct bio *bio,
255 struct dm_zone *bzone = zone->bzone;
258 if (dmz_is_readonly(zone))
262 ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
267 * Validate the blocks in the data zone and invalidate
268 * in the buffer zone, if there is one.
270 ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
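
The direct-write fragments (file lines 245-270) encode an ordering rule: the data zone is written at its write pointer first, the written blocks are then marked valid there, and any copies in a mapped buffer zone are invalidated so stale buffered data cannot satisfy later reads. A compile-checkable sketch of that sequence, with dmz_submit_bio(), dmz_validate_blocks() and dmz_invalidate_blocks() replaced by toy helpers (submit_write() and plain bitmap updates) invented for the sketch:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

struct zone_model {
        bool readonly;
        uint64_t valid;                 /* toy validity bitmap, one bit per block */
        struct zone_model *bzone;       /* optional buffer zone */
};

static uint64_t mask(uint64_t block, unsigned int nr)
{
        return (nr >= 64 ? ~0ULL : (1ULL << nr) - 1) << block;
}

/* Stand-in for dmz_submit_bio(): pretend the clone BIO was queued fine. */
static int submit_write(struct zone_model *z, uint64_t block, unsigned int nr)
{
        (void)z; (void)block; (void)nr;
        return 0;
}

/* Direct write: data zone first, then validate there, then drop buffered copies. */
static int direct_write(struct zone_model *zone, uint64_t block, unsigned int nr)
{
        int ret;

        if (zone->readonly)
                return -EROFS;

        ret = submit_write(zone, block, nr);
        if (ret)
                return ret;

        zone->valid |= mask(block, nr);                 /* validate in the data zone  */
        if (zone->bzone)
                zone->bzone->valid &= ~mask(block, nr); /* invalidate in the buffer   */
        return 0;
}

int main(void)
{
        struct zone_model buf = { .valid = mask(4, 2) };
        struct zone_model data = { .bzone = &buf };

        return direct_write(&data, 4, 2);               /* leaves buf.valid == 0 */
}
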
278 * Write blocks in the buffer zone of @zone.
279 * If no buffer zone is assigned yet, get one.
280 * Called with @zone write locked.
283 struct dm_zone *zone, struct bio *bio,
291 /* Get the buffer zone. One will be allocated if needed */
292 bzone = dmz_get_chunk_buffer(zmd, zone);
305 * Validate the blocks in the buffer zone
306 * and invalidate in the data zone.
309 if (ret == 0 && chunk_block < zone->wp_block)
310 ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
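
File lines 278-310 show the buffered counterpart: a buffer zone is obtained (allocated on first use via dmz_get_chunk_buffer()), the blocks are written and validated there, and the same range is invalidated in the data zone, but only below the data zone's write pointer, since a sequential zone cannot hold data beyond it. A sketch under the same toy-bitmap assumptions; get_buffer() and buffered_write() are names invented here, not kernel APIs.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct zone_model {
        uint64_t wp_block;              /* write pointer of the (sequential) data zone */
        uint64_t valid;                 /* toy validity bitmap, one bit per block */
        struct zone_model *bzone;       /* buffer zone, allocated on demand */
};

static uint64_t mask(uint64_t block, unsigned int nr)
{
        return (nr >= 64 ? ~0ULL : (1ULL << nr) - 1) << block;
}

/* Stand-in for dmz_get_chunk_buffer(): attach a buffer zone if none is mapped. */
static struct zone_model *get_buffer(struct zone_model *zone)
{
        if (!zone->bzone)
                zone->bzone = calloc(1, sizeof(*zone->bzone));
        return zone->bzone;
}

/* Buffered write: write + validate in the buffer zone, invalidate in the data
 * zone only for blocks that lie below its write pointer. */
static int buffered_write(struct zone_model *zone, uint64_t block, unsigned int nr)
{
        struct zone_model *bzone = get_buffer(zone);

        if (!bzone)
                return -ENOMEM;

        bzone->valid |= mask(block, nr);
        if (block < zone->wp_block)
                zone->valid &= ~mask(block, nr);
        return 0;
}

int main(void)
{
        struct zone_model data = { .wp_block = 10, .valid = mask(0, 10) };
        int ret = buffered_write(&data, 4, 2);  /* unaligned rewrite of blocks 4-5 */

        free(data.bzone);
        return ret;
}
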
318 static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
325 if (!zone)
328 DMDEBUG("(%s): WRITE chunk %llu -> %s zone %u, block %llu, %u blocks",
331 (dmz_is_rnd(zone) ? "RND" :
332 (dmz_is_cache(zone) ? "CACHE" : "SEQ")),
333 zone->id,
336 if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
337 chunk_block == zone->wp_block) {
339 * zone is a random zone or it is a sequential zone
340 * and the BIO is aligned to the zone write pointer:
341 * direct write the zone.
343 return dmz_handle_direct_write(dmz, zone, bio,
348 * This is an unaligned write in a sequential zone:
351 return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
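
The comments at file lines 336-351 spell out the write-path decision: random and cache zones, and writes landing exactly on a sequential zone's write pointer, are written directly; anything else is an unaligned write into a sequential zone and is redirected to the chunk's buffer zone. That predicate in isolation, as a small sketch (write_goes_direct() is a name invented here):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum zone_kind { ZONE_RND, ZONE_CACHE, ZONE_SEQ };

struct zone_model {
        enum zone_kind kind;
        uint64_t wp_block;
};

/* True if the write can go straight to the data zone, false if it must be
 * redirected to the chunk's buffer zone. */
static bool write_goes_direct(const struct zone_model *zone, uint64_t chunk_block)
{
        return zone->kind == ZONE_RND || zone->kind == ZONE_CACHE ||
               chunk_block == zone->wp_block;
}

int main(void)
{
        struct zone_model seq = { .kind = ZONE_SEQ, .wp_block = 128 };

        printf("block 128: %s\n", write_goes_direct(&seq, 128) ? "direct" : "buffered");
        printf("block  64: %s\n", write_goes_direct(&seq, 64)  ? "direct" : "buffered");
        return 0;
}
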
357 static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
367 if (!zone)
370 if (dmz_is_readonly(zone))
373 DMDEBUG("(%s): DISCARD chunk %llu -> zone %u, block %llu, %u blocks",
376 zone->id,
380 * Invalidate blocks in the data zone and its
381 * buffer zone if one is mapped.
383 if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
384 chunk_block < zone->wp_block)
385 ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
386 if (ret == 0 && zone->bzone)
387 ret = dmz_invalidate_blocks(zmd, zone->bzone,
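
The discard fragments (file lines 357-387) invalidate the discarded range in the data zone, again only where that zone can hold data (random/cache zones, or below the write pointer), and also in the buffer zone if one is mapped; an unmapped chunk needs no work at all. A sketch under the same toy-bitmap model, with discard() invented for the sketch:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

enum zone_kind { ZONE_RND, ZONE_CACHE, ZONE_SEQ };

struct zone_model {
        enum zone_kind kind;
        bool readonly;
        uint64_t wp_block;
        uint64_t valid;                 /* toy validity bitmap, one bit per block */
        struct zone_model *bzone;       /* optional buffer zone */
};

static uint64_t mask(uint64_t block, unsigned int nr)
{
        return (nr >= 64 ? ~0ULL : (1ULL << nr) - 1) << block;
}

/* Discard: drop validity in the data zone where it can hold data, and in the
 * buffer zone if one is mapped; an unmapped chunk (zone == NULL) is a no-op. */
static int discard(struct zone_model *zone, uint64_t block, unsigned int nr)
{
        if (!zone)
                return 0;
        if (zone->readonly)
                return -EROFS;

        if (zone->kind != ZONE_SEQ || block < zone->wp_block)
                zone->valid &= ~mask(block, nr);
        if (zone->bzone)
                zone->bzone->valid &= ~mask(block, nr);
        return 0;
}

int main(void)
{
        struct zone_model buf = { .kind = ZONE_RND, .valid = mask(0, 8) };
        struct zone_model seq = { .kind = ZONE_SEQ, .wp_block = 8,
                                  .valid = mask(0, 8), .bzone = &buf };

        return discard(&seq, 2, 4);     /* clears blocks 2-5 in both zones */
}
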
401 struct dm_zone *zone;
407 * Get the data zone mapping the chunk. There may be no
409 * the zone returned will be set to active state.
411 zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
413 if (IS_ERR(zone)) {
414 ret = PTR_ERR(zone);
419 if (zone) {
420 dmz_activate_zone(zone);
421 bioctx->zone = zone;
422 dmz_reclaim_bio_acc(zone->dev->reclaim);
427 ret = dmz_handle_read(dmz, zone, bio);
430 ret = dmz_handle_write(dmz, zone, bio);
434 ret = dmz_handle_discard(dmz, zone, bio);
444 * is still valid, that is, that the zone used still has valid blocks.
446 if (zone)
447 dmz_put_chunk_mapping(zmd, zone);
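
File lines 401-447 outline dmz_handle_bio(): look up the data zone mapping the chunk (there may be none for reads and discards), activate the zone and record it in the per-BIO context for the completion path, dispatch on the BIO operation, then release the chunk mapping. The sketch below only models that control flow; the handler stubs and the single refs counter are invented stand-ins and deliberately ignore the separate activation and mapping reference counts the kernel keeps.

#include <errno.h>

enum op { OP_READ, OP_WRITE, OP_DISCARD };

struct zone_model { int refs; };
struct bio_model  { enum op op; struct zone_model *zone; };

/* Toy stubs standing in for dmz_handle_read/write/discard() and the zone
 * accounting; all invented for this sketch. */
static int handle_read(struct zone_model *z, struct bio_model *b)    { (void)z; (void)b; return 0; }
static int handle_write(struct zone_model *z, struct bio_model *b)   { (void)b; return z ? 0 : -ENOSPC; }
static int handle_discard(struct zone_model *z, struct bio_model *b) { (void)z; (void)b; return 0; }
static void activate_zone(struct zone_model *z)     { z->refs++; }
static void put_chunk_mapping(struct zone_model *z) { z->refs--; }

static int handle_bio(struct zone_model *zone, struct bio_model *bio)
{
        int ret;

        /* Reads and discards may target an unmapped chunk, so zone can be NULL. */
        if (zone) {
                activate_zone(zone);
                bio->zone = zone;       /* kept for the completion path */
        }

        switch (bio->op) {
        case OP_READ:
                ret = handle_read(zone, bio);
                break;
        case OP_WRITE:
                ret = handle_write(zone, bio);
                break;
        case OP_DISCARD:
                ret = handle_discard(zone, bio);
                break;
        default:
                ret = -EIO;
        }

        /* Release the chunk mapping once the BIO has been handled. */
        if (zone)
                put_chunk_mapping(zone);
        return ret;
}

int main(void)
{
        struct zone_model z = { 0 };
        struct bio_model  b = { .op = OP_WRITE };

        return handle_bio(&z, &b);
}
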
654 bioctx->zone = NULL;
667 /* Split zone BIOs to fit entirely into a zone */
1008 /* FS hint to try to align to the device zone size */
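
The last two fragments come from the map and io_hints paths: an incoming BIO is clipped so it never crosses a zone boundary, and the zone/chunk size is exported as a queue limit so filesystems can align allocations to it. A sketch of the clipping arithmetic, assuming a power-of-two zone size in sectors so the in-zone offset can be taken with a mask; sectors_until_zone_end() is a name invented here, and the kernel performs the actual split with dm_accept_partial_bio().

#include <stdint.h>
#include <stdio.h>

/* How many sectors of a BIO starting at @sector can be processed before it
 * would spill into the next zone. Assumes @zone_nr_sectors is a power of two. */
static unsigned int sectors_until_zone_end(uint64_t sector,
                                           unsigned int nr_sectors,
                                           uint64_t zone_nr_sectors)
{
        uint64_t chunk_sector = sector & (zone_nr_sectors - 1);

        if (chunk_sector + nr_sectors > zone_nr_sectors)
                return (unsigned int)(zone_nr_sectors - chunk_sector);
        return nr_sectors;
}

int main(void)
{
        /* 256 MiB zones = 524288 sectors; a 1 MiB BIO near the end gets clipped. */
        printf("%u sectors accepted\n",
               sectors_until_zone_end(524288 - 1024, 2048, 524288));
        return 0;
}
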