Lines Matching defs:data_vio — drivers/md/dm-vdo/data-vio.h (Linux kernel dm-vdo driver)

70 	/* The current height at which this data_vio is operating */
91 	* Where a data_vio is on the compression path; advance_compression_stage() depends on the order of
92 	* this enum.
95 /* A data_vio which has not yet entered the compression path */
97 /* A data_vio which is in the compressor */
99 /* A data_vio which is blocked in the packer */
101 /* A data_vio which is no longer on the compression path (and never will be) */
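These four comments annotate an ordered stage enum; advance_data_vio_compression_stage() (declared below) relies on the declaration order, so moving along the compression path is a bounded increment. A minimal sketch of that shape, with enumerator names inferred from the comments rather than verified against the header:

	/* Illustrative reconstruction; the enumerator names are assumptions. */
	enum data_vio_compression_stage {
		/* Not yet on the compression path */
		DATA_VIO_PRE_COMPRESSOR,
		/* In the compressor */
		DATA_VIO_COMPRESSING,
		/* Blocked in the packer */
		DATA_VIO_PACKING,
		/* Off the compression path, permanently */
		DATA_VIO_POST_PACKER,
	};

	/*
	 * Because the stages are declared in pipeline order, advancing can be
	 * a clamped increment (hypothetical helper).
	 */
	static inline enum data_vio_compression_stage
	next_stage(enum data_vio_compression_stage stage)
	{
		return (stage < DATA_VIO_POST_PACKER) ? stage + 1 : stage;
	}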
112 	* The current compression status of this data_vio. This field contains a value which
113 	* consists of a data_vio_compression_stage and a flag indicating whether a request has
114 	* been made to cancel (or prevent) compression for this data_vio.
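As the comment says, this atomic field packs a data_vio_compression_stage together with a cancel flag, so cancellation can race safely with stage advancement and be resolved by a single compare-and-swap. A sketch of that packing technique, with the encoding entirely assumed:

	#include <linux/atomic.h>

	#define STAGE_MASK		0x7	/* assumed: low bits hold the stage */
	#define MAY_NOT_COMPRESS	0x8	/* assumed: one cancellation flag bit */

	/*
	 * Set the cancel flag while preserving the current stage; return the
	 * stage that was in effect when the flag stuck. Hypothetical helper,
	 * not the driver's actual cancel_data_vio_compression().
	 */
	static int cancel_compression_raw(atomic_t *compression)
	{
		int old = atomic_read(compression);

		while (!atomic_try_cmpxchg(compression, &old, old | MAY_NOT_COMPRESS))
			;	/* old was refreshed; retry with the new value */

		return old & STAGE_MASK;
	}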
124 /* The packer input or output bin slot which holds the enclosing data_vio */
127 /* The packer bin to which the enclosing data_vio has been assigned */
131 struct data_vio *next_in_batch;
134 struct data_vio *lock_holder;
137 	* The compressed block used to hold the compressed form of this block and that of any
138 	* other blocks for which this data_vio is the compressed write agent.
176 struct data_vio {
257 	* The number of bytes to be discarded. For discards, this field will always be positive,
258 	* whereas for non-discards it will always be 0. Hence it can be used to determine whether
259 	* a data_vio is processing a discard, even after the user_bio has been acknowledged.
265 /* Fields beyond this point will not be reset when a pooled data_vio is reused. */
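That reset boundary is the standard idiom for pooled objects: when a data_vio is reused, only the transient prefix of the structure is zeroed, and everything from the marker onward (pool linkage, the embedded vio, and so on) survives. A sketch of the idiom; the boundary field used here is an assumption:

	#include <linux/string.h>
	#include <linux/stddef.h>

	/*
	 * Zero only the fields declared before the persistent section. The
	 * boundary is assumed to be the embedded vio; the real code would
	 * name whichever member starts the do-not-reset region.
	 */
	static void reset_transient_fields(struct data_vio *data_vio)
	{
		memset(data_vio, 0, offsetof(struct data_vio, vio));
	}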
281 static inline struct data_vio *vio_as_data_vio(struct vio *vio)
283 VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
284 return container_of(vio, struct data_vio, vio);
287 static inline struct data_vio *as_data_vio(struct vdo_completion *completion)
292 static inline struct data_vio *vdo_waiter_as_data_vio(struct vdo_waiter *waiter)
297 return container_of(waiter, struct data_vio, waiter);
300 static inline struct data_vio *data_vio_from_reference_updater(struct reference_updater *updater)
303 return container_of(updater, struct data_vio, increment_updater);
305 return container_of(updater, struct data_vio, decrement_updater);
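Every conversion above is the same container_of pattern: given a pointer to an embedded member (the vio, its completion, the waiter, or one of the two reference updaters), recover the enclosing data_vio. The typical consumer is a callback that receives only the embedded member. A hypothetical example; the waiter callback signature is assumed:

	static void example_requeue_waiter(struct vdo_waiter *waiter, void *context)
	{
		/* The wait queue hands back only the embedded waiter. */
		struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);

		continue_data_vio(data_vio);
	}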
308 static inline bool data_vio_has_flush_generation_lock(struct data_vio *data_vio)
310 return !list_empty(&data_vio->write_entry);
313 static inline struct vdo *vdo_from_data_vio(struct data_vio *data_vio)
315 return data_vio->vio.completion.vdo;
318 static inline bool data_vio_has_allocation(struct data_vio *data_vio)
320 return (data_vio->allocation.pbn != VDO_ZERO_BLOCK);
324 advance_data_vio_compression_stage(struct data_vio *data_vio);
326 get_data_vio_compression_status(struct data_vio *data_vio);
327 bool cancel_data_vio_compression(struct data_vio *data_vio);
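Together these form the accessor API for the atomic compression field above: get_data_vio_compression_status() snapshots it, advance_data_vio_compression_stage() moves it forward, and cancel_data_vio_compression() sets the don't-compress flag. A hedged usage sketch; the status struct's member names (stage, may_not_compress) are assumptions, and the stage constant comes from the illustrative enum earlier:

	/*
	 * Sketch: only send the data_vio onward to the packer if compression
	 * has not been cancelled out from under it.
	 */
	static bool should_pack(struct data_vio *data_vio)
	{
		struct data_vio_compression_status status =
			get_data_vio_compression_status(data_vio);

		return (status.stage == DATA_VIO_COMPRESSING) && !status.may_not_compress;
	}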
351 static inline void continue_data_vio(struct data_vio *data_vio)
353 vdo_launch_completion(&data_vio->vio.completion);
357 * continue_data_vio_with_error() - Set an error code and then continue processing a data_vio.
362 static inline void continue_data_vio_with_error(struct data_vio *data_vio, int result)
364 vdo_continue_completion(&data_vio->vio.completion, result);
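continue_data_vio() simply re-launches the data_vio's completion on whatever callback is currently set; the _with_error() variant records the error first so the completion machinery can divert to the registered error handler. A typical call site, sketched (the function is illustrative; VDO_SUCCESS is the driver's success code):

	static void finish_step(struct data_vio *data_vio, int result)
	{
		if (result != VDO_SUCCESS) {
			/* Record the error and let the error handler run. */
			continue_data_vio_with_error(data_vio, result);
			return;
		}

		continue_data_vio(data_vio);
	}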
367 const char * __must_check get_data_vio_operation_name(struct data_vio *data_vio);
369 static inline void assert_data_vio_in_hash_zone(struct data_vio *data_vio)
371 thread_id_t expected = data_vio->hash_zone->thread_id;
378 "data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
379 (unsigned long long) data_vio->logical.lbn, thread_id, expected);
382 static inline void set_data_vio_hash_zone_callback(struct data_vio *data_vio,
385 vdo_set_completion_callback(&data_vio->vio.completion, callback,
386 data_vio->hash_zone->thread_id);
393 static inline void launch_data_vio_hash_zone_callback(struct data_vio *data_vio,
396 set_data_vio_hash_zone_callback(data_vio, callback);
397 vdo_launch_completion(&data_vio->vio.completion);
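This assert/set/launch trio is the template for every thread-locale family that follows (logical, allocated, duplicate, mapped, new_mapped, journal, packer, cpu, and bio zones): set_*_callback() binds a callback to the thread that owns the relevant state, launch_*_callback() additionally enqueues the completion, and the assert_* helper lets the callback verify on entry that it is running there. One sketch covers the whole family; both functions are hypothetical:

	/* Runs on the hash zone thread after the handoff below. */
	static void lock_hash_in_zone(struct vdo_completion *completion)
	{
		struct data_vio *data_vio = as_data_vio(completion);

		assert_data_vio_in_hash_zone(data_vio);
		/* ... work that may only touch this hash zone's state ... */
	}

	/* Called from any other thread: hand the data_vio to its hash zone. */
	static void start_dedupe(struct data_vio *data_vio)
	{
		launch_data_vio_hash_zone_callback(data_vio, lock_hash_in_zone);
	}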
400 static inline void assert_data_vio_in_logical_zone(struct data_vio *data_vio)
402 thread_id_t expected = data_vio->logical.zone->thread_id;
406 "data_vio for logical block %llu on thread %u, should be on thread %u",
407 (unsigned long long) data_vio->logical.lbn, thread_id, expected);
410 static inline void set_data_vio_logical_callback(struct data_vio *data_vio,
413 vdo_set_completion_callback(&data_vio->vio.completion, callback,
414 data_vio->logical.zone->thread_id);
421 static inline void launch_data_vio_logical_callback(struct data_vio *data_vio,
424 set_data_vio_logical_callback(data_vio, callback);
425 vdo_launch_completion(&data_vio->vio.completion);
428 static inline void assert_data_vio_in_allocated_zone(struct data_vio *data_vio)
430 thread_id_t expected = data_vio->allocation.zone->thread_id;
434 "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
435 (unsigned long long) data_vio->allocation.pbn, thread_id,
439 static inline void set_data_vio_allocated_zone_callback(struct data_vio *data_vio,
442 vdo_set_completion_callback(&data_vio->vio.completion, callback,
443 data_vio->allocation.zone->thread_id);
448 * data_vio's allocated zone and queue the data_vio and
451 static inline void launch_data_vio_allocated_zone_callback(struct data_vio *data_vio,
454 set_data_vio_allocated_zone_callback(data_vio, callback);
455 vdo_launch_completion(&data_vio->vio.completion);
458 static inline void assert_data_vio_in_duplicate_zone(struct data_vio *data_vio)
460 thread_id_t expected = data_vio->duplicate.zone->thread_id;
464 "data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
465 (unsigned long long) data_vio->duplicate.pbn, thread_id,
469 static inline void set_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
472 vdo_set_completion_callback(&data_vio->vio.completion, callback,
473 data_vio->duplicate.zone->thread_id);
478 * data_vio's duplicate zone and queue the data_vio and
481 static inline void launch_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
484 set_data_vio_duplicate_zone_callback(data_vio, callback);
485 vdo_launch_completion(&data_vio->vio.completion);
488 static inline void assert_data_vio_in_mapped_zone(struct data_vio *data_vio)
490 thread_id_t expected = data_vio->mapped.zone->thread_id;
494 "data_vio for mapped physical block %llu on thread %u, should be on thread %u",
495 (unsigned long long) data_vio->mapped.pbn, thread_id, expected);
498 static inline void set_data_vio_mapped_zone_callback(struct data_vio *data_vio,
501 vdo_set_completion_callback(&data_vio->vio.completion, callback,
502 data_vio->mapped.zone->thread_id);
505 static inline void assert_data_vio_in_new_mapped_zone(struct data_vio *data_vio)
507 thread_id_t expected = data_vio->new_mapped.zone->thread_id;
511 "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
512 (unsigned long long) data_vio->new_mapped.pbn, thread_id,
516 static inline void set_data_vio_new_mapped_zone_callback(struct data_vio *data_vio,
519 vdo_set_completion_callback(&data_vio->vio.completion, callback,
520 data_vio->new_mapped.zone->thread_id);
523 static inline void assert_data_vio_in_journal_zone(struct data_vio *data_vio)
525 thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
529 "data_vio for logical block %llu on thread %u, should be on journal thread %u",
530 (unsigned long long) data_vio->logical.lbn, thread_id,
534 static inline void set_data_vio_journal_callback(struct data_vio *data_vio,
537 thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
539 vdo_set_completion_callback(&data_vio->vio.completion, callback, journal_thread);
546 static inline void launch_data_vio_journal_callback(struct data_vio *data_vio,
549 set_data_vio_journal_callback(data_vio, callback);
550 vdo_launch_completion(&data_vio->vio.completion);
553 static inline void assert_data_vio_in_packer_zone(struct data_vio *data_vio)
555 thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
559 "data_vio for logical block %llu on thread %u, should be on packer thread %u",
560 (unsigned long long) data_vio->logical.lbn, thread_id,
564 static inline void set_data_vio_packer_callback(struct data_vio *data_vio,
567 thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
569 vdo_set_completion_callback(&data_vio->vio.completion, callback, packer_thread);
576 static inline void launch_data_vio_packer_callback(struct data_vio *data_vio,
579 set_data_vio_packer_callback(data_vio, callback);
580 vdo_launch_completion(&data_vio->vio.completion);
583 static inline void assert_data_vio_on_cpu_thread(struct data_vio *data_vio)
585 thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
589 "data_vio for logical block %llu on thread %u, should be on cpu thread %u",
590 (unsigned long long) data_vio->logical.lbn, thread_id,
594 static inline void set_data_vio_cpu_callback(struct data_vio *data_vio,
597 thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
599 vdo_set_completion_callback(&data_vio->vio.completion, callback, cpu_thread);
606 static inline void launch_data_vio_cpu_callback(struct data_vio *data_vio,
610 set_data_vio_cpu_callback(data_vio, callback);
611 vdo_launch_completion_with_priority(&data_vio->vio.completion, priority);
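The CPU variant is the only launcher here that takes an explicit priority, since the CPU queue multiplexes unrelated work (hashing, compression, completion). A prioritized handoff, sketched; the priority constant and the hashing step are illustrative stand-ins:

	/* Hypothetical CPU-thread step. */
	static void hash_block(struct vdo_completion *completion)
	{
		struct data_vio *data_vio = as_data_vio(completion);

		assert_data_vio_on_cpu_thread(data_vio);
		/* ... compute the block's hash here ... */
	}

	static void offload_hashing(struct data_vio *data_vio)
	{
		launch_data_vio_cpu_callback(data_vio, hash_block,
					     CPU_Q_HASH_BLOCK_PRIORITY);
	}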
614 static inline void set_data_vio_bio_zone_callback(struct data_vio *data_vio,
617 vdo_set_completion_callback(&data_vio->vio.completion, callback,
618 get_vio_bio_zone_thread_id(&data_vio->vio));
625 static inline void launch_data_vio_bio_zone_callback(struct data_vio *data_vio,
628 set_data_vio_bio_zone_callback(data_vio, callback);
629 vdo_launch_completion_with_priority(&data_vio->vio.completion,
638 static inline void launch_data_vio_on_bio_ack_queue(struct data_vio *data_vio,
641 struct vdo_completion *completion = &data_vio->vio.completion;
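Only the first line of this helper's body matches the search, so the control flow is elided. The point of the helper is that the bio-acknowledgment thread is optional; a reconstruction under that assumption (vdo_uses_bio_ack_queue(), bio_ack_thread, and the priority constant should be checked against the real header):

	static inline void launch_data_vio_on_bio_ack_queue(struct data_vio *data_vio,
							    vdo_action_fn callback)
	{
		struct vdo_completion *completion = &data_vio->vio.completion;
		struct vdo *vdo = completion->vdo;

		if (!vdo_uses_bio_ack_queue(vdo)) {
			/* No dedicated ack thread: run the callback inline. */
			callback(completion);
			return;
		}

		vdo_set_completion_callback(completion, callback,
					    vdo->thread_config.bio_ack_thread);
		vdo_launch_completion_with_priority(completion, BIO_ACK_Q_ACK_PRIORITY);
	}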
654 void data_vio_allocate_data_block(struct data_vio *data_vio,
658 void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset);
660 int __must_check uncompress_data_vio(struct data_vio *data_vio,
664 void update_metadata_for_data_vio_write(struct data_vio *data_vio,
666 void write_data_vio(struct data_vio *data_vio);
667 void launch_compress_data_vio(struct data_vio *data_vio);
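These last declarations are the write-path entry points: allocate a physical block, then either write the data directly or send it down the compression path. A closing sketch tying them to the predicates above; the control flow is illustrative rather than VDO's actual policy, and may_not_compress is an assumed member name:

	/* Hypothetical continuation after data_vio_allocate_data_block(). */
	static void continue_write(struct vdo_completion *completion)
	{
		struct data_vio *data_vio = as_data_vio(completion);

		if (!data_vio_has_allocation(data_vio)) {
			/* No free block was available in the allocation zone. */
			continue_data_vio_with_error(data_vio, VDO_NO_SPACE);
			return;
		}

		if (get_data_vio_compression_status(data_vio).may_not_compress)
			write_data_vio(data_vio);
		else
			launch_compress_data_vio(data_vio);
	}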