Lines matching defs:completion

58  *   completion is required for further work to be done by the issuer.
101 * completion will be enqueued on a cpu queue. This obviates the need for the releasing threads to
105 * Whenever the pool's completion is run on a cpu thread, it calls process_release_callback() which
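
The two comment fragments above (source lines 101 and 105) describe the pool's release scheme: finished data_vios are queued from whatever thread releases them, and the pool's own completion later drains the queue on a single cpu thread, so releasing threads never touch the pool's internal state directly. A minimal stand-alone sketch of that single-consumer pattern follows; the types and names are made up (not the vdo API), and a mutex stands in for the lock-free funnel queue the real pool uses (see the vdo_funnel_queue_put() line further down).

    /*
     * Sketch only: stand-in types, not the vdo API. Any thread may push a
     * released item; one designated thread drains the whole batch, which is
     * the role process_release_callback() plays for the pool.
     */
    #include <pthread.h>
    #include <stddef.h>

    struct released_item {
        struct released_item *next;
    };

    struct release_queue {
        pthread_mutex_t lock;
        struct released_item *head;
    };

    /* Called from any releasing thread. */
    static void release_item(struct release_queue *q, struct released_item *item)
    {
        pthread_mutex_lock(&q->lock);
        item->next = q->head;
        q->head = item;
        pthread_mutex_unlock(&q->lock);
        /* ...then enqueue the pool's completion on the cpu queue... */
    }

    /* Runs only on the designated cpu thread. */
    static void drain_released_items(struct release_queue *q)
    {
        struct released_item *batch;

        pthread_mutex_lock(&q->lock);
        batch = q->head;
        q->head = NULL;
        pthread_mutex_unlock(&q->lock);

        while (batch != NULL) {
            struct released_item *next = batch->next;
            /* return the item to the pool, wake any waiters */
            batch = next;
        }
    }
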
160 struct vdo_completion completion;
215 as_data_vio_pool(struct vdo_completion *completion)
217 vdo_assert_completion_type(completion, VDO_DATA_VIO_POOL_COMPLETION);
218 return container_of(completion, struct data_vio_pool, completion);
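
as_data_vio_pool() above is the kernel's container_of idiom: given a pointer to the embedded completion, recover the enclosing data_vio_pool. The many as_data_vio() lines below are the same conversion for a data_vio. A self-contained illustration with a local container_of definition and hypothetical types:

    #include <stddef.h>

    /* Simplified copy of the kernel macro, for illustration. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct completion_sketch {
        int result;
    };

    struct pool_sketch {
        int limit;
        struct completion_sketch completion; /* embedded, as in data_vio_pool */
    };

    static struct pool_sketch *as_pool_sketch(struct completion_sketch *completion)
    {
        /* Subtract the member offset to get back to the containing struct. */
        return container_of(completion, struct pool_sketch, completion);
    }
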
275 int error = vdo_status_to_errno(data_vio->vio.completion.result);
414 * @completion: The data_vio for an external data request as a completion.
418 static void attempt_logical_block_lock(struct vdo_completion *completion)
420 struct data_vio *data_vio = as_data_vio(completion);
463 complete_data_vio(completion);
487 struct vdo_completion *completion = &data_vio->vio.completion;
504 vdo_reset_completion(completion);
505 completion->error_handler = handle_data_vio_error;
507 vdo_enqueue_completion(completion, VDO_DEFAULT_Q_MAP_BIO_PRIORITY);
588 launch_bio(limiter->pool->completion.vdo, data_vio, bio);
664 pool->completion.requeue = true;
665 vdo_launch_completion_with_priority(&pool->completion,
694 * @completion: The pool with data_vios to release.
696 static void process_release_callback(struct vdo_completion *completion)
698 struct data_vio_pool *pool = as_data_vio_pool(completion);
862 vdo_initialize_completion(&pool->completion, vdo, VDO_DATA_VIO_POOL_COMPLETION);
863 vdo_prepare_completion(&pool->completion, process_release_callback,
981 launch_bio(pool->completion.vdo, data_vio, bio);
1006 * @completion: The completion to notify when the pool has drained.
1008 void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
1010 assert_on_vdo_cpu_thread(completion->vdo, __func__);
1011 vdo_start_draining(&pool->state, VDO_ADMIN_STATE_SUSPENDING, completion,
1017 * @completion: The completion to notify when the pool has resumed.
1019 void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
1021 assert_on_vdo_cpu_thread(completion->vdo, __func__);
1022 vdo_continue_completion(completion, vdo_resume_if_quiescent(&pool->state));
1156 static void release_allocated_lock(struct vdo_completion *completion)
1158 struct data_vio *data_vio = as_data_vio(completion);
1228 next_lock_holder->vio.completion.requeue = true;
1236 static void release_logical_lock(struct vdo_completion *completion)
1238 struct data_vio *data_vio = as_data_vio(completion);
1253 static void clean_hash_lock(struct vdo_completion *completion)
1255 struct data_vio *data_vio = as_data_vio(completion);
1258 if (completion->result != VDO_SUCCESS) {
1275 struct vdo_completion *completion = &data_vio->vio.completion;
1282 (completion->result != VDO_SUCCESS)) {
1283 struct data_vio_pool *pool = completion->vdo->data_vio_pool;
1285 vdo_funnel_queue_put(pool->queue, &completion->work_queue_entry_link);
1295 completion->requeue = true;
1324 (data_vio->vio.completion.result != VDO_READ_ONLY))
1337 void complete_data_vio(struct vdo_completion *completion)
1339 struct data_vio *data_vio = as_data_vio(completion);
1341 completion->error_handler = NULL;
1347 static void enter_read_only_mode(struct vdo_completion *completion)
1349 if (vdo_is_read_only(completion->vdo))
1352 if (completion->result != VDO_READ_ONLY) {
1353 struct data_vio *data_vio = as_data_vio(completion);
1355 vdo_log_error_strerror(completion->result,
1364 vdo_enter_read_only_mode(completion->vdo, completion->result);
1367 void handle_data_vio_error(struct vdo_completion *completion)
1369 struct data_vio *data_vio = as_data_vio(completion);
1371 if ((completion->result == VDO_READ_ONLY) || (data_vio->user_bio == NULL))
1372 enter_read_only_mode(completion);
1375 complete_data_vio(completion);
1412 data_vio->vio.completion.error_handler = error_handler;
1467 * @completion: The data_vio which has just finished its read.
1471 static void modify_for_partial_write(struct vdo_completion *completion)
1473 struct data_vio *data_vio = as_data_vio(completion);
1493 static void complete_read(struct vdo_completion *completion)
1495 struct data_vio *data_vio = as_data_vio(completion);
1511 modify_for_partial_write(completion);
1519 complete_data_vio(completion);
1537 static void complete_zero_read(struct vdo_completion *completion)
1539 struct data_vio *data_vio = as_data_vio(completion);
1546 modify_for_partial_write(completion);
1553 complete_read(completion);
1561 static void read_block(struct vdo_completion *completion)
1563 struct data_vio *data_vio = as_data_vio(completion);
1564 struct vio *vio = as_vio(completion);
1604 reference_count_update_completion_as_data_vio(struct vdo_completion *completion)
1606 if (completion->type == VIO_COMPLETION)
1607 return as_data_vio(completion);
1609 return container_of(completion, struct data_vio, decrement_completion);
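
Source lines 1604-1609 imply that a data_vio embeds two completions: the vio's own completion and a separate decrement_completion, disambiguated by the completion's type tag so that either one can be mapped back to the same data_vio. A hedged sketch of that double embedding, with invented names and the same local container_of as in the earlier sketch:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    enum sketch_completion_type {
        SKETCH_VIO_COMPLETION,
        SKETCH_DECREMENT_COMPLETION,
    };

    struct tagged_completion {
        enum sketch_completion_type type;
    };

    struct data_vio_sketch {
        struct tagged_completion vio_completion;       /* primary */
        struct tagged_completion decrement_completion; /* for the ref-count decrement */
    };

    /* Either embedded completion resolves to the same enclosing structure. */
    static struct data_vio_sketch *from_either_completion(struct tagged_completion *c)
    {
        if (c->type == SKETCH_VIO_COMPLETION)
            return container_of(c, struct data_vio_sketch, vio_completion);

        return container_of(c, struct data_vio_sketch, decrement_completion);
    }
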
1616 * @completion: The completion of the write in progress.
1618 static void update_block_map(struct vdo_completion *completion)
1620 struct data_vio *data_vio = reference_count_update_completion_as_data_vio(completion);
1630 completion = &data_vio->vio.completion;
1631 vdo_set_completion_result(completion, data_vio->decrement_completion.result);
1632 if (completion->result != VDO_SUCCESS) {
1633 handle_data_vio_error(completion);
1637 completion->error_handler = handle_data_vio_error;
1641 completion->callback = complete_data_vio;
1647 static void decrement_reference_count(struct vdo_completion *completion)
1649 struct data_vio *data_vio = container_of(completion, struct data_vio,
1654 vdo_set_completion_callback(completion, update_block_map,
1656 completion->error_handler = update_block_map;
1657 vdo_modify_reference_count(completion, &data_vio->decrement_updater);
1660 static void increment_reference_count(struct vdo_completion *completion)
1662 struct data_vio *data_vio = as_data_vio(completion);
1678 completion->error_handler = update_block_map;
1679 vdo_modify_reference_count(completion, &data_vio->increment_updater);
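
Note that decrement_reference_count() and increment_reference_count() both install update_block_map as the error handler as well as the next callback, so success and failure reconverge on the same step, which then inspects the recorded result (as update_block_map does around source lines 1630-1633). A tiny sketch of that convergence pattern, with stand-in types and a hypothetical launcher:

    struct step_completion {
        int result; /* 0 on success */
        void (*callback)(struct step_completion *c);
        void (*error_handler)(struct step_completion *c);
    };

    static void rendezvous_step(struct step_completion *c);

    /* Both outcomes of the async sub-operation land on rendezvous_step(). */
    static void launch_sub_operation(struct step_completion *c)
    {
        c->callback = rendezvous_step;
        c->error_handler = rendezvous_step;
        /* start_async_work(c); -- hypothetical */
    }

    static void rendezvous_step(struct step_completion *c)
    {
        if (c->result != 0) {
            /* fold the error into the main completion and bail out */
            return;
        }
        /* otherwise continue with the block map update */
    }
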
1683 static void journal_remapping(struct vdo_completion *completion)
1685 struct data_vio *data_vio = as_data_vio(completion);
1709 vdo_add_recovery_journal_entry(completion->vdo->recovery_journal, data_vio);
1718 static void read_old_block_mapping(struct vdo_completion *completion)
1720 struct data_vio *data_vio = as_data_vio(completion);
1746 static void pack_compressed_data(struct vdo_completion *completion)
1748 struct data_vio *data_vio = as_data_vio(completion);
1767 static void compress_data_vio(struct vdo_completion *completion)
1769 struct data_vio *data_vio = as_data_vio(completion);
1838 static void hash_data_vio(struct vdo_completion *completion)
1840 struct data_vio *data_vio = as_data_vio(completion);
1877 vdo_set_completion_result(&data_vio->vio.completion,
1926 static void acknowledge_write_callback(struct vdo_completion *completion)
1928 struct data_vio *data_vio = as_data_vio(completion);
1929 struct vdo *vdo = completion->vdo;
1951 static void allocate_block(struct vdo_completion *completion)
1953 struct data_vio *data_vio = as_data_vio(completion);
1960 completion->error_handler = handle_data_vio_error;
1982 static void handle_allocation_error(struct vdo_completion *completion)
1984 struct data_vio *data_vio = as_data_vio(completion);
1986 if (completion->result == VDO_NO_SPACE) {
1988 vdo_reset_completion(completion);
1989 completion->error_handler = handle_data_vio_error;
1995 handle_data_vio_error(completion);
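
handle_allocation_error() above singles out VDO_NO_SPACE: the completion is reset so the write can continue without a fresh physical block, while every other error falls through to handle_data_vio_error(). A stand-alone sketch of that branch, with invented error codes and names:

    enum sketch_status {
        SKETCH_SUCCESS = 0,
        SKETCH_NO_SPACE,
        SKETCH_IO_ERROR,
    };

    struct alloc_completion {
        enum sketch_status result;
        void (*error_handler)(struct alloc_completion *c);
    };

    static void fail_request(struct alloc_completion *c)
    {
        (void)c; /* report the error to the issuer */
    }

    static void on_allocation_error(struct alloc_completion *c)
    {
        if (c->result == SKETCH_NO_SPACE) {
            /* Recoverable: clear the result and take the no-allocation path. */
            c->result = SKETCH_SUCCESS;
            c->error_handler = fail_request;
            /* continue_write_without_allocation(c); -- hypothetical */
            return;
        }

        fail_request(c);
    }
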
2011 void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
2013 struct data_vio *data_vio = as_data_vio(completion);
2030 completion->callback = complete_data_vio;