Lines matching refs:completion

58  *   completion is required for further work to be done by the issuer.
101 * completion will be enqueued on a cpu queue. This obviates the need for the releasing threads to
105 * Whenever the pool's completion is run on a cpu thread, it calls process_release_callback() which
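
The comment fragments above (source lines 58-105) describe the release path: instead of every releasing thread taking the pool's lock, released data_vios are pushed onto the pool's funnel queue and a single completion is enqueued on a cpu queue to drain them in batches. Below is a minimal, self-contained sketch of that batching shape; every type and name in it is a hypothetical stand-in, not the vdo API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for an entry on the pool's release queue. */
struct released_vio {
	struct released_vio *next;
	int id;
};

struct pool_sketch {
	struct released_vio *_Atomic released; /* lock-free push list */
	atomic_bool callback_enqueued;         /* at most one enqueue */
};

/* Any thread may release without taking a pool lock. */
static void release_vio(struct pool_sketch *pool, struct released_vio *vio)
{
	vio->next = atomic_load(&pool->released);
	while (!atomic_compare_exchange_weak(&pool->released, &vio->next, vio))
		;

	/* Enqueue the pool's completion only if it is not already queued. */
	bool expected = false;
	if (atomic_compare_exchange_strong(&pool->callback_enqueued,
					   &expected, true))
		printf("enqueue pool completion on a cpu queue\n");
}

/* Stands in for process_release_callback(): drain a batch on the cpu thread. */
static void process_releases(struct pool_sketch *pool)
{
	struct released_vio *vio;

	atomic_store(&pool->callback_enqueued, false);
	for (vio = atomic_exchange(&pool->released, NULL); vio != NULL;
	     vio = vio->next)
		printf("returning vio %d to the pool\n", vio->id);
}

int main(void)
{
	struct pool_sketch pool = { .released = NULL,
				    .callback_enqueued = false };
	struct released_vio a = { .id = 1 }, b = { .id = 2 };

	release_vio(&pool, &a);
	release_vio(&pool, &b);
	process_releases(&pool);
	return 0;
}

Clearing callback_enqueued before draining, as above, is what lets a release that races with the drain re-enqueue the callback instead of being lost.
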
160 struct vdo_completion completion;
215 as_data_vio_pool(struct vdo_completion *completion)
217 vdo_assert_completion_type(completion, VDO_DATA_VIO_POOL_COMPLETION);
218 return container_of(completion, struct data_vio_pool, completion);
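
as_data_vio_pool() (source lines 215-218 above) recovers the pool from its embedded completion, and the as_data_vio()/as_vio() helpers used throughout the rest of this listing work the same way. A simplified sketch of the underlying container_of() idiom follows, with hypothetical struct names; the kernel's real macro adds compile-time type checking.

#include <stddef.h>

/* Simplified container_of(); the types below are hypothetical. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct completion_sketch { int type; int result; };

struct pool_sketch {
	int limit;
	struct completion_sketch completion; /* embedded, as in data_vio_pool */
};

static struct pool_sketch *pool_from_completion(struct completion_sketch *c)
{
	/*
	 * Sound only because every pool completion lives inside a pool;
	 * the real helper asserts the completion's type first.
	 */
	return container_of(c, struct pool_sketch, completion);
}
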
275 int error = vdo_status_to_errno(data_vio->vio.completion.result);
414 * @completion: The data_vio for an external data request as a completion.
418 static void attempt_logical_block_lock(struct vdo_completion *completion)
420 struct data_vio *data_vio = as_data_vio(completion);
463 complete_data_vio(completion);
487 struct vdo_completion *completion = &data_vio->vio.completion;
504 vdo_reset_completion(completion);
505 completion->error_handler = handle_data_vio_error;
507 vdo_enqueue_completion(completion, VDO_DEFAULT_Q_MAP_BIO_PRIORITY);
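
Source lines 487-507 above show the launch idiom used for each stage of a data_vio's life: the embedded completion is reset, handle_data_vio_error is installed as the error handler, and the completion is enqueued at a given priority. The following is a hypothetical sketch of how such a completion might dispatch when it runs, not the vdo implementation.

struct sk_completion;
typedef void (*sk_action)(struct sk_completion *);

struct sk_completion {
	int result;              /* 0 == success */
	sk_action callback;      /* next stage of the operation */
	sk_action error_handler; /* cleanup path on failure */
};

/* Reset before reuse, analogous to vdo_reset_completion(). */
static void sk_reset(struct sk_completion *c)
{
	c->result = 0;
	c->callback = NULL;
	c->error_handler = NULL;
}

/* When the completion runs, an already-recorded error diverts it to
 * the error handler instead of the next-stage callback. */
static void sk_run(struct sk_completion *c)
{
	if ((c->result != 0) && (c->error_handler != NULL))
		c->error_handler(c);
	else if (c->callback != NULL)
		c->callback(c);
}
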
588 launch_bio(limiter->pool->completion.vdo, data_vio, bio);
665 pool->completion.requeue = true;
666 vdo_launch_completion_with_priority(&pool->completion,
695 * @completion: The pool with data_vios to release.
697 static void process_release_callback(struct vdo_completion *completion)
699 struct data_vio_pool *pool = as_data_vio_pool(completion);
863 vdo_initialize_completion(&pool->completion, vdo, VDO_DATA_VIO_POOL_COMPLETION);
864 vdo_prepare_completion(&pool->completion, process_release_callback,
982 launch_bio(pool->completion.vdo, data_vio, bio);
1007 * @completion: The completion to notify when the pool has drained.
1009 void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
1011 assert_on_vdo_cpu_thread(completion->vdo, __func__);
1012 vdo_start_draining(&pool->state, VDO_ADMIN_STATE_SUSPENDING, completion,
1018 * @completion: The completion to notify when the pool has resumed.
1020 void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
1022 assert_on_vdo_cpu_thread(completion->vdo, __func__);
1023 vdo_continue_completion(completion, vdo_resume_if_quiescent(&pool->state));
1157 static void release_allocated_lock(struct vdo_completion *completion)
1159 struct data_vio *data_vio = as_data_vio(completion);
1229 next_lock_holder->vio.completion.requeue = true;
1237 static void release_logical_lock(struct vdo_completion *completion)
1239 struct data_vio *data_vio = as_data_vio(completion);
1254 static void clean_hash_lock(struct vdo_completion *completion)
1256 struct data_vio *data_vio = as_data_vio(completion);
1259 if (completion->result != VDO_SUCCESS) {
1276 struct vdo_completion *completion = &data_vio->vio.completion;
1283 (completion->result != VDO_SUCCESS)) {
1284 struct data_vio_pool *pool = completion->vdo->data_vio_pool;
1286 vdo_funnel_queue_put(pool->queue, &completion->work_queue_entry_link);
1296 completion->requeue = true;
1325 (data_vio->vio.completion.result != VDO_READ_ONLY))
1338 void complete_data_vio(struct vdo_completion *completion)
1340 struct data_vio *data_vio = as_data_vio(completion);
1342 completion->error_handler = NULL;
1348 static void enter_read_only_mode(struct vdo_completion *completion)
1350 if (vdo_is_read_only(completion->vdo))
1353 if (completion->result != VDO_READ_ONLY) {
1354 struct data_vio *data_vio = as_data_vio(completion);
1356 vdo_log_error_strerror(completion->result,
1365 vdo_enter_read_only_mode(completion->vdo, completion->result);
1368 void handle_data_vio_error(struct vdo_completion *completion)
1370 struct data_vio *data_vio = as_data_vio(completion);
1372 if ((completion->result == VDO_READ_ONLY) || (data_vio->user_bio == NULL))
1373 enter_read_only_mode(completion);
1376 complete_data_vio(completion);
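
handle_data_vio_error() (source lines 1368-1376) shows the error split: a VDO_READ_ONLY result, or any error on I/O with no user bio behind it, pushes the whole device into read-only mode, and the data_vio is then completed so its issuer is not left waiting. A hypothetical sketch of that shape:

struct dvio_sketch {
	int result;     /* SK_READ_ONLY below is a stand-in value */
	void *user_bio; /* NULL for vdo-internal work */
};

enum { SK_READ_ONLY = -1 };

static void sk_enter_read_only_mode(struct dvio_sketch *d) { (void)d; }
static void sk_complete(struct dvio_sketch *d) { (void)d; }

static void sk_handle_error(struct dvio_sketch *d)
{
	/* Errors that cannot be attributed to a user request, and
	 * explicit read-only results, make the device read-only. */
	if ((d->result == SK_READ_ONLY) || (d->user_bio == NULL))
		sk_enter_read_only_mode(d);

	/* The request is still completed so its issuer is unblocked. */
	sk_complete(d);
}
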
1413 data_vio->vio.completion.error_handler = error_handler;
1468 * @completion: The data_vio which has just finished its read.
1472 static void modify_for_partial_write(struct vdo_completion *completion)
1474 struct data_vio *data_vio = as_data_vio(completion);
1494 static void complete_read(struct vdo_completion *completion)
1496 struct data_vio *data_vio = as_data_vio(completion);
1512 modify_for_partial_write(completion);
1520 complete_data_vio(completion);
1538 static void complete_zero_read(struct vdo_completion *completion)
1540 struct data_vio *data_vio = as_data_vio(completion);
1547 modify_for_partial_write(completion);
1554 complete_read(completion);
1562 static void read_block(struct vdo_completion *completion)
1564 struct data_vio *data_vio = as_data_vio(completion);
1565 struct vio *vio = as_vio(completion);
1605 reference_count_update_completion_as_data_vio(struct vdo_completion *completion)
1607 if (completion->type == VIO_COMPLETION)
1608 return as_data_vio(completion);
1610 return container_of(completion, struct data_vio, decrement_completion);
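
A data_vio embeds two completions, vio.completion and decrement_completion, so the reference-count increment and decrement can be issued as separate operations; update_block_map() later merges the decrement's result back into the main completion (source lines 1631-1633 below). The helper at lines 1605-1610 uses the completion type to pick the right container_of(). A sketch of the same disambiguation over hypothetical types:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

enum sk_type { SK_VIO, SK_DECREMENT };

struct sk_completion { enum sk_type type; int result; };

/* Two completions embedded in one object, as in struct data_vio. */
struct sk_dvio {
	struct sk_completion vio_completion;       /* type SK_VIO */
	struct sk_completion decrement_completion; /* type SK_DECREMENT */
};

static struct sk_dvio *sk_dvio_from_either(struct sk_completion *c)
{
	if (c->type == SK_VIO)
		return container_of(c, struct sk_dvio, vio_completion);
	return container_of(c, struct sk_dvio, decrement_completion);
}
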
1617 * @completion: The completion of the write in progress.
1619 static void update_block_map(struct vdo_completion *completion)
1621 struct data_vio *data_vio = reference_count_update_completion_as_data_vio(completion);
1631 completion = &data_vio->vio.completion;
1632 vdo_set_completion_result(completion, data_vio->decrement_completion.result);
1633 if (completion->result != VDO_SUCCESS) {
1634 handle_data_vio_error(completion);
1638 completion->error_handler = handle_data_vio_error;
1642 completion->callback = complete_data_vio;
1648 static void decrement_reference_count(struct vdo_completion *completion)
1650 struct data_vio *data_vio = container_of(completion, struct data_vio,
1655 vdo_set_completion_callback(completion, update_block_map,
1657 completion->error_handler = update_block_map;
1658 vdo_modify_reference_count(completion, &data_vio->decrement_updater);
1661 static void increment_reference_count(struct vdo_completion *completion)
1663 struct data_vio *data_vio = as_data_vio(completion);
1679 completion->error_handler = update_block_map;
1680 vdo_modify_reference_count(completion, &data_vio->increment_updater);
1684 static void journal_remapping(struct vdo_completion *completion)
1686 struct data_vio *data_vio = as_data_vio(completion);
1710 vdo_add_recovery_journal_entry(completion->vdo->recovery_journal, data_vio);
1719 static void read_old_block_mapping(struct vdo_completion *completion)
1721 struct data_vio *data_vio = as_data_vio(completion);
1747 static void pack_compressed_data(struct vdo_completion *completion)
1749 struct data_vio *data_vio = as_data_vio(completion);
1768 static void compress_data_vio(struct vdo_completion *completion)
1770 struct data_vio *data_vio = as_data_vio(completion);
1839 static void hash_data_vio(struct vdo_completion *completion)
1841 struct data_vio *data_vio = as_data_vio(completion);
1878 vdo_set_completion_result(&data_vio->vio.completion,
1927 static void acknowledge_write_callback(struct vdo_completion *completion)
1929 struct data_vio *data_vio = as_data_vio(completion);
1930 struct vdo *vdo = completion->vdo;
1952 static void allocate_block(struct vdo_completion *completion)
1954 struct data_vio *data_vio = as_data_vio(completion);
1961 completion->error_handler = handle_data_vio_error;
1983 static void handle_allocation_error(struct vdo_completion *completion)
1985 struct data_vio *data_vio = as_data_vio(completion);
1987 if (completion->result == VDO_NO_SPACE) {
1989 vdo_reset_completion(completion);
1990 completion->error_handler = handle_data_vio_error;
1996 handle_data_vio_error(completion);
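
handle_allocation_error() (source lines 1983-1996) treats out-of-space specially: a VDO_NO_SPACE result does not fail the write, presumably because the data may still be stored without a new allocation (for example by deduplicating against an existing block), so the completion is reset and the write continues; any other result goes to handle_data_vio_error(). A hypothetical sketch of that split:

enum { SK_OK, SK_NO_SPACE, SK_IO_ERROR };

struct sk_completion { int result; };

static void sk_continue_without_allocation(struct sk_completion *c)
{
	(void)c; /* e.g. try to dedupe against an already-stored block */
}

static void sk_fail_request(struct sk_completion *c)
{
	(void)c;
}

/* Runs only as the error handler, so result is already an error. */
static void sk_handle_allocation_error(struct sk_completion *c)
{
	if (c->result == SK_NO_SPACE) {
		/* Recoverable: clear the error and continue the write
		 * down the no-allocation path. */
		c->result = SK_OK;
		sk_continue_without_allocation(c);
		return;
	}

	sk_fail_request(c); /* any other error fails the request */
}
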
2012 void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
2014 struct data_vio *data_vio = as_data_vio(completion);
2031 completion->callback = complete_data_vio;