Lines Matching refs:completion

16 #include "completion.h"
55 /* The completion header */
56 struct vdo_completion completion;
190 * as_repair_completion() - Convert a generic completion to a repair_completion.
191 * @completion: The completion to convert.
196 as_repair_completion(struct vdo_completion *completion)
198 vdo_assert_completion_type(completion, VDO_REPAIR_COMPLETION);
199 return container_of(completion, struct repair_completion, completion);
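The as_repair_completion() hits above show the usual embedded-completion downcast: struct repair_completion embeds a vdo_completion (the "completion header" at line 56), and container_of() recovers the enclosing structure after the type assertion. A minimal standalone sketch of the same pattern, using simplified stand-in types rather than the real VDO definitions:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Recover the address of the enclosing structure from a pointer to a member. */
#define container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

enum completion_type { REPAIR_COMPLETION_TYPE };

/* Stand-in for struct vdo_completion. */
struct completion {
	enum completion_type type;
	int result;
};

/* Stand-in for struct repair_completion; the embedded completion is its header. */
struct repair_completion {
	struct completion completion;
	unsigned int page_count;
};

static struct repair_completion *as_repair(struct completion *completion)
{
	/* Plays the role of vdo_assert_completion_type(): refuse the wrong kind. */
	assert(completion->type == REPAIR_COMPLETION_TYPE);
	return container_of(completion, struct repair_completion, completion);
}

int main(void)
{
	struct repair_completion repair = {
		.completion = { .type = REPAIR_COMPLETION_TYPE },
		.page_count = 3,
	};

	printf("page_count = %u\n", as_repair(&repair.completion)->page_count);
	return 0;
}

The type check is what keeps a completion of some other kind from being silently reinterpreted as a repair_completion.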
205 struct vdo_completion *completion = &repair->completion;
206 const struct thread_config *thread_config = &completion->vdo->thread_config;
213 vdo_reset_completion(completion);
214 vdo_set_completion_callback(completion, callback, thread_id);
221 vdo_launch_completion(&repair->completion);
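The prepare/launch hits (vdo_reset_completion(), vdo_set_completion_callback(), vdo_launch_completion() at lines 213-221) outline how the repair advances: each stage re-arms the single embedded completion with the next callback and its target thread, then launches it. A simplified, single-threaded sketch of that re-arm-and-launch idiom; the types are invented and the synchronous launch() merely stands in for enqueueing onto the callback's thread:

#include <stdio.h>

struct completion;
typedef void (*completion_callback)(struct completion *completion);

/* Stand-in for struct vdo_completion: just enough state for the idiom. */
struct completion {
	completion_callback callback;  /* the next stage to run */
	int thread_id;                 /* thread the callback should run on */
	int result;
};

/* Plays the role of vdo_reset_completion() + vdo_set_completion_callback(). */
static void prepare(struct completion *completion,
		    completion_callback callback, int thread_id)
{
	completion->result = 0;
	completion->callback = callback;
	completion->thread_id = thread_id;
}

/* In the kernel this would enqueue onto the target thread; here we just call. */
static void launch(struct completion *completion)
{
	completion->callback(completion);
}

static void second_stage(struct completion *completion)
{
	printf("second stage on thread %d\n", completion->thread_id);
}

static void first_stage(struct completion *completion)
{
	printf("first stage on thread %d\n", completion->thread_id);

	/* Re-arm the same completion for the next stage and launch it again. */
	prepare(completion, second_stage, 1);
	launch(completion);
}

int main(void)
{
	struct completion completion = { 0 };

	prepare(&completion, first_stage, 0);
	launch(&completion);
	return 0;
}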
241 repair->completion.vdo->block_map->zones[0].page_cache.rebuilding = false;
249 static void finish_repair(struct vdo_completion *completion)
251 struct vdo_completion *parent = completion->parent;
252 struct vdo *vdo = completion->vdo;
253 struct repair_completion *repair = as_repair_completion(completion);
277 * Now that we've freed the repair completion and its vast array of journal entries, we
285 * @completion: The repair completion.
287 static void abort_repair(struct vdo_completion *completion)
289 struct vdo_completion *parent = completion->parent;
290 int result = completion->result;
291 struct repair_completion *repair = as_repair_completion(completion);
293 if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state))
305 * @repair: The repair completion.
314 vdo_fail_completion(&repair->completion, result);
322 static void drain_slab_depot(struct vdo_completion *completion)
324 struct vdo *vdo = completion->vdo;
325 struct repair_completion *repair = as_repair_completion(completion);
340 vdo_drain_slab_depot(vdo->depot, operation, completion);
345 * @completion: The repair completion.
349 static void flush_block_map_updates(struct vdo_completion *completion)
351 vdo_assert_on_admin_thread(completion->vdo, __func__);
354 prepare_repair_completion(as_repair_completion(completion), drain_slab_depot,
356 vdo_drain_block_map(completion->vdo->block_map, VDO_ADMIN_STATE_RECOVERING,
357 completion);
361 struct vdo_completion *completion);
365 * @completion: The vdo_page_completion.
367 static void handle_page_load_error(struct vdo_completion *completion)
369 struct repair_completion *repair = completion->parent;
372 vdo_set_completion_result(&repair->completion, completion->result);
373 vdo_release_page_completion(completion);
374 fetch_page(repair, completion);
380 * @completion: The page_completion for writing the page
383 static void unmap_entry(struct block_map_page *page, struct vdo_completion *completion,
387 vdo_request_page_write(completion);
393 * @completion: The page_completion for writing the page
397 struct vdo_completion *completion,
406 unmap_entry(page, completion, slot);
413 * @completion: The page_completion for writing the page
418 static bool process_slot(struct block_map_page *page, struct vdo_completion *completion,
421 struct slab_depot *depot = completion->vdo->depot;
427 unmap_entry(page, completion, slot);
443 unmap_entry(page, completion, slot);
456 unmap_entry(page, completion, slot);
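The unmap_entry()/process_slot() hits (lines 383-456) suggest the rebuild walks every slot of a leaf block-map page, clears any mapping whose PBN the slab depot does not recognize as a data block, and requests a write of the modified page. A standalone sketch of that slot-scrubbing idea, with an invented page layout and a simple range check standing in for the depot lookup (vdo_is_physical_data_block() in the real code):

#include <stdbool.h>
#include <stdio.h>

#define SLOTS_PER_PAGE 8

/* Invented leaf-page layout: 0 means "unmapped". */
struct block_map_page {
	unsigned long entries[SLOTS_PER_PAGE];
	bool dirty;   /* page must be written back */
};

/* Stand-in for the depot check: only PBNs in [first, last] hold user data. */
static bool is_data_block(unsigned long pbn,
			  unsigned long first, unsigned long last)
{
	return (pbn >= first) && (pbn <= last);
}

/* Clear one bogus mapping and remember that the page needs rewriting. */
static void unmap_entry(struct block_map_page *page, unsigned int slot)
{
	page->entries[slot] = 0;
	page->dirty = true;
}

static void scrub_page(struct block_map_page *page,
		       unsigned long first, unsigned long last)
{
	unsigned int slot;

	for (slot = 0; slot < SLOTS_PER_PAGE; slot++) {
		unsigned long pbn = page->entries[slot];

		if ((pbn != 0) && !is_data_block(pbn, first, last))
			unmap_entry(page, slot);
	}
}

int main(void)
{
	struct block_map_page page = {
		.entries = { 0, 100, 9999, 0, 250, 3, 0, 101 },
	};

	scrub_page(&page, 100, 300);
	printf("dirty=%d\n", page.dirty);
	return 0;
}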
462 * @repair: The repair completion.
463 * @completion: The page completion holding the page.
466 struct vdo_completion *completion)
472 result = vdo_get_cached_page(completion, &page);
474 vdo_set_completion_result(&repair->completion, result);
484 remove_out_of_bounds_entries(page, completion, last_slot);
491 if (process_slot(page, completion, slot))
498 * @completion: The vdo_page_completion for the fetched page.
502 static void page_loaded(struct vdo_completion *completion)
504 struct repair_completion *repair = completion->parent;
507 rebuild_reference_counts_from_page(repair, completion);
508 vdo_release_page_completion(completion);
511 fetch_page(repair, completion);
519 if (repair->completion.result != VDO_SUCCESS)
525 if (vdo_is_physical_data_block(repair->completion.vdo->depot, pbn))
528 vdo_set_completion_result(&repair->completion, VDO_BAD_MAPPING);
535 * @completion: The page completion to use.
540 struct vdo_completion *completion)
542 struct vdo_page_completion *page_completion = (struct vdo_page_completion *) completion;
543 struct block_map *block_map = repair->completion.vdo->block_map;
565 * @completion: The repair completion.
571 static void rebuild_from_leaves(struct vdo_completion *completion)
574 struct repair_completion *repair = as_repair_completion(completion);
575 struct block_map *map = completion->vdo->block_map;
592 if (fetch_page(repair, &repair->page_completions[i].completion)) {
605 * @completion: The parent completion of the traversal.
611 static int process_entry(physical_block_number_t pbn, struct vdo_completion *completion)
613 struct repair_completion *repair = as_repair_completion(completion);
614 struct slab_depot *depot = completion->vdo->depot;
635 static void rebuild_reference_counts(struct vdo_completion *completion)
637 struct repair_completion *repair = as_repair_completion(completion);
638 struct vdo *vdo = completion->vdo;
653 vdo_traverse_forest(vdo->block_map, process_entry, completion);
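rebuild_reference_counts()/process_entry() (lines 611-653) indicate that reference counts are re-derived by traversal: vdo_traverse_forest() visits block-map tree pages and process_entry() adjusts the count for each visited PBN in the depot. A toy sketch of that counting step, using a flat array of counters in place of the slab depot; the visited PBNs are invented for the example:

#include <stdio.h>

#define PHYSICAL_BLOCKS 16

/* Toy stand-in for the slab depot's per-block reference counts. */
static unsigned int reference_counts[PHYSICAL_BLOCKS];

static int adjust_reference_count(unsigned long pbn)
{
	if (pbn >= PHYSICAL_BLOCKS)
		return -1;   /* an out-of-range PBN; the real code reports an error */

	reference_counts[pbn]++;
	return 0;
}

int main(void)
{
	/* Pretend these PBNs were encountered while traversing tree pages. */
	static const unsigned long visited[] = { 2, 5, 5, 7 };
	unsigned int i;

	for (i = 0; i < sizeof(visited) / sizeof(visited[0]); i++) {
		if (adjust_reference_count(visited[i]) != 0)
			return 1;
	}

	printf("pbn 5 refcount = %u\n", reference_counts[5]);
	return 0;
}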
729 * @repair: The repair completion.
739 sector = get_sector(repair->completion.vdo->recovery_journal,
790 * @completion: The allocator completion.
794 static void add_slab_journal_entries(struct vdo_completion *completion)
797 struct repair_completion *repair = completion->parent;
798 struct vdo *vdo = completion->vdo;
800 struct block_allocator *allocator = vdo_as_block_allocator(completion);
803 vdo_prepare_completion(completion, add_slab_journal_entries,
805 completion->callback_thread_id, repair);
819 vdo_fail_completion(completion, result);
837 completion))
843 vdo_notify_slab_journals_are_recovered(completion);
854 struct vdo_completion *completion = &allocator->completion;
856 struct vdo *vdo = completion->vdo;
863 vdo_notify_slab_journals_are_recovered(completion);
880 completion->parent = repair;
881 add_slab_journal_entries(completion);
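The add_slab_journal_entries() hits (lines 790-881) come from the replay loop that runs once per block allocator: as I read it, each recovery-journal entry is routed to the slab journal of the slab that owns the entry's PBN, and the allocator is notified once its entries are exhausted. A toy sketch of just the routing step, with fixed-size slabs and plain arrays standing in for the real slab journals:

#include <stdio.h>

#define SLAB_SIZE 8
#define SLAB_COUNT 4
#define MAX_ENTRIES 16

/* Toy slab journal: just an ordered list of replayed PBNs. */
struct slab_journal {
	unsigned long entries[MAX_ENTRIES];
	unsigned int count;
};

static struct slab_journal journals[SLAB_COUNT];

/* Route one recovery-journal entry to the slab journal owning its PBN. */
static int replay_entry(unsigned long pbn)
{
	unsigned int slab = (unsigned int) (pbn / SLAB_SIZE);

	if (slab >= SLAB_COUNT)
		return -1;   /* not a data block this depot knows about */

	journals[slab].entries[journals[slab].count++] = pbn;
	return 0;
}

int main(void)
{
	static const unsigned long recovery_entries[] = { 3, 9, 17, 9, 30 };
	unsigned int i;

	for (i = 0; i < sizeof(recovery_entries) / sizeof(recovery_entries[0]); i++) {
		if (replay_entry(recovery_entries[i]) != 0)
			return 1;
	}

	for (i = 0; i < SLAB_COUNT; i++)
		printf("slab %u: %u entries\n", i, journals[i].count);
	return 0;
}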
884 static void load_slab_depot(struct vdo_completion *completion)
886 struct repair_completion *repair = as_repair_completion(completion);
889 vdo_assert_on_admin_thread(completion->vdo, __func__);
891 if (vdo_state_requires_read_only_rebuild(completion->vdo->load_state)) {
900 vdo_load_slab_depot(completion->vdo->depot, operation, completion, repair);
903 static void flush_block_map(struct vdo_completion *completion)
905 struct repair_completion *repair = as_repair_completion(completion);
908 vdo_assert_on_admin_thread(completion->vdo, __func__);
912 operation = (vdo_state_requires_read_only_rebuild(completion->vdo->load_state) ?
915 vdo_drain_block_map(completion->vdo->block_map, operation, completion);
924 if (repair->completion.result != VDO_SUCCESS) {
932 vdo_release_page_completion(&page_completion->completion);
935 vdo_launch_completion(&repair->completion);
948 vdo_set_completion_result(&repair->completion, result);
1006 struct vdo_completion *completion);
1008 static void block_map_page_loaded(struct vdo_completion *completion)
1010 struct repair_completion *repair = as_repair_completion(completion->parent);
1014 recover_ready_pages(repair, completion);
1017 static void handle_block_map_page_load_error(struct vdo_completion *completion)
1019 struct repair_completion *repair = as_repair_completion(completion->parent);
1022 abort_block_map_recovery(repair, completion->result);
1026 struct vdo_completion *completion)
1040 vdo_get_page(((struct vdo_page_completion *) completion),
1041 &repair->completion.vdo->block_map->zones[0], pbn, true,
1042 &repair->completion, block_map_page_loaded,
1047 struct vdo_page_completion *completion)
1049 completion++;
1050 if (completion == (&repair->page_completions[repair->page_count]))
1051 completion = &repair->page_completions[0];
1052 return completion;
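get_next_page_completion() (lines 1047-1052) shows the block-map recovery treating its small pool of page completions as a ring: advance to the next element and wrap back to index 0 once past page_count. The same wrap-around advance in standalone form, with a placeholder pool type and element contents:

#include <stdio.h>

#define POOL_SIZE 4

struct page_completion {
	int id;
};

/* Placeholder for the repair completion's embedded pool of page completions. */
struct pool {
	unsigned int page_count;
	struct page_completion page_completions[POOL_SIZE];
};

/* Advance to the next completion in the pool, wrapping past the last in use. */
static struct page_completion *
get_next_page_completion(struct pool *pool, struct page_completion *completion)
{
	completion++;
	if (completion == &pool->page_completions[pool->page_count])
		completion = &pool->page_completions[0];
	return completion;
}

int main(void)
{
	struct pool pool = { .page_count = POOL_SIZE };
	struct page_completion *current = &pool.page_completions[0];
	unsigned int i;

	for (i = 0; i < POOL_SIZE; i++)
		pool.page_completions[i].id = (int) i;

	/* Walk the ring a little further than its size to show the wrap. */
	for (i = 0; i < POOL_SIZE + 2; i++) {
		printf("%d ", current->id);
		current = get_next_page_completion(&pool, current);
	}
	printf("\n");
	return 0;
}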
1056 struct vdo_completion *completion)
1058 struct vdo_page_completion *page_completion = (struct vdo_page_completion *) completion;
1071 result = vdo_get_cached_page(completion, &page);
1083 vdo_request_page_write(completion);
1084 vdo_release_page_completion(completion);
1090 fetch_block_map_page(repair, completion);
1092 completion = &page_completion->completion;
1096 static void recover_block_map(struct vdo_completion *completion)
1098 struct repair_completion *repair = as_repair_completion(completion);
1099 struct vdo *vdo = completion->vdo;
1143 fetch_block_map_page(repair, &repair->page_completions[i].completion);
1148 recover_ready_pages(repair, &repair->page_completions[0].completion);
1226 struct recovery_journal *journal = repair->completion.vdo->recovery_journal;
1316 * sector to the array of numbered mappings in the repair completion,
1318 * @repair: The repair completion.
1328 struct vdo *vdo = repair->completion.vdo;
1407 struct vdo *vdo = repair->completion.vdo;
1456 struct vdo *vdo = repair->completion.vdo;
1516 struct vdo *vdo = repair->completion.vdo;
1556 struct recovery_journal *journal = repair->completion.vdo->recovery_journal;
1576 vdo_enter_read_only_mode(repair->completion.vdo,
1647 return (vdo_state_requires_read_only_rebuild(repair->completion.vdo->load_state) ?
1652 static void finish_journal_load(struct vdo_completion *completion)
1654 struct repair_completion *repair = completion->parent;
1662 vdo_continue_completion(&repair->completion, parse_journal(repair));
1665 static void handle_journal_load_error(struct vdo_completion *completion)
1667 struct repair_completion *repair = completion->parent;
1670 vdo_set_completion_result(&repair->completion, completion->result);
1671 vio_record_metadata_io_error(as_vio(completion));
1672 completion->callback(completion);
1678 struct vdo *vdo = vio->completion.vdo;
1685 * @parent: The completion to notify when the operation is complete
1720 vdo_initialize_completion(&repair->completion, vdo, VDO_REPAIR_COMPLETION);
1721 repair->completion.error_handler = abort_repair;
1722 repair->completion.parent = parent;
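The vdo_repair() hits at the end (lines 1720-1722) show the repair completion being initialized with abort_repair as its error handler and the caller's completion as its parent, so a failing stage routes into the abort path and the parent is eventually finished with the error. A minimal sketch of that callback-versus-error-handler dispatch; the completion fields and the complete()/finish_parent() helpers are assumptions for illustration, not the VDO API:

#include <stdio.h>

struct completion;
typedef void (*completion_callback)(struct completion *completion);

struct completion {
	int result;                         /* 0 on success */
	completion_callback callback;       /* next stage */
	completion_callback error_handler;  /* abort path */
	struct completion *parent;          /* completion to finish at the end */
};

/* Run the error handler when a failure has been recorded, else the callback. */
static void complete(struct completion *completion)
{
	if ((completion->result != 0) && (completion->error_handler != NULL))
		completion->error_handler(completion);
	else
		completion->callback(completion);
}

static void finish_parent(struct completion *completion, int result)
{
	completion->parent->result = result;
	complete(completion->parent);
}

static void parent_done(struct completion *completion)
{
	printf("parent finished with result %d\n", completion->result);
}

static void abort_repair(struct completion *completion)
{
	/* Propagate the recorded failure to whoever started the repair. */
	finish_parent(completion, completion->result);
}

static void next_stage(struct completion *completion)
{
	printf("stage ran\n");
	finish_parent(completion, 0);
}

int main(void)
{
	struct completion parent = { .callback = parent_done };
	struct completion repair = {
		.callback = next_stage,
		.error_handler = abort_repair,
		.parent = &parent,
	};

	repair.result = -5;   /* simulate a failed stage */
	complete(&repair);
	return 0;
}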