Lines matching refs:volume in volume.c (the UDS index volume implementation)

6 #include "volume.h"
27 * The first block of the volume layout is reserved for the volume header, which is no longer used.
28 * The remainder of the volume is divided into chapters consisting of several pages of records, and
31 * volume storage acts as a circular log of the most recent chapters, with each new chapter
38 * when a record is read, the volume only has to load a single index page and a single record page,
43 * When reading a record, the volume index will indicate which chapter should contain it. The
44 * volume uses the index page map to determine which chapter index page needs to be loaded, and
48 * addition, the volume uses dm-bufio to manage access to the storage, which may allow for
58 * When an index rebuild is necessary, the volume reads each stored chapter to determine which
60 * in-memory volume index.
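
The comment fragments above describe the layout: one reserved header block, then fixed-size chapters of index pages and record pages, written as a circular log. A minimal sketch of the page arithmetic that layout implies; the toy_ names and the single-block header offset are illustrative assumptions, not the file's actual helpers:

    #include <stdint.h>

    /* Hypothetical geometry mirroring the comments above: one reserved
     * header block, then chapters of index pages followed by record
     * pages. */
    struct toy_geometry {
            uint32_t index_pages_per_chapter;
            uint32_t record_pages_per_chapter;
            uint32_t chapters_per_volume;
    };

    /* Map (chapter, page-in-chapter) to an absolute page in the volume. */
    static uint32_t toy_map_to_physical_page(const struct toy_geometry *g,
                                             uint32_t chapter, uint32_t page)
    {
            uint32_t pages_per_chapter =
                    g->index_pages_per_chapter + g->record_pages_per_chapter;

            /* Page 0 is the (no longer used) volume header. */
            return 1 + (chapter * pages_per_chapter) + page;
    }

    /* The circular log: virtual chapter numbers grow without bound but
     * wrap onto a fixed set of physical chapter slots. */
    static uint32_t toy_map_to_physical_chapter(const struct toy_geometry *g,
                                                uint64_t virtual_chapter)
    {
            return (uint32_t)(virtual_chapter % g->chapters_per_volume);
    }
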
355 static void enqueue_page_read(struct volume *volume, struct uds_request *request,
359 while (!enqueue_read(&volume->page_cache, request, physical_page)) {
361 uds_wait_cond(&volume->read_threads_read_done_cond,
362 &volume->read_threads_mutex);
365 uds_signal_cond(&volume->read_threads_cond);
402 static inline struct queued_read *wait_to_reserve_read_queue_entry(struct volume *volume)
406 while (!volume->read_threads_exiting) {
407 queue_entry = reserve_read_queue_entry(&volume->page_cache);
411 uds_wait_cond(&volume->read_threads_cond, &volume->read_threads_mutex);
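
enqueue_page_read() and wait_to_reserve_read_queue_entry() above form a two-condition handshake: a full queue makes the enqueuer sleep on read_threads_read_done_cond, an empty queue makes readers sleep on read_threads_cond, and each side wakes the other. A standalone pthread sketch of that handshake, with illustrative toy_ names:

    #include <pthread.h>
    #include <stdbool.h>

    struct toy_read_queue {
            pthread_mutex_t mutex;
            pthread_cond_t work_cond;   /* like read_threads_cond */
            pthread_cond_t done_cond;   /* like read_threads_read_done_cond */
            unsigned int queued;
            unsigned int capacity;
            bool exiting;
    };

    /* Producer, called with the mutex held, like enqueue_page_read(). */
    static void toy_enqueue(struct toy_read_queue *q)
    {
            while (q->queued == q->capacity)
                    pthread_cond_wait(&q->done_cond, &q->mutex);
            q->queued++;
            pthread_cond_signal(&q->work_cond);
    }

    /* Consumer, called with the mutex held, like
     * wait_to_reserve_read_queue_entry(); false means shutdown. */
    static bool toy_reserve(struct toy_read_queue *q)
    {
            while (!q->exiting && q->queued == 0)
                    pthread_cond_wait(&q->work_cond, &q->mutex);
            return !q->exiting;
    }

    /* Completion, like release_queued_requests(): free a slot and wake
     * any enqueuer stuck on a full queue. */
    static void toy_complete(struct toy_read_queue *q)
    {
            q->queued--;
            pthread_cond_broadcast(&q->done_cond);
    }
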
417 static int init_chapter_index_page(const struct volume *volume, u8 *index_page,
425 struct index_geometry *geometry = volume->geometry;
429 index_page, volume->nonce);
430 if (volume->lookup_mode == LOOKUP_FOR_REBUILD)
439 uds_get_list_number_bounds(volume->index_page_map, chapter, index_page_number,
449 (unsigned long long) volume->index_page_map->last_update);
459 static int initialize_index_page(const struct volume *volume, u32 physical_page,
462 u32 chapter = map_to_chapter_number(volume->geometry, physical_page);
463 u32 index_page_number = map_to_page_number(volume->geometry, physical_page);
465 return init_chapter_index_page(volume, dm_bufio_get_block_data(page->buffer),
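
initialize_index_page() above recovers the chapter and index-page number from an absolute page before delegating to init_chapter_index_page(). Continuing the earlier toy_geometry sketch (add <stdbool.h>), the inverse arithmetic looks like:

    /* Inverse of toy_map_to_physical_page(): recover the chapter and the
     * page-within-chapter from an absolute page number. */
    static uint32_t toy_map_to_chapter_number(const struct toy_geometry *g,
                                              uint32_t physical_page)
    {
            uint32_t pages_per_chapter =
                    g->index_pages_per_chapter + g->record_pages_per_chapter;

            return (physical_page - 1) / pages_per_chapter;
    }

    static uint32_t toy_map_to_page_number(const struct toy_geometry *g,
                                           uint32_t physical_page)
    {
            uint32_t pages_per_chapter =
                    g->index_pages_per_chapter + g->record_pages_per_chapter;

            return (physical_page - 1) % pages_per_chapter;
    }

    /* Record pages follow the index pages within each chapter. */
    static bool toy_is_record_page(const struct toy_geometry *g,
                                   uint32_t physical_page)
    {
            return toy_map_to_page_number(g, physical_page) >=
                   g->index_pages_per_chapter;
    }
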
506 static int search_page(struct cached_page *page, const struct volume *volume,
513 if (is_record_page(volume->geometry, physical_page)) {
515 &request->record_name, volume->geometry,
522 volume->geometry,
541 static int process_entry(struct volume *volume, struct queued_read *entry)
554 page = select_victim_in_cache(&volume->page_cache);
556 mutex_unlock(&volume->read_threads_mutex);
557 page_data = dm_bufio_read(volume->client, page_number, &page->buffer);
558 mutex_lock(&volume->read_threads_mutex);
562 "error reading physical page %u from volume",
564 cancel_page_in_cache(&volume->page_cache, page_number, page);
570 cancel_page_in_cache(&volume->page_cache, page_number, page);
574 if (!is_record_page(volume->geometry, page_number)) {
575 result = initialize_index_page(volume, page_number, page);
578 cancel_page_in_cache(&volume->page_cache, page_number, page);
583 result = put_page_in_cache(&volume->page_cache, page_number, page);
586 cancel_page_in_cache(&volume->page_cache, page_number, page);
592 result = search_page(page, volume, request, page_number);
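
process_entry() above shows the cache-fill discipline: reserve a victim slot while holding read_threads_mutex, drop the mutex across the blocking dm_bufio_read(), retake it, and cancel the reservation on any failure. A self-contained sketch of that pattern with a one-slot toy cache (the caller is assumed to hold the mutex, as in the real code):

    #include <pthread.h>
    #include <stdint.h>
    #include <string.h>

    struct toy_page {
            uint32_t number;
            char data[4096];
            int busy;
    };

    struct toy_cache {
            pthread_mutex_t mutex;  /* like read_threads_mutex */
            struct toy_page slot;   /* one slot keeps the sketch short */
    };

    /* Stand-in for dm_bufio_read(): a read that may block. */
    static int toy_blocking_read(uint32_t page_number, char *data)
    {
            (void)page_number;
            memset(data, 0, 4096);
            return 0;
    }

    static int toy_fill_slot(struct toy_cache *cache, uint32_t page_number)
    {
            int result;

            cache->slot.busy = 1;   /* reserve while the lock is held */

            pthread_mutex_unlock(&cache->mutex);
            result = toy_blocking_read(page_number, cache->slot.data);
            pthread_mutex_lock(&cache->mutex);

            if (result != 0) {
                    cache->slot.busy = 0;   /* like cancel_page_in_cache() */
                    return result;
            }

            cache->slot.number = page_number; /* like put_page_in_cache() */
            cache->slot.busy = 0;
            return 0;
    }
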
599 static void release_queued_requests(struct volume *volume, struct queued_read *entry,
602 struct page_cache *cache = &volume->page_cache;
620 uds_broadcast_cond(&volume->read_threads_read_done_cond);
625 struct volume *volume = arg;
628 mutex_lock(&volume->read_threads_mutex);
633 queue_entry = wait_to_reserve_read_queue_entry(volume);
634 if (volume->read_threads_exiting)
637 result = process_entry(volume, queue_entry);
638 release_queued_requests(volume, queue_entry, result);
640 mutex_unlock(&volume->read_threads_mutex);
693 static int read_page_locked(struct volume *volume, u32 physical_page,
700 page = select_victim_in_cache(&volume->page_cache);
701 page_data = dm_bufio_read(volume->client, physical_page, &page->buffer);
705 "error reading physical page %u from volume",
707 cancel_page_in_cache(&volume->page_cache, physical_page, page);
711 if (!is_record_page(volume->geometry, physical_page)) {
712 result = initialize_index_page(volume, physical_page, page);
714 if (volume->lookup_mode != LOOKUP_FOR_REBUILD)
716 cancel_page_in_cache(&volume->page_cache, physical_page, page);
721 result = put_page_in_cache(&volume->page_cache, physical_page, page);
724 cancel_page_in_cache(&volume->page_cache, physical_page, page);
733 static int get_volume_page_locked(struct volume *volume, u32 physical_page,
739 get_page_from_cache(&volume->page_cache, physical_page, &page);
741 result = read_page_locked(volume, physical_page, &page);
745 make_page_most_recent(&volume->page_cache, page);
753 static int get_volume_page_protected(struct volume *volume, struct uds_request *request,
758 get_page_from_cache(&volume->page_cache, physical_page, &page);
762 make_page_most_recent(&volume->page_cache, page);
770 end_pending_search(&volume->page_cache, request->zone_number);
771 mutex_lock(&volume->read_threads_mutex);
781 get_page_from_cache(&volume->page_cache, physical_page, &page);
783 enqueue_page_read(volume, request, physical_page);
789 mutex_unlock(&volume->read_threads_mutex);
790 begin_pending_search(&volume->page_cache, physical_page,
796 * Now that the page is loaded, the volume needs to switch to "reader thread unlocked" and
800 begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
801 mutex_unlock(&volume->read_threads_mutex);
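
get_volume_page_protected() above implements the "pending search" protocol the comment fragment at line 796 refers to: a zone marks the page it is searching so invalidation cannot pull it out from under the search, and on a cache miss the new marker is armed before read_threads_mutex is released. A condensed C11 sketch of that ordering (one global counter stands in for the real per-zone, per-page markers):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct toy_protected_cache {
            pthread_mutex_t mutex;        /* like read_threads_mutex */
            atomic_uint pending_searches; /* like the per-zone markers */
            uint32_t cached_page;         /* UINT32_MAX: nothing cached */
    };

    static bool toy_lookup(struct toy_protected_cache *c, uint32_t page)
    {
            return c->cached_page == page;
    }

    /* The caller has already begun a pending search, as in
     * search_cached_index_page() below. */
    static void toy_get_page_protected(struct toy_protected_cache *c,
                                       uint32_t page)
    {
            if (toy_lookup(c, page))
                    return; /* hit: the caller's marker protects the page */

            atomic_fetch_sub(&c->pending_searches, 1); /* end_pending_search */
            pthread_mutex_lock(&c->mutex);

            if (!toy_lookup(c, page))
                    c->cached_page = page; /* stand-in for the queued read */

            /* Re-arm before unlocking: invalidation needs the mutex, so
             * the page cannot vanish between unlock and search. */
            atomic_fetch_add(&c->pending_searches, 1); /* begin_pending_search */
            pthread_mutex_unlock(&c->mutex);
    }
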
806 static int get_volume_page(struct volume *volume, u32 chapter, u32 page_number,
810 u32 physical_page = map_to_physical_page(volume->geometry, chapter, page_number);
812 mutex_lock(&volume->read_threads_mutex);
813 result = get_volume_page_locked(volume, physical_page, page_ptr);
814 mutex_unlock(&volume->read_threads_mutex);
818 int uds_get_volume_record_page(struct volume *volume, u32 chapter, u32 page_number,
824 result = get_volume_page(volume, chapter, page_number, &page);
830 int uds_get_volume_index_page(struct volume *volume, u32 chapter, u32 page_number,
836 result = get_volume_page(volume, chapter, page_number, &page);
846 static int search_cached_index_page(struct volume *volume, struct uds_request *request,
852 u32 physical_page = map_to_physical_page(volume->geometry, chapter,
861 begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
863 result = get_volume_page_protected(volume, request, physical_page, &page);
865 end_pending_search(&volume->page_cache, request->zone_number);
869 result = uds_search_chapter_index_page(&page->index_page, volume->geometry,
872 end_pending_search(&volume->page_cache, request->zone_number);
880 int uds_search_cached_record_page(struct volume *volume, struct uds_request *request,
884 struct index_geometry *geometry = volume->geometry;
900 physical_page = map_to_physical_page(volume->geometry, chapter, page_number);
908 begin_pending_search(&volume->page_cache, physical_page, request->zone_number);
910 result = get_volume_page_protected(volume, request, physical_page, &record_page);
912 end_pending_search(&volume->page_cache, request->zone_number);
920 end_pending_search(&volume->page_cache, request->zone_number);
924 void uds_prefetch_volume_chapter(const struct volume *volume, u32 chapter)
926 const struct index_geometry *geometry = volume->geometry;
929 dm_bufio_prefetch(volume->client, physical_page, geometry->pages_per_chapter);
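
The three matched lines at 924-929 are nearly the whole of uds_prefetch_volume_chapter(). With them in place the function plausibly reads as below; the physical_page line is an inference from the other map_to_physical_page() call sites in this listing, not a matched line:

    void uds_prefetch_volume_chapter(const struct volume *volume, u32 chapter)
    {
            const struct index_geometry *geometry = volume->geometry;
            /* Inferred: the chapter's first absolute page. */
            u32 physical_page = map_to_physical_page(geometry, chapter, 0);

            dm_bufio_prefetch(volume->client, physical_page,
                              geometry->pages_per_chapter);
    }

A single dm-bufio prefetch covering pages_per_chapter blocks lets the whole chapter stream in ahead of the per-page reads that follow.
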
932 int uds_read_chapter_index_from_volume(const struct volume *volume, u64 virtual_chapter,
938 const struct index_geometry *geometry = volume->geometry;
942 dm_bufio_prefetch(volume->client, physical_page, geometry->index_pages_per_chapter);
946 index_page = dm_bufio_read(volume->client, physical_page + i,
956 result = init_chapter_index_page(volume, index_page, physical_chapter, i,
965 int uds_search_volume_page_cache(struct volume *volume, struct uds_request *request,
970 uds_map_to_physical_chapter(volume->geometry, request->virtual_chapter);
974 index_page_number = uds_find_index_page_number(volume->index_page_map,
981 result = search_cached_index_page(volume, request, physical_chapter,
988 return uds_search_cached_record_page(volume, request, physical_chapter,
992 int uds_search_volume_page_cache_for_rebuild(struct volume *volume,
997 struct index_geometry *geometry = volume->geometry;
1006 uds_find_index_page_number(volume->index_page_map, name,
1008 result = get_volume_page(volume, physical_chapter, index_page_number, &page);
1021 result = get_volume_page(volume, physical_chapter, page_number, &page);
1047 void uds_forget_chapter(struct volume *volume, u64 virtual_chapter)
1050 uds_map_to_physical_chapter(volume->geometry, virtual_chapter);
1051 u32 first_page = map_to_physical_page(volume->geometry, physical_chapter, 0);
1055 mutex_lock(&volume->read_threads_mutex);
1056 for (i = 0; i < volume->geometry->pages_per_chapter; i++)
1057 invalidate_page(&volume->page_cache, first_page + i);
1058 mutex_unlock(&volume->read_threads_mutex);
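
uds_forget_chapter() above is the invalidation side of the pending-search protocol: it holds read_threads_mutex while dropping every page of the chapter from the cache. Extending the toy_protected_cache sketch from earlier (in the toy, a search can only re-arm its marker under the same mutex, so only searches already in flight must drain; the real cache tracks and waits per page):

    static void toy_forget_chapter(struct toy_protected_cache *c,
                                   uint32_t first_page, uint32_t page_count)
    {
            pthread_mutex_lock(&c->mutex);

            /* Drain in-flight searches before touching their pages. The
             * real cache blocks rather than spinning. */
            while (atomic_load(&c->pending_searches) > 0)
                    ;

            for (uint32_t i = 0; i < page_count; i++) {
                    if (c->cached_page == first_page + i)
                            c->cached_page = UINT32_MAX; /* invalidate_page() */
            }

            pthread_mutex_unlock(&c->mutex);
    }
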
1065 static int donate_index_page_locked(struct volume *volume, u32 physical_chapter,
1071 map_to_physical_page(volume->geometry, physical_chapter,
1074 page = select_victim_in_cache(&volume->page_cache);
1076 result = init_chapter_index_page(volume, dm_bufio_get_block_data(page_buffer),
1081 cancel_page_in_cache(&volume->page_cache, physical_page, page);
1085 result = put_page_in_cache(&volume->page_cache, physical_page, page);
1088 cancel_page_in_cache(&volume->page_cache, physical_page, page);
1095 static int write_index_pages(struct volume *volume, u32 physical_chapter_number,
1098 struct index_geometry *geometry = volume->geometry;
1113 page_data = dm_bufio_new(volume->client, physical_page, &page_buffer);
1138 uds_update_index_page_map(volume->index_page_map,
1143 mutex_lock(&volume->read_threads_mutex);
1144 result = donate_index_page_locked(volume, physical_chapter_number,
1146 mutex_unlock(&volume->read_threads_mutex);
1180 static int encode_record_page(const struct volume *volume,
1185 u32 records_per_page = volume->geometry->records_per_page;
1186 const struct uds_volume_record **record_pointers = volume->record_pointers;
1196 result = uds_radix_sort(volume->radix_sorter, (const u8 **) record_pointers,
1205 static int write_record_pages(struct volume *volume, u32 physical_chapter_number,
1209 struct index_geometry *geometry = volume->geometry;
1222 page_data = dm_bufio_new(volume->client, physical_page, &page_buffer);
1228 result = encode_record_page(volume, next_record, page_data);
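
encode_record_page() above builds an array of pointers to the chapter's records and radix-sorts them by record name before copying them into the page, so a reader can search the page in name order. A portable sketch substituting qsort() for the radix sorter (sizes are illustrative):

    #include <stdlib.h>
    #include <string.h>

    enum { TOY_NAME_SIZE = 16, TOY_RECORDS_PER_PAGE = 256 };

    struct toy_record {
            unsigned char name[TOY_NAME_SIZE];
            unsigned char data[16];
    };

    static int toy_compare_names(const void *a, const void *b)
    {
            const struct toy_record *const *ra = a;
            const struct toy_record *const *rb = b;

            return memcmp((*ra)->name, (*rb)->name, TOY_NAME_SIZE);
    }

    /* Sort pointers, not records, then emit the records in name order. */
    static void toy_encode_record_page(const struct toy_record *records,
                                       unsigned char *page)
    {
            const struct toy_record *pointers[TOY_RECORDS_PER_PAGE];
            int i;

            for (i = 0; i < TOY_RECORDS_PER_PAGE; i++)
                    pointers[i] = &records[i];

            qsort(pointers, TOY_RECORDS_PER_PAGE, sizeof(pointers[0]),
                  toy_compare_names);

            for (i = 0; i < TOY_RECORDS_PER_PAGE; i++)
                    memcpy(page + i * sizeof(struct toy_record),
                           pointers[i], sizeof(struct toy_record));
    }
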
1244 int uds_write_chapter(struct volume *volume, struct open_chapter_index *chapter_index,
1249 uds_map_to_physical_chapter(volume->geometry,
1252 result = write_index_pages(volume, physical_chapter_number, chapter_index);
1256 result = write_record_pages(volume, physical_chapter_number, records);
1260 result = -dm_bufio_write_dirty_buffers(volume->client);
1262 vdo_log_error_strerror(result, "cannot sync chapter to volume");
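
The uds_write_chapter() matches at 1244-1262 give the write path's overall order: index pages, then record pages, then a single dm_bufio_write_dirty_buffers() flush so the whole chapter is durable on storage. A hedged reconstruction of the function's shape (the virtual_chapter_number argument and the early returns are inferred, not matched):

    int toy_write_chapter(struct volume *volume,
                          struct open_chapter_index *chapter_index,
                          const struct uds_volume_record *records)
    {
            u32 physical_chapter_number =
                    uds_map_to_physical_chapter(volume->geometry,
                                                chapter_index->virtual_chapter_number);
            int result;

            result = write_index_pages(volume, physical_chapter_number,
                                       chapter_index);
            if (result != UDS_SUCCESS)
                    return result;

            result = write_record_pages(volume, physical_chapter_number,
                                        records);
            if (result != UDS_SUCCESS)
                    return result;

            /* dm-bufio writes back lazily; force the chapter out now. */
            result = -dm_bufio_write_dirty_buffers(volume->client);
            if (result != UDS_SUCCESS)
                    vdo_log_error_strerror(result,
                                           "cannot sync chapter to volume");

            return result;
    }
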
1267 static void probe_chapter(struct volume *volume, u32 chapter_number,
1270 const struct index_geometry *geometry = volume->geometry;
1276 dm_bufio_prefetch(volume->client,
1284 result = uds_get_volume_index_page(volume, chapter_number, i, &page);
1325 /* Find the last valid physical chapter in the volume. */
1326 static void find_real_end_of_volume(struct volume *volume, u32 limit, u32 *limit_ptr)
1335 probe_chapter(volume, chapter, &vcn);
1351 static int find_chapter_limits(struct volume *volume, u32 chapter_limit, u64 *lowest_vcn,
1354 struct index_geometry *geometry = volume->geometry;
1367 * volume is cleanly saved and somewhere in the middle of it the highest VCN immediately
1372 probe_chapter(volume, 0, &zero_vcn);
1387 probe_chapter(volume, geometry->remapped_physical, &remapped_vcn);
1402 probe_chapter(volume, chapter, &probe_vcn);
1417 probe_chapter(volume, left_chapter, &lowest);
1432 probe_chapter(volume, right_chapter, &highest);
1434 vdo_log_error("too many bad chapters in volume: %u",
1446 * Find the highest and lowest contiguous chapters present in the volume and determine their
1449 int uds_find_volume_chapter_boundaries(struct volume *volume, u64 *lowest_vcn,
1452 u32 chapter_limit = volume->geometry->chapters_per_volume;
1454 find_real_end_of_volume(volume, chapter_limit, &chapter_limit);
1463 return find_chapter_limits(volume, chapter_limit, lowest_vcn, highest_vcn);
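
find_chapter_limits() above treats the volume as a circular log: virtual chapter numbers increase with the physical slot except at one wrap point where the newest chapters have overwritten the oldest, and probe_chapter() reads a slot's index pages to learn its VCN. A self-contained sketch of the binary search for that wrap point (it assumes the VCN sequence visibly drops somewhere in the slot range and every slot probes cleanly; the real code also handles an unwrapped, cleanly saved volume, the remapped chapter, and bad chapters):

    #include <stdint.h>

    /* Stand-in for probe_chapter(): the VCN stored in a physical slot. */
    typedef uint64_t (*toy_probe_fn)(uint32_t chapter);

    static uint32_t toy_find_wrap_point(toy_probe_fn probe, uint32_t chapter_limit)
    {
            uint32_t left = 0, right = chapter_limit - 1;
            uint64_t first_vcn = probe(0);

            while (left < right) {
                    uint32_t middle = left + (right - left) / 2;

                    if (probe(middle) < first_vcn)
                            right = middle;     /* wrap at or before middle */
                    else
                            left = middle + 1;  /* wrap after middle */
            }

            return left;    /* slot holding the lowest VCN */
    }

The highest VCN then lives in the slot just before the wrap point, modulo chapter_limit.
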
1466 int __must_check uds_replace_volume_storage(struct volume *volume,
1478 for (i = 0; i < volume->page_cache.indexable_pages; i++)
1479 volume->page_cache.index[i] = volume->page_cache.cache_slots;
1480 for (i = 0; i < volume->page_cache.cache_slots; i++)
1481 clear_cache_page(&volume->page_cache, &volume->page_cache.cache[i]);
1482 if (volume->sparse_cache != NULL)
1483 uds_invalidate_sparse_cache(volume->sparse_cache);
1484 if (volume->client != NULL)
1485 dm_bufio_client_destroy(vdo_forget(volume->client));
1487 return uds_open_volume_bufio(layout, volume->geometry->bytes_per_page,
1488 volume->reserved_buffers, &volume->client);
1511 "volume read queue", &cache->read_queue);
1541 struct volume **new_volume)
1544 struct volume *volume = NULL;
1549 result = vdo_allocate(1, struct volume, "volume", &volume);
1553 volume->nonce = uds_get_volume_nonce(layout);
1555 result = uds_copy_index_geometry(config->geometry, &volume->geometry);
1557 uds_free_volume(volume);
1561 geometry = volume->geometry;
1571 volume->reserved_buffers = reserved_buffers;
1573 volume->reserved_buffers, &volume->client);
1575 uds_free_volume(volume);
1580 &volume->radix_sorter);
1582 uds_free_volume(volume);
1588 &volume->record_pointers);
1590 uds_free_volume(volume);
1599 &volume->sparse_cache);
1601 uds_free_volume(volume);
1605 volume->cache_size =
1609 result = initialize_page_cache(&volume->page_cache, geometry,
1612 uds_free_volume(volume);
1616 volume->cache_size += volume->page_cache.cache_slots * sizeof(struct delta_index_page);
1617 result = uds_make_index_page_map(geometry, &volume->index_page_map);
1619 uds_free_volume(volume);
1623 mutex_init(&volume->read_threads_mutex);
1624 uds_init_cond(&volume->read_threads_read_done_cond);
1625 uds_init_cond(&volume->read_threads_cond);
1628 &volume->reader_threads);
1630 uds_free_volume(volume);
1635 result = vdo_create_thread(read_thread_function, (void *) volume,
1636 "reader", &volume->reader_threads[i]);
1638 uds_free_volume(volume);
1642 volume->read_thread_count = i + 1;
1645 *new_volume = volume;
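
The uds_make_volume() matches show its error-handling convention: every failed step calls uds_free_volume() on the partially constructed object instead of unwinding through goto labels, which only works because the teardown below tolerates NULL and half-initialized fields. A minimal standalone sketch of that idiom:

    #include <stdlib.h>

    struct toy_volume {
            int *geometry;
            int *record_pointers;
    };

    /* Safe on NULL and partially built objects, like uds_free_volume(). */
    static void toy_free_volume(struct toy_volume *v)
    {
            if (v == NULL)
                    return;
            free(v->geometry);        /* free(NULL) is a no-op */
            free(v->record_pointers);
            free(v);
    }

    static int toy_make_volume(struct toy_volume **new_volume)
    {
            struct toy_volume *v = calloc(1, sizeof(*v));

            if (v == NULL)
                    return -1;

            v->geometry = calloc(16, sizeof(int));
            if (v->geometry == NULL) {
                    toy_free_volume(v);
                    return -1;
            }

            v->record_pointers = calloc(16, sizeof(int));
            if (v->record_pointers == NULL) {
                    toy_free_volume(v);
                    return -1;
            }

            *new_volume = v;
            return 0;
    }
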
1663 void uds_free_volume(struct volume *volume)
1665 if (volume == NULL)
1668 if (volume->reader_threads != NULL) {
1672 mutex_lock(&volume->read_threads_mutex);
1673 volume->read_threads_exiting = true;
1674 uds_broadcast_cond(&volume->read_threads_cond);
1675 mutex_unlock(&volume->read_threads_mutex);
1676 for (i = 0; i < volume->read_thread_count; i++)
1677 vdo_join_threads(volume->reader_threads[i]);
1678 vdo_free(volume->reader_threads);
1679 volume->reader_threads = NULL;
1683 uninitialize_page_cache(&volume->page_cache);
1684 uds_free_sparse_cache(volume->sparse_cache);
1685 if (volume->client != NULL)
1686 dm_bufio_client_destroy(vdo_forget(volume->client));
1688 uds_free_index_page_map(volume->index_page_map);
1689 uds_free_radix_sorter(volume->radix_sorter);
1690 vdo_free(volume->geometry);
1691 vdo_free(volume->record_pointers);
1692 vdo_free(volume);
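
The teardown above stops the reader threads before freeing any shared state: set read_threads_exiting under the mutex, broadcast read_threads_cond, then join every thread. A runnable pthread sketch of that shutdown handshake:

    #include <pthread.h>
    #include <stdbool.h>

    struct toy_readers {
            pthread_mutex_t mutex;
            pthread_cond_t cond;
            bool exiting;
            pthread_t threads[4];
            unsigned int thread_count;
    };

    static void *toy_reader(void *arg)
    {
            struct toy_readers *r = arg;

            pthread_mutex_lock(&r->mutex);
            while (!r->exiting)
                    pthread_cond_wait(&r->cond, &r->mutex);
            pthread_mutex_unlock(&r->mutex);
            return NULL;
    }

    /* Flag, broadcast, then join. Writing the flag under the same mutex
     * the waiters hold means no reader can miss the wakeup. */
    static void toy_stop_readers(struct toy_readers *r)
    {
            unsigned int i;

            pthread_mutex_lock(&r->mutex);
            r->exiting = true;
            pthread_cond_broadcast(&r->cond);
            pthread_mutex_unlock(&r->mutex);

            for (i = 0; i < r->thread_count; i++)
                    pthread_join(r->threads[i], NULL);
    }
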