Lines Matching defs:zone

23 #include "physical-zone.h"
60 struct block_map_zone *zone;
98 struct block_map_zone *zone;
121 * For adjusting VDO page cache statistic fields which are only mutated on the logical zone thread.
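The comment at line 121 describes a single-writer convention: these statistic fields are mutated only on the logical zone thread, so a plain read-modify-write is safe, but stores are published with WRITE_ONCE so that other threads can read them. A minimal sketch of that pattern, assuming kernel context (linux/compiler.h for WRITE_ONCE); the ADD_ONCE name and the cache_hits field are illustrative assumptions, not taken from this listing:

/* Single-writer counter: only the owning thread calls this. */
#define ADD_ONCE(value, delta) WRITE_ONCE(value, (value) + (delta))

static void record_cache_hit(struct vdo_page_cache *cache)
{
	/* Runs only on cache->zone->thread_id; WRITE_ONCE publishes the
	 * update to threads that read the statistics concurrently. */
	ADD_ONCE(cache->stats.cache_hits, 1);
}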
199 info->vio->completion.callback_thread_id = cache->zone->thread_id;
249 VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id),
251 function_name, cache->zone->thread_id, thread_id);
257 VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state),
624 static void check_for_drain_complete(struct block_map_zone *zone)
626 if (vdo_is_state_draining(&zone->state) &&
627 (zone->active_lookups == 0) &&
628 !vdo_waitq_has_waiters(&zone->flush_waiters) &&
629 !is_vio_pool_busy(zone->vio_pool) &&
630 (zone->page_cache.outstanding_reads == 0) &&
631 (zone->page_cache.outstanding_writes == 0)) {
632 vdo_finish_draining_with_result(&zone->state,
633 (vdo_is_read_only(zone->block_map->vdo) ?
638 static void enter_zone_read_only_mode(struct block_map_zone *zone, int result)
640 vdo_enter_read_only_mode(zone->block_map->vdo, result);
644 * Just take all waiters off the waitq so the zone can drain.
646 vdo_waitq_init(&zone->flush_waiters);
647 check_for_drain_complete(zone);
659 enter_zone_read_only_mode(completion->info->cache->zone, result);
675 vdo_enter_read_only_mode(cache->zone->block_map->vdo, result);
686 check_for_drain_complete(cache->zone);
697 nonce_t nonce = info->cache->zone->block_map->nonce;
728 check_for_drain_complete(cache->zone);
758 continue_vio_after_io(vio, page_is_loaded, info->cache->zone->thread_id);
809 continue_vio_after_io(vio, write_pages, info->cache->zone->thread_id);
1027 check_for_drain_complete(cache->zone);
1037 continue_vio_after_io(vio, page_is_written_out, info->cache->zone->thread_id);
1062 vdo_release_recovery_journal_block_reference(cache->zone->block_map->journal,
1065 cache->zone->zone_number);
1083 check_for_drain_complete(cache->zone);
1197 * @zone: The block map zone of the desired page.
1211 struct block_map_zone *zone, physical_block_number_t pbn,
1215 struct vdo_page_cache *cache = &zone->page_cache;
1231 cache->zone->thread_id, parent);
1377 static inline struct tree_page *get_tree_page(const struct block_map_zone *zone,
1380 return get_tree_page_by_index(zone->block_map->forest, lock->root_index,
1432 * context of a zone's current generation range.
1433 * @zone: The zone in which to do the comparison.
1437 * Return: true if generation @a is not strictly older than generation @b in the context of @zone
1439 static bool __must_check is_not_older(struct block_map_zone *zone, u8 a, u8 b)
1443 result = VDO_ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) &&
1444 in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)),
1446 a, b, zone->oldest_generation, zone->generation);
1448 enter_zone_read_only_mode(zone, result);
1452 return in_cyclic_range(b, a, zone->generation, 1 << 8);
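Lines 1439-1452 compare 8-bit generation numbers that wrap at 256 (the 1 << 8 modulus); is_not_older asks whether generation a falls inside the cyclic window [b, zone->generation]. A self-contained sketch of the in_cyclic_range semantics that check appears to rely on; the helper body below is an illustration, not the driver's implementation:

#include <stdbool.h>
#include <stdint.h>

/* Is value inside [lower, upper] when the interval may wrap around modulus? */
static bool in_cyclic_range(uint8_t lower, uint8_t value, uint8_t upper, unsigned int modulus)
{
	unsigned int v = value, u = upper;

	if (v < lower)
		v += modulus;   /* unwrap the value past the wrap point */
	if (u < lower)
		u += modulus;   /* unwrap the upper bound the same way */
	return v <= u;
}

/* e.g. with oldest_generation = 250 and generation = 5 (window wrapped),
 * generation 2 is inside the window while 200 is not. */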
1455 static void release_generation(struct block_map_zone *zone, u8 generation)
1459 result = VDO_ASSERT((zone->dirty_page_counts[generation] > 0),
1462 enter_zone_read_only_mode(zone, result);
1466 zone->dirty_page_counts[generation]--;
1467 while ((zone->dirty_page_counts[zone->oldest_generation] == 0) &&
1468 (zone->oldest_generation != zone->generation))
1469 zone->oldest_generation++;
1472 static void set_generation(struct block_map_zone *zone, struct tree_page *page,
1484 new_count = ++zone->dirty_page_counts[new_generation];
1488 enter_zone_read_only_mode(zone, result);
1493 release_generation(zone, old_generation);
1504 static void acquire_vio(struct vdo_waiter *waiter, struct block_map_zone *zone)
1507 acquire_vio_from_pool(zone->vio_pool, waiter);
1511 static bool attempt_increment(struct block_map_zone *zone)
1513 u8 generation = zone->generation + 1;
1515 if (zone->oldest_generation == generation)
1518 zone->generation = generation;
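Taken together, release_generation (lines 1455-1469) and attempt_increment (lines 1511-1518) maintain a sliding window of dirty-page generations: the oldest end only advances once its generation has no dirty pages left, and a new generation is refused if it would wrap into the oldest. A pared-down, standalone sketch of that invariant (the assertion and read-only handling shown in the listing are omitted):

#include <stdbool.h>
#include <stdint.h>

struct generation_window {
	uint8_t generation;          /* newest open generation */
	uint8_t oldest_generation;   /* oldest generation still holding dirty pages */
	uint32_t dirty_page_counts[256];
};

/* Drop one dirty page from @gen, then advance past any now-empty generations. */
static void release_generation(struct generation_window *w, uint8_t gen)
{
	w->dirty_page_counts[gen]--;
	while (w->dirty_page_counts[w->oldest_generation] == 0 &&
	       w->oldest_generation != w->generation)
		w->oldest_generation++;
}

/* Open a new generation unless doing so would collide with the oldest one. */
static bool attempt_increment(struct generation_window *w)
{
	uint8_t next = w->generation + 1;

	if (w->oldest_generation == next)
		return false;
	w->generation = next;
	return true;
}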
1523 static void enqueue_page(struct tree_page *page, struct block_map_zone *zone)
1525 if ((zone->flusher == NULL) && attempt_increment(zone)) {
1526 zone->flusher = page;
1527 acquire_vio(&page->waiter, zone);
1531 vdo_waitq_enqueue_waiter(&zone->flush_waiters, &page->waiter);
1540 acquire_vio(waiter, write_context->zone);
1544 enqueue_page(page, write_context->zone);
1547 static void return_to_pool(struct block_map_zone *zone, struct pooled_vio *vio)
1549 return_vio_to_pool(zone->vio_pool, vio);
1550 check_for_drain_complete(zone);
1560 struct block_map_zone *zone = pooled->context;
1562 vdo_release_recovery_journal_block_reference(zone->block_map->journal,
1565 zone->zone_number);
1568 release_generation(zone, page->writing_generation);
1571 if (zone->flusher == page) {
1573 .zone = zone,
1577 vdo_waitq_notify_all_waiters(&zone->flush_waiters,
1579 if (dirty && attempt_increment(zone)) {
1584 zone->flusher = NULL;
1588 enqueue_page(page, zone);
1589 } else if ((zone->flusher == NULL) && vdo_waitq_has_waiters(&zone->flush_waiters) &&
1590 attempt_increment(zone)) {
1591 zone->flusher = container_of(vdo_waitq_dequeue_waiter(&zone->flush_waiters),
1593 write_page(zone->flusher, pooled);
1597 return_to_pool(zone, pooled);
1605 struct block_map_zone *zone = pooled->context;
1608 enter_zone_read_only_mode(zone, result);
1609 return_to_pool(zone, pooled);
1618 struct block_map_zone *zone = pooled->context;
1629 if (zone->flusher == tree_page)
1640 struct block_map_zone *zone = vio->context;
1646 zone->thread_id);
1652 struct block_map_zone *zone = vio->context;
1655 if ((zone->flusher != tree_page) &&
1656 is_not_older(zone, tree_page->generation, zone->generation)) {
1661 enqueue_page(tree_page, zone);
1662 return_to_pool(zone, vio);
1668 completion->callback_thread_id = zone->thread_id;
1697 struct block_map_zone *zone;
1705 zone = data_vio->logical.zone->block_map_zone;
1706 lock_holder = vdo_int_map_remove(zone->loading_pages, lock->key);
1717 --data_vio->logical.zone->block_map_zone->active_lookups;
1742 enter_zone_read_only_mode(data_vio->logical.zone->block_map_zone, result);
1775 static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio);
1776 static void allocate_block_map_page(struct block_map_zone *zone,
1799 allocate_block_map_page(data_vio->logical.zone->block_map_zone,
1811 load_block_map_page(data_vio->logical.zone->block_map_zone, data_vio);
1831 struct block_map_zone *zone = pooled->context;
1836 tree_page = get_tree_page(zone, tree_lock);
1838 nonce = zone->block_map->nonce;
1842 return_vio_to_pool(zone->vio_pool, pooled);
1856 struct block_map_zone *zone = pooled->context;
1859 return_vio_to_pool(zone->vio_pool, pooled);
1869 data_vio->logical.zone->thread_id);
1888 static int attempt_page_lock(struct block_map_zone *zone, struct data_vio *data_vio)
1905 result = vdo_int_map_put(zone->loading_pages, lock->key,
1922 static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio)
1926 result = attempt_page_lock(zone, data_vio);
1934 acquire_vio_from_pool(zone->vio_pool, &data_vio->waiter);
1943 data_vio->logical.zone->thread_id))
1963 allocate_block_map_page(data_vio->logical.zone->block_map_zone, data_vio);
1998 static void write_expired_elements(struct block_map_zone *zone)
2003 u8 generation = zone->generation;
2005 expired = &zone->dirty_lists->expired[VDO_TREE_PAGE];
2014 enter_zone_read_only_mode(zone, result);
2018 set_generation(zone, page, generation);
2020 enqueue_page(page, zone);
2023 expired = &zone->dirty_lists->expired[VDO_CACHE_PAGE];
2029 save_pages(&zone->page_cache);
2034 * @zone: The zone in which we are operating.
2041 static void add_to_dirty_lists(struct block_map_zone *zone,
2047 struct dirty_lists *dirty_lists = zone->dirty_lists;
2060 write_expired_elements(zone);
2074 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
2080 tree_page = get_tree_page(zone, tree_lock);
2092 if (zone->flusher != tree_page) {
2097 set_generation(zone, tree_page, zone->generation);
2103 add_to_dirty_lists(zone, &tree_page->entry, VDO_TREE_PAGE,
2110 tree_page = get_tree_page(zone, tree_lock);
2112 zone->block_map->nonce,
2125 allocate_block_map_page(zone, data_vio);
2190 static void allocate_block_map_page(struct block_map_zone *zone,
2201 result = attempt_page_lock(zone, data_vio);
2227 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
2229 zone->active_lookups++;
2230 if (vdo_is_state_draining(&zone->state)) {
2237 page_index = (lock->tree_slots[0].page_index / zone->block_map->root_count);
2250 page = (struct block_map_page *) (get_tree_page(zone, lock)->page_buffer);
2277 allocate_block_map_page(zone, data_vio);
2289 load_block_map_page(zone, data_vio);
2323 void vdo_write_tree_page(struct tree_page *page, struct block_map_zone *zone)
2327 if (waiting && (zone->flusher == page))
2330 set_generation(zone, page, zone->generation);
2334 enqueue_page(page, zone);
2539 cursor->parent->zone->block_map->nonce,
2550 cursor->parent->zone->thread_id);
2580 vdo_write_tree_page(tree_page, cursor->parent->zone);
2590 vdo_write_tree_page(tree_page, cursor->parent->zone);
2599 vdo_write_tree_page(tree_page, cursor->parent->zone);
2636 pooled->vio.completion.callback_thread_id = cursor->parent->zone->thread_id;
2691 cursors->zone = &map->zones[0];
2692 cursors->pool = cursors->zone->vio_pool;
2712 * initialize_block_map_zone() - Initialize the per-zone portions of the block map.
2724 struct block_map_zone *zone = &map->zones[zone_number];
2728 zone->zone_number = zone_number;
2729 zone->thread_id = vdo->thread_config.logical_threads[zone_number];
2730 zone->block_map = map;
2734 &zone->dirty_lists);
2738 zone->dirty_lists->maximum_age = maximum_age;
2739 INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_TREE_PAGE]);
2740 INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_CACHE_PAGE]);
2743 INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_TREE_PAGE]);
2744 INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_CACHE_PAGE]);
2747 result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->loading_pages);
2752 zone->thread_id, VIO_TYPE_BLOCK_MAP_INTERIOR,
2753 VIO_PRIORITY_METADATA, zone, &zone->vio_pool);
2757 vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
2759 zone->page_cache.zone = zone;
2760 zone->page_cache.vdo = vdo;
2761 zone->page_cache.page_count = cache_size / map->zone_count;
2762 zone->page_cache.stats.free_pages = zone->page_cache.page_count;
2764 result = allocate_cache_components(&zone->page_cache);
2769 INIT_LIST_HEAD(&zone->page_cache.lru_list);
2770 INIT_LIST_HEAD(&zone->page_cache.outgoing_list);
2797 struct block_map_zone *zone = &map->zones[zone_number];
2799 update_period(zone->dirty_lists, map->current_era_point);
2800 write_expired_elements(zone);
2821 static void uninitialize_block_map_zone(struct block_map_zone *zone)
2823 struct vdo_page_cache *cache = &zone->page_cache;
2825 vdo_free(vdo_forget(zone->dirty_lists));
2826 free_vio_pool(vdo_forget(zone->vio_pool));
2827 vdo_int_map_free(vdo_forget(zone->loading_pages));
2842 zone_count_t zone;
2847 for (zone = 0; zone < map->zone_count; zone++)
2848 uninitialize_block_map_zone(&map->zones[zone]);
2865 zone_count_t zone = 0;
2896 for (zone = 0; zone < map->zone_count; zone++) {
2897 result = initialize_block_map_zone(map, zone, cache_size, maximum_age);
2947 /* Compute the logical zone for the LBN of a data vio. */
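The comment at line 2947 marks where a data vio's logical block number is routed to a logical zone. A hypothetical, self-contained sketch of one such routing: derive the block map page from the LBN, pick a tree root from the page number, then map the root to a zone. The exact arithmetic is an assumption and is not confirmed by this listing:

#include <stdint.h>

/* entries_per_page, root_count and zone_count stand in for the driver's
 * VDO_BLOCK_MAP_ENTRIES_PER_PAGE, map->root_count and map->zone_count. */
static unsigned int compute_logical_zone(uint64_t lbn, unsigned int entries_per_page,
					 unsigned int root_count, unsigned int zone_count)
{
	uint64_t page_number = lbn / entries_per_page;        /* which block map page */
	unsigned int root_index = page_number % root_count;   /* which tree root */

	return root_index % zone_count;                       /* which logical zone */
}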
2972 struct block_map_zone *zone = container_of(state, struct block_map_zone, state);
2974 VDO_ASSERT_LOG_ONLY((zone->active_lookups == 0),
2978 while (zone->dirty_lists->oldest_period < zone->dirty_lists->next_period)
2979 expire_oldest_list(zone->dirty_lists);
2980 write_expired_elements(zone);
2983 check_for_drain_complete(zone);
2991 struct block_map_zone *zone = &map->zones[zone_number];
2993 vdo_start_draining(&zone->state,
3010 struct block_map_zone *zone = &map->zones[zone_number];
3012 vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state));
3082 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
3084 if (vdo_is_state_draining(&zone->state)) {
3089 vdo_get_page(&data_vio->page_completion, zone,
3124 mapped.pbn, &data_vio->mapped.zone);
3196 struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
3197 struct block_map *block_map = zone->block_map;
3213 zone->zone_number);
3218 zone->zone_number);
3258 add_to_dirty_lists(info->cache->zone, &info->state_entry,
3287 zone_count_t zone = 0;
3291 for (zone = 0; zone < map->zone_count; zone++) {
3293 &(map->zones[zone].page_cache.stats);