// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty logging page splitting test
 *
 * Based on dirty_log_perf.c
 *
 * Copyright (C) 2018, Red Hat, Inc.
 * Copyright (C) 2023, Google, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <linux/bitmap.h>

#include "kvm_util.h"
#include "test_util.h"
#include "memstress.h"
#include "guest_modes.h"
#include "ucall_common.h"

#define VCPUS		2
#define SLOTS		2
#define ITERATIONS	2

static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;

static enum vm_mem_backing_src_type backing_src = VM_MEM_SRC_ANONYMOUS_HUGETLB;

static u64 dirty_log_manual_caps;
static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

struct kvm_page_stats {
	uint64_t pages_4k;
	uint64_t pages_2m;
	uint64_t pages_1g;
	uint64_t hugepages;
};

/* Snapshot KVM's per-VM stats on the number of pages mapped at each size. */
static void get_page_stats(struct kvm_vm *vm, struct kvm_page_stats *stats, const char *stage)
{
	stats->pages_4k = vm_get_stat(vm, "pages_4k");
	stats->pages_2m = vm_get_stat(vm, "pages_2m");
	stats->pages_1g = vm_get_stat(vm, "pages_1g");
	stats->hugepages = stats->pages_2m + stats->pages_1g;

	pr_debug("\nPage stats after %s: 4K: %ld 2M: %ld 1G: %ld huge: %ld\n",
		 stage, stats->pages_4k, stats->pages_2m, stats->pages_1g,
		 stats->hugepages);
}

/* Signal the start of the next iteration, then wait for all vCPUs to finish it. */
static void run_vcpu_iteration(struct kvm_vm *vm)
{
	int i;

	iteration++;
	for (i = 0; i < VCPUS; i++) {
		while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
		       iteration)
			;
	}
}

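/*
 * vCPU worker: run the guest until it exits to userspace with UCALL_SYNC,
 * publish the iteration just completed, then spin until the main thread
 * signals the next iteration via run_vcpu_iteration() or sets host_quit.
 */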
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	int vcpu_idx = vcpu_args->vcpu_idx;

	while (!READ_ONCE(host_quit)) {
		int current_iteration = READ_ONCE(iteration);

		vcpu_run(vcpu);

		TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);

		vcpu_last_completed_iteration[vcpu_idx] = current_iteration;

		/* Wait for the start of the next iteration to be signaled. */
		while (current_iteration == READ_ONCE(iteration) &&
		       READ_ONCE(iteration) >= 0 &&
		       !READ_ONCE(host_quit))
			;
	}
}

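/*
 * Core test flow: populate guest memory, enable dirty logging, run dirty
 * (and, with MANUAL_PROTECT, clear) passes, then disable dirty logging and
 * repopulate. KVM's page stats are sampled after each stage so the
 * assertions at the end can verify that huge pages are split while dirty
 * logging is active and are recovered once it is disabled.
 */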
static void run_test(enum vm_guest_mode mode, void *unused)
{
	struct kvm_vm *vm;
	unsigned long **bitmaps;
	uint64_t guest_num_pages;
	uint64_t host_num_pages;
	uint64_t pages_per_slot;
	int i;
	struct kvm_page_stats stats_populated;
	struct kvm_page_stats stats_dirty_logging_enabled;
	struct kvm_page_stats stats_dirty_pass[ITERATIONS];
	struct kvm_page_stats stats_clear_pass[ITERATIONS];
	struct kvm_page_stats stats_dirty_logging_disabled;
	struct kvm_page_stats stats_repopulated;

	vm = memstress_create_vm(mode, VCPUS, guest_percpu_mem_size,
				 SLOTS, backing_src, false);

	guest_num_pages = (VCPUS * guest_percpu_mem_size) >> vm->page_shift;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);
	pages_per_slot = host_num_pages / SLOTS;
	TEST_ASSERT_EQ(host_num_pages, pages_per_slot * SLOTS);
	TEST_ASSERT(!(host_num_pages % 512),
		    "Number of pages, '%lu', is not a multiple of 2MiB", host_num_pages);

	bitmaps = memstress_alloc_bitmaps(SLOTS, pages_per_slot);

	if (dirty_log_manual_caps)
		vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
			      dirty_log_manual_caps);

	/* Start the iterations */
	iteration = -1;
	host_quit = false;

	for (i = 0; i < VCPUS; i++)
		vcpu_last_completed_iteration[i] = -1;

	memstress_start_vcpu_threads(VCPUS, vcpu_worker);

	run_vcpu_iteration(vm);
	get_page_stats(vm, &stats_populated, "populating memory");

	/* Enable dirty logging */
	memstress_enable_dirty_logging(vm, SLOTS);

	get_page_stats(vm, &stats_dirty_logging_enabled, "enabling dirty logging");

	while (iteration < ITERATIONS) {
		run_vcpu_iteration(vm);
		get_page_stats(vm, &stats_dirty_pass[iteration - 1],
			       "dirtying memory");

		memstress_get_dirty_log(vm, bitmaps, SLOTS);

		if (dirty_log_manual_caps) {
			memstress_clear_dirty_log(vm, bitmaps, SLOTS, pages_per_slot);

			get_page_stats(vm, &stats_clear_pass[iteration - 1], "clearing dirty log");
		}
	}

	/* Disable dirty logging */
	memstress_disable_dirty_logging(vm, SLOTS);

	get_page_stats(vm, &stats_dirty_logging_disabled, "disabling dirty logging");

	/* Run vCPUs again to fault pages back in. */
	run_vcpu_iteration(vm);
	get_page_stats(vm, &stats_repopulated, "repopulating memory");

	/*
	 * Tell the vCPU threads to quit. No need to manually check that vCPUs
	 * have stopped running after disabling dirty logging; the join will
	 * wait for them to exit.
	 */
	host_quit = true;
	memstress_join_vcpu_threads(VCPUS);

	memstress_free_bitmaps(bitmaps, SLOTS);
	memstress_destroy_vm(vm);

	/* Each 2MiB page maps 512 4KiB pages; each 1GiB page maps 512 * 512. */
	TEST_ASSERT_EQ((stats_populated.pages_2m * 512 +
			stats_populated.pages_1g * 512 * 512), host_num_pages);

	/*
	 * Check that all huge pages were split. Since large pages can only
	 * exist in the data slot, and the vCPUs should have dirtied all pages
	 * in the data slot, there should be no huge pages left after splitting.
	 * Splitting happens at dirty log enable time without
	 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and after the first clear pass
	 * with that capability.
	 */
	if (dirty_log_manual_caps) {
		TEST_ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
		TEST_ASSERT(stats_clear_pass[0].pages_4k >= host_num_pages,
			    "Expected at least '%lu' 4KiB pages, found only '%lu'",
			    host_num_pages, stats_clear_pass[0].pages_4k);
		TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
	} else {
		TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
		TEST_ASSERT(stats_dirty_logging_enabled.pages_4k >= host_num_pages,
			    "Expected at least '%lu' 4KiB pages, found only '%lu'",
			    host_num_pages, stats_dirty_logging_enabled.pages_4k);
	}

	/*
	 * Once dirty logging is disabled and the vCPUs have touched all their
	 * memory again, the hugepage counts should be the same as they were
	 * right after initial population of memory.
	 */
	TEST_ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
	TEST_ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-b vcpu bytes] [-s mem type]\n",
	       name);
	puts("");
	printf(" -b: specify the size of the memory region which should be\n"
	       "     dirtied by each vCPU. e.g. 10M or 3G.\n"
	       "     (default: 1G)\n");
	backing_src_help("-s");
	puts("");
}

int main(int argc, char *argv[])
{
	int opt;

	/* The test requires the TDP MMU with eager page splitting enabled. */
	TEST_REQUIRE(get_kvm_param_bool("eager_page_split"));
	TEST_REQUIRE(get_kvm_param_bool("tdp_mmu"));

	while ((opt = getopt(argc, argv, "b:hs:")) != -1) {
		switch (opt) {
		case 'b':
			guest_percpu_mem_size = parse_size(optarg);
			break;
		case 'h':
			help(argv[0]);
			exit(0);
		case 's':
			backing_src = parse_backing_src_type(optarg);
			break;
		default:
			help(argv[0]);
			exit(1);
		}
	}

	if (!is_backing_src_hugetlb(backing_src)) {
		pr_info("This test will only work reliably with HugeTLB memory. "
			"It can work with THP, but that is best effort.\n");
	}

	guest_modes_append_default();

	/* Run once without MANUAL_PROTECT and, if supported, once with it. */
	dirty_log_manual_caps = 0;
	for_each_guest_mode(run_test, NULL);

	dirty_log_manual_caps =
		kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);

	if (dirty_log_manual_caps) {
		dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
					  KVM_DIRTY_LOG_INITIALLY_SET);
		for_each_guest_mode(run_test, NULL);
	} else {
		pr_info("Skipping testing with MANUAL_PROTECT as it is not supported\n");
	}

	return 0;
}