Lines Matching refs:gpa

30 #define memcmp_g(gpa, pattern,  size)								\
32 uint8_t *mem = (uint8_t *)gpa; \
37 "Guest expected 0x%x at offset %lu (gpa 0x%lx), got 0x%x", \
38 pattern, i, gpa + i, mem[i]); \
41 static void memcmp_h(uint8_t *mem, uint64_t gpa, uint8_t pattern, size_t size)
47 "Host expected 0x%x at gpa 0x%lx, got 0x%x",
48 pattern, gpa + i, mem[i]);
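The fragments above are pieces of a guest-side comparison macro and its host-side counterpart. A plausible reconstruction of how they fit together is sketched below; it assumes the usual KVM selftests assertion helpers (__GUEST_ASSERT for guest code, TEST_ASSERT for host code) and is not a verbatim copy of the file.

/* Guest-side: assert that every byte of a gpa range matches @pattern. */
#define memcmp_g(gpa, pattern, size)						\
do {										\
	uint8_t *mem = (uint8_t *)gpa;						\
	size_t i;								\
										\
	for (i = 0; i < size; i++)						\
		__GUEST_ASSERT(mem[i] == pattern,				\
			       "Guest expected 0x%x at offset %lu (gpa 0x%lx), got 0x%x", \
			       pattern, i, gpa + i, mem[i]);			\
} while (0)

/* Host-side: the same check against the host mapping of guest memory. */
static void memcmp_h(uint8_t *mem, uint64_t gpa, uint8_t pattern, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		TEST_ASSERT(mem[i] == pattern,
			    "Host expected 0x%x at gpa 0x%lx, got 0x%x",
			    pattern, gpa + i, mem[i]);
}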
53 * Execute a KVM hypercall to map/unmap a gpa range, which will cause a userspace exit
54 * to back/unback private memory. Subsequent accesses by the guest to the gpa range
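The comment above refers to the hypercall the guest uses to request a conversion. A minimal sketch of that helper, assuming the KVM_HC_MAP_GPA_RANGE ABI (a0 = gpa, a1 = number of pages, a2 = flags), the selftests' kvm_hypercall() wrapper, and that the host has enabled the userspace exit via KVM_CAP_EXIT_HYPERCALL:

static void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size, uint64_t flags)
{
	/* KVM_HC_MAP_GPA_RANGE takes the range in pages; assumes 4KiB PAGE_SHIFT. */
	uint64_t ret = kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa,
				     size >> PAGE_SHIFT, flags, 0);

	GUEST_ASSERT(!ret);
}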
73 static void guest_sync_shared(uint64_t gpa, uint64_t size,
76 GUEST_SYNC5(SYNC_SHARED, gpa, size, current_pattern, new_pattern);
79 static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern)
81 GUEST_SYNC4(SYNC_PRIVATE, gpa, size, pattern);
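The two sync helpers simply forward a command, the range, and the expected/new byte patterns to the host through ucalls. A sketch, assuming SYNC_SHARED/SYNC_PRIVATE are values of a test-local enum and GUEST_SYNC4/GUEST_SYNC5 are the standard multi-argument ucall macros:

enum ucall_syncs {
	SYNC_SHARED,
	SYNC_PRIVATE,
};

/* Ask the host to verify @current_pattern, then write @new_pattern. */
static void guest_sync_shared(uint64_t gpa, uint64_t size,
			      uint8_t current_pattern, uint8_t new_pattern)
{
	GUEST_SYNC5(SYNC_SHARED, gpa, size, current_pattern, new_pattern);
}

/* Ask the host to verify that its shared view of the range still holds @pattern. */
static void guest_sync_private(uint64_t gpa, uint64_t size, uint8_t pattern)
{
	GUEST_SYNC4(SYNC_PRIVATE, gpa, size, pattern);
}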
89 static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared,
98 kvm_hypercall_map_gpa_range(gpa, size, flags);
101 static void guest_map_shared(uint64_t gpa, uint64_t size, bool do_fallocate)
103 guest_map_mem(gpa, size, true, do_fallocate);
106 static void guest_map_private(uint64_t gpa, uint64_t size, bool do_fallocate)
108 guest_map_mem(gpa, size, false, do_fallocate);
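guest_map_mem packs the requested direction (shared vs. private) and whether the host should also fallocate()/punch-hole the backing into the hypercall's flags argument. The flag names below (MAP_GPA_SET_ATTRIBUTES, MAP_GPA_SHARED, MAP_GPA_DO_FALLOCATE) are assumptions for illustration: test-private values interpreted by the userspace side of this test, not flags KVM itself defines.

/* Test-private flag values (assumed names); KVM does not interpret them. */
#define MAP_GPA_SET_ATTRIBUTES	BIT(0)
#define MAP_GPA_SHARED		BIT(1)
#define MAP_GPA_DO_FALLOCATE	BIT(2)

static void guest_map_mem(uint64_t gpa, uint64_t size, bool map_shared,
			  bool do_fallocate)
{
	uint64_t flags = MAP_GPA_SET_ATTRIBUTES;

	if (map_shared)
		flags |= MAP_GPA_SHARED;
	if (do_fallocate)
		flags |= MAP_GPA_DO_FALLOCATE;
	kvm_hypercall_map_gpa_range(gpa, size, flags);
}

static void guest_map_shared(uint64_t gpa, uint64_t size, bool do_fallocate)
{
	guest_map_mem(gpa, size, true, do_fallocate);
}

static void guest_map_private(uint64_t gpa, uint64_t size, bool do_fallocate)
{
	guest_map_mem(gpa, size, false, do_fallocate);
}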
137 uint64_t gpa = base_gpa + test_ranges[i].offset;
148 memset((void *)gpa, p1, size);
158 guest_map_private(gpa, size, do_fallocate);
161 memset((void *)gpa, p2, PAGE_SIZE);
165 memset((void *)gpa, p2, size);
166 guest_sync_private(gpa, size, p1);
172 memcmp_g(gpa, p2, size);
173 if (gpa > base_gpa)
174 memcmp_g(base_gpa, init_p, gpa - base_gpa);
175 if (gpa + size < base_gpa + PER_CPU_DATA_SIZE)
176 memcmp_g(gpa + size, init_p,
177 (base_gpa + PER_CPU_DATA_SIZE) - (gpa + size));
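These fragments are the first half of one loop iteration: write pattern p1 into the test window while it is still shared, convert it to private, write p2 through the private mapping, have the host confirm its shared view still reads p1, and have the guest confirm p2 plus an untouched remainder. The sketch below is a hypothetical factoring of that stage into a helper (the listing also shows a PAGE_SIZE-sized memset at source line 161, hinting that large ranges take a slightly different path not shown here).

/* Hypothetical factoring of the "convert to private" stage of one iteration. */
static void guest_test_private_window(uint64_t base_gpa, uint64_t gpa,
				      uint64_t size, bool do_fallocate,
				      uint8_t init_p, uint8_t p1, uint8_t p2)
{
	/* Seed the (still shared) window with pattern one. */
	memset((void *)gpa, p1, size);

	/* Convert to private and write pattern two through the private view. */
	guest_map_private(gpa, size, do_fallocate);
	memset((void *)gpa, p2, size);

	/* The host's shared view must still hold pattern one. */
	guest_sync_private(gpa, size, p1);

	/* The guest sees its private writes... */
	memcmp_g(gpa, p2, size);

	/* ...and the rest of the per-CPU region still holds the initial pattern. */
	if (gpa > base_gpa)
		memcmp_g(base_gpa, init_p, gpa - base_gpa);
	if (gpa + size < base_gpa + PER_CPU_DATA_SIZE)
		memcmp_g(gpa + size, init_p,
			 (base_gpa + PER_CPU_DATA_SIZE) - (gpa + size));
}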
185 guest_map_shared(gpa + j, PAGE_SIZE, do_fallocate);
186 guest_sync_shared(gpa + j, PAGE_SIZE, p1, p3);
188 memcmp_g(gpa + j, p3, PAGE_SIZE);
190 guest_sync_private(gpa + j, PAGE_SIZE, p1);
200 guest_map_shared(gpa, size, do_fallocate);
201 memset((void *)gpa, p3, size);
202 guest_sync_shared(gpa, size, p3, p4);
203 memcmp_g(gpa, p4, size);
206 memset((void *)gpa, init_p, size);
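The remaining fragments of the loop convert individual pages back to shared (the host verifies p1 and writes p3 into them), then convert the whole window back to shared, fill it with p3, and let the host verify p3 and overwrite it with p4 before the guest resets the window. A hypothetical factoring of that tail; page_should_be_shared() is a placeholder, since the listing shows only the calls, not which pages take which branch:

/* Hypothetical factoring of the "back to shared" tail of one iteration. */
static void guest_test_back_to_shared(uint64_t gpa, uint64_t size,
				      bool do_fallocate, uint8_t init_p,
				      uint8_t p1, uint8_t p3, uint8_t p4)
{
	uint64_t j;

	/* Flip selected pages back to shared, one page at a time. */
	for (j = 0; j < size; j += PAGE_SIZE) {
		if (page_should_be_shared(j)) {	/* placeholder predicate */
			guest_map_shared(gpa + j, PAGE_SIZE, do_fallocate);
			guest_sync_shared(gpa + j, PAGE_SIZE, p1, p3);
			memcmp_g(gpa + j, p3, PAGE_SIZE);	/* host wrote p3 */
		} else {
			guest_sync_private(gpa + j, PAGE_SIZE, p1);
		}
	}

	/* Convert the whole window back to shared and let the host rewrite it. */
	guest_map_shared(gpa, size, do_fallocate);
	memset((void *)gpa, p3, size);
	guest_sync_shared(gpa, size, p3, p4);	/* host verifies p3, writes p4 */
	memcmp_g(gpa, p4, size);

	/* Restore the initial pattern before the next test range. */
	memset((void *)gpa, init_p, size);
}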
217 static void guest_punch_hole(uint64_t gpa, uint64_t size)
222 kvm_hypercall_map_gpa_range(gpa, size, flags);
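guest_punch_hole reuses the same hypercall, but with flags that ask the host only to punch a hole in the guest_memfd backing rather than flip memory attributes, which is why a later access is expected to fault in fresh, zeroed private memory. A sketch, assuming the same test-private flag names introduced above:

static void guest_punch_hole(uint64_t gpa, uint64_t size)
{
	/* Punch a hole in the private backing without changing attributes. */
	uint64_t flags = MAP_GPA_SHARED | MAP_GPA_DO_FALLOCATE;

	kvm_hypercall_map_gpa_range(gpa, size, flags);
}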
242 uint64_t gpa = base_gpa + test_ranges[i].offset;
255 memset((void *)gpa, init_p, size);
256 memcmp_g(gpa, init_p, size);
266 guest_punch_hole(gpa, size);
267 memcmp_g(gpa, 0, size);
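The PUNCH_HOLE stage is almost fully visible in the fragments: fill and verify the range, punch a hole in its backing, then expect the range to read back as zeros because the truncated pages are faulted back in as fresh private memory. Condensed into a hypothetical helper:

/* Hypothetical factoring of the PUNCH_HOLE stage shown in the fragments. */
static void guest_test_punch_hole(uint64_t gpa, uint64_t size, uint8_t init_p)
{
	/* Fault in and initialize the range, then verify the pattern. */
	memset((void *)gpa, init_p, size);
	memcmp_g(gpa, init_p, size);

	/* Punch a hole in the backing; the range must now read as zeros. */
	guest_punch_hole(gpa, size);
	memcmp_g(gpa, 0, size);
}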
292 uint64_t gpa = run->hypercall.args[0];
304 vm_guest_mem_fallocate(vm, gpa, size, map_shared);
307 vm_set_memory_attributes(vm, gpa, size,
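On the host side, the MAP_GPA_RANGE hypercall arrives as a KVM_EXIT_HYPERCALL userspace exit, and the handler decodes the same test-private flags to decide whether to fallocate()/punch-hole the guest_memfd backing and/or flip the KVM memory attributes. A sketch of that handler, assuming the flag names from above and the selftests helpers shown in the fragments (vm_guest_mem_fallocate(), vm_set_memory_attributes()):

static void handle_exit_hypercall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint64_t gpa = run->hypercall.args[0];
	uint64_t size = run->hypercall.args[1] * PAGE_SIZE;
	bool set_attributes = run->hypercall.args[2] & MAP_GPA_SET_ATTRIBUTES;
	bool map_shared = run->hypercall.args[2] & MAP_GPA_SHARED;
	bool do_fallocate = run->hypercall.args[2] & MAP_GPA_DO_FALLOCATE;
	struct kvm_vm *vm = vcpu->vm;

	TEST_ASSERT(run->hypercall.nr == KVM_HC_MAP_GPA_RANGE,
		    "Unexpected hypercall: %llu", run->hypercall.nr);

	/* For the fallocate() path, "map shared" doubles as "punch hole". */
	if (do_fallocate)
		vm_guest_mem_fallocate(vm, gpa, size, map_shared);

	if (set_attributes)
		vm_set_memory_attributes(vm, gpa, size,
					 map_shared ? 0 : KVM_MEMORY_ATTRIBUTE_PRIVATE);
	run->hypercall.ret = 0;
}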
340 uint64_t gpa = uc.args[1];
350 uint8_t *hva = addr_gpa2hva(vm, gpa + i);
353 memcmp_h(hva, gpa + i, uc.args[3], nr_bytes);
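The ucall fragments are the host's half of guest_sync_shared()/guest_sync_private(): walk the range page by page, translate each gpa to a host virtual address, verify that the host's (always shared) view holds the pattern the guest announced, and for the shared case write the new pattern back. A sketch, factored into a hypothetical helper; the uc.args[] layout (cmd, gpa, size, current_pattern[, new_pattern]) is inferred from the GUEST_SYNC4/GUEST_SYNC5 calls above:

/* Hypothetical helper for the UCALL_SYNC case of the vCPU worker loop. */
static void handle_ucall_sync(struct kvm_vm *vm, struct ucall *uc)
{
	uint64_t gpa = uc->args[1];
	size_t size = uc->args[2];
	size_t i;

	TEST_ASSERT(uc->args[0] == SYNC_SHARED || uc->args[0] == SYNC_PRIVATE,
		    "Unknown sync command '%lu'", uc->args[0]);

	for (i = 0; i < size; i += vm->page_size) {
		size_t nr_bytes = size - i < vm->page_size ? size - i
							   : vm->page_size;
		uint8_t *hva = addr_gpa2hva(vm, gpa + i);

		/* The host always reads the shared view of the range. */
		memcmp_h(hva, gpa + i, uc->args[3], nr_bytes);

		/* For shared ranges, hand back the new pattern for the guest. */
		if (uc->args[0] == SYNC_SHARED)
			memset(hva, uc->args[4], nr_bytes);
	}
}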
405 uint64_t gpa = BASE_DATA_GPA + i * per_cpu_size;
407 vcpu_args_set(vcpus[i], 1, gpa);
413 virt_map(vm, gpa, gpa, PER_CPU_DATA_SIZE / vm->page_size);
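The final fragments are the per-vCPU setup: each vCPU gets its own slice of the data region starting at BASE_DATA_GPA, receives the slice's base gpa as its single guest argument, and has only that slice identity-mapped, presumably so an out-of-range access faults instead of silently corrupting another vCPU's data. A sketch of that loop (vcpus[] and per_cpu_size appear in the fragments; nr_vcpus is assumed from context):

for (i = 0; i < nr_vcpus; i++) {
	uint64_t gpa = BASE_DATA_GPA + i * per_cpu_size;

	/* The guest receives its per-vCPU base gpa as its only argument. */
	vcpu_args_set(vcpus[i], 1, gpa);

	/* Identity-map only this vCPU's slice of the data region. */
	virt_map(vm, gpa, gpa, PER_CPU_DATA_SIZE / vm->page_size);
}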