// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging test
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
7
8#define _GNU_SOURCE /* for program_invocation_name */
9
10#include <stdio.h>
11#include <stdlib.h>
12#include <linux/bitmap.h>
13#include <linux/bitops.h>
14
15#include "test_util.h"
16#include "kvm_util.h"
17#include "processor.h"
18#include "vmx.h"
19
20/* The memory slot index to track dirty pages */
21#define TEST_MEM_SLOT_INDEX		1
22#define TEST_MEM_PAGES			3
23
24/* L1 guest test virtual memory offset */
25#define GUEST_TEST_MEM			0xc0000000
26
27/* L2 guest test virtual memory offset */
28#define NESTED_TEST_MEM1		0xc0001000
29#define NESTED_TEST_MEM2		0xc0002000
30
/*
 * L2 (nested guest) workload.  Each GUEST_SYNC(arg) hands control to the
 * host, which fetches the dirty log and checks page 0 of the test memslot:
 * arg == true means the page must be reported dirty (and hold the guest's
 * write), arg == false means it must be reported clean (and still hold the
 * host's 0xaa fill pattern).
 *
 * @a: pointer into the first L2 test page (maps to L1 GPA 0xc0000000)
 * @b: pointer into the second L2 test page (same L1 GPA when EPT is on)
 */
static void l2_guest_code(u64 *a, u64 *b)
{
	/*
	 * Read before writing — presumably to fault the mapping in before
	 * the dirty-tracked write; TODO(review) confirm intent.
	 */
	READ_ONCE(*a);
	WRITE_ONCE(*a, 1);
	GUEST_SYNC(true);	/* host must see the page dirty */
	GUEST_SYNC(false);	/* no write since last sync: must be clean */

	WRITE_ONCE(*b, 1);
	GUEST_SYNC(true);
	WRITE_ONCE(*b, 1);	/* re-dirty after the host consumed the log */
	GUEST_SYNC(true);
	GUEST_SYNC(false);

	/* Exit to L1 and never come back.  */
	vmcall();
}
47
48static void l2_guest_code_ept_enabled(void)
49{
50	l2_guest_code((u64 *)NESTED_TEST_MEM1, (u64 *)NESTED_TEST_MEM2);
51}
52
53static void l2_guest_code_ept_disabled(void)
54{
55	/* Access the same L1 GPAs as l2_guest_code_ept_enabled() */
56	l2_guest_code((u64 *)GUEST_TEST_MEM, (u64 *)GUEST_TEST_MEM);
57}
58
59void l1_guest_code(struct vmx_pages *vmx)
60{
61#define L2_GUEST_STACK_SIZE 64
62	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
63	void *l2_rip;
64
65	GUEST_ASSERT(vmx->vmcs_gpa);
66	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
67	GUEST_ASSERT(load_vmcs(vmx));
68
69	if (vmx->eptp_gpa)
70		l2_rip = l2_guest_code_ept_enabled;
71	else
72		l2_rip = l2_guest_code_ept_disabled;
73
74	prepare_vmcs(vmx, l2_rip, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
75
76	GUEST_SYNC(false);
77	GUEST_ASSERT(!vmlaunch());
78	GUEST_SYNC(false);
79	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
80	GUEST_DONE();
81}
82
83static void test_vmx_dirty_log(bool enable_ept)
84{
85	vm_vaddr_t vmx_pages_gva = 0;
86	struct vmx_pages *vmx;
87	unsigned long *bmap;
88	uint64_t *host_test_mem;
89
90	struct kvm_vcpu *vcpu;
91	struct kvm_vm *vm;
92	struct ucall uc;
93	bool done = false;
94
95	pr_info("Nested EPT: %s\n", enable_ept ? "enabled" : "disabled");
96
97	/* Create VM */
98	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
99	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
100	vcpu_args_set(vcpu, 1, vmx_pages_gva);
101
102	/* Add an extra memory slot for testing dirty logging */
103	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
104				    GUEST_TEST_MEM,
105				    TEST_MEM_SLOT_INDEX,
106				    TEST_MEM_PAGES,
107				    KVM_MEM_LOG_DIRTY_PAGES);
108
109	/*
110	 * Add an identity map for GVA range [0xc0000000, 0xc0002000).  This
111	 * affects both L1 and L2.  However...
112	 */
113	virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, TEST_MEM_PAGES);
114
115	/*
116	 * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
117	 * 0xc0000000.
118	 *
119	 * Note that prepare_eptp should be called only L1's GPA map is done,
120	 * meaning after the last call to virt_map.
121	 *
122	 * When EPT is disabled, the L2 guest code will still access the same L1
123	 * GPAs as the EPT enabled case.
124	 */
125	if (enable_ept) {
126		prepare_eptp(vmx, vm, 0);
127		nested_map_memslot(vmx, vm, 0);
128		nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
129		nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
130	}
131
132	bmap = bitmap_zalloc(TEST_MEM_PAGES);
133	host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
134
135	while (!done) {
136		memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096);
137		vcpu_run(vcpu);
138		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
139
140		switch (get_ucall(vcpu, &uc)) {
141		case UCALL_ABORT:
142			REPORT_GUEST_ASSERT(uc);
143			/* NOT REACHED */
144		case UCALL_SYNC:
145			/*
146			 * The nested guest wrote at offset 0x1000 in the memslot, but the
147			 * dirty bitmap must be filled in according to L1 GPA, not L2.
148			 */
149			kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
150			if (uc.args[1]) {
151				TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean");
152				TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest");
153			} else {
154				TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty");
155				TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest");
156			}
157
158			TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty");
159			TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest");
160			TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty");
161			TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest");
162			break;
163		case UCALL_DONE:
164			done = true;
165			break;
166		default:
167			TEST_FAIL("Unknown ucall %lu", uc.cmd);
168		}
169	}
170}
171
172int main(int argc, char *argv[])
173{
174	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
175
176	test_vmx_dirty_log(/*enable_ept=*/false);
177
178	if (kvm_cpu_has_ept())
179		test_vmx_dirty_log(/*enable_ept=*/true);
180
181	return 0;
182}
183