// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test for x86 KVM_CAP_SYNC_REGS
 *
 * Copyright (C) 2018, Google LLC.
 *
 * Verifies expected behavior of x86 KVM_CAP_SYNC_REGS functionality,
 * including requesting an invalid register set and checking updates
 * to/from values in kvm_run.s.regs as kvm_valid_regs and kvm_dirty_regs
 * are toggled.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <pthread.h>

#include "kvm_test_harness.h"
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

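/* Arbitrary unhandled PIO port; any IN from the guest yields KVM_EXIT_IO. */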
#define UCALL_PIO_PORT ((uint16_t)0x1000)

struct ucall uc_none = {
	.cmd = UCALL_NONE,
};

/*
 * The ucall is embedded here to protect against the compiler reshuffling
 * registers before calling a function.  In this test we only need to get a
 * KVM_EXIT_IO vmexit and to preserve RBX; no additional information is
 * needed.  Note that the guest increments RBX exactly once per re-entry,
 * which the "+ 1" assertions in the tests below rely on.
 */
void guest_code(void)
{
	asm volatile("1: in %[port], %%al\n"
		     "add $0x1, %%rbx\n"
		     "jmp 1b"
		     : : [port] "d" (UCALL_PIO_PORT), "D" (&uc_none)
		     : "rax", "rbx");
}

KVM_ONE_VCPU_TEST_SUITE(sync_regs_test);

static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
{
#define REG_COMPARE(reg) \
	TEST_ASSERT(left->reg == right->reg, \
		    "Register " #reg \
		    " values did not match: 0x%llx, 0x%llx", \
		    left->reg, right->reg)
	REG_COMPARE(rax);
	REG_COMPARE(rbx);
	REG_COMPARE(rcx);
	REG_COMPARE(rdx);
	REG_COMPARE(rsi);
	REG_COMPARE(rdi);
	REG_COMPARE(rsp);
	REG_COMPARE(rbp);
	REG_COMPARE(r8);
	REG_COMPARE(r9);
	REG_COMPARE(r10);
	REG_COMPARE(r11);
	REG_COMPARE(r12);
	REG_COMPARE(r13);
	REG_COMPARE(r14);
	REG_COMPARE(r15);
	REG_COMPARE(rip);
	REG_COMPARE(rflags);
#undef REG_COMPARE
}

static void compare_sregs(struct kvm_sregs *left, struct kvm_sregs *right)
{
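	/*
	 * Intentionally empty.  As a hedged sketch (kept under #if 0 so the
	 * stub's behavior is unchanged), a field-by-field comparison could
	 * mirror REG_COMPARE above for the scalar control registers of
	 * struct kvm_sregs; segments and tables would need more care.
	 */
#if 0
#define SREG_COMPARE(reg) \
	TEST_ASSERT(left->reg == right->reg, \
		    "SREG " #reg \
		    " values did not match: 0x%llx, 0x%llx", \
		    left->reg, right->reg)
	SREG_COMPARE(cr0);
	SREG_COMPARE(cr3);
	SREG_COMPARE(cr4);
	SREG_COMPARE(efer);
	SREG_COMPARE(apic_base);
#undef SREG_COMPARE
#endif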
}

static void compare_vcpu_events(struct kvm_vcpu_events *left,
				struct kvm_vcpu_events *right)
{
	/* Intentionally empty, see compare_sregs() above. */
}

#define TEST_SYNC_FIELDS   (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
#define INVALID_SYNC_FIELD 0x80000000
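/*
 * Bit 31 is not a defined KVM_SYNC_X86_* field (those occupy the low bits),
 * so setting it in kvm_valid_regs or kvm_dirty_regs must make KVM_RUN fail
 * with EINVAL, as the read_invalid/set_invalid tests below assert.
 */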

/*
 * Set an exception as pending *and* injected while KVM is processing events.
 * KVM is supposed to ignore/drop pending exceptions if userspace is also
 * requesting that an exception be injected.
 */
static void *race_events_inj_pen(void *arg)
{
	struct kvm_run *run = (struct kvm_run *)arg;
	struct kvm_vcpu_events *events = &run->s.regs.events;

	WRITE_ONCE(events->exception.nr, UD_VECTOR);

	for (;;) {
		WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
		WRITE_ONCE(events->flags, 0);
		WRITE_ONCE(events->exception.injected, 1);
		WRITE_ONCE(events->exception.pending, 1);

		pthread_testcancel();
	}

	return NULL;
}

/*
 * Set an invalid exception vector while KVM is processing events.  KVM is
 * supposed to reject any vector >= 32, as well as NMIs (vector 2).
 */
static void *race_events_exc(void *arg)
{
	struct kvm_run *run = (struct kvm_run *)arg;
	struct kvm_vcpu_events *events = &run->s.regs.events;

	for (;;) {
		WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
		WRITE_ONCE(events->flags, 0);
		WRITE_ONCE(events->exception.nr, UD_VECTOR);
		WRITE_ONCE(events->exception.pending, 1);
		WRITE_ONCE(events->exception.nr, 255);

		pthread_testcancel();
	}

	return NULL;
}

/*
 * Toggle CR4.PAE while KVM is processing SREGS.  EFER.LME=1 with CR4.PAE=0
 * is an illegal combination, and KVM's MMU heavily relies on vCPU state
 * being valid.
 */
static noinline void *race_sregs_cr4(void *arg)
{
	struct kvm_run *run = (struct kvm_run *)arg;
	__u64 *cr4 = &run->s.regs.sregs.cr4;
	__u64 pae_enabled = *cr4;
	__u64 pae_disabled = *cr4 & ~X86_CR4_PAE;

	for (;;) {
		WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_SREGS);
		WRITE_ONCE(*cr4, pae_enabled);
		asm volatile(".rept 512\n\t"
			     "nop\n\t"
			     ".endr");
		WRITE_ONCE(*cr4, pae_disabled);

		pthread_testcancel();
	}

	return NULL;
}

static void race_sync_regs(struct kvm_vcpu *vcpu, void *racer)
{
	const time_t TIMEOUT = 2; /* seconds, roughly */
	struct kvm_x86_state *state;
	struct kvm_translation tr;
	struct kvm_run *run;
	pthread_t thread;
	time_t t;

	run = vcpu->run;

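	/* Run once with SREGS sync'd so run->s.regs.sregs holds live values. */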
	run->kvm_valid_regs = KVM_SYNC_X86_SREGS;
	vcpu_run(vcpu);
	run->kvm_valid_regs = 0;

	/* Save state *before* spawning the thread that mucks with vCPU state. */
	state = vcpu_save_state(vcpu);

	/*
	 * Selftests run 64-bit guests by default, so both EFER.LME and
	 * CR4.PAE should already be set in guest state.
	 */
	TEST_ASSERT((run->s.regs.sregs.cr4 & X86_CR4_PAE) &&
		    (run->s.regs.sregs.efer & EFER_LME),
		    "vCPU should be in long mode, CR4.PAE=%d, EFER.LME=%d",
		    !!(run->s.regs.sregs.cr4 & X86_CR4_PAE),
		    !!(run->s.regs.sregs.efer & EFER_LME));

	TEST_ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0);

	for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
		/*
		 * Reload known good state if the vCPU triple faults, e.g. due
		 * to the unhandled #GPs being injected.  VMX preserves state
		 * on shutdown, but SVM synthesizes an INIT as the VMCB state
		 * is architecturally undefined on triple fault.
		 */
		if (!__vcpu_run(vcpu) && run->exit_reason == KVM_EXIT_SHUTDOWN)
			vcpu_load_state(vcpu, state);

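		/*
		 * For the CR4 race, also exercise KVM's software page walker
		 * via KVM_TRANSLATE, which consults the MMU with whatever CR4
		 * value the racing thread last wrote.
		 */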
		if (racer == race_sregs_cr4) {
			tr = (struct kvm_translation) { .linear_address = 0 };
			__vcpu_ioctl(vcpu, KVM_TRANSLATE, &tr);
		}
	}

	TEST_ASSERT_EQ(pthread_cancel(thread), 0);
	TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);

	kvm_x86_state_cleanup(state);
}

KVM_ONE_VCPU_TEST(sync_regs_test, read_invalid, guest_code)
{
	struct kvm_run *run = vcpu->run;
	int rv;

	/* Request reading invalid register set from VCPU. */
	run->kvm_valid_regs = INVALID_SYNC_FIELD;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
		    rv);
	run->kvm_valid_regs = 0;

	run->kvm_valid_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_valid_regs did not cause expected KVM_RUN error: %d",
		    rv);
	run->kvm_valid_regs = 0;
}

KVM_ONE_VCPU_TEST(sync_regs_test, set_invalid, guest_code)
{
	struct kvm_run *run = vcpu->run;
	int rv;

	/* Request setting invalid register set into VCPU. */
	run->kvm_dirty_regs = INVALID_SYNC_FIELD;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
		    rv);
	run->kvm_dirty_regs = 0;

	run->kvm_dirty_regs = INVALID_SYNC_FIELD | TEST_SYNC_FIELDS;
	rv = _vcpu_run(vcpu);
	TEST_ASSERT(rv < 0 && errno == EINVAL,
		    "Invalid kvm_dirty_regs did not cause expected KVM_RUN error: %d",
		    rv);
	run->kvm_dirty_regs = 0;
}

KVM_ONE_VCPU_TEST(sync_regs_test, req_and_verify_all_valid, guest_code)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_vcpu_events events;
	struct kvm_sregs sregs;
	struct kvm_regs regs;

	/* Request and verify all valid register sets. */
	/* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	vcpu_regs_get(vcpu, &regs);
	compare_regs(&regs, &run->s.regs.regs);

	vcpu_sregs_get(vcpu, &sregs);
	compare_sregs(&sregs, &run->s.regs.sregs);

	vcpu_events_get(vcpu, &events);
	compare_vcpu_events(&events, &run->s.regs.events);
}

KVM_ONE_VCPU_TEST(sync_regs_test, set_and_verify_various, guest_code)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_vcpu_events events;
	struct kvm_sregs sregs;
	struct kvm_regs regs;

	/* Run once to get register set */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	/* Set and verify various register values. */
	run->s.regs.regs.rbx = 0xBAD1DEA;
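	/* Bit 11 of IA32_APIC_BASE is the xAPIC global enable bit. */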
	run->s.regs.sregs.apic_base = 1 << 11;
	/* TODO run->s.regs.events.XYZ = ABC; */

	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	TEST_ASSERT(run->s.regs.sregs.apic_base == 1 << 11,
		    "apic_base sync regs value incorrect 0x%llx.",
		    run->s.regs.sregs.apic_base);

	vcpu_regs_get(vcpu, &regs);
	compare_regs(&regs, &run->s.regs.regs);

	vcpu_sregs_get(vcpu, &sregs);
	compare_sregs(&sregs, &run->s.regs.sregs);

	vcpu_events_get(vcpu, &events);
	compare_vcpu_events(&events, &run->s.regs.events);
}

KVM_ONE_VCPU_TEST(sync_regs_test, clear_kvm_dirty_regs_bits, guest_code)
{
	struct kvm_run *run = vcpu->run;

	/* Clear kvm_dirty_regs bits; verify that the newly written s.regs
	 * values are overwritten with the existing guest values on return.
	 */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	run->kvm_dirty_regs = 0;
	run->s.regs.regs.rbx = 0xDEADBEEF;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
}

KVM_ONE_VCPU_TEST(sync_regs_test, clear_kvm_valid_and_dirty_regs, guest_code)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_regs regs;

	/* Run once to get register set */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	/* Clear kvm_valid_regs bits and kvm_dirty_regs bits.
	 * Verify that s.regs values are not overwritten with existing guest
	 * values and that guest values are not overwritten with kvm_sync_regs
	 * values.
	 */
	run->kvm_valid_regs = 0;
	run->kvm_dirty_regs = 0;
	run->s.regs.regs.rbx = 0xAAAA;
	vcpu_regs_get(vcpu, &regs);
	regs.rbx = 0xBAC0;
	vcpu_regs_set(vcpu, &regs);
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	vcpu_regs_get(vcpu, &regs);
	TEST_ASSERT(regs.rbx == 0xBAC0 + 1,
		    "rbx guest value incorrect 0x%llx.",
		    regs.rbx);
}

KVM_ONE_VCPU_TEST(sync_regs_test, clear_kvm_valid_regs_bits, guest_code)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_regs regs;

	/* Run once to get register set */
	run->kvm_valid_regs = TEST_SYNC_FIELDS;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	/* Clear kvm_valid_regs bits. Verify s.regs values are not overwritten
	 * with existing guest values but that guest values are overwritten
	 * with kvm_sync_regs values.
	 */
	run->kvm_valid_regs = 0;
	run->kvm_dirty_regs = TEST_SYNC_FIELDS;
	run->s.regs.regs.rbx = 0xBBBB;
	vcpu_run(vcpu);
	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
	TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB,
		    "rbx sync regs value incorrect 0x%llx.",
		    run->s.regs.regs.rbx);
	vcpu_regs_get(vcpu, &regs);
	TEST_ASSERT(regs.rbx == 0xBBBB + 1,
		    "rbx guest value incorrect 0x%llx.",
		    regs.rbx);
}

KVM_ONE_VCPU_TEST(sync_regs_test, race_cr4, guest_code)
{
	race_sync_regs(vcpu, race_sregs_cr4);
}

KVM_ONE_VCPU_TEST(sync_regs_test, race_exc, guest_code)
{
	race_sync_regs(vcpu, race_events_exc);
}

KVM_ONE_VCPU_TEST(sync_regs_test, race_inj_pen, guest_code)
{
	race_sync_regs(vcpu, race_events_inj_pen);
}

int main(int argc, char *argv[])
{
	int cap;

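	/*
	 * For KVM_CAP_SYNC_REGS, the capability check returns the bitmask of
	 * supported sync fields: require every tested field and verify that
	 * the invalid bit really is unsupported.
	 */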
	cap = kvm_check_cap(KVM_CAP_SYNC_REGS);
	TEST_REQUIRE((cap & TEST_SYNC_FIELDS) == TEST_SYNC_FIELDS);
	TEST_REQUIRE(!(cap & INVALID_SYNC_FIELD));

	return test_harness_run(argc, argv);
}