// SPDX-License-Identifier: GPL-2.0-only
/*
 * vmx_tsc_adjust_test
 *
 * Copyright (C) 2018, Google LLC.
 *
 * IA32_TSC_ADJUST test
 *
 * According to the SDM, "if an execution of WRMSR to the
 * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
 * the logical processor also adds (or subtracts) value X from the
 * IA32_TSC_ADJUST MSR."
 *
 * Note that when L1 doesn't intercept writes to IA32_TSC, a
 * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
 * value.
 *
 * This test verifies that this unusual case is handled correctly.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"

#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

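/*
 * The guest winds the TSC back in steps of TSC_ADJUST_VALUE (2^32 cycles),
 * and L2 runs with a TSC offset of TSC_OFFSET_VALUE (-2^48) relative to L1.
 */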
#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE -(1ll << 48)

enum {
	PORT_ABORT = 0x1000,
	PORT_REPORT,
	PORT_DONE,
};

enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,

	NUM_VMX_PAGES,
};

/* The virtual machine object. */
static struct kvm_vm *vm;

static void check_ia32_tsc_adjust(int64_t max)
{
	int64_t adjust;

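	/*
	 * Snapshot IA32_TSC_ADJUST, report it to the host via GUEST_SYNC(),
	 * and verify it has not risen above the expected (negative) bound.
	 */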
	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	GUEST_SYNC(adjust);
	GUEST_ASSERT(adjust <= max);
}

static void l2_guest_code(void)
{
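	/*
	 * L2 runs with a TSC offset of TSC_OFFSET_VALUE, i.e. L2's rdtsc()
	 * returns L1's TSC plus that (negative) offset, so subtracting the
	 * offset recovers L1's current TSC value.
	 */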
	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;

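	/*
	 * L1 doesn't intercept this WRMSR, so it lands on L1's TSC rather
	 * than L2's perceived TSC, and must therefore pull L1's
	 * IA32_TSC_ADJUST down by at least another TSC_ADJUST_VALUE.
	 */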
	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}

static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;
	uintptr_t save_cr3;

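	/*
	 * The test expects the TSC to still be below TSC_ADJUST_VALUE here.
	 * Wind L1's TSC back by TSC_ADJUST_VALUE; per the SDM rule quoted
	 * above, IA32_TSC_ADJUST must drop by at least that much.
	 */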
	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
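	/*
	 * Use MSR bitmaps so that L2's WRMSR(IA32_TSC) is not intercepted
	 * (the bitmap page from vcpu_alloc_vmx() is assumed to start out
	 * zeroed), and give L2 a large negative TSC offset.
	 */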
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);

	/* Jump into L2.  First, test failure to load guest CR3.  */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
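	/* A failed VM-entry should leave L1's IA32_TSC_ADJUST within the same bound. */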
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
	vmwrite(GUEST_CR3, save_cr3);

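	/* Now launch L2 for real and wait for its vmcall. */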
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	GUEST_DONE();
}

static void report(int64_t val)
{
	pr_info("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
		val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva;
	struct kvm_vcpu *vcpu;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

	vm = vm_create_with_one_vcpu(&vcpu, (void *) l1_guest_code);

	/* Allocate VMX pages and shared descriptors (vmx_pages). */
	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vcpu, 1, vmx_pages_gva);

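	/*
	 * Run the vCPU until the guest signals completion, printing each
	 * IA32_TSC_ADJUST value it reports via GUEST_SYNC().
	 */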
	for (;;) {
		struct ucall uc;

		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			report(uc.args[1]);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	kvm_vm_free(vm);
	return 0;
}