// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Google LLC.
 *
 * Tests for adjusting the KVM clock from userspace
 */
#include <asm/kvm_para.h>
#include <asm/pvclock.h>
#include <asm/pvclock-abi.h>
#include <stdint.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

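/*
 * Each test case programs the KVM clock with a base value and, optionally,
 * an offset (in nanoseconds) to apply on top of the host's CLOCK_REALTIME
 * when pairing kvm-clock with the realtime clock.
 */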
struct test_case {
	uint64_t kvmclock_base;
	int64_t realtime_offset;
};

static struct test_case test_cases[] = {
	{ .kvmclock_base = 0 },
	{ .kvmclock_base = 180 * NSEC_PER_SEC },
	{ .kvmclock_base = 0, .realtime_offset = -180 * NSEC_PER_SEC },
	{ .kvmclock_base = 0, .realtime_offset = 180 * NSEC_PER_SEC },
};

#define GUEST_SYNC_CLOCK(__stage, __val)			\
		GUEST_SYNC_ARGS(__stage, __val, 0, 0, 0)

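/*
 * Guest code: enable kvm-clock by writing the guest-physical address of the
 * pvclock_vcpu_time_info structure to MSR_KVM_SYSTEM_TIME_NEW, then report
 * one pvclock reading back to the host for each test case.
 */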
static void guest_main(vm_paddr_t pvti_pa, struct pvclock_vcpu_time_info *pvti)
{
	int i;

	wrmsr(MSR_KVM_SYSTEM_TIME_NEW, pvti_pa | KVM_MSR_ENABLED);
	for (i = 0; i < ARRAY_SIZE(test_cases); i++)
		GUEST_SYNC_CLOCK(i, __pvclock_read_cycles(pvti, rdtsc()));
}

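/*
 * KVM_GET_CLOCK is expected to report both KVM_CLOCK_REALTIME and
 * KVM_CLOCK_HOST_TSC, since the test requires a TSC-based host clocksource.
 */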
#define EXPECTED_FLAGS (KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC)

static inline void assert_flags(struct kvm_clock_data *data)
{
	TEST_ASSERT((data->flags & EXPECTED_FLAGS) == EXPECTED_FLAGS,
		    "unexpected clock data flags: %x (want set: %x)",
		    data->flags, EXPECTED_FLAGS);
}

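/*
 * The kvm-clock value observed by the guest must fall within the window
 * bounded by the host's KVM_GET_CLOCK readings taken before and after the
 * guest's read.
 */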
static void handle_sync(struct ucall *uc, struct kvm_clock_data *start,
			struct kvm_clock_data *end)
{
	uint64_t obs, exp_lo, exp_hi;

	obs = uc->args[2];
	exp_lo = start->clock;
	exp_hi = end->clock;

	assert_flags(start);
	assert_flags(end);

	TEST_ASSERT(exp_lo <= obs && obs <= exp_hi,
		    "unexpected kvm-clock value: %"PRIu64" expected range: [%"PRIu64", %"PRIu64"]",
		    obs, exp_lo, exp_hi);

	pr_info("kvm-clock value: %"PRIu64" expected range [%"PRIu64", %"PRIu64"]\n",
		obs, exp_lo, exp_hi);
}

static void handle_abort(struct ucall *uc)
{
	REPORT_GUEST_ASSERT(*uc);
}

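/*
 * Program the KVM clock via KVM_SET_CLOCK. If the test case requests a
 * realtime offset, pair kvm-clock with the host's CLOCK_REALTIME shifted by
 * that offset and set KVM_CLOCK_REALTIME so KVM consumes the provided value.
 */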
static void setup_clock(struct kvm_vm *vm, struct test_case *test_case)
{
	struct kvm_clock_data data;

	memset(&data, 0, sizeof(data));

	data.clock = test_case->kvmclock_base;
	if (test_case->realtime_offset) {
		struct timespec ts;
		int r;

		data.flags |= KVM_CLOCK_REALTIME;
		do {
			r = clock_gettime(CLOCK_REALTIME, &ts);
			if (!r)
				break;
		} while (errno == EINTR);

		TEST_ASSERT(!r, "clock_gettime() failed: %d", r);

		data.realtime = ts.tv_sec * NSEC_PER_SEC;
		data.realtime += ts.tv_nsec;
		data.realtime += test_case->realtime_offset;
	}

	vm_ioctl(vm, KVM_SET_CLOCK, &data);
}

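/*
 * For each test case: program the clock, sample it with KVM_GET_CLOCK before
 * and after running the vCPU, and check that the guest's reading lies within
 * that window.
 */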
static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_clock_data start, end;
	struct kvm_vm *vm = vcpu->vm;
	struct ucall uc;
	int i;

	for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
		setup_clock(vm, &test_cases[i]);

		vm_ioctl(vm, KVM_GET_CLOCK, &start);

		vcpu_run(vcpu);
		vm_ioctl(vm, KVM_GET_CLOCK, &end);

		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			handle_sync(&uc, &start, &end);
			break;
		case UCALL_ABORT:
			handle_abort(&uc);
			return;
		default:
			TEST_ASSERT(0, "unhandled ucall: %ld", uc.cmd);
		}
	}
}

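/*
 * The test requires KVM_CAP_ADJUST_CLOCK to support KVM_CLOCK_REALTIME and a
 * TSC-based host clocksource, and hands the guest a page to hold the pvclock
 * time info structure.
 */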
int main(void)
{
	struct kvm_vcpu *vcpu;
	vm_vaddr_t pvti_gva;
	vm_paddr_t pvti_gpa;
	struct kvm_vm *vm;
	int flags;

	flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK);
	TEST_REQUIRE(flags & KVM_CLOCK_REALTIME);

	TEST_REQUIRE(sys_clocksource_is_based_on_tsc());

	vm = vm_create_with_one_vcpu(&vcpu, guest_main);

	pvti_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000);
	pvti_gpa = addr_gva2gpa(vm, pvti_gva);
	vcpu_args_set(vcpu, 2, pvti_gpa, pvti_gva);

	enter_guest(vcpu);
	kvm_vm_free(vm);
}