// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM paravirtual feature disablement
 */
#include <asm/kvm_para.h>
#include <linux/kvm_para.h>
#include <linux/stringify.h>
#include <stdint.h>

#include "kvm_test_harness.h"
#include "apic.h"
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

/* VMCALL and VMMCALL are both 3-byte opcodes. */
#define HYPERCALL_INSN_SIZE	3

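/*
 * Set by the host before running the guest and copied in via
 * sync_global_to_guest(); tells the guest whether
 * KVM_X86_QUIRK_FIX_HYPERCALL_INSN was disabled for this run.
 */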
static bool quirk_disabled;

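/*
 * With the quirk disabled, executing the non-native hypercall instruction
 * results in a #UD in the guest instead of KVM patching the instruction.
 * Emulate a failed hypercall by stuffing RAX with -EFAULT and skipping the
 * 3-byte instruction.
 */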
static void guest_ud_handler(struct ex_regs *regs)
{
	regs->rax = -EFAULT;
	regs->rip += HYPERCALL_INSN_SIZE;
}

static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE]  = { 0x0f, 0x01, 0xc1 };
static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 };

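/*
 * hypercall_insn is the label defined in the inline asm below.  It is
 * exported so that the guest can overwrite the placeholder bytes with a real
 * hypercall instruction and later inspect what, if anything, KVM patched in.
 * The 0xcc (INT3) bytes are placeholders that are always overwritten before
 * the hypercall is attempted.
 */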
extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE];
static uint64_t do_sched_yield(uint8_t apic_id)
{
	uint64_t ret;

	asm volatile("hypercall_insn:\n\t"
		     ".byte 0xcc,0xcc,0xcc\n\t"
		     : "=a"(ret)
		     : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id)
		     : "memory");

	return ret;
}

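/*
 * Plant the non-native hypercall instruction (e.g. VMMCALL on an Intel host),
 * attempt a KVM_HC_SCHED_YIELD hypercall, and verify the outcome based on
 * whether or not the quirk is enabled.
 */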
static void guest_main(void)
{
	const uint8_t *native_hypercall_insn;
	const uint8_t *other_hypercall_insn;
	uint64_t ret;

	if (host_cpu_is_intel) {
		native_hypercall_insn = vmx_vmcall;
		other_hypercall_insn  = svm_vmmcall;
	} else if (host_cpu_is_amd) {
		native_hypercall_insn = svm_vmmcall;
		other_hypercall_insn  = vmx_vmcall;
	} else {
		GUEST_ASSERT(0);
		/* unreachable */
		return;
	}

	memcpy(hypercall_insn, other_hypercall_insn, HYPERCALL_INSN_SIZE);

	ret = do_sched_yield(GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID)));

	/*
	 * If the quirk is disabled, verify that guest_ud_handler() "returned"
	 * -EFAULT and that KVM did NOT patch the hypercall.  If the quirk is
	 * enabled, verify that the hypercall succeeded and that KVM patched in
	 * the "right" hypercall.
	 */
	if (quirk_disabled) {
		GUEST_ASSERT(ret == (uint64_t)-EFAULT);
		GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn,
			     HYPERCALL_INSN_SIZE));
	} else {
		GUEST_ASSERT(!ret);
		GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn,
			     HYPERCALL_INSN_SIZE));
	}

	GUEST_DONE();
}

KVM_ONE_VCPU_TEST_SUITE(fix_hypercall);

static void enter_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	vcpu_run(vcpu);
	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
		break;
	case UCALL_DONE:
		return;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
	default:
		TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)",
			  uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason));
	}
}

static void test_fix_hypercall(struct kvm_vcpu *vcpu, bool disable_quirk)
{
	struct kvm_vm *vm = vcpu->vm;

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);
	vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);

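	/*
	 * When the quirk is disabled, KVM is expected to inject a #UD on a
	 * mismatched hypercall instruction rather than rewriting it to the
	 * native VMCALL/VMMCALL.
	 */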
	if (disable_quirk)
		vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
			      KVM_X86_QUIRK_FIX_HYPERCALL_INSN);

	quirk_disabled = disable_quirk;
	sync_global_to_guest(vm, quirk_disabled);

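	/* Identity map the xAPIC MMIO page so the guest can read its APIC ID. */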
	virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);

	enter_guest(vcpu);
}

KVM_ONE_VCPU_TEST(fix_hypercall, enable_quirk, guest_main)
{
	test_fix_hypercall(vcpu, false);
}

KVM_ONE_VCPU_TEST(fix_hypercall, disable_quirk, guest_main)
{
	test_fix_hypercall(vcpu, true);
}

int main(int argc, char *argv[])
{
	TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);

	return test_harness_run(argc, argv);
}