// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for SMM.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"

#include "vmx.h"
#include "svm_util.h"

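/*
 * A 64KiB SMRAM region is backed by its own memslot at GPA 16MiB.  On SMI
 * the CPU starts fetching code at SMBASE + 0x8000, which is where the
 * handler below gets copied.
 */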
#define SMRAM_SIZE 65536
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
#define SMRAM_GPA 0x1000000
#define SMRAM_STAGE 0xfe

#define STR(x) #x
#define XSTR(s) STR(s)

#define SYNC_PORT 0xe
#define DONE 0xff

/*
 * This is compiled as normal 64-bit code, but the SMI handler is executed
 * in real-address mode. To stay simple we limit ourselves to a mode
 * independent subset of asm here.
 * The SMI handler always reports back the fixed stage SMRAM_STAGE.
 */
uint8_t smi_handler[] = {
	0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
	0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
	0x0f, 0xaa,           /* rsm */
};

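/*
 * An IN on SYNC_PORT has no in-kernel device backing it, so it forces a
 * KVM_EXIT_IO to host userspace, which reads the current stage out of the
 * guest's %al (the low byte of RAX).
 */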
static inline void sync_with_host(uint64_t phase)
{
	asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
		     : "+a" (phase));
}

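/* Send an SMI to the vCPU itself via an x2APIC self-directed IPI. */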
static void self_smi(void)
{
	x2apic_write_reg(APIC_ICR,
			 APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
}

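/*
 * L2 syncs at stages 8 and 10; the host injects an SMI at each of those
 * exits, so SMRAM_STAGE is reported next and L2 is resumed via RSM.
 */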
static void l2_guest_code(void)
{
	sync_with_host(8);

	sync_with_host(10);

	vmcall();
}

static void guest_code(void *arg)
{
	#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
	struct svm_test_data *svm = arg;
	struct vmx_pages *vmx_pages = arg;

	sync_with_host(1);

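	/* Enable x2APIC mode so self_smi() can use x2APIC register writes. */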
	wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);

	sync_with_host(2);

	self_smi();

	sync_with_host(4);

	if (arg) {
		if (this_cpu_has(X86_FEATURE_SVM)) {
			generic_svm_setup(svm, l2_guest_code,
					  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		} else {
			GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
			GUEST_ASSERT(load_vmcs(vmx_pages));
			prepare_vmcs(vmx_pages, l2_guest_code,
				     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
		}

		sync_with_host(5);

		self_smi();

		sync_with_host(7);

		if (this_cpu_has(X86_FEATURE_SVM)) {
			run_guest(svm->vmcb, svm->vmcb_gpa);
			run_guest(svm->vmcb, svm->vmcb_gpa);
		} else {
			vmlaunch();
			vmresume();
		}

		/* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
		sync_with_host(12);
	}

	sync_with_host(DONE);
}

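/*
 * Mark an SMI pending from host userspace via KVM_SET_VCPU_EVENTS;
 * KVM_VCPUEVENT_VALID_SMM must be set for the smi fields to be honored.
 */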
void inject_smi(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_events events;

	vcpu_events_get(vcpu, &events);

	events.smi.pending = 1;
	events.flags |= KVM_VCPUEVENT_VALID_SMM;

	vcpu_events_set(vcpu, &events);
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;

	struct kvm_vcpu *vcpu;
	struct kvm_regs regs;
	struct kvm_vm *vm;
	struct kvm_x86_state *state;
	int stage, stage_reported;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));

	/* Create VM */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
				    SMRAM_MEMSLOT, SMRAM_PAGES, 0);
	TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
		    == SMRAM_GPA, "could not allocate guest physical addresses?");

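	/*
	 * Zero SMRAM and install the handler at SMRAM_GPA + 0x8000, the
	 * offset from SMBASE at which the CPU begins executing on SMI.
	 */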
	memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
	memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
	       sizeof(smi_handler));

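	/*
	 * Relocate SMBASE to the test's SMRAM region; KVM permits writing
	 * this MSR only when the access is host-initiated.
	 */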
	vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA);

	if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
		if (kvm_cpu_has(X86_FEATURE_SVM))
			vcpu_alloc_svm(vm, &nested_gva);
		else if (kvm_cpu_has(X86_FEATURE_VMX))
			vcpu_alloc_vmx(vm, &nested_gva);
	}

	if (!nested_gva)
		pr_info("will skip SMM test with nested virtualization\n");

	vcpu_args_set(vcpu, 1, nested_gva);

	for (stage = 1;; stage++) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		memset(&regs, 0, sizeof(regs));
		vcpu_regs_get(vcpu, &regs);

		stage_reported = regs.rax & 0xff;

		if (stage_reported == DONE)
			goto done;

		TEST_ASSERT(stage_reported == stage ||
			    stage_reported == SMRAM_STAGE,
			    "Unexpected stage: expected #%x, got #%x",
			    stage, stage_reported);

		/*
		 * Enter SMM during L2 execution and check that we correctly
		 * return from it. Do not perform save/restore while in SMM yet.
		 */
		if (stage == 8) {
			inject_smi(vcpu);
			continue;
		}

		/*
		 * Perform save/restore while the guest is in SMM that was
		 * triggered during L2 execution.
		 */
		if (stage == 10)
			inject_smi(vcpu);

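		/*
		 * Emulate migration on every stage: save the complete vCPU
		 * state, destroy the VM and restore it into a fresh one.
		 */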
		state = vcpu_save_state(vcpu);
		kvm_vm_release(vm);

		vcpu = vm_recreate_with_one_vcpu(vm);
		vcpu_load_state(vcpu, state);
		kvm_x86_state_cleanup(state);
	}

done:
	kvm_vm_free(vm);
}