/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/x86_64/svm_utils.h
 * Header for nested SVM testing
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */

#ifndef SELFTEST_KVM_SVM_UTILS_H
#define SELFTEST_KVM_SVM_UTILS_H

#include <asm/svm.h>

#include <stdint.h>
#include "svm.h"
#include "processor.h"

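/*
 * Tracking structure for the nested SVM state of one vCPU.  Each region
 * below is the same allocation seen through three addresses: the guest
 * virtual address (gva) used by guest code, the host virtual address
 * (hva) used by the test on the host side, and the guest physical
 * address (gpa) handed to hardware (e.g. the VMCB address for VMRUN).
 */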
struct svm_test_data {
	/* VMCB */
	struct vmcb *vmcb; /* gva */
	void *vmcb_hva;
	uint64_t vmcb_gpa;

	/* host state-save area */
	struct vmcb_save_area *save_area; /* gva */
	void *save_area_hva;
	uint64_t save_area_gpa;

	/* MSR-Bitmap */
	void *msr; /* gva */
	void *msr_hva;
	uint64_t msr_gpa;
};

static inline void vmmcall(void)
{
	/*
	 * Stuff RAX and RCX with "safe" values to make sure L0 doesn't handle
	 * it as a valid hypercall (e.g. Hyper-V L2 TLB flush) as the intended
	 * use of this function is to exit to L1 from L2.  Clobber all other
	 * GPRs as L1 doesn't correctly preserve them during vmexits.
	 */
	__asm__ __volatile__("push %%rbp; vmmcall; pop %%rbp"
			     : : "a"(0xdeadbeef), "c"(0xbeefdead)
			     : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
			       "r10", "r11", "r12", "r13", "r14", "r15");
}
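
/*
 * Illustrative sketch (the l2_guest_code name is hypothetical, not part
 * of this header): an L2 guest typically ends its work with vmmcall()
 * so that L1 regains control via a #VMEXIT with exit code
 * SVM_EXIT_VMMCALL.
 *
 *	static void l2_guest_code(void)
 *	{
 *		vmmcall();	// exits to L1 via #VMEXIT(VMMCALL)
 *	}
 */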

#define stgi()			\
	__asm__ __volatile__(	\
		"stgi\n"	\
		)

#define clgi()			\
	__asm__ __volatile__(	\
		"clgi\n"	\
		)
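
/*
 * STGI sets and CLGI clears the Global Interrupt Flag (GIF); while GIF
 * is clear, interrupts (including NMI and SMI) are held pending.  A
 * common pattern (sketch only, assuming the caller does its own
 * world-switch) is to bracket a manual VMRUN:
 *
 *	clgi();
 *	// vmload / vmrun / vmsave ...
 *	stgi();
 */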

struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
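
/*
 * Sketch of the usual call flow; everything not declared above
 * (vcpu_args_set(), l2_guest_code, the stack array, GUEST_ASSERT()) is
 * illustrative selftest boilerplate, not this header's API:
 *
 * Host side:
 *	vm_vaddr_t svm_gva;
 *	vcpu_alloc_svm(vm, &svm_gva);
 *	vcpu_args_set(vcpu, 1, svm_gva);
 *
 * Guest (L1) side, receiving svm_gva as its argument:
 *	generic_svm_setup(svm, l2_guest_code,
 *			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 *	run_guest(svm->vmcb, svm->vmcb_gpa);
 *	GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
 */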

/* Open /dev/sev and return the fd; exits the test if the device is unavailable. */
int open_sev_dev_path_or_exit(void);

#endif /* SELFTEST_KVM_SVM_UTILS_H */