/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
30
#ifndef	_VMX_CPUFUNC_H_
#define	_VMX_CPUFUNC_H_

struct vmcs;

/*
 * Section 5.2 "Conventions" from Intel Architecture Manual 2B.
 *
 *			error
 * VMsucceed		  0
 * VMFailInvalid	  1
 * VMFailValid		  2	see also VMCS VM-Instruction Error Field
 */
#define	VM_SUCCESS		0
#define	VM_FAIL_INVALID		1
#define	VM_FAIL_VALID		2
/*
 * Asm fragment appended immediately after a VMX instruction to translate
 * the RFLAGS result convention above into the %[error] operand:
 *	CF set (VMfailInvalid)	-> error = 1
 *	ZF set (VMfailValid)	-> error = 2
 *	neither (VMsucceed)	-> error = 0
 * Uses GAS numeric local labels (1:/2:/3:) so the fragment can safely be
 * expanded more than once in the same translation unit.
 */
#define	VMX_SET_ERROR_CODE \
	"	jnc 1f;"						\
	"	mov $1, %[error];"	/* CF: error = 1 */		\
	"	jmp 3f;"						\
	"1:	jnz 2f;"						\
	"	mov $2, %[error];"	/* ZF: error = 2 */		\
	"	jmp 3f;"						\
	"2:	mov $0, %[error];"					\
	"3:"
56
/* returns 0 on success and non-zero on failure */
static __inline int
vmxon(char *region)
{
	int error;
	uint64_t addr;

	/*
	 * Enter VMX root operation.  VMXON takes the 64-bit physical
	 * address of the VMXON region as a memory operand, so the
	 * physical address is stored in a local and passed by reference.
	 */
	addr = vtophys(region);
	__asm __volatile("vmxon %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [addr] "m" (*(uint64_t *)&addr)
			 : "memory");

	return (error);
}
73
/* returns 0 on success and non-zero on failure */
static __inline int
vmclear(struct vmcs *vmcs)
{
	int error;
	uint64_t addr;

	/*
	 * VMCLEAR takes the 64-bit physical address of the VMCS as a
	 * memory operand; after it succeeds the VMCS is no longer
	 * current on this cpu.
	 */
	addr = vtophys(vmcs);
	__asm __volatile("vmclear %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [addr] "m" (*(uint64_t *)&addr)
			 : "memory");
	return (error);
}
89
/* Leave VMX operation (no error reporting; VMXOFF result is ignored). */
static __inline void
vmxoff(void)
{

	__asm __volatile("vmxoff");
}
96
/*
 * Store the current-VMCS pointer into *addr (VMPTRST).
 *
 * NOTE(review): the instruction writes through the operand, so strictly
 * speaking *addr is an output ("=m"); declaring it as an input here relies
 * on the "memory" clobber to keep the compiler from assuming *addr is
 * unchanged — confirm before tightening the constraint.
 */
static __inline void
vmptrst(uint64_t *addr)
{

	__asm __volatile("vmptrst %[addr]" :: [addr]"m" (*addr) : "memory");
}
103
/*
 * Make 'vmcs' the current and active VMCS on this cpu (VMPTRLD).
 * Takes the 64-bit physical address of the VMCS as a memory operand.
 * Returns 0 on success and non-zero on failure.
 */
static __inline int
vmptrld(struct vmcs *vmcs)
{
	int error;
	uint64_t addr;

	addr = vtophys(vmcs);
	__asm __volatile("vmptrld %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [addr] "m" (*(uint64_t *)&addr)
			 : "memory");
	return (error);
}
118
/*
 * Write 'val' into field 'reg' of the current VMCS (VMWRITE).
 * Returns 0 on success and non-zero on failure (e.g. no current VMCS,
 * per the error convention encoded by VMX_SET_ERROR_CODE).
 */
static __inline int
vmwrite(uint64_t reg, uint64_t val)
{
	int error;

	__asm __volatile("vmwrite %[val], %[reg];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [val] "r" (val), [reg] "r" (reg)
			 : "memory");

	return (error);
}
132
133static __inline int
134vmread(uint64_t r, uint64_t *addr)
135{
136	int error;
137
138	__asm __volatile("vmread %[r], %[addr];"
139			 VMX_SET_ERROR_CODE
140			 : [error] "=r" (error)
141			 : [r] "r" (r), [addr] "m" (*addr)
142			 : "memory");
143
144	return (error);
145}
146
/*
 * Clear 'vmcs' and leave the critical section.
 *
 * Deliberately asymmetric: VMPTRLD() calls critical_enter() before making
 * a VMCS current, and VMCLEAR() calls critical_exit() after clearing it,
 * so the pair brackets the whole time a VMCS is current on this cpu.
 * Panics if the underlying vmclear fails.
 */
static void __inline
VMCLEAR(struct vmcs *vmcs)
{
	int err;

	err = vmclear(vmcs);
	if (err != 0)
		panic("%s: vmclear(%p) error %d", __func__, vmcs, err);

	critical_exit();
}
158
/*
 * Enter a critical section and make 'vmcs' current.
 *
 * The critical section is exited by the matching VMCLEAR() call; see the
 * comment there.  Panics if the underlying vmptrld fails.
 */
static void __inline
VMPTRLD(struct vmcs *vmcs)
{
	int err;

	critical_enter();

	err = vmptrld(vmcs);
	if (err != 0)
		panic("%s: vmptrld(%p) error %d", __func__, vmcs, err);
}
170
/* INVVPID invalidation types (value passed as the 'type' register operand). */
#define	INVVPID_TYPE_ADDRESS		0UL
#define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
#define	INVVPID_TYPE_ALL_CONTEXTS	2UL

/* 16-byte INVVPID descriptor; layout is fixed by hardware (see CTASSERT). */
struct invvpid_desc {
	uint16_t	vpid;
	uint16_t	_res1;		/* reserved, must be zero */
	uint32_t	_res2;		/* reserved, must be zero */
	uint64_t	linear_addr;
};
CTASSERT(sizeof(struct invvpid_desc) == 16);
182
/*
 * Invalidate VPID-tagged TLB mappings (INVVPID).
 *
 * 'desc' is passed by value so the "m" constraint refers to the on-stack
 * copy, which is what the instruction's memory operand requires.  A
 * failure indicates a programming error, so it panics rather than
 * returning the error code.
 */
static void __inline
invvpid(uint64_t type, struct invvpid_desc desc)
{
	int error;

	__asm __volatile("invvpid %[desc], %[type];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [desc] "m" (desc), [type] "r" (type)
			 : "memory");

	if (error)
		panic("invvpid error %d", error);
}
197
/* INVEPT invalidation types (value passed as the 'type' register operand). */
#define	INVEPT_TYPE_SINGLE_CONTEXT	1UL
#define	INVEPT_TYPE_ALL_CONTEXTS	2UL
/* 16-byte INVEPT descriptor; layout is fixed by hardware (see CTASSERT). */
struct invept_desc {
	uint64_t	eptp;
	uint64_t	_res;		/* reserved, must be zero */
};
CTASSERT(sizeof(struct invept_desc) == 16);
205
/*
 * Invalidate EPT-tagged mappings (INVEPT).
 *
 * 'desc' is passed by value so the "m" constraint refers to the on-stack
 * copy, which is what the instruction's memory operand requires.  A
 * failure indicates a programming error, so it panics rather than
 * returning the error code.
 */
static void __inline
invept(uint64_t type, struct invept_desc desc)
{
	int error;

	__asm __volatile("invept %[desc], %[type];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [desc] "m" (desc), [type] "r" (type)
			 : "memory");

	if (error)
		panic("invept error %d", error);
}
#endif	/* _VMX_CPUFUNC_H_ */
221