/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_VMX_CPUFUNC_H_
#define	_VMX_CPUFUNC_H_

struct vmcs;

/*
 * Section 5.2 "Conventions" from Intel Architecture Manual 2B.
 *
 *			error
 * VMsucceed		  0
 * VMFailInvalid	  1
 * VMFailValid		  2	see also VMCS VM-Instruction Error Field
 */
#define	VM_SUCCESS		0
#define	VM_FAIL_INVALID		1
#define	VM_FAIL_VALID		2
#define	VMX_SET_ERROR_CODE \
	"	jnc 1f;"						\
	"	mov $1, %[error];"	/* CF: error = 1 */		\
	"	jmp 3f;"						\
	"1:	jnz 2f;"						\
	"	mov $2, %[error];"	/* ZF: error = 2 */		\
	"	jmp 3f;"						\
	"2:	mov $0, %[error];"					\
	"3:"

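/*
 * Note: the caller is expected to hand vmxon() a 4KB-aligned VMXON region
 * whose first 32 bits hold the VMX revision identifier (from IA32_VMX_BASIC),
 * and to have set CR4.VMXE beforehand; otherwise the instruction faults or
 * fails (see SDM Vol. 3, "Enabling and Entering VMX Operation").
 */
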
/* returns 0 on success and non-zero on failure */
static __inline int
vmxon(char *region)
{
	int error;
	uint64_t addr;

	addr = vtophys(region);
	__asm __volatile("vmxon %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [addr] "m" (*(uint64_t *)&addr)
			 : "memory");

	return (error);
}

/* returns 0 on success and non-zero on failure */
static __inline int
vmclear(struct vmcs *vmcs)
{
	int error;
	uint64_t addr;

	addr = vtophys(vmcs);
	__asm __volatile("vmclear %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [addr] "m" (*(uint64_t *)&addr)
			 : "memory");
	return (error);
}

static __inline void
vmxoff(void)
{

	__asm __volatile("vmxoff");
}

static __inline void
vmptrst(uint64_t *addr)
{

	/* vmptrst stores the current-VMCS pointer, so *addr is an output. */
	__asm __volatile("vmptrst %[addr]" : [addr] "=m" (*addr) : : "memory");
}

static __inline int
vmptrld(struct vmcs *vmcs)
{
	int error;
	uint64_t addr;

	addr = vtophys(vmcs);
	__asm __volatile("vmptrld %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [addr] "m" (*(uint64_t *)&addr)
			 : "memory");
	return (error);
}

static __inline int
vmwrite(uint64_t reg, uint64_t val)
{
	int error;

	__asm __volatile("vmwrite %[val], %[reg];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [val] "r" (val), [reg] "r" (reg)
			 : "memory");

	return (error);
}

static __inline int
vmread(uint64_t r, uint64_t *addr)
{
	int error;

	/* vmread writes the field value to *addr, so *addr is an output. */
	__asm __volatile("vmread %[r], %[addr];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error), [addr] "=m" (*addr)
			 : [r] "r" (r)
			 : "memory");

	return (error);
}
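
/*
 * Illustrative sketch (not part of this header): distinguishing the three
 * outcomes of a VMX instruction.  A VM_FAIL_VALID result means the current
 * VMCS holds details in the VM-instruction error field; the encoding 0x4400
 * used below is taken from the SDM and is not defined here.
 *
 *	uint64_t insterr;
 *
 *	switch (vmptrld(vmcs)) {
 *	case VM_SUCCESS:
 *		break;
 *	case VM_FAIL_INVALID:
 *		panic("vmptrld: invalid VMCS pointer %p", vmcs);
 *	case VM_FAIL_VALID:
 *		vmread(0x4400, &insterr);
 *		panic("vmptrld: VM-instruction error %lu", insterr);
 *	}
 */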

static __inline void
VMCLEAR(struct vmcs *vmcs)
{
	int err;

	err = vmclear(vmcs);
	if (err != 0)
		panic("%s: vmclear(%p) error %d", __func__, vmcs, err);

	critical_exit();
}

static __inline void
VMPTRLD(struct vmcs *vmcs)
{
	int err;

	critical_enter();

	err = vmptrld(vmcs);
	if (err != 0)
		panic("%s: vmptrld(%p) error %d", __func__, vmcs, err);
}
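
/*
 * Typical usage sketch (illustrative only): VMPTRLD() makes 'vmcs' current
 * on this cpu and enters a critical section so the thread cannot migrate or
 * be preempted while the VMCS is active; VMCLEAR() flushes the VMCS back to
 * memory and exits that critical section.  VMCS_GUEST_RIP below is a
 * placeholder field encoding, not defined in this header.
 *
 *	VMPTRLD(vmcs);
 *	error = vmwrite(VMCS_GUEST_RIP, rip);
 *	VMCLEAR(vmcs);
 */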

#define	INVVPID_TYPE_ADDRESS		0UL
#define	INVVPID_TYPE_SINGLE_CONTEXT	1UL
#define	INVVPID_TYPE_ALL_CONTEXTS	2UL

struct invvpid_desc {
	uint16_t	vpid;
	uint16_t	_res1;
	uint32_t	_res2;
	uint64_t	linear_addr;
};
CTASSERT(sizeof(struct invvpid_desc) == 16);

static __inline void
invvpid(uint64_t type, struct invvpid_desc desc)
{
	int error;

	__asm __volatile("invvpid %[desc], %[type];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [desc] "m" (desc), [type] "r" (type)
			 : "memory");

	if (error)
		panic("invvpid error %d", error);
}
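
/*
 * Sketch (illustrative only): single-context invalidation drops all cached
 * guest-linear translations tagged with 'vpid'; the reserved fields of the
 * descriptor must be zero.
 *
 *	struct invvpid_desc desc;
 *
 *	bzero(&desc, sizeof(desc));
 *	desc.vpid = vpid;
 *	invvpid(INVVPID_TYPE_SINGLE_CONTEXT, desc);
 */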

#define	INVEPT_TYPE_SINGLE_CONTEXT	1UL
#define	INVEPT_TYPE_ALL_CONTEXTS	2UL
struct invept_desc {
	uint64_t	eptp;
	uint64_t	_res;
};
CTASSERT(sizeof(struct invept_desc) == 16);

static __inline void
invept(uint64_t type, struct invept_desc desc)
{
	int error;

	__asm __volatile("invept %[desc], %[type];"
			 VMX_SET_ERROR_CODE
			 : [error] "=r" (error)
			 : [desc] "m" (desc), [type] "r" (type)
			 : "memory");

	if (error)
		panic("invept error %d", error);
}
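
/*
 * Sketch (illustrative only): single-context invalidation flushes cached
 * guest-physical and combined mappings derived from the EPT hierarchy
 * identified by 'eptp' (the value programmed into the VMCS EPT pointer);
 * the reserved field must be zero.
 *
 *	struct invept_desc desc;
 *
 *	bzero(&desc, sizeof(desc));
 *	desc.eptp = eptp;
 *	invept(INVEPT_TYPE_SINGLE_CONTEXT, desc);
 */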
#endif