/*
 * Copyright (c) 2006-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/pexpert.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/mp.h>
#include <i386/proc_reg.h>
#include <i386/vmx.h>
#include <i386/vmx/vmx_asm.h>
#include <i386/vmx/vmx_shims.h>
#include <i386/vmx/vmx_cpu.h>
#include <mach/mach_host.h>             /* for host_info() */

#define VMX_KPRINTF(x...) /* kprintf("vmx: " x) */

int vmx_use_count = 0;
boolean_t vmx_exclusive = FALSE;
decl_simple_lock_data(static,vmx_use_count_lock)

/* -----------------------------------------------------------------------------
   vmx_is_available()
	Is the VMX facility available on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_available(void)
{
	return (0 != (cpuid_features() & CPUID_FEATURE_VMX));
}

/* -----------------------------------------------------------------------------
   vmxon_is_enabled()
	Is the VMXON instruction enabled on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmxon_is_enabled(void)
{
	return (vmx_is_available() &&
		(rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON));
}

/* -----------------------------------------------------------------------------
   vmx_is_cr0_valid()
	Is CR0 valid for executing VMXON on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr0_valid(vmx_specs_t *specs)
{
	uintptr_t cr0 = get_cr0();
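	/*
	 * CR0 is acceptable for VMXON when every bit required to be 1
	 * (set in cr0_fixed_0) is set and every bit required to be 0
	 * (clear in cr0_fixed_1) is clear.
	 */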
	return (0 == ((~cr0 & specs->cr0_fixed_0)|(cr0 & ~specs->cr0_fixed_1)));
}

/* -----------------------------------------------------------------------------
   vmx_is_cr4_valid()
	Is CR4 valid for executing VMXON on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr4_valid(vmx_specs_t *specs)
{
	uintptr_t cr4 = get_cr4();
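	/* Same fixed-bits test as CR0, using the CR4 fixed-bit values. */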
	return (0 == ((~cr4 & specs->cr4_fixed_0)|(cr4 & ~specs->cr4_fixed_1)));
}

static void
vmx_init(void)
{
	uint64_t msr_image;

	if (!vmx_is_available())
		return;

	/*
	 * We don't count on EFI initializing MSR_IA32_FEATURE_CONTROL
	 * and turning VMXON on and locking the bit, so we do that now.
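	 * Once the lock bit is set the MSR cannot be rewritten until the
	 * next reset, which is why we write it only while the bit is clear.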
	 */
	msr_image = rdmsr64(MSR_IA32_FEATURE_CONTROL);
	if (0 == ((msr_image & MSR_IA32_FEATCTL_LOCK)))
		wrmsr64(MSR_IA32_FEATURE_CONTROL,
			(msr_image |
			 MSR_IA32_FEATCTL_VMXON |
			 MSR_IA32_FEATCTL_LOCK));
}

/* -----------------------------------------------------------------------------
   vmx_get_specs()
	Obtain VMX facility specifications for this CPU and
	enter them into the vmx_specs_t structure. If VMX is not available or
	disabled on this CPU, set vmx_present to false and return, leaving
	the remainder of the vmx_specs_t uninitialized.
   -------------------------------------------------------------------------- */
void
vmx_get_specs()
{
	vmx_specs_t *specs = &current_cpu_datap()->cpu_vmx.specs;
	uint64_t msr_image;

	/* this is called once for every CPU, but the lock doesn't care :-) */
	simple_lock_init(&vmx_use_count_lock, 0);

	vmx_init();

	/*
	 * if we have read the data on boot, we won't read it
	 *  again on wakeup; otherwise *bad* things will happen
	 */
	if (specs->initialized)
		return;
	else
		specs->initialized = TRUE;

	/* See if VMX is present, return if it is not */
	specs->vmx_present = vmx_is_available() && vmxon_is_enabled();
	if (!specs->vmx_present)
		return;

#define bitfield(x,f)	((x >> f##_BIT) & f##_MASK)
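	/*
	 * e.g. bitfield(msr_image, VMX_VCR_VMCS_SIZE) expands to
	 *      ((msr_image >> VMX_VCR_VMCS_SIZE_BIT) & VMX_VCR_VMCS_SIZE_MASK)
	 */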
	/* Obtain and decode VMX general capabilities */
	msr_image = rdmsr64(MSR_IA32_VMX_BASIC);
	specs->vmcs_id       = (uint32_t)(msr_image & VMX_VCR_VMCS_REV_ID);
	specs->vmcs_mem_type = bitfield(msr_image, VMX_VCR_VMCS_MEM_TYPE) != 0;
	specs->vmcs_size     = bitfield(msr_image, VMX_VCR_VMCS_SIZE);

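	/*
	 * Each of the control capability MSRs below reports the allowed
	 * 0-settings of its controls in the low 32 bits and the allowed
	 * 1-settings in the high 32 bits.
	 */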
	/* Obtain allowed settings for pin-based execution controls */
	msr_image = rdmsr64(MSR_IA32_VMX_PINBASED_CTLS);
	specs->pin_exctls_0 = (uint32_t)(msr_image & 0xFFFFFFFF);
	specs->pin_exctls_1 = (uint32_t)(msr_image >> 32);

	/* Obtain allowed settings for processor-based execution controls */
	msr_image = rdmsr64(MSR_IA32_VMX_PROCBASED_CTLS);
	specs->proc_exctls_0 = (uint32_t)(msr_image & 0xFFFFFFFF);
	specs->proc_exctls_1 = (uint32_t)(msr_image >> 32);

	/* Obtain allowed settings for VM-exit controls */
	msr_image = rdmsr64(MSR_IA32_VMX_EXIT_CTLS);
	specs->exit_ctls_0 = (uint32_t)(msr_image & 0xFFFFFFFF);
	specs->exit_ctls_1 = (uint32_t)(msr_image >> 32);

	/* Obtain allowed settings for VM-entry controls */
	msr_image = rdmsr64(MSR_IA32_VMX_ENTRY_CTLS);
	specs->enter_ctls_0 = (uint32_t)(msr_image & 0xFFFFFFFF);
	specs->enter_ctls_1 = (uint32_t)(msr_image >> 32);

	/* Obtain and decode miscellaneous capabilities */
	msr_image = rdmsr64(MSR_IA32_VMX_MISC);
	specs->act_halt     = bitfield(msr_image, VMX_VCR_ACT_HLT) != 0;
	specs->act_shutdown = bitfield(msr_image, VMX_VCR_ACT_SHUTDOWN) != 0;
	specs->act_SIPI     = bitfield(msr_image, VMX_VCR_ACT_SIPI) != 0;
	specs->act_CSTATE   = bitfield(msr_image, VMX_VCR_ACT_CSTATE) != 0;
	specs->cr3_targs    = bitfield(msr_image, VMX_VCR_CR3_TARGS);
	specs->max_msrs     = (uint32_t)(512 * (1 + bitfield(msr_image, VMX_VCR_MAX_MSRS)));
	specs->mseg_id      = (uint32_t)bitfield(msr_image, VMX_VCR_MSEG_ID);

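	/*
	 * The fixed-bit values read below are what vmx_is_cr0_valid() and
	 * vmx_is_cr4_valid() check against before VMXON is attempted.
	 */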
	/* Obtain VMX-fixed bits in CR0 */
	specs->cr0_fixed_0 = (uint32_t)rdmsr64(MSR_IA32_VMX_CR0_FIXED0) & 0xFFFFFFFF;
	specs->cr0_fixed_1 = (uint32_t)rdmsr64(MSR_IA32_VMX_CR0_FIXED1) & 0xFFFFFFFF;

	/* Obtain VMX-fixed bits in CR4 */
	specs->cr4_fixed_0 = (uint32_t)rdmsr64(MSR_IA32_VMX_CR4_FIXED0) & 0xFFFFFFFF;
	specs->cr4_fixed_1 = (uint32_t)rdmsr64(MSR_IA32_VMX_CR4_FIXED1) & 0xFFFFFFFF;
}

/* -----------------------------------------------------------------------------
   vmx_on()
	Enter VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_on(void *arg __unused)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	addr64_t vmxon_region_paddr;
	int result;

	vmx_init();

	assert(cpu->specs.vmx_present);

	if (NULL == cpu->vmxon_region)
		panic("vmx_on: VMXON region not allocated");
	vmxon_region_paddr = vmx_paddr(cpu->vmxon_region);

	/*
	 * Enable VMX operation.
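	 * CR4.VMXE must be set before VMXON can be executed.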
	 */
	set_cr4(get_cr4() | CR4_VMXE);

	assert(vmx_is_cr0_valid(&cpu->specs));
	assert(vmx_is_cr4_valid(&cpu->specs));

	result = __vmxon(vmxon_region_paddr);

	if (result != VMX_SUCCEED) {
		panic("vmx_on: unexpected return %d from __vmxon()", result);
	}
}

/* -----------------------------------------------------------------------------
   vmx_off()
	Leave VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_off(void *arg __unused)
{
	int result;

	/* Tell the CPU to release the VMXON region */
	result = __vmxoff();

	if (result != VMX_SUCCEED) {
		panic("vmx_off: unexpected return %d from __vmxoff()", result);
	}

	set_cr4(get_cr4() & ~CR4_VMXE);
}

/* -----------------------------------------------------------------------------
   vmx_allocate_vmxon_regions()
	Allocate, clear and init VMXON regions for all CPUs.
   -------------------------------------------------------------------------- */
static void
vmx_allocate_vmxon_regions(void)
{
	unsigned int i;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		/* The size is defined to be always <= 4K, so we just allocate a page */
		cpu->vmxon_region = vmx_pcalloc();
		if (NULL == cpu->vmxon_region)
			panic("vmx_allocate_vmxon_regions: unable to allocate VMXON region");
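		/*
		 * VMXON requires the VMCS revision identifier in the first
		 * 32 bits of the VMXON region.
		 */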
		*(uint32_t*)(cpu->vmxon_region) = cpu->specs.vmcs_id;
	}
}

/* -----------------------------------------------------------------------------
   vmx_free_vmxon_regions()
	Free VMXON regions for all CPUs.
   -------------------------------------------------------------------------- */
static void
vmx_free_vmxon_regions(void)
{
	unsigned int i;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		vmx_pfree(cpu->vmxon_region);
		cpu->vmxon_region = NULL;
	}
}

/* -----------------------------------------------------------------------------
   vmx_globally_available()
	Checks whether VT can be turned on for all CPUs.
   -------------------------------------------------------------------------- */
static boolean_t
vmx_globally_available(void)
{
	unsigned int i;

	boolean_t available = TRUE;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		if (!cpu->specs.vmx_present)
			available = FALSE;
	}
	VMX_KPRINTF("VMX available: %d\n", available);
	return available;
}


/* -----------------------------------------------------------------------------
   host_vmxon()
	Turn on VT operation on all CPUs.
   -------------------------------------------------------------------------- */
int
host_vmxon(boolean_t exclusive)
{
	int error;
	boolean_t do_it = FALSE; /* do the cpu sync outside of the area holding the lock */

	if (!vmx_globally_available())
		return VMX_UNSUPPORTED;

	simple_lock(&vmx_use_count_lock);

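	/*
	 * While an exclusive holder has VMX, further host_vmxon() callers
	 * are refused with VMX_INUSE; otherwise bump the use count and
	 * record whether this caller wants exclusive use.
	 */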
	if (vmx_exclusive) {
		error = VMX_INUSE;
	} else {
		vmx_use_count++;
		if (vmx_use_count == 1) /* was turned off before */
			do_it = TRUE;
		vmx_exclusive = exclusive;

		VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
		error = VMX_OK;
	}

	simple_unlock(&vmx_use_count_lock);

	if (do_it) {
		vmx_allocate_vmxon_regions();
		mp_rendezvous(NULL, vmx_on, NULL, NULL);
	}
	return error;
}

/* -----------------------------------------------------------------------------
   host_vmxoff()
	Turn off VT operation on all CPUs.
   -------------------------------------------------------------------------- */
void
host_vmxoff()
{
	boolean_t do_it = FALSE; /* do the cpu sync outside of the area holding the lock */

	simple_lock(&vmx_use_count_lock);

	if (vmx_use_count) {
		vmx_use_count--;
		vmx_exclusive = FALSE;
		if (!vmx_use_count)
			do_it = TRUE;
	}

	simple_unlock(&vmx_use_count_lock);

	if (do_it) {
		mp_rendezvous(NULL, vmx_off, NULL, NULL);
		vmx_free_vmxon_regions();
	}

	VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
}
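
#if 0
/*
 * Illustrative sketch only, not compiled: how a client (for example a
 * hypervisor kext) might bracket its use of VMX with the routines above.
 * hv_run_guests() is a hypothetical placeholder; error handling is elided.
 */
static void
hv_example(void)
{
	if (host_vmxon(FALSE) == VMX_OK) {
		/* every CPU is now in VMX root operation */
		hv_run_guests();	/* hypothetical guest-execution loop */
		host_vmxoff();		/* last user turns VMX off on all CPUs */
	}
}
#endif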

/* -----------------------------------------------------------------------------
   vmx_suspend()
	Turn off VT operation on this CPU if it was on.
	Called when a CPU goes offline.
   -------------------------------------------------------------------------- */
void
vmx_suspend()
{
	VMX_KPRINTF("vmx_suspend\n");
	if (vmx_use_count)
		vmx_off(NULL);
}

/* -----------------------------------------------------------------------------
   vmx_resume()
	Restore the previous VT state. Called when CPU comes back online.
   -------------------------------------------------------------------------- */
void
vmx_resume()
{
	VMX_KPRINTF("vmx_resume\n");
	vmx_init(); /* init VMX on CPU #0 */
	if (vmx_use_count)
		vmx_on(NULL);
}
