1/*
2 * Copyright (c) 2006-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <pexpert/pexpert.h>
30#include <i386/cpuid.h>
31#include <i386/cpu_data.h>
32#include <i386/mp.h>
33#include <i386/proc_reg.h>
34#include <i386/vmx.h>
35#include <i386/vmx/vmx_asm.h>
36#include <i386/vmx/vmx_shims.h>
37#include <i386/vmx/vmx_cpu.h>
38#include <mach/mach_host.h>             /* for host_info() */
39
40#define VMX_KPRINTF(x...) /* kprintf("vmx: " x) */
41
/* Number of outstanding host_vmxon() references; updated under vmx_lck_mtx. */
int vmx_use_count = 0;
/* TRUE while a caller holds VT exclusively (set in host_vmxon()). */
boolean_t vmx_exclusive = FALSE;

lck_grp_t *vmx_lck_grp = NULL;	/* lock group for vmx_lck_mtx */
lck_mtx_t *vmx_lck_mtx = NULL;	/* serializes use-count/exclusivity changes */
47
48/* -----------------------------------------------------------------------------
49   vmx_is_available()
50	Is the VMX facility available on this CPU?
51   -------------------------------------------------------------------------- */
52static inline boolean_t
53vmx_is_available(void)
54{
55	return (0 != (cpuid_features() & CPUID_FEATURE_VMX));
56}
57
58/* -----------------------------------------------------------------------------
59   vmxon_is_enabled()
60	Is the VMXON instruction enabled on this CPU?
61   -------------------------------------------------------------------------- */
62static inline boolean_t
63vmxon_is_enabled(void)
64{
65	return (vmx_is_available() &&
66		(rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON));
67}
68
69#if MACH_ASSERT
70/* -----------------------------------------------------------------------------
71   vmx_is_cr0_valid()
72	Is CR0 valid for executing VMXON on this CPU?
73   -------------------------------------------------------------------------- */
74static inline boolean_t
75vmx_is_cr0_valid(vmx_specs_t *specs)
76{
77	uintptr_t cr0 = get_cr0();
78	return (0 == ((~cr0 & specs->cr0_fixed_0)|(cr0 & ~specs->cr0_fixed_1)));
79}
80
81/* -----------------------------------------------------------------------------
82   vmx_is_cr4_valid()
83	Is CR4 valid for executing VMXON on this CPU?
84   -------------------------------------------------------------------------- */
85static inline boolean_t
86vmx_is_cr4_valid(vmx_specs_t *specs)
87{
88	uintptr_t cr4 = get_cr4();
89	return (0 == ((~cr4 & specs->cr4_fixed_0)|(cr4 & ~specs->cr4_fixed_1)));
90}
91
92#endif
93
94static void
95vmx_enable(void)
96{
97	uint64_t msr_image;
98
99	if (!vmx_is_available())
100		return;
101
102	/*
103	 * We don't count on EFI initializing MSR_IA32_FEATURE_CONTROL
104	 * and turning VMXON on and locking the bit, so we do that now.
105	 */
106	msr_image = rdmsr64(MSR_IA32_FEATURE_CONTROL);
107	if (0 == ((msr_image & MSR_IA32_FEATCTL_LOCK)))
108		wrmsr64(MSR_IA32_FEATURE_CONTROL,
109			(msr_image |
110			 MSR_IA32_FEATCTL_VMXON |
111			 MSR_IA32_FEATCTL_LOCK));
112
113	set_cr4(get_cr4() | CR4_VMXE);
114}
115
116void
117vmx_init()
118{
119	vmx_lck_grp = lck_grp_alloc_init("vmx", LCK_GRP_ATTR_NULL);
120	assert(vmx_lck_grp);
121
122	vmx_lck_mtx = lck_mtx_alloc_init(vmx_lck_grp, LCK_ATTR_NULL);
123	assert(vmx_lck_mtx);
124}
125
/* -----------------------------------------------------------------------------
   vmx_cpu_init()
	Obtain VMX facility specifications for this CPU and
	enter them into the vmx_specs_t structure. If VMX is not available or
	disabled on this CPU, set vmx_present to false and return leaving
	the remainder of the vmx_specs_t uninitialized.
   -------------------------------------------------------------------------- */
133void
134vmx_cpu_init()
135{
136	vmx_specs_t *specs = &current_cpu_datap()->cpu_vmx.specs;
137
138	vmx_enable();
139
140	/* if we have read the data on boot, we won't read it again on wakeup */
141	if (specs->initialized)
142		return;
143	else
144		specs->initialized = TRUE;
145
146	/* See if VMX is present, return if it is not */
147	specs->vmx_present = vmx_is_available() && vmxon_is_enabled();
148	if (!specs->vmx_present)
149		return;
150
151#define rdmsr_mask(msr, mask) (uint32_t)(rdmsr64(msr) & (mask))
152	specs->vmcs_id = rdmsr_mask(MSR_IA32_VMX_BASIC, VMX_VCR_VMCS_REV_ID);
153
154	/* Obtain VMX-fixed bits in CR0 */
155	specs->cr0_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED0, 0xFFFFFFFF);
156	specs->cr0_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED1, 0xFFFFFFFF);
157
158	/* Obtain VMX-fixed bits in CR4 */
159	specs->cr4_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED0, 0xFFFFFFFF);
160	specs->cr4_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED1, 0xFFFFFFFF);
161}
162
163/* -----------------------------------------------------------------------------
164   vmx_on()
165	Enter VMX root operation on this CPU.
166   -------------------------------------------------------------------------- */
167static void
168vmx_on(void *arg __unused)
169{
170	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
171	addr64_t vmxon_region_paddr;
172	int result;
173
174	assert(cpu->specs.vmx_present);
175
176	if (NULL == cpu->vmxon_region)
177		panic("vmx_on: VMXON region not allocated");
178	vmxon_region_paddr = vmx_paddr(cpu->vmxon_region);
179
180	/*
181	 * Enable VMX operation.
182	 */
183	if (FALSE == cpu->specs.vmx_on) {
184		assert(vmx_is_cr0_valid(&cpu->specs));
185		assert(vmx_is_cr4_valid(&cpu->specs));
186
187		result = __vmxon(vmxon_region_paddr);
188
189		if (result != VMX_SUCCEED) {
190			panic("vmx_on: unexpected return %d from __vmxon()", result);
191		}
192
193		cpu->specs.vmx_on = TRUE;
194	}
195}
196
197/* -----------------------------------------------------------------------------
198   vmx_off()
199	Leave VMX root operation on this CPU.
200   -------------------------------------------------------------------------- */
201static void
202vmx_off(void *arg __unused)
203{
204	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
205	int result;
206
207	if (TRUE == cpu->specs.vmx_on) {
208		/* Tell the CPU to release the VMXON region */
209		result = __vmxoff();
210
211		if (result != VMX_SUCCEED) {
212			panic("vmx_off: unexpected return %d from __vmxoff()", result);
213		}
214
215		cpu->specs.vmx_on = FALSE;
216	}
217}
218
219/* -----------------------------------------------------------------------------
220   vmx_allocate_vmxon_regions()
221	Allocate, clear and init VMXON regions for all CPUs.
222   -------------------------------------------------------------------------- */
223static void
224vmx_allocate_vmxon_regions(void)
225{
226	unsigned int i;
227
228	for (i=0; i<real_ncpus; i++) {
229		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;
230
231		/* The size is defined to be always <= 4K, so we just allocate a page */
232		cpu->vmxon_region = vmx_pcalloc();
233		if (NULL == cpu->vmxon_region)
234			panic("vmx_allocate_vmxon_regions: unable to allocate VMXON region");
235		*(uint32_t*)(cpu->vmxon_region) = cpu->specs.vmcs_id;
236	}
237}
238
239/* -----------------------------------------------------------------------------
240   vmx_free_vmxon_regions()
241	Free VMXON regions for all CPUs.
242   -------------------------------------------------------------------------- */
243static void
244vmx_free_vmxon_regions(void)
245{
246	unsigned int i;
247
248	for (i=0; i<real_ncpus; i++) {
249		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;
250
251		vmx_pfree(cpu->vmxon_region);
252		cpu->vmxon_region = NULL;
253	}
254}
255
256/* -----------------------------------------------------------------------------
257   vmx_globally_available()
258	Checks whether VT can be turned on for all CPUs.
259   -------------------------------------------------------------------------- */
260static boolean_t
261vmx_globally_available(void)
262{
263	unsigned int i;
264	unsigned int ncpus = ml_get_max_cpus();
265	boolean_t available = TRUE;
266
267	for (i=0; i<ncpus; i++) {
268		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;
269
270		if (!cpu->specs.vmx_present)
271			available = FALSE;
272	}
273	VMX_KPRINTF("VMX available: %d\n", available);
274	return available;
275}
276
277
278/* -----------------------------------------------------------------------------
279   vmx_turn_on()
280	Turn on VT operation on all CPUs.
281   -------------------------------------------------------------------------- */
282int
283host_vmxon(boolean_t exclusive)
284{
285	int error;
286
287	assert(0 == get_preemption_level());
288
289	if (!vmx_globally_available())
290		return VMX_UNSUPPORTED;
291
292	lck_mtx_lock(vmx_lck_mtx);
293
294	if (vmx_exclusive || (exclusive && vmx_use_count)) {
295		error = VMX_INUSE;
296	} else {
297		if (0 == vmx_use_count) {
298			vmx_allocate_vmxon_regions();
299			vmx_exclusive = exclusive;
300			vmx_use_count = 1;
301			mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_on, NULL);
302
303		} else {
304			vmx_use_count++;
305		}
306
307		VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
308		error = VMX_OK;
309	}
310
311	lck_mtx_unlock(vmx_lck_mtx);
312
313	return error;
314}
315
316/* -----------------------------------------------------------------------------
317   vmx_turn_off()
318	Turn off VT operation on all CPUs.
319   -------------------------------------------------------------------------- */
320void
321host_vmxoff()
322{
323	assert(0 == get_preemption_level());
324
325	lck_mtx_lock(vmx_lck_mtx);
326
327	if (1 == vmx_use_count) {
328		vmx_exclusive = FALSE;
329		vmx_use_count = 0;
330		mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_off, NULL);
331		vmx_free_vmxon_regions();
332	} else {
333		vmx_use_count--;
334	}
335
336	lck_mtx_unlock(vmx_lck_mtx);
337
338	VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
339}
340
341/* -----------------------------------------------------------------------------
342   vmx_suspend()
343	Turn off VT operation on this CPU if it was on.
344	Called when a CPU goes offline.
345   -------------------------------------------------------------------------- */
346void
347vmx_suspend()
348{
349	VMX_KPRINTF("vmx_suspend\n");
350
351	if (vmx_use_count)
352		vmx_off(NULL);
353}
354
/* -----------------------------------------------------------------------------
   vmx_resume()
	Restore the previous VT state. Called when CPU comes back online.
   -------------------------------------------------------------------------- */
359void
360vmx_resume()
361{
362	VMX_KPRINTF("vmx_resume\n");
363
364	vmx_enable();
365
366	if (vmx_use_count)
367		vmx_on(NULL);
368}
369
370/* -----------------------------------------------------------------------------
371   vmx_hv_support()
372	Determine if the VMX feature set is sufficent for kernel HV support.
373   -------------------------------------------------------------------------- */
374boolean_t
375vmx_hv_support()
376{
377	if (!vmx_is_available())
378		return FALSE;
379
380#define CHK(msr, shift, mask) if (!VMX_CAP(msr, shift, mask)) return FALSE;
381
382	/* 'EPT' and 'Unrestricted Mode' are part of the secondary processor-based
383	 * VM-execution controls */
384	CHK(MSR_IA32_VMX_BASIC, 0, VMX_BASIC_TRUE_CTLS)
385	CHK(MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 32, VMX_TRUE_PROCBASED_SECONDARY_CTLS)
386
387	/* if we have these, check for 'EPT' and 'Unrestricted Mode' */
388	CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_EPT)
389	CHK(MSR_IA32_VMX_PROCBASED_CTLS2, 32, VMX_PROCBASED_CTLS2_UNRESTRICTED)
390
391	return TRUE;
392}
393