/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/pexpert.h>
#include <i386/cpuid.h>
#include <i386/cpu_data.h>
#include <i386/proc_reg.h>
#include <i386/vmx.h>
#include <i386/vmx/vmx_asm.h>
#include <i386/vmx/vmx_shims.h>
#include <i386/vmx/vmx_cpu.h>
#include <i386/mtrr.h>
#include <mach/mach_host.h>             /* for host_info() */
#include <i386/mp.h>

#define VMX_KPRINTF(x...) /* kprintf("vmx: " x) */

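/*
 * vmx_use_count counts active host_vmxon() clients; vmx_exclusive records
 * whether the current holder requested exclusive use.  Both are protected
 * by vmx_use_count_lock.
 */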
int vmx_use_count = 0;
int vmx_exclusive = 0;
decl_simple_lock_data(static,vmx_use_count_lock)

/* -----------------------------------------------------------------------------
   vmx_is_available()
	Is the VMX facility available on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_available(void)
{
	return (0 != (cpuid_features() & CPUID_FEATURE_VMX));
}

/* -----------------------------------------------------------------------------
   vmxon_is_enabled()
	Is the VMXON instruction enabled on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmxon_is_enabled(void)
{
	return (vmx_is_available() &&
		(rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON));
}

/* -----------------------------------------------------------------------------
   vmx_is_cr0_valid()
	Is CR0 valid for executing VMXON on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr0_valid(vmx_specs_t *specs)
{
	uint32_t cr0 = get_cr0();
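	/*
	 * VMX fixed-bit convention: bits set in CR0_FIXED0 must be 1 in CR0,
	 * and bits clear in CR0_FIXED1 must be 0 in CR0, for VMXON to succeed.
	 */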
	return (0 == ((~cr0 & specs->cr0_fixed_0)|(cr0 & ~specs->cr0_fixed_1)));
}

/* -----------------------------------------------------------------------------
   vmx_is_cr4_valid()
	Is CR4 valid for executing VMXON on this CPU?
   -------------------------------------------------------------------------- */
static inline boolean_t
vmx_is_cr4_valid(vmx_specs_t *specs)
{
	uint32_t cr4 = get_cr4();
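	/* Same fixed-bit convention as CR0, using the CR4_FIXED0/1 values */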
	return (0 == ((~cr4 & specs->cr4_fixed_0)|(cr4 & ~specs->cr4_fixed_1)));
}

static void
vmx_init(void)
{
	uint64_t msr_image;

	if (!vmx_is_available())
		return;

	/*
	 * We don't count on EFI initializing MSR_IA32_FEATURE_CONTROL
	 * and turning VMXON on and locking the bit, so we do that now.
	 */
	msr_image = rdmsr64(MSR_IA32_FEATURE_CONTROL);
	if (0 == (msr_image & MSR_IA32_FEATCTL_LOCK))
		wrmsr64(MSR_IA32_FEATURE_CONTROL,
			(msr_image |
			 MSR_IA32_FEATCTL_VMXON |
			 MSR_IA32_FEATCTL_LOCK));
}

/* -----------------------------------------------------------------------------
   vmx_get_specs()
	Obtain VMX facility specifications for this CPU and
	enter them into the vmx_specs_t structure. If VMX is not available or
	disabled on this CPU, set vmx_present to false and return leaving
	the remainder of the vmx_specs_t uninitialized.
   -------------------------------------------------------------------------- */
void
vmx_get_specs()
{
	vmx_specs_t *specs = &current_cpu_datap()->cpu_vmx.specs;
	uint64_t msr_image;

	/* this is called once for every CPU, but the lock doesn't care :-) */
	simple_lock_init(&vmx_use_count_lock, 0);

	vmx_init();

	/*
	 * If we already read the data at boot, don't read it again on
	 * wakeup; re-reading it there leads to *bad* things.
	 */
	if (specs->initialized)
		return;
	else
		specs->initialized = TRUE;

	/* See if VMX is present, return if it is not */
	specs->vmx_present = vmx_is_available() && vmxon_is_enabled();
	if (!specs->vmx_present)
		return;

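/* bitfield(x, f) extracts field f from x using the f##_BIT and f##_MASK constants */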
#define bitfield(x,f)	((x >> f##_BIT) & f##_MASK)
	/* Obtain and decode VMX general capabilities */
	msr_image = rdmsr64(MSR_IA32_VMX_BASIC);
	specs->vmcs_id       = msr_image & VMX_VCR_VMCS_REV_ID;
	specs->vmcs_mem_type = bitfield(msr_image, VMX_VCR_VMCS_MEM_TYPE) != 0;
	specs->vmcs_size     = bitfield(msr_image, VMX_VCR_VMCS_SIZE);

	/* Obtain allowed settings for pin-based execution controls */
	msr_image = rdmsr64(MSR_IA32_VMX_PINBASED_CTLS);
	specs->pin_exctls_0 = msr_image & 0xFFFFFFFF;
	specs->pin_exctls_1 = msr_image >> 32;

	/* Obtain allowed settings for processor-based execution controls */
	msr_image = rdmsr64(MSR_IA32_VMX_PROCBASED_CTLS);
	specs->proc_exctls_0 = msr_image & 0xFFFFFFFF;
	specs->proc_exctls_1 = msr_image >> 32;

	/* Obtain allowed settings for VM-exit controls */
	msr_image = rdmsr64(MSR_IA32_VMX_EXIT_CTLS);
	specs->exit_ctls_0 = msr_image & 0xFFFFFFFF;
	specs->exit_ctls_1 = msr_image >> 32;

	/* Obtain allowed settings for VM-entry controls */
	msr_image = rdmsr64(MSR_IA32_VMX_ENTRY_CTLS);
	specs->enter_ctls_0 = msr_image & 0xFFFFFFFF;
	specs->enter_ctls_1 = msr_image >> 32;

	/* Obtain and decode miscellaneous capabilities */
	msr_image = rdmsr64(MSR_IA32_VMX_MISC);
	specs->act_halt     = bitfield(msr_image, VMX_VCR_ACT_HLT) != 0;
	specs->act_shutdown = bitfield(msr_image, VMX_VCR_ACT_SHUTDOWN) != 0;
	specs->act_SIPI     = bitfield(msr_image, VMX_VCR_ACT_SIPI) != 0;
	specs->act_CSTATE   = bitfield(msr_image, VMX_VCR_ACT_CSTATE) != 0;
	specs->cr3_targs    = bitfield(msr_image, VMX_VCR_CR3_TARGS);
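	/* IA32_VMX_MISC reports the MSR-load/store list limit as 512 * (N + 1) entries */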
	specs->max_msrs     = 512 * (1 + bitfield(msr_image, VMX_VCR_MAX_MSRS));
	specs->mseg_id      = bitfield(msr_image, VMX_VCR_MSEG_ID);

	/* Obtain VMX-fixed bits in CR0 */
	specs->cr0_fixed_0 = rdmsr64(MSR_IA32_VMX_CR0_FIXED0) & 0xFFFFFFFF;
	specs->cr0_fixed_1 = rdmsr64(MSR_IA32_VMX_CR0_FIXED1) & 0xFFFFFFFF;

	/* Obtain VMX-fixed bits in CR4 */
	specs->cr4_fixed_0 = rdmsr64(MSR_IA32_VMX_CR4_FIXED0) & 0xFFFFFFFF;
	specs->cr4_fixed_1 = rdmsr64(MSR_IA32_VMX_CR4_FIXED1) & 0xFFFFFFFF;
}

/* -----------------------------------------------------------------------------
   vmx_on()
	Enter VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_on(void)
{
	vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx;
	addr64_t vmxon_region_paddr;
	int result;

	vmx_init();

	assert(cpu->specs.vmx_present);

	if (NULL == cpu->vmxon_region)
		panic("vmx_on: VMXON region not allocated");
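	/* __vmxon() takes the physical address of the page-aligned VMXON region */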
	vmxon_region_paddr = vmx_paddr(cpu->vmxon_region);

	/*
	 * Enable VMX operation.
	 */
	set_cr4(get_cr4() | CR4_VMXE);

	assert(vmx_is_cr0_valid(&cpu->specs));
	assert(vmx_is_cr4_valid(&cpu->specs));

	if ((result = __vmxon(&vmxon_region_paddr)) != VMX_SUCCEED) {
		panic("vmx_on: unexpected return %d from __vmxon()", result);
	}
}

/* -----------------------------------------------------------------------------
   vmx_off()
	Leave VMX root operation on this CPU.
   -------------------------------------------------------------------------- */
static void
vmx_off(void)
{
	int result;

	/* Tell the CPU to release the VMXON region */
	if ((result = __vmxoff()) != VMX_SUCCEED) {
		panic("vmx_off: unexpected return %d from __vmxoff()", result);
	}
}

/* -----------------------------------------------------------------------------
   vmx_allocate_vmxon_regions()
	Allocate, clear and init VMXON regions for all CPUs.
   -------------------------------------------------------------------------- */
static void
vmx_allocate_vmxon_regions(void)
{
	unsigned int i;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		/* The size is defined to be always <= 4K, so we just allocate a page */
		cpu->vmxon_region = vmx_pcalloc();
		if (NULL == cpu->vmxon_region)
			panic("vmx_allocate_vmxon_regions: unable to allocate VMXON region");
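		/*
		 * The first 32 bits of the VMXON region must hold the VMCS
		 * revision identifier reported by IA32_VMX_BASIC.
		 */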
		*(uint32_t*)(cpu->vmxon_region) = cpu->specs.vmcs_id;
	}
}

/* -----------------------------------------------------------------------------
   vmx_free_vmxon_regions()
	Free VMXON regions for all CPUs.
   -------------------------------------------------------------------------- */
static void
vmx_free_vmxon_regions(void)
{
	unsigned int i;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		vmx_pfree(cpu->vmxon_region);
		cpu->vmxon_region = NULL;
	}
}

/* -----------------------------------------------------------------------------
   vmx_globally_available()
	Checks whether VT can be turned on for all CPUs.
   -------------------------------------------------------------------------- */
static boolean_t
vmx_globally_available(void)
{
	unsigned int i;

	boolean_t available = TRUE;

	for (i=0; i<real_ncpus; i++) {
		vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx;

		if (!cpu->specs.vmx_present)
			available = FALSE;
	}
	VMX_KPRINTF("VMX available: %d\n", available);
	return available;
}


/* -----------------------------------------------------------------------------
   host_vmxon()
	Turn on VT operation on all CPUs.
   -------------------------------------------------------------------------- */
int
host_vmxon(boolean_t exclusive)
{
	int error;

	if (!vmx_globally_available())
		return VMX_UNSUPPORTED;

	simple_lock(&vmx_use_count_lock);

	if (vmx_exclusive) {
		error = VMX_INUSE;
		goto out;
	}
	vmx_use_count++;
	if (vmx_use_count == 1) { /* was turned off before */
		vmx_allocate_vmxon_regions();
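		/* Enter VMX root operation on every CPU via mp_rendezvous() */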
		mp_rendezvous(NULL, (void (*)(void *))vmx_on, NULL, NULL);
	}
	vmx_exclusive = exclusive;

	VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
	error = VMX_OK;
out:
	simple_unlock(&vmx_use_count_lock);

	return error;
}

/* -----------------------------------------------------------------------------
   host_vmxoff()
	Turn off VT operation on all CPUs.
   -------------------------------------------------------------------------- */
void
host_vmxoff()
{
	simple_lock(&vmx_use_count_lock);

	if (vmx_use_count) {
		vmx_use_count--;
		vmx_exclusive = 0;
		if (!vmx_use_count) {
			mp_rendezvous(NULL, (void (*)(void *))vmx_off, NULL, NULL);
			vmx_free_vmxon_regions();
		}
	}

	simple_unlock(&vmx_use_count_lock);

	VMX_KPRINTF("VMX use count: %d\n", vmx_use_count);
}

/* -----------------------------------------------------------------------------
   vmx_suspend()
	Turn off VT operation on this CPU if it was on.
	Called when a CPU goes offline.
   -------------------------------------------------------------------------- */
void
vmx_suspend()
{
	VMX_KPRINTF("vmx_suspend\n");
	if (vmx_use_count)
		vmx_off();
}

/* -----------------------------------------------------------------------------
   vmx_resume()
	Restore the previous VT state. Called when CPU comes back online.
   -------------------------------------------------------------------------- */
void
vmx_resume()
{
	VMX_KPRINTF("vmx_resume\n");
	vmx_init(); /* init VMX on CPU #0 */
	if (vmx_use_count)
		vmx_on();
}