svm.c revision 273375
1/*-
2 * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com)
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice unmodified, this list of conditions, and the following
10 *    disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/amd64/vmm/amd/svm.c 273375 2014-10-21 07:10:43Z neel $");
29
30#include <sys/param.h>
31#include <sys/systm.h>
32#include <sys/smp.h>
33#include <sys/kernel.h>
34#include <sys/malloc.h>
35#include <sys/pcpu.h>
36#include <sys/proc.h>
37#include <sys/sysctl.h>
38
39#include <vm/vm.h>
40#include <vm/pmap.h>
41
42#include <machine/cpufunc.h>
43#include <machine/psl.h>
44#include <machine/pmap.h>
45#include <machine/md_var.h>
46#include <machine/specialreg.h>
47#include <machine/smp.h>
48#include <machine/vmm.h>
49#include <machine/vmm_instruction_emul.h>
50
51#include "vmm_lapic.h"
52#include "vmm_stat.h"
53#include "vmm_ktr.h"
54#include "vmm_ioport.h"
55#include "vatpic.h"
56#include "vlapic.h"
57#include "vlapic_priv.h"
58
59#include "x86.h"
60#include "vmcb.h"
61#include "svm.h"
62#include "svm_softc.h"
63#include "svm_msr.h"
64#include "npt.h"
65
66SYSCTL_DECL(_hw_vmm);
67SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL);
68
69/*
70 * SVM CPUID function 0x8000_000A, edx bit decoding.
71 */
72#define AMD_CPUID_SVM_NP		BIT(0)  /* Nested paging or RVI */
73#define AMD_CPUID_SVM_LBR		BIT(1)  /* Last branch virtualization */
74#define AMD_CPUID_SVM_SVML		BIT(2)  /* SVM lock */
75#define AMD_CPUID_SVM_NRIP_SAVE		BIT(3)  /* Next RIP is saved */
76#define AMD_CPUID_SVM_TSC_RATE		BIT(4)  /* TSC rate control. */
77#define AMD_CPUID_SVM_VMCB_CLEAN	BIT(5)  /* VMCB state caching */
78#define AMD_CPUID_SVM_FLUSH_BY_ASID	BIT(6)  /* Flush by ASID */
79#define AMD_CPUID_SVM_DECODE_ASSIST	BIT(7)  /* Decode assist */
80#define AMD_CPUID_SVM_PAUSE_INC		BIT(10) /* Pause intercept filter. */
81#define AMD_CPUID_SVM_PAUSE_FTH		BIT(12) /* Pause filter threshold */
82
83#define	VMCB_CACHE_DEFAULT	(VMCB_CACHE_ASID 	|	\
84				VMCB_CACHE_IOPM		|	\
85				VMCB_CACHE_I		|	\
86				VMCB_CACHE_TPR		|	\
87				VMCB_CACHE_CR2		|	\
88				VMCB_CACHE_CR		|	\
89				VMCB_CACHE_DT		|	\
90				VMCB_CACHE_SEG		|	\
91				VMCB_CACHE_NP)
92
93static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT;
94SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean,
95    0, NULL);
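/*
 * Note on the "clean bits" mechanism: 'vmcb_clean' is the set of VMCB state
 * cache categories that the processor is allowed to treat as unmodified
 * across VMRUNs. Before every VMRUN, svm_vmrun() computes
 *
 *	ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
 *
 * so any category marked dirty via svm_set_dirty() since the last VMRUN is
 * re-read from memory by the processor.
 */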
96
97static MALLOC_DEFINE(M_SVM, "svm", "svm");
98static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic");
99
100/* Per-CPU context area. */
101extern struct pcpu __pcpu[];
102
103static uint32_t svm_feature;	/* AMD SVM features. */
104SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0,
105    "SVM features advertised by CPUID.8000000AH:EDX");
106
107static int disable_npf_assist;
108SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN,
109    &disable_npf_assist, 0, NULL);
110
111/* Maximum ASIDs supported by the processor */
112static uint32_t nasid;
113SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RD, &nasid, 0,
114    "Number of ASIDs supported by this processor");
115
116/* Current ASID generation for each host cpu */
117static struct asid asid[MAXCPU];
118
119/*
120 * SVM host state saved area of size 4KB for each core.
121 */
122static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
123
124static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
125static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
126static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
127
128static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
129
130static __inline int
131flush_by_asid(void)
132{
133
134	return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID);
135}
136
137static __inline int
138decode_assist(void)
139{
140
141	return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST);
142}
143
144static void
145svm_disable(void *arg __unused)
146{
147	uint64_t efer;
148
149	efer = rdmsr(MSR_EFER);
150	efer &= ~EFER_SVM;
151	wrmsr(MSR_EFER, efer);
152}
153
154/*
155 * Disable SVM on all CPUs.
156 */
157static int
158svm_cleanup(void)
159{
160
161	smp_rendezvous(NULL, svm_disable, NULL, NULL);
162	return (0);
163}
164
165/*
166 * Verify that all the features required by bhyve are available.
167 */
168static int
169check_svm_features(void)
170{
171	u_int regs[4];
172
173	/* CPUID Fn8000_000A is for SVM */
174	do_cpuid(0x8000000A, regs);
175	svm_feature = regs[3];
176
177	printf("SVM: Revision %d\n", regs[0] & 0xFF);
178	printf("SVM: NumASID %u\n", regs[1]);
179
180	nasid = regs[1];
181	KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid));
182
183	printf("SVM: Features 0x%b\n", svm_feature,
184		"\020"
185		"\001NP"		/* Nested paging */
186		"\002LbrVirt"		/* LBR virtualization */
187		"\003SVML"		/* SVM lock */
188		"\004NRIPS"		/* NRIP save */
189		"\005TscRateMsr"	/* MSR based TSC rate control */
190		"\006VmcbClean"		/* VMCB clean bits */
191		"\007FlushByAsid"	/* Flush by ASID */
192		"\010DecodeAssist"	/* Decode assist */
193		"\011<b8>"
194		"\012<b9>"
195		"\013PauseFilter"
196		"\014<b11>"
197		"\015PauseFilterThreshold"
198		"\016AVIC"
199		);
200
201	/* bhyve requires the Nested Paging feature */
202	if (!(svm_feature & AMD_CPUID_SVM_NP)) {
203		printf("SVM: Nested Paging feature not available.\n");
204		return (ENXIO);
205	}
206
207	/* bhyve requires the NRIP Save feature */
208	if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) {
209		printf("SVM: NRIP Save feature not available.\n");
210		return (ENXIO);
211	}
212
213	return (0);
214}
215
216static void
217svm_enable(void *arg __unused)
218{
219	uint64_t efer;
220
221	efer = rdmsr(MSR_EFER);
222	efer |= EFER_SVM;
223	wrmsr(MSR_EFER, efer);
224
225	wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu]));
226}
227
228/*
229 * Return 1 if SVM is enabled on this processor and 0 otherwise.
230 */
231static int
232svm_available(void)
233{
234	uint64_t msr;
235
236	/* Section 15.4 Enabling SVM from APM2. */
237	if ((amd_feature2 & AMDID2_SVM) == 0) {
238		printf("SVM: not available.\n");
239		return (0);
240	}
241
242	msr = rdmsr(MSR_VM_CR);
243	if ((msr & VM_CR_SVMDIS) != 0) {
244		printf("SVM: disabled by BIOS.\n");
245		return (0);
246	}
247
248	return (1);
249}
250
251static int
252svm_init(int ipinum)
253{
254	int error, cpu;
255
256	if (!svm_available())
257		return (ENXIO);
258
259	error = check_svm_features();
260	if (error)
261		return (error);
262
263	vmcb_clean &= VMCB_CACHE_DEFAULT;
264
265	for (cpu = 0; cpu < MAXCPU; cpu++) {
266		/*
267		 * Initialize the host ASIDs to their "highest" valid values.
268		 *
269		 * The next ASID allocation will rollover both 'gen' and 'num'
270		 * and start off the sequence at {1,1}.
271		 */
272		asid[cpu].gen = ~0UL;
273		asid[cpu].num = nasid - 1;
274	}
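	/*
	 * Illustrative example (not executed): with nasid = 8 the loop above
	 * leaves asid[cpu] = { .gen = ~0UL, .num = 7 }. The first allocation
	 * in check_asid() bumps 'num' up to 'nasid', resets it to 1 and
	 * increments 'gen' (which wraps from ~0 to 0 and is then forced to
	 * 1), producing the {1,1} starting point mentioned above.
	 */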
275
276	svm_msr_init();
277	svm_npt_init(ipinum);
278
279	/* Enable SVM on all CPUs */
280	smp_rendezvous(NULL, svm_enable, NULL, NULL);
281
282	return (0);
283}
284
285static void
286svm_restore(void)
287{
288
289	svm_enable(NULL);
290}
291
292/* Pentium compatible MSRs */
293#define MSR_PENTIUM_START 	0
294#define MSR_PENTIUM_END 	0x1FFF
295/* AMD 6th generation and Intel compatible MSRs */
296#define MSR_AMD6TH_START 	0xC0000000UL
297#define MSR_AMD6TH_END 		0xC0001FFFUL
298/* AMD 7th and 8th generation compatible MSRs */
299#define MSR_AMD7TH_START 	0xC0010000UL
300#define MSR_AMD7TH_END 		0xC0011FFFUL
301
302/*
303 * Get the index and bit position for an MSR in the permission bitmap.
304 * Two bits are used for each MSR: lower bit for read and higher bit for write.
305 */
306static int
307svm_msr_index(uint64_t msr, int *index, int *bit)
308{
309	uint32_t base, off;
310
311	*index = -1;
312	*bit = (msr % 4) * 2;
313	base = 0;
314
315	if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) {
316		*index = msr / 4;
317		return (0);
318	}
319
320	base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1);
321	if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) {
322		off = (msr - MSR_AMD6TH_START);
323		*index = (off + base) / 4;
324		return (0);
325	}
326
327	base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1);
328	if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) {
329		off = (msr - MSR_AMD7TH_START);
330		*index = (off + base) / 4;
331		return (0);
332	}
333
334	return (EINVAL);
335}
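/*
 * Worked example (illustrative only): for MSR_EFER (0xC0000080) the offset
 * into the AMD 6th generation range is 0x80 and base is 0x2000, so the
 * function returns index = (0x80 + 0x2000) / 4 = 0x820 and bit = 0. The
 * read permission is then bit 0 and the write permission bit 1 of
 * perm_bitmap[0x820].
 */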
336
337/*
338 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
339 */
340static void
341svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write)
342{
343	int index, bit, error;
344
345	error = svm_msr_index(msr, &index, &bit);
346	KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr));
347	KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE,
348	    ("%s: invalid index %d for msr %#lx", __func__, index, msr));
349	KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d "
350	    "msr %#lx", __func__, bit, msr));
351
352	if (read)
353		perm_bitmap[index] &= ~(1UL << bit);
354
355	if (write)
356		perm_bitmap[index] &= ~(2UL << bit);
357}
358
359static void
360svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr)
361{
362
363	svm_msr_perm(perm_bitmap, msr, true, true);
364}
365
366static void
367svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr)
368{
369
370	svm_msr_perm(perm_bitmap, msr, true, false);
371}
372
373static __inline int
374svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
375{
376	struct vmcb_ctrl *ctrl;
377
378	KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));
379
380	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
381	return (ctrl->intercept[idx] & bitmask ? 1 : 0);
382}
383
384static __inline void
385svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
386    int enabled)
387{
388	struct vmcb_ctrl *ctrl;
389	uint32_t oldval;
390
391	KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx));
392
393	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
394	oldval = ctrl->intercept[idx];
395
396	if (enabled)
397		ctrl->intercept[idx] |= bitmask;
398	else
399		ctrl->intercept[idx] &= ~bitmask;
400
401	if (ctrl->intercept[idx] != oldval) {
402		svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
403		VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
404		    "from %#x to %#x", idx, oldval, ctrl->intercept[idx]);
405	}
406}
407
408static __inline void
409svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
410{
411
412	svm_set_intercept(sc, vcpu, off, bitmask, 0);
413}
414
415static __inline void
416svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
417{
418
419	svm_set_intercept(sc, vcpu, off, bitmask, 1);
420}
421
422static void
423vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
424    uint64_t msrpm_base_pa, uint64_t np_pml4)
425{
426	struct vmcb_ctrl *ctrl;
427	struct vmcb_state *state;
428	uint32_t mask;
429	int n;
430
431	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
432	state = svm_get_vmcb_state(sc, vcpu);
433
434	ctrl->iopm_base_pa = iopm_base_pa;
435	ctrl->msrpm_base_pa = msrpm_base_pa;
436
437	/* Enable nested paging */
438	ctrl->np_enable = 1;
439	ctrl->n_cr3 = np_pml4;
440
441	/*
442	 * Intercept accesses to the control registers that are not shadowed
443	 * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8.
444	 */
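	/*
	 * In this intercept word, bits 0-15 are the CR0-CR15 read intercepts
	 * and bits 16-31 the corresponding write intercepts (per APMv2), so
	 * the mask (BIT(n) << 16) | BIT(n) built below toggles both the read
	 * and the write intercept for CRn at once.
	 */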
445	for (n = 0; n < 16; n++) {
446		mask = (BIT(n) << 16) | BIT(n);
447		if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8)
448			svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
449		else
450			svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
451	}
452
453	/* Intercept Machine Check exceptions. */
454	svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
455
456	/* Intercept various events (e.g. I/O, MSR and CPUID accesses) */
457	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
458	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
459	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
460	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
461	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
462	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
463	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
464	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
465	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
466	    VMCB_INTCPT_FERR_FREEZE);
467
468	/*
469	 * From section "Canonicalization and Consistency Checks" in APMv2
470	 * the VMRUN intercept bit must be set to pass the consistency check.
471	 */
472	svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
473
474	/*
475	 * The ASID will be set to a non-zero value just before VMRUN.
476	 */
477	ctrl->asid = 0;
478
479	/*
480	 * Section 15.21.1, Interrupt Masking in EFLAGS
481	 * Section 15.21.2, Virtualizing APIC.TPR
482	 *
483	 * This must be set for %rflags and %cr8 isolation of guest and host.
484	 */
485	ctrl->v_intr_masking = 1;
486
487	/* Enable Last Branch Record aka LBR for debugging */
488	ctrl->lbr_virt_en = 1;
489	state->dbgctl = BIT(0);
490
491	/* EFER_SVM must always be set when the guest is executing */
492	state->efer = EFER_SVM;
493
494	/* Set up the PAT to power-on state */
495	state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK)	|
496	    PAT_VALUE(1, PAT_WRITE_THROUGH)	|
497	    PAT_VALUE(2, PAT_UNCACHED)		|
498	    PAT_VALUE(3, PAT_UNCACHEABLE)	|
499	    PAT_VALUE(4, PAT_WRITE_BACK)	|
500	    PAT_VALUE(5, PAT_WRITE_THROUGH)	|
501	    PAT_VALUE(6, PAT_UNCACHED)		|
502	    PAT_VALUE(7, PAT_UNCACHEABLE);
503}
504
505/*
506 * Initialize a virtual machine.
507 */
508static void *
509svm_vminit(struct vm *vm, pmap_t pmap)
510{
511	struct svm_softc *svm_sc;
512	struct svm_vcpu *vcpu;
513	vm_paddr_t msrpm_pa, iopm_pa, pml4_pa;
514	int i;
515
516	svm_sc = malloc(sizeof (struct svm_softc), M_SVM, M_WAITOK | M_ZERO);
517	svm_sc->vm = vm;
518	svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4);
519
520	/*
521	 * Intercept read and write accesses to all MSRs.
522	 */
523	memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap));
524
525	/*
526	 * Access to the following MSRs is redirected to the VMCB when the
527	 * guest is executing. Therefore it is safe to allow the guest to
528	 * read/write these MSRs directly without hypervisor involvement.
529	 */
530	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE);
531	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE);
532	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE);
533
534	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR);
535	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR);
536	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR);
537	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK);
538	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR);
539	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR);
540	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR);
541	svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT);
542
543	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC);
544
545	/*
546	 * Intercept writes to make sure that the EFER_SVM bit is not cleared.
547	 */
548	svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER);
549
550	/* Intercept access to all I/O ports. */
551	memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap));
552
553	iopm_pa = vtophys(svm_sc->iopm_bitmap);
554	msrpm_pa = vtophys(svm_sc->msr_bitmap);
555	pml4_pa = svm_sc->nptp;
556	for (i = 0; i < VM_MAXCPU; i++) {
557		vcpu = svm_get_vcpu(svm_sc, i);
558		vcpu->lastcpu = NOCPU;
559		vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
560		vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa);
561		svm_msr_guest_init(svm_sc, i);
562	}
563	return (svm_sc);
564}
565
566static int
567svm_cpl(struct vmcb_state *state)
568{
569
570	/*
571	 * From APMv2:
572	 *   "Retrieve the CPL from the CPL field in the VMCB, not
573	 *    from any segment DPL"
574	 */
575	return (state->cpl);
576}
577
578static enum vm_cpu_mode
579svm_vcpu_mode(struct vmcb *vmcb)
580{
581	struct vmcb_segment seg;
582	struct vmcb_state *state;
583	int error;
584
585	state = &vmcb->state;
586
587	if (state->efer & EFER_LMA) {
588		error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
589		KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__,
590		    error));
591
592		/*
593		 * Section 4.8.1 of APM2: check if the Code Segment has the
594		 * Long attribute set in its descriptor.
595		 */
596		if (seg.attrib & VMCB_CS_ATTRIB_L)
597			return (CPU_MODE_64BIT);
598		else
599			return (CPU_MODE_COMPATIBILITY);
600	} else  if (state->cr0 & CR0_PE) {
601		return (CPU_MODE_PROTECTED);
602	} else {
603		return (CPU_MODE_REAL);
604	}
605}
606
607static enum vm_paging_mode
608svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer)
609{
610
611	if ((cr0 & CR0_PG) == 0)
612		return (PAGING_MODE_FLAT);
613	if ((cr4 & CR4_PAE) == 0)
614		return (PAGING_MODE_32);
615	if (efer & EFER_LME)
616		return (PAGING_MODE_64);
617	else
618		return (PAGING_MODE_PAE);
619}
620
621/*
622 * ins/outs utility routines
623 */
624static uint64_t
625svm_inout_str_index(struct svm_regctx *regs, int in)
626{
627	uint64_t val;
628
629	val = in ? regs->sctx_rdi : regs->sctx_rsi;
630
631	return (val);
632}
633
634static uint64_t
635svm_inout_str_count(struct svm_regctx *regs, int rep)
636{
637	uint64_t val;
638
639	val = rep ? regs->sctx_rcx : 1;
640
641	return (val);
642}
643
644static void
645svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
646    int in, struct vm_inout_str *vis)
647{
648	int error, s;
649
650	if (in) {
651		vis->seg_name = VM_REG_GUEST_ES;
652	} else {
653		/* The segment field has standard encoding */
654		s = (info1 >> 10) & 0x7;
655		vis->seg_name = vm_segment_name(s);
656	}
657
658	error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
659	KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error));
660}
661
662static int
663svm_inout_str_addrsize(uint64_t info1)
664{
665	uint32_t size;
666
667	size = (info1 >> 7) & 0x7;
668	switch (size) {
669	case 1:
670		return (2);	/* 16 bit */
671	case 2:
672		return (4);	/* 32 bit */
673	case 4:
674		return (8);	/* 64 bit */
675	default:
676		panic("%s: invalid size encoding %d", __func__, size);
677	}
678}
679
680static void
681svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging)
682{
683	struct vmcb_state *state;
684
685	state = &vmcb->state;
686	paging->cr3 = state->cr3;
687	paging->cpl = svm_cpl(state);
688	paging->cpu_mode = svm_vcpu_mode(vmcb);
689	paging->paging_mode = svm_paging_mode(state->cr0, state->cr4,
690	    state->efer);
691}
692
693#define	UNHANDLED 0
694
695/*
696 * Handle guest I/O intercept.
697 */
698static int
699svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
700{
701	struct vmcb_ctrl *ctrl;
702	struct vmcb_state *state;
703	struct svm_regctx *regs;
704	struct vm_inout_str *vis;
705	uint64_t info1;
706	int inout_string;
707
708	state = svm_get_vmcb_state(svm_sc, vcpu);
709	ctrl  = svm_get_vmcb_ctrl(svm_sc, vcpu);
710	regs  = svm_get_guest_regctx(svm_sc, vcpu);
711
712	info1 = ctrl->exitinfo1;
713	inout_string = info1 & BIT(2) ? 1 : 0;
714
715	/*
716	 * The effective segment number in EXITINFO1[12:10] is populated
717	 * only if the processor has the DecodeAssist capability.
718	 *
719	 * XXX this is not specified explicitly in APMv2 but can be verified
720	 * empirically.
721	 */
722	if (inout_string && !decode_assist())
723		return (UNHANDLED);
724
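	/*
	 * EXITINFO1 layout for IOIO intercepts (per APMv2): bit 0 is the
	 * direction (1 = in), bit 2 the string (INS/OUTS) flag, bit 3 the
	 * REP prefix, bits 4-6 a one-hot operand size, bits 7-9 a one-hot
	 * address size, bits 10-12 the effective segment and bits 16-31 the
	 * port number. The fields below are decoded accordingly.
	 */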
725	vmexit->exitcode 	= VM_EXITCODE_INOUT;
726	vmexit->u.inout.in 	= (info1 & BIT(0)) ? 1 : 0;
727	vmexit->u.inout.string 	= inout_string;
728	vmexit->u.inout.rep 	= (info1 & BIT(3)) ? 1 : 0;
729	vmexit->u.inout.bytes 	= (info1 >> 4) & 0x7;
730	vmexit->u.inout.port 	= (uint16_t)(info1 >> 16);
731	vmexit->u.inout.eax 	= (uint32_t)(state->rax);
732
733	if (inout_string) {
734		vmexit->exitcode = VM_EXITCODE_INOUT_STR;
735		vis = &vmexit->u.inout_str;
736		svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
737		vis->rflags = state->rflags;
738		vis->cr0 = state->cr0;
739		vis->index = svm_inout_str_index(regs, vmexit->u.inout.in);
740		vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep);
741		vis->addrsize = svm_inout_str_addrsize(info1);
742		svm_inout_str_seginfo(svm_sc, vcpu, info1,
743		    vmexit->u.inout.in, vis);
744	}
745
746	return (UNHANDLED);
747}
748
749static int
750npf_fault_type(uint64_t exitinfo1)
751{
752
753	if (exitinfo1 & VMCB_NPF_INFO1_W)
754		return (VM_PROT_WRITE);
755	else if (exitinfo1 & VMCB_NPF_INFO1_ID)
756		return (VM_PROT_EXECUTE);
757	else
758		return (VM_PROT_READ);
759}
760
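/*
 * A nested page fault is a candidate for instruction emulation (typically an
 * MMIO access) only if EXITINFO1 shows that it was not an instruction fetch,
 * did not occur during a guest page table walk and carries a valid guest
 * physical address.
 */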
761static bool
762svm_npf_emul_fault(uint64_t exitinfo1)
763{
764
765	if (exitinfo1 & VMCB_NPF_INFO1_ID) {
766		return (false);
767	}
768
769	if (exitinfo1 & VMCB_NPF_INFO1_GPT) {
770		return (false);
771	}
772
773	if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) {
774		return (false);
775	}
776
777	return (true);
778}
779
780static void
781svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit)
782{
783	struct vm_guest_paging *paging;
784	struct vmcb_segment seg;
785	struct vmcb_ctrl *ctrl;
786	char *inst_bytes;
787	int error, inst_len;
788
789	ctrl = &vmcb->ctrl;
790	paging = &vmexit->u.inst_emul.paging;
791
792	vmexit->exitcode = VM_EXITCODE_INST_EMUL;
793	vmexit->u.inst_emul.gpa = gpa;
794	vmexit->u.inst_emul.gla = VIE_INVALID_GLA;
795	svm_paging_info(vmcb, paging);
796
797	error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg);
798	KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error));
799
800	switch(paging->cpu_mode) {
801	case CPU_MODE_PROTECTED:
802	case CPU_MODE_COMPATIBILITY:
803		/*
804		 * Section 4.8.1 of APM2, Default Operand Size or D bit.
805		 */
806		vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ?
807		    1 : 0;
808		break;
809	default:
810		vmexit->u.inst_emul.cs_d = 0;
811		break;
812	}
813
814	/*
815	 * Copy the instruction bytes into 'vie' if available.
816	 */
817	if (decode_assist() && !disable_npf_assist) {
818		inst_len = ctrl->inst_len;
819		inst_bytes = ctrl->inst_bytes;
820	} else {
821		inst_len = 0;
822		inst_bytes = NULL;
823	}
824	vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len);
825}
826
827#ifdef KTR
828static const char *
829intrtype_to_str(int intr_type)
830{
831	switch (intr_type) {
832	case VMCB_EVENTINJ_TYPE_INTR:
833		return ("hwintr");
834	case VMCB_EVENTINJ_TYPE_NMI:
835		return ("nmi");
836	case VMCB_EVENTINJ_TYPE_INTn:
837		return ("swintr");
838	case VMCB_EVENTINJ_TYPE_EXCEPTION:
839		return ("exception");
840	default:
841		panic("%s: unknown intr_type %d", __func__, intr_type);
842	}
843}
844#endif
845
846/*
847 * Inject an event into the vcpu as described in section 15.20, "Event injection".
848 */
849static void
850svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
851		 uint32_t error, bool ec_valid)
852{
853	struct vmcb_ctrl *ctrl;
854
855	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
856
857	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0,
858	    ("%s: event already pending %#lx", __func__, ctrl->eventinj));
859
860	KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d",
861	    __func__, vector));
862
863	switch (intr_type) {
864	case VMCB_EVENTINJ_TYPE_INTR:
865	case VMCB_EVENTINJ_TYPE_NMI:
866	case VMCB_EVENTINJ_TYPE_INTn:
867		break;
868	case VMCB_EVENTINJ_TYPE_EXCEPTION:
869		if (vector >= 0 && vector <= 31 && vector != 2)
870			break;
871		/* FALLTHROUGH */
872	default:
873		panic("%s: invalid intr_type/vector: %d/%d", __func__,
874		    intr_type, vector);
875	}
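	/*
	 * EVENTINJ encoding (per APMv2, section 15.20): bits 7:0 hold the
	 * vector, bits 10:8 the event type, bit 11 marks the error code as
	 * valid, bit 31 marks the field itself as valid and bits 63:32 carry
	 * the error code.
	 */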
876	ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID;
877	if (ec_valid) {
878		ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID;
879		ctrl->eventinj |= (uint64_t)error << 32;
880		VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
881		    intrtype_to_str(intr_type), vector, error);
882	} else {
883		VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
884		    intrtype_to_str(intr_type), vector);
885	}
886}
887
888static void
889svm_update_virqinfo(struct svm_softc *sc, int vcpu)
890{
891	struct vm *vm;
892	struct vlapic *vlapic;
893	struct vmcb_ctrl *ctrl;
894	int pending;
895
896	vm = sc->vm;
897	vlapic = vm_lapic(vm, vcpu);
898	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
899
900	/* Update %cr8 in the emulated vlapic */
901	vlapic_set_cr8(vlapic, ctrl->v_tpr);
902
903	/*
904	 * If V_IRQ indicates that the interrupt injection attempted on the
905	 * last VMRUN was successful, then update the vlapic accordingly.
906	 */
907	if (ctrl->v_intr_vector != 0) {
908		pending = ctrl->v_irq;
909		KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid "
910		    "v_intr_vector %d", __func__, ctrl->v_intr_vector));
911		KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
912		VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector,
913		    pending ? "pending" : "accepted");
914		if (!pending)
915			vlapic_intr_accepted(vlapic, ctrl->v_intr_vector);
916	}
917}
918
919static void
920svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
921{
922	struct vmcb_ctrl *ctrl;
923	uint64_t intinfo;
924
925	ctrl  = svm_get_vmcb_ctrl(svm_sc, vcpu);
926	intinfo = ctrl->exitintinfo;
927	if (!VMCB_EXITINTINFO_VALID(intinfo))
928		return;
929
930	/*
931	 * From APMv2, Section "Intercepts during IDT interrupt delivery"
932	 *
933	 * If a #VMEXIT happened during event delivery then record the event
934	 * that was being delivered.
935	 */
936	VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
937		intinfo, VMCB_EXITINTINFO_VECTOR(intinfo));
938	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
939	vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
940}
941
942static __inline int
943vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
944{
945
946	return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
947	    VMCB_INTCPT_VINTR));
948}
949
950static __inline void
951enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
952{
953	struct vmcb_ctrl *ctrl;
954
955	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
956
957	if (ctrl->v_irq && ctrl->v_intr_vector == 0) {
958		KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__));
959		KASSERT(vintr_intercept_enabled(sc, vcpu),
960		    ("%s: vintr intercept should be enabled", __func__));
961		return;
962	}
963
964	VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
965	ctrl->v_irq = 1;
966	ctrl->v_ign_tpr = 1;
967	ctrl->v_intr_vector = 0;
968	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
969	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
970}
971
972static __inline void
973disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
974{
975	struct vmcb_ctrl *ctrl;
976
977	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
978
979	if (!ctrl->v_irq && ctrl->v_intr_vector == 0) {
980		KASSERT(!vintr_intercept_enabled(sc, vcpu),
981		    ("%s: vintr intercept should be disabled", __func__));
982		return;
983	}
984
985#ifdef KTR
986	if (ctrl->v_intr_vector == 0)
987		VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
988	else
989		VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection");
990#endif
991	ctrl->v_irq = 0;
992	ctrl->v_intr_vector = 0;
993	svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
994	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
995}
996
997static int
998svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
999{
1000	struct vmcb_ctrl *ctrl;
1001	int oldval, newval;
1002
1003	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1004	oldval = ctrl->intr_shadow;
1005	newval = val ? 1 : 0;
1006	if (newval != oldval) {
1007		ctrl->intr_shadow = newval;
1008		VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
1009	}
1010	return (0);
1011}
1012
1013static int
1014svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
1015{
1016	struct vmcb_ctrl *ctrl;
1017
1018	ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1019	*val = ctrl->intr_shadow;
1020	return (0);
1021}
1022
1023/*
1024 * Once an NMI is injected it blocks delivery of further NMIs until the handler
1025 * executes an IRET. The IRET intercept is enabled when an NMI is injected
1026 * to track when the vcpu is done handling the NMI.
1027 */
1028static int
1029nmi_blocked(struct svm_softc *sc, int vcpu)
1030{
1031	int blocked;
1032
1033	blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
1034	    VMCB_INTCPT_IRET);
1035	return (blocked);
1036}
1037
1038static void
1039enable_nmi_blocking(struct svm_softc *sc, int vcpu)
1040{
1041
1042	KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
1043	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
1044	svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1045}
1046
1047static void
1048clear_nmi_blocking(struct svm_softc *sc, int vcpu)
1049{
1050	int error;
1051
1052	KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
1053	VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
1054	/*
1055	 * When the IRET intercept is cleared the vcpu will attempt to execute
1056	 * the "iret" when it runs next. However, it is possible to inject
1057	 * another NMI into the vcpu before the "iret" has actually executed.
1058	 *
1059	 * For example, if the "iret" encounters a #NPF when accessing the stack
1060	 * it will trap back into the hypervisor. If an NMI is pending for
1061	 * the vcpu it will be injected into the guest.
1062	 *
1063	 * XXX this needs to be fixed
1064	 */
1065	svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1066
1067	/*
1068	 * Set 'intr_shadow' to prevent an NMI from being injected on the
1069	 * immediate VMRUN.
1070	 */
1071	error = svm_modify_intr_shadow(sc, vcpu, 1);
1072	KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error));
1073}
1074
1075static int
1076emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
1077    bool *retu)
1078{
1079	int error;
1080
1081	if (lapic_msr(num))
1082		error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
1083	else if (num == MSR_EFER)
1084		error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, val);
1085	else
1086		error = svm_wrmsr(sc, vcpu, num, val, retu);
1087
1088	return (error);
1089}
1090
1091static int
1092emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
1093{
1094	struct vmcb_state *state;
1095	struct svm_regctx *ctx;
1096	uint64_t result;
1097	int error;
1098
1099	if (lapic_msr(num))
1100		error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
1101	else
1102		error = svm_rdmsr(sc, vcpu, num, &result, retu);
1103
1104	if (error == 0) {
1105		state = svm_get_vmcb_state(sc, vcpu);
1106		ctx = svm_get_guest_regctx(sc, vcpu);
1107		state->rax = result & 0xffffffff;
1108		ctx->sctx_rdx = result >> 32;
1109	}
1110
1111	return (error);
1112}
1113
1114#ifdef KTR
1115static const char *
1116exit_reason_to_str(uint64_t reason)
1117{
1118	static char reasonbuf[32];
1119
1120	switch (reason) {
1121	case VMCB_EXIT_INVALID:
1122		return ("invalvmcb");
1123	case VMCB_EXIT_SHUTDOWN:
1124		return ("shutdown");
1125	case VMCB_EXIT_NPF:
1126		return ("nptfault");
1127	case VMCB_EXIT_PAUSE:
1128		return ("pause");
1129	case VMCB_EXIT_HLT:
1130		return ("hlt");
1131	case VMCB_EXIT_CPUID:
1132		return ("cpuid");
1133	case VMCB_EXIT_IO:
1134		return ("inout");
1135	case VMCB_EXIT_MC:
1136		return ("mchk");
1137	case VMCB_EXIT_INTR:
1138		return ("extintr");
1139	case VMCB_EXIT_NMI:
1140		return ("nmi");
1141	case VMCB_EXIT_VINTR:
1142		return ("vintr");
1143	case VMCB_EXIT_MSR:
1144		return ("msr");
1145	case VMCB_EXIT_IRET:
1146		return ("iret");
1147	default:
1148		snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason);
1149		return (reasonbuf);
1150	}
1151}
1152#endif	/* KTR */
1153
1154/*
1155 * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs
1156 * that are due to instruction intercepts as well as MSR and IOIO intercepts
1157 * and exceptions caused by INT3, INTO and BOUND instructions.
1158 *
1159 * Return 1 if the nRIP is valid and 0 otherwise.
1160 */
1161static int
1162nrip_valid(uint64_t exitcode)
1163{
1164	switch (exitcode) {
1165	case 0x00 ... 0x0F:	/* read of CR0 through CR15 */
1166	case 0x10 ... 0x1F:	/* write of CR0 through CR15 */
1167	case 0x20 ... 0x2F:	/* read of DR0 through DR15 */
1168	case 0x30 ... 0x3F:	/* write of DR0 through DR15 */
1169	case 0x43:		/* INT3 */
1170	case 0x44:		/* INTO */
1171	case 0x45:		/* BOUND */
1172	case 0x65 ... 0x7C:	/* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */
1173	case 0x80 ... 0x8D:	/* VMEXIT_VMRUN ... VMEXIT_XSETBV */
1174		return (1);
1175	default:
1176		return (0);
1177	}
1178}
1179
1180/*
1181 * Collateral for a generic SVM VM-exit.
1182 */
1183static void
1184vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2)
1185{
1186
1187	vme->exitcode = VM_EXITCODE_SVM;
1188	vme->u.svm.exitcode = code;
1189	vme->u.svm.exitinfo1 = info1;
1190	vme->u.svm.exitinfo2 = info2;
1191}
1192
1193static int
1194svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
1195{
1196	struct vmcb *vmcb;
1197	struct vmcb_state *state;
1198	struct vmcb_ctrl *ctrl;
1199	struct svm_regctx *ctx;
1200	uint64_t code, info1, info2, val;
1201	uint32_t eax, ecx, edx;
1202	int handled;
1203	bool retu;
1204
1205	ctx = svm_get_guest_regctx(svm_sc, vcpu);
1206	vmcb = svm_get_vmcb(svm_sc, vcpu);
1207	state = &vmcb->state;
1208	ctrl = &vmcb->ctrl;
1209
1210	handled = 0;
1211	code = ctrl->exitcode;
1212	info1 = ctrl->exitinfo1;
1213	info2 = ctrl->exitinfo2;
1214
1215	vmexit->exitcode = VM_EXITCODE_BOGUS;
1216	vmexit->rip = state->rip;
1217	vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;
1218
1219	vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);
1220
1221	/*
1222	 * #VMEXIT(INVALID) needs to be handled early because the VMCB is
1223	 * in an inconsistent state and can trigger assertions that would
1224	 * never happen otherwise.
1225	 */
1226	if (code == VMCB_EXIT_INVALID) {
1227		vm_exit_svm(vmexit, code, info1, info2);
1228		return (0);
1229	}
1230
1231	KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event "
1232	    "injection valid bit is set %#lx", __func__, ctrl->eventinj));
1233
1234	KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15,
1235	    ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)",
1236	    vmexit->inst_length, code, info1, info2));
1237
1238	svm_update_virqinfo(svm_sc, vcpu);
1239	svm_save_intinfo(svm_sc, vcpu);
1240
1241	switch (code) {
1242	case VMCB_EXIT_IRET:
1243		/*
1244		 * Restart execution at "iret" but with the intercept cleared.
1245		 */
1246		vmexit->inst_length = 0;
1247		clear_nmi_blocking(svm_sc, vcpu);
1248		handled = 1;
1249		break;
1250	case VMCB_EXIT_VINTR:	/* interrupt window exiting */
1251		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
1252		handled = 1;
1253		break;
1254	case VMCB_EXIT_INTR:	/* external interrupt */
1255		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
1256		handled = 1;
1257		break;
1258	case VMCB_EXIT_NMI:	/* external NMI */
1259		handled = 1;
1260		break;
1261	case VMCB_EXIT_MC:	/* machine check */
1262		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
1263		break;
1264	case VMCB_EXIT_MSR:	/* MSR access. */
1265		eax = state->rax;
1266		ecx = ctx->sctx_rcx;
1267		edx = ctx->sctx_rdx;
1268		retu = false;
1269
1270		if (info1) {
1271			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
1272			val = (uint64_t)edx << 32 | eax;
1273			VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
1274			    ecx, val);
1275			if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
1276				vmexit->exitcode = VM_EXITCODE_WRMSR;
1277				vmexit->u.msr.code = ecx;
1278				vmexit->u.msr.wval = val;
1279			} else if (!retu) {
1280				handled = 1;
1281			} else {
1282				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1283				    ("emulate_wrmsr retu with bogus exitcode"));
1284			}
1285		} else {
1286			VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
1287			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
1288			if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
1289				vmexit->exitcode = VM_EXITCODE_RDMSR;
1290				vmexit->u.msr.code = ecx;
1291			} else if (!retu) {
1292				handled = 1;
1293			} else {
1294				KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS,
1295				    ("emulate_rdmsr retu with bogus exitcode"));
1296			}
1297		}
1298		break;
1299	case VMCB_EXIT_IO:
1300		handled = svm_handle_io(svm_sc, vcpu, vmexit);
1301		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
1302		break;
1303	case VMCB_EXIT_CPUID:
1304		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
1305		handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
1306		    (uint32_t *)&state->rax,
1307		    (uint32_t *)&ctx->sctx_rbx,
1308		    (uint32_t *)&ctx->sctx_rcx,
1309		    (uint32_t *)&ctx->sctx_rdx);
1310		break;
1311	case VMCB_EXIT_HLT:
1312		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
1313		vmexit->exitcode = VM_EXITCODE_HLT;
1314		vmexit->u.hlt.rflags = state->rflags;
1315		break;
1316	case VMCB_EXIT_PAUSE:
1317		vmexit->exitcode = VM_EXITCODE_PAUSE;
1318		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
1319		break;
1320	case VMCB_EXIT_NPF:
1321		/* EXITINFO2 contains the faulting guest physical address */
1322		if (info1 & VMCB_NPF_INFO1_RSV) {
1323			VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
1324			    "reserved bits set: info1(%#lx) info2(%#lx)",
1325			    info1, info2);
1326		} else if (vm_mem_allocated(svm_sc->vm, info2)) {
1327			vmexit->exitcode = VM_EXITCODE_PAGING;
1328			vmexit->u.paging.gpa = info2;
1329			vmexit->u.paging.fault_type = npf_fault_type(info1);
1330			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
1331			VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
1332			    "on gpa %#lx/%#lx at rip %#lx",
1333			    info2, info1, state->rip);
1334		} else if (svm_npf_emul_fault(info1)) {
1335			svm_handle_inst_emul(vmcb, info2, vmexit);
1336			vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
1337			VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
1338			    "for gpa %#lx/%#lx at rip %#lx",
1339			    info2, info1, state->rip);
1340		}
1341		break;
1342	default:
1343		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
1344		break;
1345	}
1346
1347	VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
1348	    handled ? "handled" : "unhandled", exit_reason_to_str(code),
1349	    vmexit->rip, vmexit->inst_length);
1350
1351	if (handled) {
1352		vmexit->rip += vmexit->inst_length;
1353		vmexit->inst_length = 0;
1354		state->rip = vmexit->rip;
1355	} else {
1356		if (vmexit->exitcode == VM_EXITCODE_BOGUS) {
1357			/*
1358			 * If this VM exit was not claimed by anybody then
1359			 * treat it as a generic SVM exit.
1360			 */
1361			vm_exit_svm(vmexit, code, info1, info2);
1362		} else {
1363			/*
1364			 * The exitcode and collateral have been populated.
1365			 * The VM exit will be processed further in userland.
1366			 */
1367		}
1368	}
1369	return (handled);
1370}
1371
1372static void
1373svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
1374{
1375	uint64_t intinfo;
1376
1377	if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
1378		return;
1379
1380	KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not "
1381	    "valid: %#lx", __func__, intinfo));
1382
1383	svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
1384		VMCB_EXITINTINFO_VECTOR(intinfo),
1385		VMCB_EXITINTINFO_EC(intinfo),
1386		VMCB_EXITINTINFO_EC_VALID(intinfo));
1387	vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
1388	VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
1389}
1390
1391/*
1392 * Inject event to virtual cpu.
1393 */
1394static void
1395svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
1396{
1397	struct vmcb_ctrl *ctrl;
1398	struct vmcb_state *state;
1399	uint8_t v_tpr;
1400	int vector, need_intr_window, pending_apic_vector;
1401
1402	state = svm_get_vmcb_state(sc, vcpu);
1403	ctrl  = svm_get_vmcb_ctrl(sc, vcpu);
1404
1405	need_intr_window = 0;
1406	pending_apic_vector = 0;
1407
1408	/*
1409	 * Inject pending events or exceptions for this vcpu.
1410	 *
1411	 * An event might be pending because the previous #VMEXIT happened
1412	 * during event delivery (i.e. ctrl->exitintinfo).
1413	 *
1414	 * An event might also be pending because an exception was injected
1415	 * by the hypervisor (e.g. #PF during instruction emulation).
1416	 */
1417	svm_inj_intinfo(sc, vcpu);
1418
1419	/* NMI event has priority over interrupts. */
1420	if (vm_nmi_pending(sc->vm, vcpu)) {
1421		if (nmi_blocked(sc, vcpu)) {
1422			/*
1423			 * Can't inject another NMI if the guest has not
1424			 * yet executed an "iret" after the last NMI.
1425			 */
1426			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
1427			    "to NMI-blocking");
1428		} else if (ctrl->intr_shadow) {
1429			/*
1430			 * Can't inject an NMI if the vcpu is in an intr_shadow.
1431			 */
1432			VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
1433			    "interrupt shadow");
1434			need_intr_window = 1;
1435			goto done;
1436		} else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
1437			/*
1438			 * If there is already an exception/interrupt pending
1439			 * then defer the NMI until after that.
1440			 */
1441			VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
1442			    "eventinj %#lx", ctrl->eventinj);
1443
1444			/*
1445			 * Use self-IPI to trigger a VM-exit as soon as
1446			 * possible after the event injection is completed.
1447			 *
1448			 * This works only if the external interrupt exiting
1449			 * is at a lower priority than the event injection.
1450			 *
1451			 * Although not explicitly specified in APMv2 the
1452			 * relative priorities were verified empirically.
1453			 */
1454			ipi_cpu(curcpu, IPI_AST);	/* XXX vmm_ipinum? */
1455		} else {
1456			vm_nmi_clear(sc->vm, vcpu);
1457
1458			/* Inject NMI, vector number is not used */
1459			svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
1460			    IDT_NMI, 0, false);
1461
1462			/* virtual NMI blocking is now in effect */
1463			enable_nmi_blocking(sc, vcpu);
1464
1465			VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
1466		}
1467	}
1468
1469	if (!vm_extint_pending(sc->vm, vcpu)) {
1470		/*
1471		 * APIC interrupts are delivered using the V_IRQ offload.
1472		 *
1473		 * The primary benefit is that the hypervisor doesn't need to
1474		 * deal with the various conditions that inhibit interrupts.
1475		 * It also means that TPR changes via CR8 will be handled
1476		 * without any hypervisor involvement.
1477		 *
1478		 * Note that the APIC vector must remain pending in the vIRR
1479		 * until it is confirmed that it was delivered to the guest.
1480		 * This can be confirmed based on the value of V_IRQ at the
1481		 * next #VMEXIT (1 = pending, 0 = delivered).
1482		 *
1483		 * Also note that it is possible that another higher priority
1484		 * vector can become pending before this vector is delivered
1485		 * to the guest. This is alright because vcpu_notify_event()
1486		 * will send an IPI and force the vcpu to trap back into the
1487		 * hypervisor. The higher priority vector will be injected on
1488		 * the next VMRUN.
1489		 */
1490		if (vlapic_pending_intr(vlapic, &vector)) {
1491			KASSERT(vector >= 16 && vector <= 255,
1492			    ("invalid vector %d from local APIC", vector));
1493			pending_apic_vector = vector;
1494		}
1495		goto done;
1496	}
1497
1498	/* Ask the legacy pic for a vector to inject */
1499	vatpic_pending_intr(sc->vm, &vector);
1500	KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR",
1501	    vector));
1502
1503	/*
1504	 * If the guest has disabled interrupts or is in an interrupt shadow
1505	 * then we cannot inject the pending interrupt.
1506	 */
1507	if ((state->rflags & PSL_I) == 0) {
1508		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
1509		    "rflags %#lx", vector, state->rflags);
1510		need_intr_window = 1;
1511		goto done;
1512	}
1513
1514	if (ctrl->intr_shadow) {
1515		VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
1516		    "interrupt shadow", vector);
1517		need_intr_window = 1;
1518		goto done;
1519	}
1520
1521	if (ctrl->eventinj & VMCB_EVENTINJ_VALID) {
1522		VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
1523		    "eventinj %#lx", vector, ctrl->eventinj);
1524		need_intr_window = 1;
1525		goto done;
1526	}
1527
1528	/*
1529	 * Legacy PIC interrupts are delivered via the event injection
1530	 * mechanism.
1531	 */
1532	svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
1533
1534	vm_extint_clear(sc->vm, vcpu);
1535	vatpic_intr_accepted(sc->vm, vector);
1536
1537	/*
1538	 * Force a VM-exit as soon as the vcpu is ready to accept another
1539	 * interrupt. This is done because the PIC might have another vector
1540	 * that it wants to inject. Also, if the APIC has a pending interrupt
1541	 * that was preempted by the ExtInt then it allows us to inject the
1542	 * APIC vector as soon as possible.
1543	 */
1544	need_intr_window = 1;
1545done:
1546	/*
1547	 * The guest can modify the TPR by writing to %CR8. In guest mode
1548	 * the processor reflects this write to V_TPR without hypervisor
1549	 * intervention.
1550	 *
1551	 * The guest can also modify the TPR by writing to it via the memory
1552	 * mapped APIC page. In this case, the write will be emulated by the
1553	 * hypervisor. For this reason V_TPR must be updated before every
1554	 * VMRUN.
1555	 */
1556	v_tpr = vlapic_get_cr8(vlapic);
1557	KASSERT(v_tpr >= 0 && v_tpr <= 15, ("invalid v_tpr %#x", v_tpr));
1558	if (ctrl->v_tpr != v_tpr) {
1559		VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
1560		    ctrl->v_tpr, v_tpr);
1561		ctrl->v_tpr = v_tpr;
1562		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1563	}
1564
1565	if (pending_apic_vector) {
1566		/*
1567		 * If an APIC vector is being injected then interrupt window
1568		 * exiting is not possible on this VMRUN.
1569		 */
1570		KASSERT(!need_intr_window, ("intr_window exiting impossible"));
1571		VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ",
1572		    pending_apic_vector);
1573
1574		ctrl->v_irq = 1;
1575		ctrl->v_ign_tpr = 0;
1576		ctrl->v_intr_vector = pending_apic_vector;
1577		ctrl->v_intr_prio = pending_apic_vector >> 4;
1578		svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1579	} else if (need_intr_window) {
1580		/*
1581		 * We use V_IRQ in conjunction with the VINTR intercept to
1582		 * trap into the hypervisor as soon as a virtual interrupt
1583		 * can be delivered.
1584		 *
1585		 * Since injected events are not subject to intercept checks
1586		 * we need to ensure that the V_IRQ is not actually going to
1587		 * be delivered on VM entry. The KASSERT below enforces this.
1588		 */
1589		KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 ||
1590		    (state->rflags & PSL_I) == 0 || ctrl->intr_shadow,
1591		    ("Bogus intr_window_exiting: eventinj (%#lx), "
1592		    "intr_shadow (%u), rflags (%#lx)",
1593		    ctrl->eventinj, ctrl->intr_shadow, state->rflags));
1594		enable_intr_window_exiting(sc, vcpu);
1595	} else {
1596		disable_intr_window_exiting(sc, vcpu);
1597	}
1598}
1599
1600static __inline void
1601restore_host_tss(void)
1602{
1603	struct system_segment_descriptor *tss_sd;
1604
1605	/*
1606	 * The TSS descriptor was in use prior to launching the guest so it
1607	 * has been marked busy.
1608	 *
1609	 * 'ltr' requires the descriptor to be marked available so change the
1610	 * type to "64-bit available TSS".
1611	 */
1612	tss_sd = PCPU_GET(tss);
1613	tss_sd->sd_type = SDT_SYSTSS;
1614	ltr(GSEL(GPROC0_SEL, SEL_KPL));
1615}
1616
1617static void
1618check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu)
1619{
1620	struct svm_vcpu *vcpustate;
1621	struct vmcb_ctrl *ctrl;
1622	long eptgen;
1623	bool alloc_asid;
1624
1625	KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not "
1626	    "active on cpu %u", __func__, thiscpu));
1627
1628	vcpustate = svm_get_vcpu(sc, vcpuid);
1629	ctrl = svm_get_vmcb_ctrl(sc, vcpuid);
1630
1631	/*
1632	 * The TLB entries associated with the vcpu's ASID are not valid
1633	 * if either of the following conditions is true:
1634	 *
1635	 * 1. The vcpu's ASID generation is different than the host cpu's
1636	 *    ASID generation. This happens when the vcpu migrates to a new
1637	 *    host cpu. It can also happen when the number of vcpus executing
1638	 *    on a host cpu is greater than the number of ASIDs available.
1639	 *
1640	 * 2. The pmap generation number is different than the value cached in
1641	 *    the 'vcpustate'. This happens when the host invalidates pages
1642	 *    belonging to the guest.
1643	 *
1644	 *	asidgen		eptgen	      Action
1645	 *	mismatch	mismatch
1646	 *	   0		   0		(a)
1647	 *	   0		   1		(b1) or (b2)
1648	 *	   1		   0		(c)
1649	 *	   1		   1		(d)
1650	 *
1651	 * (a) There is no mismatch in eptgen or ASID generation and therefore
1652	 *     no further action is needed.
1653	 *
1654	 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
1655	 *      retained and the TLB entries associated with this ASID
1656	 *      are flushed by VMRUN.
1657	 *
1658	 * (b2) If the cpu does not support FlushByAsid then a new ASID is
1659	 *      allocated.
1660	 *
1661	 * (c) A new ASID is allocated.
1662	 *
1663	 * (d) A new ASID is allocated.
1664	 */
1665
1666	alloc_asid = false;
1667	eptgen = pmap->pm_eptgen;
1668	ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING;
1669
1670	if (vcpustate->asid.gen != asid[thiscpu].gen) {
1671		alloc_asid = true;	/* (c) and (d) */
1672	} else if (vcpustate->eptgen != eptgen) {
1673		if (flush_by_asid())
1674			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;	/* (b1) */
1675		else
1676			alloc_asid = true;			/* (b2) */
1677	} else {
1678		/*
1679		 * This is the common case (a).
1680		 */
1681		KASSERT(!alloc_asid, ("ASID allocation not necessary"));
1682		KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING,
1683		    ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl));
1684	}
1685
1686	if (alloc_asid) {
1687		if (++asid[thiscpu].num >= nasid) {
1688			asid[thiscpu].num = 1;
1689			if (++asid[thiscpu].gen == 0)
1690				asid[thiscpu].gen = 1;
1691			/*
1692			 * If this cpu does not support "flush-by-asid"
1693			 * then flush the entire TLB on a generation
1694			 * bump. Subsequent ASID allocation in this
1695			 * generation can be done without a TLB flush.
1696			 */
1697			if (!flush_by_asid())
1698				ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL;
1699		}
1700		vcpustate->asid.gen = asid[thiscpu].gen;
1701		vcpustate->asid.num = asid[thiscpu].num;
1702
1703		ctrl->asid = vcpustate->asid.num;
1704		svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID);
1705		/*
1706		 * If this cpu supports "flush-by-asid" then the TLB
1707		 * was not flushed after the generation bump. The TLB
1708		 * is flushed selectively after every new ASID allocation.
1709		 */
1710		if (flush_by_asid())
1711			ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST;
1712	}
1713	vcpustate->eptgen = eptgen;
1714
1715	KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero"));
1716	KASSERT(ctrl->asid == vcpustate->asid.num,
1717	    ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num));
1718}
1719
1720static __inline void
1721disable_gintr(void)
1722{
1723
1724        __asm __volatile("clgi" : : :);
1725}
1726
1727static __inline void
1728enable_gintr(void)
1729{
1730
1731        __asm __volatile("stgi" : : :);
1732}
1733
1734/*
1735 * Start vcpu with specified RIP.
1736 */
1737static int
1738svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
1739	void *rend_cookie, void *suspended_cookie)
1740{
1741	struct svm_regctx *gctx;
1742	struct svm_softc *svm_sc;
1743	struct svm_vcpu *vcpustate;
1744	struct vmcb_state *state;
1745	struct vmcb_ctrl *ctrl;
1746	struct vm_exit *vmexit;
1747	struct vlapic *vlapic;
1748	struct vm *vm;
1749	uint64_t vmcb_pa;
1750	u_int thiscpu;
1751	int handled;
1752
1753	svm_sc = arg;
1754	vm = svm_sc->vm;
1755
1756	vcpustate = svm_get_vcpu(svm_sc, vcpu);
1757	state = svm_get_vmcb_state(svm_sc, vcpu);
1758	ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
1759	vmexit = vm_exitinfo(vm, vcpu);
1760	vlapic = vm_lapic(vm, vcpu);
1761
1762	/*
1763	 * Stash 'curcpu' on the stack as 'thiscpu'.
1764	 *
1765	 * The per-cpu data area is not accessible until MSR_GSBASE is restored
1766	 * after the #VMEXIT. Since VMRUN is executed inside a critical section
1767	 * 'curcpu' and 'thiscpu' are guaranteed to be identical.
1768	 */
1769	thiscpu = curcpu;
1770
1771	gctx = svm_get_guest_regctx(svm_sc, vcpu);
1772	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
1773
1774	if (vcpustate->lastcpu != thiscpu) {
1775		/*
1776		 * Force new ASID allocation by invalidating the generation.
1777		 */
1778		vcpustate->asid.gen = 0;
1779
1780		/*
1781		 * Invalidate the VMCB state cache by marking all fields dirty.
1782		 */
1783		svm_set_dirty(svm_sc, vcpu, 0xffffffff);
1784
1785		/*
1786		 * XXX
1787		 * Setting 'vcpustate->lastcpu' here is a bit premature because
1788		 * we may return from this function without actually executing
1789		 * the VMRUN instruction. This could happen if a rendezvous
1790		 * or an AST is pending on the first time through the loop.
1791		 *
1792		 * This works for now but any new side-effects of vcpu
1793		 * migration should take this case into account.
1794		 */
1795		vcpustate->lastcpu = thiscpu;
1796		vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
1797	}
1798
1799	svm_msr_guest_enter(svm_sc, vcpu);
1800
1801	/* Update Guest RIP */
1802	state->rip = rip;
1803
1804	do {
1805		/*
1806		 * Disable global interrupts to guarantee atomicity during
1807		 * loading of guest state. This includes not only the state
1808		 * loaded by the "vmrun" instruction but also software state
1809		 * maintained by the hypervisor: suspended and rendezvous
1810		 * state, NPT generation number, vlapic interrupts etc.
1811		 */
1812		disable_gintr();
1813
1814		if (vcpu_suspended(suspended_cookie)) {
1815			enable_gintr();
1816			vm_exit_suspended(vm, vcpu, state->rip);
1817			break;
1818		}
1819
1820		if (vcpu_rendezvous_pending(rend_cookie)) {
1821			enable_gintr();
1822			vm_exit_rendezvous(vm, vcpu, state->rip);
1823			break;
1824		}
1825
1826		/* The scheduler has asked us to yield the cpu. */
1827		if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED)) {
1828			enable_gintr();
1829			vm_exit_astpending(vm, vcpu, state->rip);
1830			break;
1831		}
1832
1833		svm_inj_interrupts(svm_sc, vcpu, vlapic);
1834
1835		/* Activate the nested pmap on 'thiscpu' */
1836		CPU_SET_ATOMIC_ACQ(thiscpu, &pmap->pm_active);
1837
1838		/*
1839		 * Check the pmap generation and the ASID generation to
1840		 * ensure that the vcpu does not use stale TLB mappings.
1841		 */
1842		check_asid(svm_sc, vcpu, pmap, thiscpu);
1843
1844		ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty;
1845		vcpustate->dirty = 0;
1846		VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
1847
1848		/* Launch Virtual Machine. */
1849		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
1850		svm_launch(vmcb_pa, gctx);
1851
1852		CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);
1853
1854		/*
1855		 * Restore MSR_GSBASE to point to the pcpu data area.
1856		 *
1857		 * Note that accesses done via PCPU_GET/PCPU_SET will work
1858		 * only after MSR_GSBASE is restored.
1859		 *
1860		 * Also note that we don't bother restoring MSR_KGSBASE
1861		 * since it is not used in the kernel and will be restored
1862		 * when the VMRUN ioctl returns to userspace.
1863		 */
1864		wrmsr(MSR_GSBASE, (uint64_t)&__pcpu[thiscpu]);
1865		KASSERT(curcpu == thiscpu, ("thiscpu/curcpu (%u/%u) mismatch",
1866		    thiscpu, curcpu));
1867
1868		/*
1869		 * The host GDTR and IDTR are saved by VMRUN and restored
1870		 * automatically on #VMEXIT. However, the host TSS needs
1871		 * to be restored explicitly.
1872		 */
1873		restore_host_tss();
1874
1875		/* #VMEXIT disables interrupts so re-enable them here. */
1876		enable_gintr();
1877
1878		/* Handle #VMEXIT and if required return to user space. */
1879		handled = svm_vmexit(svm_sc, vcpu, vmexit);
1880	} while (handled);
1881
1882	svm_msr_guest_exit(svm_sc, vcpu);
1883
1884	return (0);
1885}
1886
1887static void
1888svm_vmcleanup(void *arg)
1889{
1890	struct svm_softc *sc = arg;
1891
1892	free(sc, M_SVM);
1893}
1894
1895static register_t *
1896swctx_regptr(struct svm_regctx *regctx, int reg)
1897{
1898
1899	switch (reg) {
1900	case VM_REG_GUEST_RBX:
1901		return (&regctx->sctx_rbx);
1902	case VM_REG_GUEST_RCX:
1903		return (&regctx->sctx_rcx);
1904	case VM_REG_GUEST_RDX:
1905		return (&regctx->sctx_rdx);
1906	case VM_REG_GUEST_RDI:
1907		return (&regctx->sctx_rdi);
1908	case VM_REG_GUEST_RSI:
1909		return (&regctx->sctx_rsi);
1910	case VM_REG_GUEST_RBP:
1911		return (&regctx->sctx_rbp);
1912	case VM_REG_GUEST_R8:
1913		return (&regctx->sctx_r8);
1914	case VM_REG_GUEST_R9:
1915		return (&regctx->sctx_r9);
1916	case VM_REG_GUEST_R10:
1917		return (&regctx->sctx_r10);
1918	case VM_REG_GUEST_R11:
1919		return (&regctx->sctx_r11);
1920	case VM_REG_GUEST_R12:
1921		return (&regctx->sctx_r12);
1922	case VM_REG_GUEST_R13:
1923		return (&regctx->sctx_r13);
1924	case VM_REG_GUEST_R14:
1925		return (&regctx->sctx_r14);
1926	case VM_REG_GUEST_R15:
1927		return (&regctx->sctx_r15);
1928	default:
1929		return (NULL);
1930	}
1931}
1932
1933static int
1934svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
1935{
1936	struct svm_softc *svm_sc;
1937	register_t *reg;
1938
1939	svm_sc = arg;
1940
1941	if (ident == VM_REG_GUEST_INTR_SHADOW) {
1942		return (svm_get_intr_shadow(svm_sc, vcpu, val));
1943	}
1944
1945	if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
1946		return (0);
1947	}
1948
1949	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
1950
1951	if (reg != NULL) {
1952		*val = *reg;
1953		return (0);
1954	}
1955
1956	VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
1957	return (EINVAL);
1958}
1959
1960static int
1961svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
1962{
1963	struct svm_softc *svm_sc;
1964	register_t *reg;
1965
1966	svm_sc = arg;
1967
1968	if (ident == VM_REG_GUEST_INTR_SHADOW) {
1969		return (svm_modify_intr_shadow(svm_sc, vcpu, val));
1970	}
1971
1972	if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
1973		return (0);
1974	}
1975
1976	reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
1977
1978	if (reg != NULL) {
1979		*reg = val;
1980		return (0);
1981	}
1982
1983	/*
1984	 * XXX deal with CR3 and invalidate TLB entries tagged with the
1985	 * vcpu's ASID. This needs to be treated differently depending on
1986	 * whether 'running' is true/false.
1987	 */
1988
1989	VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
1990	return (EINVAL);
1991}
1992
1993static int
1994svm_setcap(void *arg, int vcpu, int type, int val)
1995{
1996	struct svm_softc *sc;
1997	int error;
1998
1999	sc = arg;
2000	error = 0;
2001	switch (type) {
2002	case VM_CAP_HALT_EXIT:
2003		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2004		    VMCB_INTCPT_HLT, val);
2005		break;
2006	case VM_CAP_PAUSE_EXIT:
2007		svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2008		    VMCB_INTCPT_PAUSE, val);
2009		break;
2010	case VM_CAP_UNRESTRICTED_GUEST:
2011		/* Unrestricted guest execution cannot be disabled in SVM */
2012		if (val == 0)
2013			error = EINVAL;
2014		break;
2015	default:
2016		error = ENOENT;
2017		break;
2018	}
2019	return (error);
2020}
2021
2022static int
2023svm_getcap(void *arg, int vcpu, int type, int *retval)
2024{
2025	struct svm_softc *sc;
2026	int error;
2027
2028	sc = arg;
2029	error = 0;
2030
2031	switch (type) {
2032	case VM_CAP_HALT_EXIT:
2033		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2034		    VMCB_INTCPT_HLT);
2035		break;
2036	case VM_CAP_PAUSE_EXIT:
2037		*retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2038		    VMCB_INTCPT_PAUSE);
2039		break;
2040	case VM_CAP_UNRESTRICTED_GUEST:
2041		*retval = 1;	/* unrestricted guest is always enabled */
2042		break;
2043	default:
2044		error = ENOENT;
2045		break;
2046	}
2047	return (error);
2048}
2049
2050static struct vlapic *
2051svm_vlapic_init(void *arg, int vcpuid)
2052{
2053	struct svm_softc *svm_sc;
2054	struct vlapic *vlapic;
2055
2056	svm_sc = arg;
2057	vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO);
2058	vlapic->vm = svm_sc->vm;
2059	vlapic->vcpuid = vcpuid;
2060	vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid];
2061
2062	vlapic_init(vlapic);
2063
2064	return (vlapic);
2065}
2066
2067static void
2068svm_vlapic_cleanup(void *arg, struct vlapic *vlapic)
2069{
2070
2071        vlapic_cleanup(vlapic);
2072        free(vlapic, M_SVM_VLAPIC);
2073}
2074
2075struct vmm_ops vmm_ops_amd = {
2076	svm_init,
2077	svm_cleanup,
2078	svm_restore,
2079	svm_vminit,
2080	svm_vmrun,
2081	svm_vmcleanup,
2082	svm_getreg,
2083	svm_setreg,
2084	vmcb_getdesc,
2085	vmcb_setdesc,
2086	svm_getcap,
2087	svm_setcap,
2088	svm_npt_alloc,
2089	svm_npt_free,
2090	svm_vlapic_init,
2091	svm_vlapic_cleanup
2092};
2093