/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/vmm.c 284894 2015-06-27 22:48:22Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm.c 284894 2015-06-27 22:48:22Z neel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <x86/psl.h>
#include <x86/apicreg.h>
#include <machine/vmparam.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>
#include <machine/vmm_instruction_emul.h>

#include "vmm_ioport.h"
#include "vmm_ktr.h"
#include "vmm_host.h"
#include "vmm_mem.h"
#include "vmm_util.h"
#include "vatpic.h"
#include "vatpit.h"
#include "vhpet.h"
#include "vioapic.h"
#include "vlapic.h"
#include "vpmtmr.h"
#include "vrtc.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"
#include "vmm_lapic.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

/*
 * Initialization:
 * (a) allocated when vcpu is created
 * (i) initialized when vcpu is created and when it is reinitialized
 * (o) initialized the first time the vcpu is created
 * (x) initialized before use
 */
struct vcpu {
	struct mtx 	mtx;		/* (o) protects 'state' and 'hostcpu' */
	enum vcpu_state	state;		/* (o) vcpu state */
	int		hostcpu;	/* (o) vcpu's host cpu */
	struct vlapic	*vlapic;	/* (i) APIC device model */
	enum x2apic_state x2apic_state;	/* (i) APIC mode */
	uint64_t	exitintinfo;	/* (i) events pending at VM exit */
	int		nmi_pending;	/* (i) NMI pending */
	int		extint_pending;	/* (i) INTR pending */
	int	exception_pending;	/* (i) exception pending */
	int	exc_vector;		/* (x) exception collateral */
	int	exc_errcode_valid;
	uint32_t exc_errcode;
	struct savefpu	*guestfpu;	/* (a,i) guest fpu state */
	uint64_t	guest_xcr0;	/* (i) guest %xcr0 register */
	void		*stats;		/* (a,i) statistics */
	struct vm_exit	exitinfo;	/* (x) exit reason and collateral */
	uint64_t	nextrip;	/* (x) next instruction to execute */
};

#define	vcpu_lock_initialized(v) mtx_initialized(&((v)->mtx))
#define	vcpu_lock_init(v)	mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
#define	vcpu_lock(v)		mtx_lock_spin(&((v)->mtx))
#define	vcpu_unlock(v)		mtx_unlock_spin(&((v)->mtx))
#define	vcpu_assert_locked(v)	mtx_assert(&((v)->mtx), MA_OWNED)

struct mem_seg {
	vm_paddr_t	gpa;
	size_t		len;
	boolean_t	wired;
	vm_object_t	object;
};
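/*
 * Note: two segments suffice because guest RAM is allocated in at most
 * two contiguous chunks (typically below and above the legacy PCI hole).
 * This reflects an assumption about how callers size guest memory rather
 * than a rule enforced here.
 */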
#define	VM_MAX_MEMORY_SEGMENTS	2

/*
 * Initialization:
 * (o) initialized the first time the VM is created
 * (i) initialized when VM is created and when it is reinitialized
 * (x) initialized before use
 */
struct vm {
	void		*cookie;		/* (i) cpu-specific data */
	void		*iommu;			/* (x) iommu-specific data */
	struct vhpet	*vhpet;			/* (i) virtual HPET */
	struct vioapic	*vioapic;		/* (i) virtual ioapic */
	struct vatpic	*vatpic;		/* (i) virtual atpic */
	struct vatpit	*vatpit;		/* (i) virtual atpit */
	struct vpmtmr	*vpmtmr;		/* (i) virtual ACPI PM timer */
	struct vrtc	*vrtc;			/* (o) virtual RTC */
	volatile cpuset_t active_cpus;		/* (i) active vcpus */
	int		suspend;		/* (i) stop VM execution */
	volatile cpuset_t suspended_cpus; 	/* (i) suspended vcpus */
	volatile cpuset_t halted_cpus;		/* (x) cpus in a hard halt */
	cpuset_t	rendezvous_req_cpus;	/* (x) rendezvous requested */
	cpuset_t	rendezvous_done_cpus;	/* (x) rendezvous finished */
	void		*rendezvous_arg;	/* (x) rendezvous func/arg */
	vm_rendezvous_func_t rendezvous_func;
	struct mtx	rendezvous_mtx;		/* (o) rendezvous lock */
	int		num_mem_segs;		/* (o) guest memory segments */
	struct mem_seg	mem_segs[VM_MAX_MEMORY_SEGMENTS];
	struct vmspace	*vmspace;		/* (o) guest's address space */
	char		name[VM_MAX_NAMELEN];	/* (o) virtual machine name */
	struct vcpu	vcpu[VM_MAXCPU];	/* (i) guest vcpus */
};

static int vmm_initialized;

static struct vmm_ops *ops;
#define	VMM_INIT(num)	(ops != NULL ? (*ops->init)(num) : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)
#define	VMM_RESUME()	(ops != NULL ? (*ops->resume)() : 0)

#define	VMINIT(vm, pmap) (ops != NULL ? (*ops->vminit)(vm, pmap) : NULL)
#define	VMRUN(vmi, vcpu, rip, pmap, rptr, sptr) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, pmap, rptr, sptr) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMSPACE_ALLOC(min, max) \
	(ops != NULL ? (*ops->vmspace_alloc)(min, max) : NULL)
#define	VMSPACE_FREE(vmspace) \
	(ops != NULL ? (*ops->vmspace_free)(vmspace) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)		\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
#define	VLAPIC_INIT(vmi, vcpu)			\
	(ops != NULL ? (*ops->vlapic_init)(vmi, vcpu) : NULL)
#define	VLAPIC_CLEANUP(vmi, vlapic)		\
	(ops != NULL ? (*ops->vlapic_cleanup)(vmi, vlapic) : NULL)

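/*
 * With CR0.TS set, the first FPU/SSE instruction executed traps with a
 * #NM fault; that is how any host access to the guest's FPU state is
 * caught while a vcpu's state is loaded.  clts() clears CR0.TS again.
 */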
#define	fpu_start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	fpu_stop_emulating()	clts()

static MALLOC_DEFINE(M_VM, "vm", "vm");

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

/*
 * Halt the guest if all vcpus are executing a HLT instruction with
 * interrupts disabled.
 */
static int halt_detection_enabled = 1;
TUNABLE_INT("hw.vmm.halt_detection", &halt_detection_enabled);
SYSCTL_INT(_hw_vmm, OID_AUTO, halt_detection, CTLFLAG_RDTUN,
    &halt_detection_enabled, 0,
    "Halt VM if all vcpus execute HLT with interrupts disabled");

static int vmm_ipinum;
SYSCTL_INT(_hw_vmm, OID_AUTO, ipinum, CTLFLAG_RD, &vmm_ipinum, 0,
    "IPI vector used for vcpu notifications");

static int trace_guest_exceptions;
SYSCTL_INT(_hw_vmm, OID_AUTO, trace_guest_exceptions, CTLFLAG_RDTUN,
    &trace_guest_exceptions, 0,
    "Trap into hypervisor on all guest exceptions and reflect them back");

static void
vcpu_cleanup(struct vm *vm, int i, bool destroy)
{
	struct vcpu *vcpu = &vm->vcpu[i];

	VLAPIC_CLEANUP(vm->cookie, vcpu->vlapic);
	if (destroy) {
		vmm_stat_free(vcpu->stats);
		fpu_save_area_free(vcpu->guestfpu);
	}
}

static void
vcpu_init(struct vm *vm, int vcpu_id, bool create)
{
	struct vcpu *vcpu;

	KASSERT(vcpu_id >= 0 && vcpu_id < VM_MAXCPU,
	    ("vcpu_init: invalid vcpu %d", vcpu_id));

	vcpu = &vm->vcpu[vcpu_id];

	if (create) {
		KASSERT(!vcpu_lock_initialized(vcpu), ("vcpu %d already "
		    "initialized", vcpu_id));
		vcpu_lock_init(vcpu);
		vcpu->state = VCPU_IDLE;
		vcpu->hostcpu = NOCPU;
		vcpu->guestfpu = fpu_save_area_alloc();
		vcpu->stats = vmm_stat_alloc();
	}

	vcpu->vlapic = VLAPIC_INIT(vm->cookie, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_DISABLED);
	vcpu->exitintinfo = 0;
	vcpu->nmi_pending = 0;
	vcpu->extint_pending = 0;
	vcpu->exception_pending = 0;
	vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
	fpu_save_area_reset(vcpu->guestfpu);
	vmm_stat_init(vcpu->stats);
}

int
vcpu_trace_exceptions(struct vm *vm, int vcpuid)
{

	return (trace_guest_exceptions);
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

static void
vmm_resume(void)
{
	VMM_RESUME();
}

static int
vmm_init(void)
{
	int error;

	vmm_host_state_init();

	vmm_ipinum = vmm_ipi_alloc();
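	/*
	 * vmm_ipi_alloc() returns 0 when no free IDT vector is available,
	 * in which case fall back to the pre-existing AST IPI.
	 */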
	if (vmm_ipinum == 0)
		vmm_ipinum = IPI_AST;

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_resume_p = vmm_resume;

	return (VMM_INIT(vmm_ipinum));
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		if (ppt_avail_devices() > 0)
			iommu_init();
		error = vmm_init();
		if (error == 0)
			vmm_initialized = 1;
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			vmm_resume_p = NULL;
			iommu_cleanup();
			if (vmm_ipinum != IPI_AST)
				vmm_ipi_free(vmm_ipinum);
			error = VMM_CLEANUP();
			/*
			 * Something bad happened - prevent new
			 * VMs from being created
			 */
			if (error)
				vmm_initialized = 0;
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * vmm initialization has the following dependencies:
 *
 * - iommu initialization must happen after the pci passthru driver has had
 *   a chance to attach to any passthru devices (after SI_SUB_CONFIGURE).
 *
 * - VT-x initialization requires smp_rendezvous() and therefore must happen
 *   after SMP is fully functional (after SI_SUB_SMP).
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_SMP + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

static void
vm_init(struct vm *vm, bool create)
{
	int i;

	vm->cookie = VMINIT(vm, vmspace_pmap(vm->vmspace));
	vm->iommu = NULL;
	vm->vioapic = vioapic_init(vm);
	vm->vhpet = vhpet_init(vm);
	vm->vatpic = vatpic_init(vm);
	vm->vatpit = vatpit_init(vm);
	vm->vpmtmr = vpmtmr_init(vm);
	if (create)
		vm->vrtc = vrtc_init(vm);

	CPU_ZERO(&vm->active_cpus);

	vm->suspend = 0;
	CPU_ZERO(&vm->suspended_cpus);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_init(vm, i, create);
}

int
vm_create(const char *name, struct vm **retvm)
{
	struct vm *vm;
	struct vmspace *vmspace;

	/*
	 * If vmm.ko could not be successfully initialized then don't attempt
	 * to create the virtual machine.
	 */
	if (!vmm_initialized)
		return (ENXIO);

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (EINVAL);

	vmspace = VMSPACE_ALLOC(0, VM_MAXUSER_ADDRESS);
	if (vmspace == NULL)
		return (ENOMEM);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->num_mem_segs = 0;
	vm->vmspace = vmspace;
	mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);

	vm_init(vm, true);

	*retvm = vm;
	return (0);
}

static void
vm_free_mem_seg(struct vm *vm, struct mem_seg *seg)
{

	if (seg->object != NULL)
		vmm_mem_free(vm->vmspace, seg->gpa, seg->len);

	bzero(seg, sizeof(*seg));
}

static void
vm_cleanup(struct vm *vm, bool destroy)
{
	int i;

	ppt_unassign_all(vm);

	if (vm->iommu != NULL)
		iommu_destroy_domain(vm->iommu);

	if (destroy)
		vrtc_cleanup(vm->vrtc);
	else
		vrtc_reset(vm->vrtc);
	vpmtmr_cleanup(vm->vpmtmr);
	vatpit_cleanup(vm->vatpit);
	vhpet_cleanup(vm->vhpet);
	vatpic_cleanup(vm->vatpic);
	vioapic_cleanup(vm->vioapic);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(vm, i, destroy);

	VMCLEANUP(vm->cookie);

	if (destroy) {
		for (i = 0; i < vm->num_mem_segs; i++)
			vm_free_mem_seg(vm, &vm->mem_segs[i]);

		vm->num_mem_segs = 0;

		VMSPACE_FREE(vm->vmspace);
		vm->vmspace = NULL;
	}
}

void
vm_destroy(struct vm *vm)
{
	vm_cleanup(vm, true);
	free(vm, M_VM);
}

int
vm_reinit(struct vm *vm)
{
	int error;

	/*
	 * A virtual machine can be reset only if all vcpus are suspended.
	 */
	if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
		vm_cleanup(vm, false);
		vm_init(vm, false);
		error = 0;
	} else {
		error = EBUSY;
	}

	return (error);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	vm_object_t obj;

	if ((obj = vmm_mmio_alloc(vm->vmspace, gpa, len, hpa)) == NULL)
		return (ENOMEM);
	else
		return (0);
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{

	vmm_mmio_free(vm->vmspace, gpa, len);
	return (0);
}

boolean_t
vm_mem_allocated(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (TRUE);		/* 'gpa' is regular memory */
	}

	if (ppt_is_mmio(vm, gpa))
		return (TRUE);			/* 'gpa' is pci passthru mmio */

	return (FALSE);
}

int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int available, allocated;
	struct mem_seg *seg;
	vm_object_t object;
	vm_paddr_t g;

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_mem_allocated(vm, g))
			allocated++;
		else
			available++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	seg = &vm->mem_segs[vm->num_mem_segs];

	if ((object = vmm_mem_alloc(vm->vmspace, gpa, len)) == NULL)
		return (ENOMEM);

	seg->gpa = gpa;
	seg->len = len;
	seg->object = object;
	seg->wired = FALSE;

	vm->num_mem_segs++;

	return (0);
}

static vm_paddr_t
vm_maxmem(struct vm *vm)
{
	int i;
	vm_paddr_t gpa, maxmem;

	maxmem = 0;
	for (i = 0; i < vm->num_mem_segs; i++) {
		gpa = vm->mem_segs[i].gpa + vm->mem_segs[i].len;
		if (gpa > maxmem)
			maxmem = gpa;
	}
	return (maxmem);
}

static void
vm_gpa_unwire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (!seg->wired)
			continue;

		rv = vm_map_unwire(&vm->vmspace->vm_map,
				   seg->gpa, seg->gpa + seg->len,
				   VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		KASSERT(rv == KERN_SUCCESS, ("vm(%s) memory segment "
		    "%#lx/%ld could not be unwired: %d",
		    vm_name(vm), seg->gpa, seg->len, rv));

		seg->wired = FALSE;
	}
}

static int
vm_gpa_wire(struct vm *vm)
{
	int i, rv;
	struct mem_seg *seg;

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		if (seg->wired)
			continue;

		/* XXX rlimits? */
		rv = vm_map_wire(&vm->vmspace->vm_map,
				 seg->gpa, seg->gpa + seg->len,
				 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
		if (rv != KERN_SUCCESS)
			break;

		seg->wired = TRUE;
	}

	if (i < vm->num_mem_segs) {
		/*
		 * Undo the wiring before returning an error.
		 */
		vm_gpa_unwire(vm);
		return (EAGAIN);
	}

	return (0);
}

static void
vm_iommu_modify(struct vm *vm, boolean_t map)
{
	int i, sz;
	vm_paddr_t gpa, hpa;
	struct mem_seg *seg;
	void *vp, *cookie, *host_domain;

	sz = PAGE_SIZE;
	host_domain = iommu_host_domain();

	for (i = 0; i < vm->num_mem_segs; i++) {
		seg = &vm->mem_segs[i];
		KASSERT(seg->wired, ("vm(%s) memory segment %#lx/%ld not wired",
		    vm_name(vm), seg->gpa, seg->len));

		gpa = seg->gpa;
		while (gpa < seg->gpa + seg->len) {
			vp = vm_gpa_hold(vm, gpa, PAGE_SIZE, VM_PROT_WRITE,
					 &cookie);
			KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
			    vm_name(vm), gpa));

			vm_gpa_release(cookie);

			hpa = DMAP_TO_PHYS((uintptr_t)vp);
			if (map) {
				iommu_create_mapping(vm->iommu, gpa, hpa, sz);
				iommu_remove_mapping(host_domain, hpa, sz);
			} else {
				iommu_remove_mapping(vm->iommu, gpa, sz);
				iommu_create_mapping(host_domain, hpa, hpa, sz);
			}

			gpa += PAGE_SIZE;
		}
	}

	/*
	 * Invalidate the cached translations associated with the domain
	 * from which pages were removed.
	 */
	if (map)
		iommu_invalidate_tlb(host_domain);
	else
		iommu_invalidate_tlb(vm->iommu);
}

#define	vm_iommu_unmap(vm)	vm_iommu_modify((vm), FALSE)
#define	vm_iommu_map(vm)	vm_iommu_modify((vm), TRUE)
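/*
 * Note that vm_iommu_modify() moves each page between the host's iommu
 * domain and the VM's domain, so a page is never mapped in both domains
 * at once.
 */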

int
vm_unassign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;

	error = ppt_unassign_device(vm, bus, slot, func);
	if (error)
		return (error);

	if (ppt_assigned_devices(vm) == 0) {
		vm_iommu_unmap(vm);
		vm_gpa_unwire(vm);
	}
	return (0);
}

int
vm_assign_pptdev(struct vm *vm, int bus, int slot, int func)
{
	int error;
	vm_paddr_t maxaddr;

	/*
	 * Virtual machines with pci passthru devices get special treatment:
	 * - the guest physical memory is wired
	 * - the iommu is programmed to do the 'gpa' to 'hpa' translation
	 *
	 * We need to do this before the first pci passthru device is attached.
	 */
	if (ppt_assigned_devices(vm) == 0) {
		KASSERT(vm->iommu == NULL,
		    ("vm_assign_pptdev: iommu must be NULL"));
		maxaddr = vm_maxmem(vm);
		vm->iommu = iommu_create_domain(maxaddr);

		error = vm_gpa_wire(vm);
		if (error)
			return (error);

		vm_iommu_map(vm);
	}

	error = ppt_assign_device(vm, bus, slot, func);
	return (error);
}

void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
	    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
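		/*
		 * Return a kernel virtual address for the held page via the
		 * amd64 direct map, plus the offset within the page.
		 */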
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}

void
vm_gpa_release(void *cookie)
{
	vm_page_t m = cookie;

	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
		  struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			seg->gpa = vm->mem_segs[i].gpa;
			seg->len = vm->mem_segs[i].len;
			seg->wired = vm->mem_segs[i].wired;
			return (0);
		}
	}
	return (-1);
}

int
vm_get_memobj(struct vm *vm, vm_paddr_t gpa, size_t len,
	      vm_offset_t *offset, struct vm_object **object)
{
	int i;
	size_t seg_len;
	vm_paddr_t seg_gpa;
	vm_object_t seg_obj;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if ((seg_obj = vm->mem_segs[i].object) == NULL)
			continue;

		seg_gpa = vm->mem_segs[i].gpa;
		seg_len = vm->mem_segs[i].len;

		if (gpa >= seg_gpa && gpa < seg_gpa + seg_len) {
			*offset = gpa - seg_gpa;
			*object = seg_obj;
			vm_object_reference(seg_obj);
			return (0);
		}
	}

	return (EINVAL);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
{
	struct vcpu *vcpu;
	int error;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	error = VMSETREG(vm->cookie, vcpuid, reg, val);
	if (error || reg != VM_REG_GUEST_RIP)
		return (error);

	/* Set 'nextrip' to match the value of %rip */
	VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
	vcpu = &vm->vcpu[vcpuid];
	vcpu->nextrip = val;
	return (0);
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}
90812004Sjiang.liu@intel.com
90910394SMichael.Corcoran@Sun.COMint
91010394SMichael.Corcoran@Sun.COMvm_get_seg_desc(struct vm *vm, int vcpu, int reg,
91110394SMichael.Corcoran@Sun.COM		struct seg_desc *desc)
91210394SMichael.Corcoran@Sun.COM{
91310394SMichael.Corcoran@Sun.COM
91410394SMichael.Corcoran@Sun.COM	if (vcpu < 0 || vcpu >= VM_MAXCPU)
91510394SMichael.Corcoran@Sun.COM		return (EINVAL);
91610394SMichael.Corcoran@Sun.COM
91710394SMichael.Corcoran@Sun.COM	if (!is_segment_register(reg) && !is_descriptor_table(reg))
91810394SMichael.Corcoran@Sun.COM		return (EINVAL);
91910394SMichael.Corcoran@Sun.COM
92010394SMichael.Corcoran@Sun.COM	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
92110394SMichael.Corcoran@Sun.COM}
92210394SMichael.Corcoran@Sun.COM
92312004Sjiang.liu@intel.comint
92410394SMichael.Corcoran@Sun.COMvm_set_seg_desc(struct vm *vm, int vcpu, int reg,
92510394SMichael.Corcoran@Sun.COM		struct seg_desc *desc)
92610394SMichael.Corcoran@Sun.COM{
92710394SMichael.Corcoran@Sun.COM	if (vcpu < 0 || vcpu >= VM_MAXCPU)
92810394SMichael.Corcoran@Sun.COM		return (EINVAL);
92910394SMichael.Corcoran@Sun.COM
93012004Sjiang.liu@intel.com	if (!is_segment_register(reg) && !is_descriptor_table(reg))
93110394SMichael.Corcoran@Sun.COM		return (EINVAL);
93210394SMichael.Corcoran@Sun.COM
93310394SMichael.Corcoran@Sun.COM	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
93410394SMichael.Corcoran@Sun.COM}
93510394SMichael.Corcoran@Sun.COM
93610394SMichael.Corcoran@Sun.COMstatic void
93710394SMichael.Corcoran@Sun.COMrestore_guest_fpustate(struct vcpu *vcpu)
93810394SMichael.Corcoran@Sun.COM{
93910394SMichael.Corcoran@Sun.COM
94010394SMichael.Corcoran@Sun.COM	/* flush host state to the pcb */
94112004Sjiang.liu@intel.com	fpuexit(curthread);
94210394SMichael.Corcoran@Sun.COM
94310394SMichael.Corcoran@Sun.COM	/* restore guest FPU state */
94410394SMichael.Corcoran@Sun.COM	fpu_stop_emulating();
94510394SMichael.Corcoran@Sun.COM	fpurestore(vcpu->guestfpu);
94610394SMichael.Corcoran@Sun.COM
94710394SMichael.Corcoran@Sun.COM	/* restore guest XCR0 if XSAVE is enabled in the host */
94810394SMichael.Corcoran@Sun.COM	if (rcr4() & CR4_XSAVE)
94910394SMichael.Corcoran@Sun.COM		load_xcr(0, vcpu->guest_xcr0);
95010394SMichael.Corcoran@Sun.COM
95110394SMichael.Corcoran@Sun.COM	/*
95210394SMichael.Corcoran@Sun.COM	 * The FPU is now "dirty" with the guest's state so turn on emulation
95310394SMichael.Corcoran@Sun.COM	 * to trap any access to the FPU by the host.
95410394SMichael.Corcoran@Sun.COM	 */
95510394SMichael.Corcoran@Sun.COM	fpu_start_emulating();
95610394SMichael.Corcoran@Sun.COM}
95710394SMichael.Corcoran@Sun.COM
95810394SMichael.Corcoran@Sun.COMstatic void
95910394SMichael.Corcoran@Sun.COMsave_guest_fpustate(struct vcpu *vcpu)
96010394SMichael.Corcoran@Sun.COM{
96110394SMichael.Corcoran@Sun.COM
96212004Sjiang.liu@intel.com	if ((rcr0() & CR0_TS) == 0)
96310394SMichael.Corcoran@Sun.COM		panic("fpu emulation not enabled in host!");
96410394SMichael.Corcoran@Sun.COM
96510394SMichael.Corcoran@Sun.COM	/* save guest XCR0 and restore host XCR0 */
96612004Sjiang.liu@intel.com	if (rcr4() & CR4_XSAVE) {
96710394SMichael.Corcoran@Sun.COM		vcpu->guest_xcr0 = rxcr(0);
96810394SMichael.Corcoran@Sun.COM		load_xcr(0, vmm_get_host_xcr0());
96912004Sjiang.liu@intel.com	}
97010394SMichael.Corcoran@Sun.COM
97110394SMichael.Corcoran@Sun.COM	/* save guest FPU state */
97210394SMichael.Corcoran@Sun.COM	fpu_stop_emulating();
97310394SMichael.Corcoran@Sun.COM	fpusave(vcpu->guestfpu);
97410394SMichael.Corcoran@Sun.COM	fpu_start_emulating();
97510394SMichael.Corcoran@Sun.COM}

static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");

static int
vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
    bool from_idle)
{
	int error;

	vcpu_assert_locked(vcpu);

	/*
	 * State transitions from the vmmdev_ioctl() must always begin from
	 * the VCPU_IDLE state. This guarantees that there is only a single
	 * ioctl() operating on a vcpu at any point.
	 */
	if (from_idle) {
		while (vcpu->state != VCPU_IDLE)
			msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
	} else {
		KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
		    "vcpu idle state"));
	}

	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
		    "mismatch for running vcpu", curcpu, vcpu->hostcpu));
	} else {
		KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
		    "vcpu that is not running", vcpu->hostcpu));
	}

	/*
	 * The following state transitions are allowed:
	 * IDLE -> FROZEN -> IDLE
	 * FROZEN -> RUNNING -> FROZEN
	 * FROZEN -> SLEEPING -> FROZEN
	 */
	switch (vcpu->state) {
	case VCPU_IDLE:
	case VCPU_RUNNING:
	case VCPU_SLEEPING:
		error = (newstate != VCPU_FROZEN);
		break;
	case VCPU_FROZEN:
		error = (newstate == VCPU_FROZEN);
		break;
	default:
		error = 1;
		break;
	}

	if (error)
		return (EBUSY);

	vcpu->state = newstate;
	if (newstate == VCPU_RUNNING)
		vcpu->hostcpu = curcpu;
	else
		vcpu->hostcpu = NOCPU;

	if (newstate == VCPU_IDLE)
		wakeup(&vcpu->state);

	return (0);
}
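
/*
 * Illustrative walk through the transitions enforced above: an ioctl that
 * wants to operate on a vcpu moves it IDLE -> FROZEN (from_idle = true),
 * vm_run() then toggles FROZEN -> RUNNING around VMRUN() and back to
 * FROZEN, and the ioctl path finally returns the vcpu FROZEN -> IDLE,
 * waking any other thread sleeping in the "vmstat" msleep above.
 */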

static void
vcpu_require_state(struct vm *vm, int vcpuid, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state(vm, vcpuid, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
	int error;

	if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
		panic("Error %d setting state to %d", error, newstate);
}

static void
vm_set_rendezvous_func(struct vm *vm, vm_rendezvous_func_t func)
{

	KASSERT(mtx_owned(&vm->rendezvous_mtx), ("rendezvous_mtx not locked"));

	/*
	 * Update 'rendezvous_func' and execute a write memory barrier to
	 * ensure that it is visible across all host cpus. This is not needed
	 * for correctness but it does ensure that all the vcpus will notice
	 * that the rendezvous is requested immediately.
	 */
	vm->rendezvous_func = func;
	wmb();
}

#define	RENDEZVOUS_CTR0(vm, vcpuid, fmt)				\
	do {								\
		if (vcpuid >= 0)					\
			VCPU_CTR0(vm, vcpuid, fmt);			\
		else							\
			VM_CTR0(vm, fmt);				\
	} while (0)

static void
vm_handle_rendezvous(struct vm *vm, int vcpuid)
{

	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_handle_rendezvous: invalid vcpuid %d", vcpuid));

	mtx_lock(&vm->rendezvous_mtx);
	while (vm->rendezvous_func != NULL) {
		/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
		CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);

		if (vcpuid != -1 &&
		    CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
		    !CPU_ISSET(vcpuid, &vm->rendezvous_done_cpus)) {
			VCPU_CTR0(vm, vcpuid, "Calling rendezvous func");
			(*vm->rendezvous_func)(vm, vcpuid, vm->rendezvous_arg);
			CPU_SET(vcpuid, &vm->rendezvous_done_cpus);
		}
		if (CPU_CMP(&vm->rendezvous_req_cpus,
		    &vm->rendezvous_done_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "Rendezvous completed");
			vm_set_rendezvous_func(vm, NULL);
			wakeup(&vm->rendezvous_func);
			break;
		}
		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
		    "vmrndv", 0);
	}
	mtx_unlock(&vm->rendezvous_mtx);
}
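
/*
 * Note on the loop above: 'rendezvous_done_cpus' only ever grows towards
 * 'rendezvous_req_cpus', which is itself clamped to 'active_cpus' on each
 * pass. The vcpu that makes the two sets equal clears 'rendezvous_func'
 * and wakes the remaining waiters sleeping on it.
 */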

/*
 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
 */
static int
vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
{
	struct vcpu *vcpu;
	const char *wmesg;
	int t, vcpu_halted, vm_halted;

	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));

	vcpu = &vm->vcpu[vcpuid];
	vcpu_halted = 0;
	vm_halted = 0;

	vcpu_lock(vcpu);
	while (1) {
		/*
		 * Do a final check for pending NMI or interrupts before
		 * really putting this thread to sleep. Also check for
		 * software events that would cause this vcpu to wakeup.
		 *
		 * These interrupts/events could have happened after the
		 * vcpu returned from VMRUN() and before it acquired the
		 * vcpu lock above.
		 */
		if (vm->rendezvous_func != NULL || vm->suspend)
			break;
		if (vm_nmi_pending(vm, vcpuid))
			break;
		if (!intr_disabled) {
			if (vm_extint_pending(vm, vcpuid) ||
			    vlapic_pending_intr(vcpu->vlapic, NULL)) {
				break;
			}
		}

		/* Don't go to sleep if the vcpu thread needs to yield */
		if (vcpu_should_yield(vm, vcpuid))
			break;

		/*
		 * Some Linux guests implement "halt" by having all vcpus
		 * execute HLT with interrupts disabled. 'halted_cpus' keeps
		 * track of the vcpus that have entered this state. When all
		 * vcpus enter the halted state the virtual machine is halted.
		 */
		if (intr_disabled) {
			wmesg = "vmhalt";
			VCPU_CTR0(vm, vcpuid, "Halted");
			if (!vcpu_halted && halt_detection_enabled) {
				vcpu_halted = 1;
				CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
			}
			if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0) {
				vm_halted = 1;
				break;
			}
		} else {
			wmesg = "vmidle";
		}

		t = ticks;
		vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
		/*
		 * XXX msleep_spin() cannot be interrupted by signals so
		 * wake up periodically to check pending signals.
		 */
		msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
		vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
	}

	if (vcpu_halted)
		CPU_CLR_ATOMIC(vcpuid, &vm->halted_cpus);

	vcpu_unlock(vcpu);

	if (vm_halted)
		vm_suspend(vm, VM_SUSPEND_HALT);

	return (0);
}
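
/*
 * The all-vcpus-halted detection above is gated by 'halt_detection_enabled',
 * a tunable defined elsewhere in this file; when it is set, a guest whose
 * vcpus all execute HLT with interrupts disabled is suspended with
 * VM_SUSPEND_HALT instead of idling forever in the loop above.
 */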

static int
vm_handle_paging(struct vm *vm, int vcpuid, bool *retu)
{
	int rv, ftype;
	struct vm_map *map;
	struct vcpu *vcpu;
	struct vm_exit *vme;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	KASSERT(vme->inst_length == 0, ("%s: invalid inst_length %d",
	    __func__, vme->inst_length));

	ftype = vme->u.paging.fault_type;
	KASSERT(ftype == VM_PROT_READ ||
	    ftype == VM_PROT_WRITE || ftype == VM_PROT_EXECUTE,
	    ("vm_handle_paging: invalid fault_type %d", ftype));

	if (ftype == VM_PROT_READ || ftype == VM_PROT_WRITE) {
		rv = pmap_emulate_accessed_dirty(vmspace_pmap(vm->vmspace),
		    vme->u.paging.gpa, ftype);
		if (rv == 0) {
			VCPU_CTR2(vm, vcpuid, "%s bit emulation for gpa %#lx",
			    ftype == VM_PROT_READ ? "accessed" : "dirty",
			    vme->u.paging.gpa);
			goto done;
		}
	}

	map = &vm->vmspace->vm_map;
	rv = vm_fault(map, vme->u.paging.gpa, ftype, VM_FAULT_NORMAL);

	VCPU_CTR3(vm, vcpuid, "vm_handle_paging rv = %d, gpa = %#lx, "
	    "ftype = %d", rv, vme->u.paging.gpa, ftype);

	if (rv != KERN_SUCCESS)
		return (EFAULT);
done:
	return (0);
}
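
/*
 * The handler above is deliberately two-staged: it first tries the cheap
 * path of emulating a missed accessed/dirty bit update directly in the
 * nested page tables and only falls back to a full vm_fault() on the
 * guest physical address when that fails, e.g. because the page is not
 * resident at all.
 */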

static int
vm_handle_inst_emul(struct vm *vm, int vcpuid, bool *retu)
{
	struct vie *vie;
	struct vcpu *vcpu;
	struct vm_exit *vme;
	uint64_t gla, gpa;
	struct vm_guest_paging *paging;
	mem_region_read_t mread;
	mem_region_write_t mwrite;
	enum vm_cpu_mode cpu_mode;
	int cs_d, error, length;

	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;

	gla = vme->u.inst_emul.gla;
	gpa = vme->u.inst_emul.gpa;
	cs_d = vme->u.inst_emul.cs_d;
	vie = &vme->u.inst_emul.vie;
	paging = &vme->u.inst_emul.paging;
	cpu_mode = paging->cpu_mode;

	VCPU_CTR1(vm, vcpuid, "inst_emul fault accessing gpa %#lx", gpa);

	/* Fetch, decode and emulate the faulting instruction */
	if (vie->num_valid == 0) {
		/*
		 * If the instruction length is not known then assume a
		 * maximum size instruction.
		 */
		length = vme->inst_length ? vme->inst_length : VIE_INST_SIZE;
		error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip,
		    length, vie);
	} else {
		/*
		 * The instruction bytes have already been copied into 'vie'
		 */
		error = 0;
	}
	if (error == 1)
		return (0);		/* Resume guest to handle page fault */
	else if (error == -1)
		return (EFAULT);
	else if (error != 0)
		panic("%s: vmm_fetch_instruction error %d", __func__, error);

	if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0)
		return (EFAULT);

	/*
	 * If the instruction length was not specified then update it now
	 * along with 'nextrip'.
	 */
	if (vme->inst_length == 0) {
		vme->inst_length = vie->num_processed;
		vcpu->nextrip += vie->num_processed;
	}

	/* return to userland unless this is an in-kernel emulated device */
	if (gpa >= DEFAULT_APIC_BASE && gpa < DEFAULT_APIC_BASE + PAGE_SIZE) {
		mread = lapic_mmio_read;
		mwrite = lapic_mmio_write;
	} else if (gpa >= VIOAPIC_BASE && gpa < VIOAPIC_BASE + VIOAPIC_SIZE) {
		mread = vioapic_mmio_read;
		mwrite = vioapic_mmio_write;
	} else if (gpa >= VHPET_BASE && gpa < VHPET_BASE + VHPET_SIZE) {
		mread = vhpet_mmio_read;
		mwrite = vhpet_mmio_write;
	} else {
		*retu = true;
		return (0);
	}

	error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
	    mread, mwrite, retu);

	return (error);
}
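
/*
 * Dispatch note for the emulation above: only the local APIC page, the
 * I/O APIC window and the HPET window are emulated in the kernel. Any
 * other gpa returns with *retu set so that the instruction is completed
 * by the userspace device model (e.g. bhyve(8)) instead.
 */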

static int
vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
{
	int i;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	CPU_SET_ATOMIC(vcpuid, &vm->suspended_cpus);

	/*
	 * Wait until all 'active_cpus' have suspended themselves.
	 *
	 * Since a VM may be suspended at any time including when one or
	 * more vcpus are doing a rendezvous we need to call the rendezvous
	 * handler while we are waiting to prevent a deadlock.
	 */
	vcpu_lock(vcpu);
	while (1) {
		if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0) {
			VCPU_CTR0(vm, vcpuid, "All vcpus suspended");
			break;
		}

		if (vm->rendezvous_func == NULL) {
			VCPU_CTR0(vm, vcpuid, "Sleeping during suspend");
			vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
			msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
			vcpu_require_state_locked(vcpu, VCPU_FROZEN);
		} else {
			VCPU_CTR0(vm, vcpuid, "Rendezvous during suspend");
			vcpu_unlock(vcpu);
			vm_handle_rendezvous(vm, vcpuid);
			vcpu_lock(vcpu);
		}
	}
	vcpu_unlock(vcpu);

	/*
	 * Wakeup the other sleeping vcpus and return to userspace.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &vm->suspended_cpus)) {
			vcpu_notify_event(vm, i, false);
		}
	}

	*retu = true;
	return (0);
}

int
vm_suspend(struct vm *vm, enum vm_suspend_how how)
{
	int i;

	if (how <= VM_SUSPEND_NONE || how >= VM_SUSPEND_LAST)
		return (EINVAL);

	if (atomic_cmpset_int(&vm->suspend, 0, how) == 0) {
		VM_CTR2(vm, "virtual machine already suspended %d/%d",
		    vm->suspend, how);
		return (EALREADY);
	}

	VM_CTR1(vm, "virtual machine successfully suspended %d", how);

	/*
	 * Notify all active vcpus that they are now suspended.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &vm->active_cpus))
			vcpu_notify_event(vm, i, false);
	}

	return (0);
}

void
vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
	    ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_SUSPENDED;
	vmexit->u.suspended.how = vm->suspend;
}

void
vm_exit_rendezvous(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	KASSERT(vm->rendezvous_func != NULL, ("rendezvous not in progress"));

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
}

void
vm_exit_astpending(struct vm *vm, int vcpuid, uint64_t rip)
{
	struct vm_exit *vmexit;

	vmexit = vm_exitinfo(vm, vcpuid);
	vmexit->rip = rip;
	vmexit->inst_length = 0;
	vmexit->exitcode = VM_EXITCODE_BOGUS;
	vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
}

int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval;
	struct vm_exit *vme;
	bool retu, intr_disabled;
	pmap_t pmap;
	void *rptr, *sptr;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (!CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
		return (EINVAL);

	rptr = &vm->rendezvous_func;
	sptr = &vm->suspend;
	pmap = vmspace_pmap(vm->vmspace);
	vcpu = &vm->vcpu[vcpuid];
	vme = &vcpu->exitinfo;
restart:
	critical_enter();

	KASSERT(!CPU_ISSET(curcpu, &pmap->pm_active),
	    ("vm_run: absurd pm_active"));

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	restore_guest_fpustate(vcpu);

	vcpu_require_state(vm, vcpuid, VCPU_RUNNING);
	error = VMRUN(vm->cookie, vcpuid, vcpu->nextrip, pmap, rptr, sptr);
	vcpu_require_state(vm, vcpuid, VCPU_FROZEN);

	save_guest_fpustate(vcpu);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	if (error == 0) {
		retu = false;
		vcpu->nextrip = vme->rip + vme->inst_length;
		switch (vme->exitcode) {
		case VM_EXITCODE_SUSPENDED:
			error = vm_handle_suspend(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_IOAPIC_EOI:
			vioapic_process_eoi(vm, vcpuid,
			    vme->u.ioapic_eoi.vector);
			break;
		case VM_EXITCODE_RENDEZVOUS:
			vm_handle_rendezvous(vm, vcpuid);
			error = 0;
			break;
		case VM_EXITCODE_HLT:
			intr_disabled = ((vme->u.hlt.rflags & PSL_I) == 0);
			error = vm_handle_hlt(vm, vcpuid, intr_disabled, &retu);
			break;
		case VM_EXITCODE_PAGING:
			error = vm_handle_paging(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INST_EMUL:
			error = vm_handle_inst_emul(vm, vcpuid, &retu);
			break;
		case VM_EXITCODE_INOUT:
		case VM_EXITCODE_INOUT_STR:
			error = vm_handle_inout(vm, vcpuid, vme, &retu);
			break;
		case VM_EXITCODE_MONITOR:
		case VM_EXITCODE_MWAIT:
			vm_inject_ud(vm, vcpuid);
			break;
		default:
			retu = true;	/* handled in userland */
			break;
		}
	}

	if (error == 0 && retu == false)
		goto restart;

	/* copy the exit information */
	bcopy(vme, &vmrun->vm_exit, sizeof(struct vm_exit));
	return (error);
}
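
/*
 * A minimal sketch of how vm_run() is driven from the vmmdev ioctl path
 * (illustrative only, not the actual ioctl code):
 *
 *	struct vm_run vmrun;
 *
 *	vmrun.cpuid = vcpuid;
 *	error = vm_run(vm, &vmrun);
 *	(on return, vmrun.vm_exit holds the exit reason for userspace)
 *
 * Exits that are handled in the kernel never reach the caller because
 * vm_run() loops via the 'restart' label until 'retu' becomes true or an
 * error occurs.
 */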

int
vm_restart_instruction(void *arg, int vcpuid)
{
	struct vm *vm;
	struct vcpu *vcpu;
	enum vcpu_state state;
	uint64_t rip;
	int error;

	vm = arg;
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	state = vcpu_get_state(vm, vcpuid, NULL);
	if (state == VCPU_RUNNING) {
		/*
		 * When a vcpu is "running" the next instruction is determined
		 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
		 * Thus setting 'inst_length' to zero will cause the current
		 * instruction to be restarted.
		 */
		vcpu->exitinfo.inst_length = 0;
		VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
		    "setting inst_length to zero", vcpu->exitinfo.rip);
	} else if (state == VCPU_FROZEN) {
		/*
		 * When a vcpu is "frozen" it is outside the critical section
		 * around VMRUN() and 'nextrip' points to the next instruction.
		 * Thus instruction restart is achieved by setting 'nextrip'
		 * to the vcpu's %rip.
		 */
		error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
		KASSERT(!error, ("%s: error %d getting rip", __func__, error));
		VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
		    "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
		vcpu->nextrip = rip;
	} else {
		panic("%s: invalid state %d", __func__, state);
	}
	return (0);
}

int
vm_exit_intinfo(struct vm *vm, int vcpuid, uint64_t info)
{
	struct vcpu *vcpu;
	int type, vector;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (info & VM_INTINFO_VALID) {
		type = info & VM_INTINFO_TYPE;
		vector = info & 0xff;
		if (type == VM_INTINFO_NMI && vector != IDT_NMI)
			return (EINVAL);
		if (type == VM_INTINFO_HWEXCEPTION && vector >= 32)
			return (EINVAL);
		if (info & VM_INTINFO_RSVD)
			return (EINVAL);
	} else {
		info = 0;
	}
	VCPU_CTR2(vm, vcpuid, "%s: info1(%#lx)", __func__, info);
	vcpu->exitintinfo = info;
	return (0);
}

enum exc_class {
	EXC_BENIGN,
	EXC_CONTRIBUTORY,
	EXC_PAGEFAULT
};

#define	IDT_VE	20	/* Virtualization Exception (Intel specific) */

static enum exc_class
exception_class(uint64_t info)
{
	int type, vector;

	KASSERT(info & VM_INTINFO_VALID, ("intinfo must be valid: %#lx", info));
	type = info & VM_INTINFO_TYPE;
	vector = info & 0xff;

	/* Table 6-4, "Interrupt and Exception Classes", Intel SDM, Vol 3 */
	switch (type) {
	case VM_INTINFO_HWINTR:
	case VM_INTINFO_SWINTR:
	case VM_INTINFO_NMI:
		return (EXC_BENIGN);
	default:
		/*
		 * Hardware exception.
		 *
		 * SVM and VT-x use identical type values to represent NMI,
		 * hardware interrupt and software interrupt.
		 *
		 * SVM uses type '3' for all exceptions. VT-x uses type '3'
		 * for exceptions except #BP and #OF. #BP and #OF use a type
		 * value of '5' or '6'. Therefore we don't check for explicit
		 * values of 'type' to classify 'intinfo' into a hardware
		 * exception.
		 */
		break;
	}

	switch (vector) {
	case IDT_PF:
	case IDT_VE:
		return (EXC_PAGEFAULT);
	case IDT_DE:
	case IDT_TS:
	case IDT_NP:
	case IDT_SS:
	case IDT_GP:
		return (EXC_CONTRIBUTORY);
	default:
		return (EXC_BENIGN);
	}
}

static int
nested_fault(struct vm *vm, int vcpuid, uint64_t info1, uint64_t info2,
    uint64_t *retinfo)
{
	enum exc_class exc1, exc2;
	int type1, vector1;

	KASSERT(info1 & VM_INTINFO_VALID, ("info1 %#lx is not valid", info1));
	KASSERT(info2 & VM_INTINFO_VALID, ("info2 %#lx is not valid", info2));

	/*
	 * If an exception occurs while attempting to call the double-fault
	 * handler the processor enters shutdown mode (aka triple fault).
	 */
	type1 = info1 & VM_INTINFO_TYPE;
	vector1 = info1 & 0xff;
	if (type1 == VM_INTINFO_HWEXCEPTION && vector1 == IDT_DF) {
		VCPU_CTR2(vm, vcpuid, "triple fault: info1(%#lx), info2(%#lx)",
		    info1, info2);
		vm_suspend(vm, VM_SUSPEND_TRIPLEFAULT);
		*retinfo = 0;
		return (0);
	}

	/*
	 * Table 6-5 "Conditions for Generating a Double Fault", Intel SDM, Vol3
	 */
	exc1 = exception_class(info1);
	exc2 = exception_class(info2);
	if ((exc1 == EXC_CONTRIBUTORY && exc2 == EXC_CONTRIBUTORY) ||
	    (exc1 == EXC_PAGEFAULT && exc2 != EXC_BENIGN)) {
		/* Convert nested fault into a double fault. */
		*retinfo = IDT_DF;
		*retinfo |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		*retinfo |= VM_INTINFO_DEL_ERRCODE;
	} else {
		/* Handle exceptions serially */
		*retinfo = info2;
	}
	return (1);
}
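
/*
 * Worked example for the classification above: a #GP (contributory)
 * raised while delivering a #PF (page fault class) matches the second
 * clause, so the two events collapse into a #DF. A #PF raised while
 * delivering a hardware interrupt (benign) matches neither clause and is
 * simply delivered next, i.e. serially.
 */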

static uint64_t
vcpu_exception_intinfo(struct vcpu *vcpu)
{
	uint64_t info = 0;

	if (vcpu->exception_pending) {
		info = vcpu->exc_vector & 0xff;
		info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
		if (vcpu->exc_errcode_valid) {
			info |= VM_INTINFO_DEL_ERRCODE;
			info |= (uint64_t)vcpu->exc_errcode << 32;
		}
	}
	return (info);
}
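
/*
 * Layout of the 64-bit intinfo word assembled above (it mirrors the
 * hardware event-injection encoding): bits 7:0 carry the vector, the
 * VM_INTINFO_* flags mark the word valid, type it as a hardware exception
 * and flag a deliverable error code, and bits 63:32 carry that error code
 * when present.
 */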

int
vm_entry_intinfo(struct vm *vm, int vcpuid, uint64_t *retinfo)
{
	struct vcpu *vcpu;
	uint64_t info1, info2;
	int valid;

	KASSERT(vcpuid >= 0 && vcpuid < VM_MAXCPU, ("invalid vcpu %d", vcpuid));

	vcpu = &vm->vcpu[vcpuid];

	info1 = vcpu->exitintinfo;
	vcpu->exitintinfo = 0;

	info2 = 0;
	if (vcpu->exception_pending) {
		info2 = vcpu_exception_intinfo(vcpu);
		vcpu->exception_pending = 0;
		VCPU_CTR2(vm, vcpuid, "Exception %d delivered: %#lx",
		    vcpu->exc_vector, info2);
	}

	if ((info1 & VM_INTINFO_VALID) && (info2 & VM_INTINFO_VALID)) {
		valid = nested_fault(vm, vcpuid, info1, info2, retinfo);
	} else if (info1 & VM_INTINFO_VALID) {
		*retinfo = info1;
		valid = 1;
	} else if (info2 & VM_INTINFO_VALID) {
		*retinfo = info2;
		valid = 1;
	} else {
		valid = 0;
	}

	if (valid) {
		VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
		    "retinfo(%#lx)", __func__, info1, info2, *retinfo);
	}

	return (valid);
}

int
vm_get_intinfo(struct vm *vm, int vcpuid, uint64_t *info1, uint64_t *info2)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];
	*info1 = vcpu->exitintinfo;
	*info2 = vcpu_exception_intinfo(vcpu);
	return (0);
}

int
vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
	struct vcpu *vcpu;
	int error;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (vector < 0 || vector >= 32)
		return (EINVAL);

	/*
	 * A double fault exception should never be injected directly into
	 * the guest. It is a derived exception that results from specific
	 * combinations of nested faults.
	 */
	if (vector == IDT_DF)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->exception_pending) {
		VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
		    "pending exception %d", vector, vcpu->exc_vector);
		return (EBUSY);
	}

	/*
	 * From section 26.6.1 "Interruptibility State" in Intel SDM:
	 *
	 * Event blocking by "STI" or "MOV SS" is cleared after guest executes
	 * one instruction or incurs an exception.
	 */
	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
	    __func__, error));

	if (restart_instruction)
		vm_restart_instruction(vm, vcpuid);

	vcpu->exception_pending = 1;
	vcpu->exc_vector = vector;
	vcpu->exc_errcode = errcode;
	vcpu->exc_errcode_valid = errcode_valid;
	VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
	return (0);
}

void
vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
    int errcode)
{
	struct vm *vm;
	int error, restart_instruction;

	vm = vmarg;
	restart_instruction = 1;

	error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
	    errcode, restart_instruction);
	KASSERT(error == 0, ("vm_inject_exception error %d", error));
}

void
vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
{
	struct vm *vm;
	int error;

	vm = vmarg;
	VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
	    error_code, cr2);

	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
	KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));

	vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
}

static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");

int
vm_inject_nmi(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->nmi_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_nmi_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->nmi_pending);
}

void
vm_nmi_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_nmi_clear: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->nmi_pending == 0)
		panic("vm_nmi_clear: inconsistent nmi_pending state");

	vcpu->nmi_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
}

static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");

int
vm_inject_extint(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	vcpu->extint_pending = 1;
	vcpu_notify_event(vm, vcpuid, false);
	return (0);
}

int
vm_extint_pending(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_extint_pending: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	return (vcpu->extint_pending);
}

void
vm_extint_clear(struct vm *vm, int vcpuid)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_extint_clear: invalid vcpuid %d", vcpuid);
	vcpu = &vm->vcpu[vcpuid];

	if (vcpu->extint_pending == 0)
		panic("vm_extint_clear: inconsistent extint_pending state");

	vcpu->extint_pending = 0;
	vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

struct vioapic *
vm_ioapic(struct vm *vm)
{

	return (vm->vioapic);
}

struct vhpet *
vm_hpet(struct vm *vm)
{

	return (vm->vhpet);
}

boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, i, n;
	int b, s, f;
	char *val, *cp, *cp2;

	/*
	 * XXX
	 * The length of an environment variable is limited to 128 bytes which
	 * puts an upper limit on the number of passthru devices that may be
	 * specified using a single environment variable.
	 *
	 * Work around this by scanning multiple environment variable
	 * names instead of a single one - yuck!
	 */
	const char *names[] = { "pptdevs", "pptdevs2", "pptdevs3", NULL };

	/* set pptdevs="1/2/3 4/5/6 7/8/9 10/11/12" */
	found = 0;
	for (i = 0; names[i] != NULL && !found; i++) {
		cp = val = getenv(names[i]);
		while (cp != NULL && *cp != '\0') {
			if ((cp2 = strchr(cp, ' ')) != NULL)
				*cp2 = '\0';

			n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
			if (n == 3 && bus == b && slot == s && func == f) {
				found = 1;
				break;
			}

			if (cp2 != NULL)
				*cp2++ = ' ';

			cp = cp2;
		}
		freeenv(val);
	}
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

int
vcpu_set_state(struct vm *vm, int vcpuid, enum vcpu_state newstate,
    bool from_idle)
{
	int error;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_set_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	error = vcpu_set_state_locked(vcpu, newstate, from_idle);
	vcpu_unlock(vcpu);

	return (error);
}

enum vcpu_state
vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
{
	struct vcpu *vcpu;
	enum vcpu_state state;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vcpu_get_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	state = vcpu->state;
	if (hostcpu != NULL)
		*hostcpu = vcpu->hostcpu;
	vcpu_unlock(vcpu);

	return (state);
}

int
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (CPU_ISSET(vcpuid, &vm->active_cpus))
		return (EBUSY);

	VCPU_CTR0(vm, vcpuid, "activated");
	CPU_SET_ATOMIC(vcpuid, &vm->active_cpus);
	return (0);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

cpuset_t
vm_suspended_cpus(struct vm *vm)
{

	return (vm->suspended_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}

/*
 * This function is called to ensure that a vcpu "sees" a pending event
 * as soon as possible:
 * - If the vcpu thread is sleeping then it is woken up.
 * - If the vcpu is running on a different host_cpu then an IPI will be directed
 *   to the host_cpu to cause the vcpu to trap into the hypervisor.
 */
void
vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr)
{
	int hostcpu;
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpuid];

	vcpu_lock(vcpu);
	hostcpu = vcpu->hostcpu;
	if (vcpu->state == VCPU_RUNNING) {
		KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
		if (hostcpu != curcpu) {
			if (lapic_intr) {
				vlapic_post_intr(vcpu->vlapic, hostcpu,
				    vmm_ipinum);
			} else {
				ipi_cpu(hostcpu, vmm_ipinum);
			}
		} else {
			/*
			 * If the 'vcpu' is running on 'curcpu' then it must
			 * be sending a notification to itself (e.g. SELF_IPI).
			 * The pending event will be picked up when the vcpu
			 * transitions back to guest context.
			 */
		}
	} else {
		KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
		    "with hostcpu %d", vcpu->state, hostcpu));
		if (vcpu->state == VCPU_SLEEPING)
			wakeup_one(vcpu);
	}
	vcpu_unlock(vcpu);
}

struct vmspace *
vm_get_vmspace(struct vm *vm)
{

	return (vm->vmspace);
}

int
vm_apicid2vcpuid(struct vm *vm, int apicid)
{
	/*
	 * XXX apic id is assumed to be numerically identical to vcpu id
	 */
	return (apicid);
}

void
vm_smp_rendezvous(struct vm *vm, int vcpuid, cpuset_t dest,
    vm_rendezvous_func_t func, void *arg)
{
	int i;

	/*
	 * Enforce that this function is called without any locks
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "vm_smp_rendezvous");
	KASSERT(vcpuid == -1 || (vcpuid >= 0 && vcpuid < VM_MAXCPU),
	    ("vm_smp_rendezvous: invalid vcpuid %d", vcpuid));

restart:
	mtx_lock(&vm->rendezvous_mtx);
	if (vm->rendezvous_func != NULL) {
		/*
		 * If a rendezvous is already in progress then we need to
		 * call the rendezvous handler in case this 'vcpuid' is one
		 * of the targets of the rendezvous.
		 */
		RENDEZVOUS_CTR0(vm, vcpuid, "Rendezvous already in progress");
		mtx_unlock(&vm->rendezvous_mtx);
		vm_handle_rendezvous(vm, vcpuid);
		goto restart;
	}
	KASSERT(vm->rendezvous_func == NULL, ("vm_smp_rendezvous: previous "
	    "rendezvous is still in progress"));

	RENDEZVOUS_CTR0(vm, vcpuid, "Initiating rendezvous");
	vm->rendezvous_req_cpus = dest;
	CPU_ZERO(&vm->rendezvous_done_cpus);
	vm->rendezvous_arg = arg;
	vm_set_rendezvous_func(vm, func);
	mtx_unlock(&vm->rendezvous_mtx);

	/*
	 * Wake up any sleeping vcpus and trigger a VM-exit in any running
	 * vcpus so they handle the rendezvous as soon as possible.
	 */
	for (i = 0; i < VM_MAXCPU; i++) {
		if (CPU_ISSET(i, &dest))
			vcpu_notify_event(vm, i, false);
	}

	vm_handle_rendezvous(vm, vcpuid);
}
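
/*
 * Minimal usage sketch (the callback and its argument are hypothetical;
 * in-kernel callers use the same pattern):
 *
 *	static void
 *	example_rendezvous_cb(struct vm *vm, int vcpuid, void *arg)
 *	{
 *		(runs once on every vcpu named in 'dest')
 *	}
 *
 *	cpuset_t dest = vm_active_cpus(vm);
 *	vm_smp_rendezvous(vm, vcpuid, dest, example_rendezvous_cb, NULL);
 *
 * The call returns only after every targeted vcpu has executed the
 * callback; pass vcpuid == -1 when initiating from a non-vcpu thread.
 */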

struct vatpic *
vm_atpic(struct vm *vm)
{
	return (vm->vatpic);
}

struct vatpit *
vm_atpit(struct vm *vm)
{
	return (vm->vatpit);
}

struct vpmtmr *
vm_pmtmr(struct vm *vm)
{

	return (vm->vpmtmr);
}

struct vrtc *
vm_rtc(struct vm *vm)
{

	return (vm->vrtc);
}

enum vm_reg_name
vm_segment_name(int seg)
{
	static enum vm_reg_name seg_names[] = {
		VM_REG_GUEST_ES,
		VM_REG_GUEST_CS,
		VM_REG_GUEST_SS,
		VM_REG_GUEST_DS,
		VM_REG_GUEST_FS,
		VM_REG_GUEST_GS
	};

	KASSERT(seg >= 0 && seg < nitems(seg_names),
	    ("%s: invalid segment encoding %d", __func__, seg));
	return (seg_names[seg]);
}

void
vm_copy_teardown(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int idx;

	for (idx = 0; idx < num_copyinfo; idx++) {
		if (copyinfo[idx].cookie != NULL)
			vm_gpa_release(copyinfo[idx].cookie);
	}
	bzero(copyinfo, num_copyinfo * sizeof(struct vm_copyinfo));
}

int
vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
    int num_copyinfo)
{
	int error, idx, nused;
	size_t n, off, remaining;
	void *hva, *cookie;
	uint64_t gpa;

	bzero(copyinfo, sizeof(struct vm_copyinfo) * num_copyinfo);

	nused = 0;
	remaining = len;
	while (remaining > 0) {
		KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
		error = vmm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa);
		if (error)
			return (error);
		off = gpa & PAGE_MASK;
		n = min(remaining, PAGE_SIZE - off);
		copyinfo[nused].gpa = gpa;
		copyinfo[nused].len = n;
		remaining -= n;
		gla += n;
		nused++;
	}

	for (idx = 0; idx < nused; idx++) {
		hva = vm_gpa_hold(vm, copyinfo[idx].gpa, copyinfo[idx].len,
		    prot, &cookie);
		if (hva == NULL)
			break;
		copyinfo[idx].hva = hva;
		copyinfo[idx].cookie = cookie;
	}

	if (idx != nused) {
		vm_copy_teardown(vm, vcpuid, copyinfo, num_copyinfo);
		return (-1);
	} else {
		return (0);
	}
}
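
/*
 * Typical call sequence for the copy helpers above (sizes and names are
 * illustrative); instruction emulation uses this pattern to access a
 * guest buffer that may straddle a page boundary:
 *
 *	struct vm_copyinfo copyinfo[2];
 *	char buf[16];
 *
 *	if (vm_copy_setup(vm, vcpuid, paging, gla, sizeof(buf),
 *	    VM_PROT_READ, copyinfo, nitems(copyinfo)) == 0) {
 *		vm_copyin(vm, vcpuid, copyinfo, buf, sizeof(buf));
 *		vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
 *	}
 *
 * A non-zero return from vm_copy_setup() means the guest linear address
 * could not be fully translated and held; 0 means every segment is mapped
 * and the subsequent vm_copyin()/vm_copyout() calls cannot fail.
 */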

void
vm_copyin(struct vm *vm, int vcpuid, struct vm_copyinfo *copyinfo, void *kaddr,
    size_t len)
{
	char *dst;
	int idx;

	dst = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(copyinfo[idx].hva, dst, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		dst += copyinfo[idx].len;
		idx++;
	}
}

void
vm_copyout(struct vm *vm, int vcpuid, const void *kaddr,
    struct vm_copyinfo *copyinfo, size_t len)
{
	const char *src;
	int idx;

	src = kaddr;
	idx = 0;
	while (len > 0) {
		bcopy(src, copyinfo[idx].hva, copyinfo[idx].len);
		len -= copyinfo[idx].len;
		src += copyinfo[idx].len;
		idx++;
	}
}

/*
 * Return the amount of in-use and wired memory for the VM. Since
 * these are global stats, only return the values for vCPU 0.
 */
VMM_STAT_DECLARE(VMM_MEM_RESIDENT);
VMM_STAT_DECLARE(VMM_MEM_WIRED);

static void
vm_get_rescnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
		    PAGE_SIZE * vmspace_resident_count(vm->vmspace));
	}
}

static void
vm_get_wiredcnt(struct vm *vm, int vcpu, struct vmm_stat_type *stat)
{

	if (vcpu == 0) {
		vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
		    PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
	}
}

VMM_STAT_FUNC(VMM_MEM_RESIDENT, "Resident memory", vm_get_rescnt);
VMM_STAT_FUNC(VMM_MEM_WIRED, "Wired memory", vm_get_wiredcnt);
