vmm.c revision 241454
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>

#include <machine/vm.h>
#include <machine/pcb.h>
#include <x86/apicreg.h>

#include <machine/vmm.h>
#include "vmm_mem.h"
#include "vmm_util.h"
#include <machine/vmm_dev.h>
#include "vlapic.h"
#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

struct vcpu {
	int		flags;
	int		pincpu;		/* host cpuid this vcpu is bound to */
	int		hostcpu;	/* host cpuid this vcpu last ran on */
	uint64_t	guest_msrs[VMM_MSR_NUM];
	struct vlapic	*vlapic;
	int		vcpuid;
	struct savefpu	*guestfpu;	/* guest fpu state */
	void		*stats;
	struct vm_exit	exitinfo;
	enum x2apic_state x2apic_state;
};
#define	VCPU_F_PINNED	0x0001
#define	VCPU_F_RUNNING	0x0002

#define	VCPU_PINCPU(vm, vcpuid)	\
    ((vm->vcpu[vcpuid].flags & VCPU_F_PINNED) ? vm->vcpu[vcpuid].pincpu : -1)

#define	VCPU_UNPIN(vm, vcpuid)	(vm->vcpu[vcpuid].flags &= ~VCPU_F_PINNED)

#define	VCPU_PIN(vm, vcpuid, host_cpuid)				\
do {									\
	vm->vcpu[vcpuid].flags |= VCPU_F_PINNED;			\
	vm->vcpu[vcpuid].pincpu = host_cpuid;				\
} while (0)

#define	VM_MAX_MEMORY_SEGMENTS	2

struct vm {
	void		*cookie;	/* processor-specific data */
	void		*iommu;		/* iommu-specific data */
	struct vcpu	vcpu[VM_MAXCPU];
	int		num_mem_segs;
	struct vm_memory_segment mem_segs[VM_MAX_MEMORY_SEGMENTS];
	char		name[VM_MAX_NAMELEN];

	/*
	 * Set of active vcpus.
	 * An active vcpu is one that has been started implicitly (BSP) or
	 * explicitly (AP) by sending it a startup IPI.
	 */
	cpuset_t	active_cpus;
};

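/*
 * 'ops' points at the hardware-specific backend (Intel VT-x or AMD SVM)
 * and is set exactly once, in vmm_init().  Each wrapper macro below
 * degrades gracefully if no backend was found: operations that return
 * an errno report ENXIO, the rest return NULL or 0.
 */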
static struct vmm_ops *ops;
#define	VMM_INIT()	(ops != NULL ? (*ops->init)() : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)

#define	VMINIT(vm)	(ops != NULL ? (*ops->vminit)(vm) : NULL)
#define	VMRUN(vmi, vcpu, rip) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMMMAP_SET(vmi, gpa, hpa, len, attr, prot, spm)			\
	(ops != NULL ?							\
	(*ops->vmmmap_set)(vmi, gpa, hpa, len, attr, prot, spm) :	\
	ENXIO)
#define	VMMMAP_GET(vmi, gpa) \
	(ops != NULL ? (*ops->vmmmap_get)(vmi, gpa) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)		\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMINJECT(vmi, vcpu, type, vec, ec, ecv)	\
	(ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
#define	VMNMI(vmi, vcpu)	\
	(ops != NULL ? (*ops->vmnmi)(vmi, vcpu) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)

#define	fpu_start_emulating()	start_emulating()
#define	fpu_stop_emulating()	stop_emulating()

static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);	/* msr_mask can keep track of up to 64 msrs */

/* statistics */
static VMM_STAT_DEFINE(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

static void
vcpu_cleanup(struct vcpu *vcpu)
{
	vlapic_cleanup(vcpu->vlapic);
	vmm_stat_free(vcpu->stats);
	fpu_save_area_free(vcpu->guestfpu);
}

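/*
 * Set up per-vcpu state: allocate the local APIC and the FPU save
 * area, record that the vcpu has not run yet (hostcpu == -1) and
 * start it out with x2APIC enabled.
 */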
static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpu_id];

	vcpu->hostcpu = -1;
	vcpu->vcpuid = vcpu_id;
	vcpu->vlapic = vlapic_init(vm, vcpu_id);
	vm_set_x2apic_state(vm, vcpu_id, X2APIC_ENABLED);
	vcpu->guestfpu = fpu_save_area_alloc();
	fpu_save_area_reset(vcpu->guestfpu);
	vcpu->stats = vmm_stat_alloc();
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

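/*
 * One-time module initialization: set up the IPI machinery and the
 * physical memory allocator, then select the backend that matches the
 * host CPU.
 */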
static int
vmm_init(void)
{
	int error;

	vmm_ipi_init();

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_msr_init();

	return (VMM_INIT());
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		iommu_init();
		error = vmm_init();
		break;
	case MOD_UNLOAD:
		error = vmmdev_cleanup();
		if (error == 0) {
			iommu_cleanup();
			vmm_ipi_cleanup();
			error = VMM_CLEANUP();
		}
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * Execute the module load handler after the pci passthru driver has had
 * a chance to claim devices. We need this information at the time we do
 * iommu initialization.
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_CONFIGURE + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

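/*
 * Create a new virtual machine.  All VM_MAXCPU vcpus are initialized up
 * front but only the BSP (vcpu 0) is activated; APs are activated later
 * when they are sent a startup IPI.
 *
 * A hypothetical caller (names illustrative):
 *
 *	struct vm *vm;
 *
 *	vm = vm_create("testvm");
 *	if (vm == NULL)
 *		return (EINVAL);
 */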
struct vm *
vm_create(const char *name)
{
	int i;
	struct vm *vm;
	vm_paddr_t maxaddr;

	const int BSP = 0;

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (NULL);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->cookie = VMINIT(vm);

	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu_init(vm, i);
		guest_msrs_init(vm, i);
	}

	maxaddr = vmm_mem_maxaddr();
	vm->iommu = iommu_create_domain(maxaddr);
	vm_activate_cpu(vm, BSP);

	return (vm);
}

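/*
 * Free a memory segment one page at a time: each page's 1:1 mapping is
 * restored in the host iommu domain before the page is handed back to
 * the vmm physical memory allocator.
 */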
static void
vm_free_mem_seg(struct vm *vm, struct vm_memory_segment *seg)
{
	size_t len;
	vm_paddr_t hpa;
	void *host_domain;

	host_domain = iommu_host_domain();

	len = 0;
	while (len < seg->len) {
		hpa = vm_gpa2hpa(vm, seg->gpa + len, PAGE_SIZE);
		if (hpa == (vm_paddr_t)-1) {
			panic("vm_free_mem_seg: cannot free hpa "
			      "associated with gpa 0x%016lx", seg->gpa + len);
		}

		/*
		 * Remove the 'gpa' to 'hpa' mapping in the VM's domain
		 * and resurrect the 1:1 mapping for 'hpa' in 'host_domain'.
		 */
		iommu_remove_mapping(vm->iommu, seg->gpa + len, PAGE_SIZE);
		iommu_create_mapping(host_domain, hpa, hpa, PAGE_SIZE);

		vmm_mem_free(hpa, PAGE_SIZE);

		len += PAGE_SIZE;
	}

	/*
	 * Invalidate cached translations associated with 'vm->iommu' since
	 * we have now moved some pages from it.
	 */
	iommu_invalidate_tlb(vm->iommu);

	bzero(seg, sizeof(struct vm_memory_segment));
}

void
vm_destroy(struct vm *vm)
{
	int i;

	ppt_unassign_all(vm);

	for (i = 0; i < vm->num_mem_segs; i++)
		vm_free_mem_seg(vm, &vm->mem_segs[i]);

	vm->num_mem_segs = 0;

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(&vm->vcpu[i]);

	iommu_destroy_domain(vm->iommu);

	VMCLEANUP(vm->cookie);

	free(vm, M_VM);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	return (VMMMAP_SET(vm->cookie, gpa, hpa, len, VM_MEMATTR_UNCACHEABLE,
			   VM_PROT_RW, spok));
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	return (VMMMAP_SET(vm->cookie, gpa, 0, len, 0,
			   VM_PROT_NONE, spok));
}

/*
 * Returns TRUE if 'gpa' is available for allocation and FALSE otherwise.
 */
static boolean_t
vm_gpa_available(struct vm *vm, vm_paddr_t gpa)
{
	int i;
	vm_paddr_t gpabase, gpalimit;

	if (gpa & PAGE_MASK)
		panic("vm_gpa_available: gpa (0x%016lx) not page aligned", gpa);

	for (i = 0; i < vm->num_mem_segs; i++) {
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa < gpalimit)
			return (FALSE);
	}

	return (TRUE);
}

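/*
 * Allocate 'len' bytes of host memory and map them into the guest
 * starting at 'gpa'.  'gpa' and 'len' must be page aligned and the
 * range must be either entirely free or entirely allocated already;
 * a partial overlap with an existing segment fails with EINVAL.
 *
 * E.g. wiring 256MB of guest memory at guest physical address 0
 * (sizes illustrative):
 *
 *	error = vm_malloc(vm, 0, 256 * 1024 * 1024);
 */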
int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int error, available, allocated;
	struct vm_memory_segment *seg;
	vm_paddr_t g, hpa;
	void *host_domain;

	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	if ((gpa & PAGE_MASK) || (len & PAGE_MASK) || len == 0)
		return (EINVAL);

	available = allocated = 0;
	g = gpa;
	while (g < gpa + len) {
		if (vm_gpa_available(vm, g))
			available++;
		else
			allocated++;

		g += PAGE_SIZE;
	}

	/*
	 * If there are some allocated and some available pages in the address
	 * range then it is an error.
	 */
	if (allocated && available)
		return (EINVAL);

	/*
	 * If the entire address range being requested has already been
	 * allocated then there isn't anything more to do.
	 */
	if (allocated && available == 0)
		return (0);

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	host_domain = iommu_host_domain();

	seg = &vm->mem_segs[vm->num_mem_segs];

	error = 0;
	seg->gpa = gpa;
	seg->len = 0;
	while (seg->len < len) {
		hpa = vmm_mem_alloc(PAGE_SIZE);
		if (hpa == 0) {
			error = ENOMEM;
			break;
		}

		error = VMMMAP_SET(vm->cookie, gpa + seg->len, hpa, PAGE_SIZE,
				   VM_MEMATTR_WRITE_BACK, VM_PROT_ALL, spok);
		if (error)
			break;

		/*
		 * Remove the 1:1 mapping for 'hpa' from 'host_domain' and
		 * add a mapping from 'gpa + seg->len' to 'hpa' in the VM's
		 * domain.
		 */
		iommu_remove_mapping(host_domain, hpa, PAGE_SIZE);
		iommu_create_mapping(vm->iommu, gpa + seg->len, hpa, PAGE_SIZE);

		seg->len += PAGE_SIZE;
	}

	if (error) {
		vm_free_mem_seg(vm, seg);
		return (error);
	}

	/*
	 * Invalidate cached translations associated with 'host_domain' since
	 * we have now moved some pages from it.
	 */
	iommu_invalidate_tlb(host_domain);

	vm->num_mem_segs++;

	return (0);
}

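/*
 * Translate a guest physical address to a host physical address.  The
 * 'len' argument only enforces that the range does not cross a page
 * boundary, since the returned translation is valid for a single page.
 */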
vm_paddr_t
vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	vm_paddr_t nextpage;

	nextpage = rounddown(gpa + PAGE_SIZE, PAGE_SIZE);
	if (len > nextpage - gpa)
		panic("vm_gpa2hpa: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	return (VMMMAP_GET(vm->cookie, gpa));
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
		  struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			*seg = vm->mem_segs[i];
			return (0);
		}
	}
	return (-1);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_get_pinning(struct vm *vm, int vcpuid, int *cpuid)
{

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*cpuid = VCPU_PINCPU(vm, vcpuid);

	return (0);
}

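/*
 * Bind the calling thread (assumed to be the one backing 'vcpuid') to
 * 'host_cpuid', or unbind it when 'host_cpuid' is negative.
 */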
int
vm_set_pinning(struct vm *vm, int vcpuid, int host_cpuid)
{
	struct thread *td;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	td = curthread;		/* XXXSMP only safe when muxing vcpus */

	/* unpin */
	if (host_cpuid < 0) {
		VCPU_UNPIN(vm, vcpuid);
		thread_lock(td);
		sched_unbind(td);
		thread_unlock(td);
		return (0);
	}

	if (CPU_ABSENT(host_cpuid))
		return (EINVAL);

	/*
	 * XXX we should check that 'host_cpuid' has not already been pinned
	 * by another vm.
	 */
	thread_lock(td);
	sched_bind(td, host_cpuid);
	thread_unlock(td);
	VCPU_PIN(vm, vcpuid, host_cpuid);

	return (0);
}

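/*
 * FPU context switch for a vcpu.  fpu_stop_emulating() clears CR0.TS so
 * the guest state can be loaded; save_guest_fpustate() sets it again so
 * any host FPU use after a vmexit traps and is handled normally.
 */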
static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

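/*
 * Run a vcpu until it exits back to userspace.  The critical section
 * keeps the thread on one host cpu while the guest MSR and FPU state
 * are live, and the TSC delta around VMRUN is charged to the
 * VCPU_TOTAL_RUNTIME statistic.
 */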
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	critical_enter();

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	vcpu->hostcpu = curcpu;

	restore_guest_msrs(vm, vcpuid);
	restore_guest_fpustate(vcpu);
	error = VMRUN(vm->cookie, vcpuid, vmrun->rip);
	save_guest_fpustate(vcpu);
	restore_host_msrs(vm, vcpuid);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	/* copy the exit information */
	bcopy(&vcpu->exitinfo, &vmrun->vm_exit, sizeof(struct vm_exit));

	critical_exit();

	return (error);
}

int
vm_inject_event(struct vm *vm, int vcpuid, int type,
		int vector, uint32_t code, int code_valid)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (type <= VM_EVENT_NONE || type >= VM_EVENT_MAX)
		return (EINVAL);

	if (vector < 0 || vector > 255)
		return (EINVAL);

	return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
}

int
vm_inject_nmi(struct vm *vm, int vcpu)
{
	int error;

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	error = VMNMI(vm->cookie, vcpu);
	vm_interrupt_hostcpu(vm, vcpu);
	return (error);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

uint64_t *
vm_guest_msrs(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].guest_msrs);
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

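/*
 * Return TRUE if bus/slot/func matches an entry in the 'pptdevs'
 * tunable.  The string is tokenized in place: each space is temporarily
 * replaced with a nul and then restored, leaving the kernel environment
 * value intact for later lookups.
 */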
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, b, s, f, n;
	char *val, *cp, *cp2;

	/*
	 * setenv pptdevs "1/2/3 4/5/6 7/8/9 10/11/12"
	 */
	found = 0;
	cp = val = getenv("pptdevs");
	while (cp != NULL && *cp != '\0') {
		if ((cp2 = strchr(cp, ' ')) != NULL)
			*cp2 = '\0';

		n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
		if (n == 3 && bus == b && slot == s && func == f) {
			found = 1;
			break;
		}

		if (cp2 != NULL)
			*cp2++ = ' ';

		cp = cp2;
	}
	freeenv(val);
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

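/*
 * Run-state accounting.  The transitions are sanity-checked: marking a
 * vcpu as running when it already is (or as stopped when it already is)
 * indicates a caller bug and panics.
 */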
void
vm_set_run_state(struct vm *vm, int vcpuid, int state)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_set_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (state == VCPU_RUNNING) {
		if (vcpu->flags & VCPU_F_RUNNING) {
			panic("vm_set_run_state: %s[%d] is already running",
			      vm_name(vm), vcpuid);
		}
		vcpu->flags |= VCPU_F_RUNNING;
	} else {
		if ((vcpu->flags & VCPU_F_RUNNING) == 0) {
			panic("vm_set_run_state: %s[%d] is already stopped",
			      vm_name(vm), vcpuid);
		}
		vcpu->flags &= ~VCPU_F_RUNNING;
	}
}

int
vm_get_run_state(struct vm *vm, int vcpuid, int *cpuptr)
{
	int retval, hostcpu;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_get_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];
	if (vcpu->flags & VCPU_F_RUNNING) {
		retval = VCPU_RUNNING;
		hostcpu = vcpu->hostcpu;
	} else {
		retval = VCPU_STOPPED;
		hostcpu = -1;
	}

	if (cpuptr)
		*cpuptr = hostcpu;

	return (retval);
}

void
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
		CPU_SET(vcpuid, &vm->active_cpus);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state < 0 || state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	vlapic_set_x2apic_state(vm, vcpuid, state);

	return (0);
}