vmm.c revision 223621
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <vm/vm.h>

#include <machine/vm.h>
#include <machine/pcb.h>
#include <x86/apicreg.h>

#include <machine/vmm.h>
#include "vmm_mem.h"
#include "vmm_util.h"
#include <machine/vmm_dev.h>
#include "vlapic.h"
#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

struct vcpu {
	int		flags;
	int		pincpu;		/* host cpuid this vcpu is bound to */
	int		hostcpu;	/* host cpuid this vcpu last ran on */
	uint64_t	guest_msrs[VMM_MSR_NUM];
	struct vlapic	*vlapic;
	int		 vcpuid;
	struct savefpu	savefpu;	/* guest fpu state */
	void		*stats;
};
#define	VCPU_F_PINNED	0x0001
#define	VCPU_F_RUNNING	0x0002

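/*
 * Helpers for the pinning state kept in vcpu->flags and vcpu->pincpu.
 * VCPU_PINCPU() evaluates to the pinned host cpu, or -1 if the vcpu is
 * not pinned; VCPU_PIN() records the host cpu and sets the flag, while
 * VCPU_UNPIN() just clears the flag.
 */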
#define	VCPU_PINCPU(vm, vcpuid)	\
    ((vm->vcpu[vcpuid].flags & VCPU_F_PINNED) ? vm->vcpu[vcpuid].pincpu : -1)

#define	VCPU_UNPIN(vm, vcpuid)	(vm->vcpu[vcpuid].flags &= ~VCPU_F_PINNED)

#define	VCPU_PIN(vm, vcpuid, host_cpuid)				\
do {									\
	vm->vcpu[vcpuid].flags |= VCPU_F_PINNED;			\
	vm->vcpu[vcpuid].pincpu = host_cpuid;				\
} while (0)

#define	VM_MAX_MEMORY_SEGMENTS	2

struct vm {
	void		*cookie;	/* processor-specific data */
	void		*iommu;		/* iommu-specific data */
	struct vcpu	vcpu[VM_MAXCPU];
	int		num_mem_segs;
	struct vm_memory_segment mem_segs[VM_MAX_MEMORY_SEGMENTS];
	char		name[VM_MAX_NAMELEN];

	/*
	 * Set of active vcpus.
	 * An active vcpu is one that has been started implicitly (BSP) or
	 * explicitly (AP) by sending it a startup ipi.
	 */
	cpuset_t	active_cpus;
};

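/*
 * The hardware-specific backend (Intel VT-x or AMD SVM) registers itself
 * through 'ops' when the module initializes; the VMM*() macros below
 * dispatch into it and fail gracefully (ENXIO or NULL) if no backend has
 * been selected.
 */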
static struct vmm_ops *ops;
#define	VMM_INIT()	(ops != NULL ? (*ops->init)() : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)

#define	VMINIT(vm)	(ops != NULL ? (*ops->vminit)(vm): NULL)
#define	VMRUN(vmi, vcpu, rip, vmexit) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, vmexit) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMMMAP(vmi, gpa, hpa, len, attr, prot, spm)	\
    (ops != NULL ? (*ops->vmmmap)(vmi, gpa, hpa, len, attr, prot, spm) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)		\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMINJECT(vmi, vcpu, type, vec, ec, ecv)	\
	(ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
#define	VMNMI(vmi, vcpu)	\
	(ops != NULL ? (*ops->vmnmi)(vmi, vcpu) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)

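/*
 * Inline wrappers for the FPU instructions used to swap guest and host
 * FPU/SSE state.  fpu_start_emulating() sets CR0.TS via smsw/lmsw so the
 * next FPU access traps (#NM); fpu_stop_emulating() clears it with clts.
 */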
#define	fxrstor(addr)		__asm("fxrstor %0" : : "m" (*(addr)))
#define	fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
#define	fpu_start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
				      : : "n" (CR0_TS) : "ax")
#define	fpu_stop_emulating()	__asm("clts")

static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);	/* msr_mask can keep track of up to 64 msrs */

/* statistics */
static VMM_STAT_DEFINE(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

static void
vcpu_cleanup(struct vcpu *vcpu)
{
	vlapic_cleanup(vcpu->vlapic);
	vmm_stat_free(vcpu->stats);
}

static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpu_id];

	vcpu->hostcpu = -1;
	vcpu->vcpuid = vcpu_id;
	vcpu->vlapic = vlapic_init(vm, vcpu_id);
	/* Seed the guest FPU area from the current thread's saved state. */
	fpugetregs(curthread);
	vcpu->savefpu = curthread->td_pcb->pcb_user_save;
	vcpu->stats = vmm_stat_alloc();
}

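/*
 * Module-wide initialization: set up the IPI vector used to kick vcpus,
 * initialize the memory allocator, and select the hardware backend before
 * handing off to its init routine.
 */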
static int
vmm_init(void)
{
	int error;

	vmm_ipi_init();

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_msr_init();

	return (VMM_INIT());
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		iommu_init();
		error = vmm_init();
		break;
	case MOD_UNLOAD:
		vmmdev_cleanup();
		iommu_cleanup();
		vmm_ipi_cleanup();
		error = VMM_CLEANUP();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * Execute the module load handler after the pci passthru driver has had
 * a chance to claim devices. We need this information at the time we do
 * iommu initialization.
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_CONFIGURE + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

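/*
 * Create a named VM.  A typical consumer (e.g. the vmmdev character
 * device) would follow a sequence along these lines:
 *
 *	vm = vm_create("guest0");
 *	vm_malloc(vm, 0, ram_size, &hpa);	(back guest memory)
 *	... call vm_run() in a loop, handling each vm_exit ...
 *	vm_destroy(vm);
 *
 * All VM_MAXCPU vcpus are initialized up front but only the BSP is
 * marked active; APs become active when they receive a startup IPI.
 */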
struct vm *
vm_create(const char *name)
{
	int i;
	struct vm *vm;
	vm_paddr_t maxaddr;

	const int BSP = 0;

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (NULL);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->cookie = VMINIT(vm);

	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu_init(vm, i);
		guest_msrs_init(vm, i);
	}

	maxaddr = vmm_mem_maxaddr();
	vm->iommu = iommu_create_domain(maxaddr);
	vm_activate_cpu(vm, BSP);

	return (vm);
}

void
vm_destroy(struct vm *vm)
{
	int i;

	ppt_unassign_all(vm);

	for (i = 0; i < vm->num_mem_segs; i++)
		vmm_mem_free(vm->mem_segs[i].hpa, vm->mem_segs[i].len);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(&vm->vcpu[i]);

	iommu_destroy_domain(vm->iommu);

	VMCLEANUP(vm->cookie);

	free(vm, M_VM);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	return (VMMMAP(vm->cookie, gpa, hpa, len, VM_MEMATTR_UNCACHEABLE,
		       VM_PROT_RW, spok));
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	return (VMMMAP(vm->cookie, gpa, 0, len, VM_MEMATTR_UNCACHEABLE,
		       VM_PROT_NONE, spok));
}

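/*
 * Allocate 'len' bytes of host memory and map it into the guest at 'gpa'.
 * The allocation is idempotent: if the range was already vm_malloc'd the
 * existing hpa is returned.  The backing memory is also mapped 1:1 in the
 * VM's iommu domain so that passthru devices can DMA into it.
 */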
int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa)
{
	int error;
	vm_paddr_t hpa;

	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	/*
	 * Find the hpa if the range was already vm_malloc'd.
	 */
	hpa = vm_gpa2hpa(vm, gpa, len);
	if (hpa != ((vm_paddr_t)-1))
		goto out;

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	hpa = vmm_mem_alloc(len);
	if (hpa == 0)
		return (ENOMEM);

	error = VMMMAP(vm->cookie, gpa, hpa, len, VM_MEMATTR_WRITE_BACK,
		       VM_PROT_ALL, spok);
	if (error) {
		vmm_mem_free(hpa, len);
		return (error);
	}

	iommu_create_mapping(vm->iommu, gpa, hpa, len);

	vm->mem_segs[vm->num_mem_segs].gpa = gpa;
	vm->mem_segs[vm->num_mem_segs].hpa = hpa;
	vm->mem_segs[vm->num_mem_segs].len = len;
	vm->num_mem_segs++;
out:
	*ret_hpa = hpa;
	return (0);
}

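/*
 * Translate a guest physical address to a host physical address by a
 * linear search of the memory segments.  The entire [gpa, gpa + len)
 * range must fall within a single segment; (vm_paddr_t)-1 is returned
 * on failure.
 */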
vm_paddr_t
vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int i;
	vm_paddr_t gpabase, gpalimit, hpabase;

	for (i = 0; i < vm->num_mem_segs; i++) {
		hpabase = vm->mem_segs[i].hpa;
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa + len <= gpalimit)
			return ((gpa - gpabase) + hpabase);
	}
	return ((vm_paddr_t)-1);
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
		  struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			*seg = vm->mem_segs[i];
			return (0);
		}
	}
	return (-1);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_get_pinning(struct vm *vm, int vcpuid, int *cpuid)
{

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*cpuid = VCPU_PINCPU(vm, vcpuid);

	return (0);
}

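/*
 * Pin or unpin a vcpu by binding the calling thread to 'host_cpuid' via
 * sched_bind(); a negative 'host_cpuid' undoes the pinning.  This relies
 * on the vcpu always being run from the same thread (see XXXSMP below).
 */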
int
vm_set_pinning(struct vm *vm, int vcpuid, int host_cpuid)
{
	struct thread *td;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	td = curthread;		/* XXXSMP only safe when muxing vcpus */

	/* unpin */
	if (host_cpuid < 0) {
		VCPU_UNPIN(vm, vcpuid);
		thread_lock(td);
		sched_unbind(td);
		thread_unlock(td);
		return (0);
	}

	if (CPU_ABSENT(host_cpuid))
		return (EINVAL);

	/*
	 * XXX we should check that 'host_cpuid' has not already been pinned
	 * by another vm.
	 */
	thread_lock(td);
	sched_bind(td, host_cpuid);
	thread_unlock(td);
	VCPU_PIN(vm, vcpuid, host_cpuid);

	return (0);
}

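/*
 * Swap the guest FPU state in or out with interrupts disabled so that
 * the host FPU context cannot change underneath us.  CR0.TS is cleared
 * around the fxsave/fxrstor and set again afterwards so that any later
 * FPU use by the host traps and restores the host state lazily.
 */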
static void
restore_guest_fpustate(struct vcpu *vcpu)
{
	register_t s;

	s = intr_disable();
	fpu_stop_emulating();
	fxrstor(&vcpu->savefpu);
	fpu_start_emulating();
	intr_restore(s);
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{
	register_t s;

	s = intr_disable();
	fpu_stop_emulating();
	fxsave(&vcpu->savefpu);
	fpu_start_emulating();
	intr_restore(s);
}

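/*
 * Run a vcpu until the backend returns with an exit that the caller must
 * handle.  Inside a critical section the guest MSRs and FPU state are
 * loaded, the backend's vmrun entry point is invoked, and the host state
 * is restored on the way out.  The elapsed TSC cycles are charged to the
 * VCPU_TOTAL_RUNTIME statistic.
 */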
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	critical_enter();

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	vcpu->hostcpu = curcpu;

	fpuexit(curthread);
	restore_guest_msrs(vm, vcpuid);
	restore_guest_fpustate(vcpu);
	error = VMRUN(vm->cookie, vcpuid, vmrun->rip, &vmrun->vm_exit);
	save_guest_fpustate(vcpu);
	restore_host_msrs(vm, vcpuid);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	return (error);
}

int
vm_inject_event(struct vm *vm, int vcpuid, int type,
		int vector, uint32_t code, int code_valid)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if ((type > VM_EVENT_NONE && type < VM_EVENT_MAX) == 0)
		return (EINVAL);

	if (vector < 0 || vector > 255)
		return (EINVAL);

	return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
}

int
vm_inject_nmi(struct vm *vm, int vcpu)
{
	int error;

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	error = VMNMI(vm->cookie, vcpu);
	vm_interrupt_hostcpu(vm, vcpu);
	return (error);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

uint64_t *
vm_guest_msrs(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].guest_msrs);
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

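/*
 * Check whether the pci device at bus/slot/func is listed in the
 * 'pptdevs' tunable.  Each token is temporarily NUL-terminated for
 * sscanf() and the separating space is restored before moving on.
 */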
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, b, s, f, n;
	char *val, *cp, *cp2;

	/*
	 * setenv pptdevs "1/2/3 4/5/6 7/8/9 10/11/12"
	 */
	found = 0;
	cp = val = getenv("pptdevs");
	while (cp != NULL && *cp != '\0') {
		if ((cp2 = strchr(cp, ' ')) != NULL)
			*cp2 = '\0';

		n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
		if (n == 3 && bus == b && slot == s && func == f) {
			found = 1;
			break;
		}

		if (cp2 != NULL)
			*cp2++ = ' ';

		cp = cp2;
	}
	freeenv(val);
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

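/*
 * Bookkeeping for whether a vcpu is currently executing guest code.  The
 * panics are deliberate: transitioning to a state the vcpu is already in
 * indicates a bug in the caller.
 */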
void
vm_set_run_state(struct vm *vm, int vcpuid, int state)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_set_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (state == VCPU_RUNNING) {
		if (vcpu->flags & VCPU_F_RUNNING) {
			panic("vm_set_run_state: %s[%d] is already running",
			      vm_name(vm), vcpuid);
		}
		vcpu->flags |= VCPU_F_RUNNING;
	} else {
		if ((vcpu->flags & VCPU_F_RUNNING) == 0) {
			panic("vm_set_run_state: %s[%d] is already stopped",
			      vm_name(vm), vcpuid);
		}
		vcpu->flags &= ~VCPU_F_RUNNING;
	}
}

int
vm_get_run_state(struct vm *vm, int vcpuid, int *cpuptr)
{
	int retval, hostcpu;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_get_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];
	if (vcpu->flags & VCPU_F_RUNNING) {
		retval = VCPU_RUNNING;
		hostcpu = vcpu->hostcpu;
	} else {
		retval = VCPU_STOPPED;
		hostcpu = -1;
	}

	if (cpuptr)
		*cpuptr = hostcpu;

	return (retval);
}

void
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
		CPU_SET(vcpuid, &vm->active_cpus);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}