vmm.c revision 221828
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <vm/vm.h>

#include <machine/vm.h>
#include <machine/pcb.h>
#include <machine/apicreg.h>

#include <machine/vmm.h>
#include "vmm_mem.h"
#include "vmm_util.h"
#include <machine/vmm_dev.h>
#include "vlapic.h"
#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

struct vcpu {
	int		flags;
	int		pincpu;		/* host cpuid this vcpu is bound to */
	int		hostcpu;	/* host cpuid this vcpu last ran on */
	uint64_t	guest_msrs[VMM_MSR_NUM];
	struct vlapic	*vlapic;
	int		 vcpuid;
	struct savefpu	savefpu;	/* guest fpu state */
	void		*stats;
};
#define	VCPU_F_PINNED	0x0001
#define	VCPU_F_RUNNING	0x0002

#define	VCPU_PINCPU(vm, vcpuid)	\
    ((vm->vcpu[vcpuid].flags & VCPU_F_PINNED) ? vm->vcpu[vcpuid].pincpu : -1)

#define	VCPU_UNPIN(vm, vcpuid)	(vm->vcpu[vcpuid].flags &= ~VCPU_F_PINNED)

#define	VCPU_PIN(vm, vcpuid, host_cpuid)				\
do {									\
	vm->vcpu[vcpuid].flags |= VCPU_F_PINNED;			\
	vm->vcpu[vcpuid].pincpu = host_cpuid;				\
} while (0)
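
/*
 * Usage sketch (illustrative, not part of the original file): the pinning
 * macros keep the pin state in the vcpu itself, e.g.
 *
 *	VCPU_PIN(vm, 0, 2);		bind vcpu 0 to host cpu 2
 *	cpuid = VCPU_PINCPU(vm, 0);	yields 2 while pinned
 *	VCPU_UNPIN(vm, 0);		VCPU_PINCPU() returns -1 again
 */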

#define	VM_MAX_MEMORY_SEGMENTS	2

struct vm {
	void		*cookie;	/* processor-specific data */
	void		*iommu;		/* iommu-specific data */
	struct vcpu	vcpu[VM_MAXCPU];
	int		num_mem_segs;
	struct vm_memory_segment mem_segs[VM_MAX_MEMORY_SEGMENTS];
	char		name[VM_MAX_NAMELEN];

	/*
	 * Mask of active vcpus.
	 * An active vcpu is one that has been started implicitly (BSP) or
	 * explicitly (AP) by sending it a startup ipi.
	 */
	cpumask_t	active_cpus;
};

static struct vmm_ops *ops;
#define	VMM_INIT()	(ops != NULL ? (*ops->init)() : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)

#define	VMINIT(vm)	(ops != NULL ? (*ops->vminit)(vm) : NULL)
#define	VMRUN(vmi, vcpu, rip, vmexit) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip, vmexit) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMMMAP(vmi, gpa, hpa, len, attr, prot, spm)	\
    (ops != NULL ? (*ops->vmmmap)(vmi, gpa, hpa, len, attr, prot, spm) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)		\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMINJECT(vmi, vcpu, type, vec, ec, ecv)	\
	(ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
#define	VMNMI(vmi, vcpu)	\
	(ops != NULL ? (*ops->vmnmi)(vmi, vcpu) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)
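
/*
 * Note (illustrative): every entry point above dispatches through the
 * single 'ops' vector chosen once at module load, so a hardware backend
 * is just a filled-in struct vmm_ops; a sketch, with 'vmx_init' and
 * 'vmx_run' as hypothetical names:
 *
 *	struct vmm_ops vmm_ops_intel = {
 *		.init	= vmx_init,
 *		.vmrun	= vmx_run,
 *		...
 *	};
 */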

#define	fxrstor(addr)		__asm("fxrstor %0" : : "m" (*(addr)))
#define	fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
#define	fpu_start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
				      : : "n" (CR0_TS) : "ax")
#define	fpu_stop_emulating()	__asm("clts")
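
/*
 * fpu_start_emulating() sets CR0.TS (smsw/or/lmsw) so that the next FPU
 * access traps, and fpu_stop_emulating() clears it with clts. The
 * save/restore routines below bracket fxsave/fxrstor with these to hand
 * the FPU cleanly between host and guest.
 */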

static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);	/* msr_mask can keep track of up to 64 msrs */

/* statistics */
static VMM_STAT_DEFINE(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

static void
vcpu_cleanup(struct vcpu *vcpu)
{
	vlapic_cleanup(vcpu->vlapic);
	vmm_stat_free(vcpu->stats);
}

static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpu_id];

	vcpu->hostcpu = -1;
	vcpu->vcpuid = vcpu_id;
	vcpu->vlapic = vlapic_init(vm, vcpu_id);
	fpugetregs(curthread, &vcpu->savefpu);
	vcpu->stats = vmm_stat_alloc();
}

static int
vmm_init(void)
{
	int error;

	vmm_ipi_init();

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_msr_init();

	return (VMM_INIT());
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		iommu_init();
		error = vmm_init();
		break;
	case MOD_UNLOAD:
		vmmdev_cleanup();
		iommu_cleanup();
		vmm_ipi_cleanup();
		error = VMM_CLEANUP();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * Execute the module load handler after the pci passthru driver has had
 * a chance to claim devices. We need this information at the time we do
 * iommu initialization.
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_CONFIGURE + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

struct vm *
vm_create(const char *name)
{
	int i;
	struct vm *vm;
	vm_paddr_t maxaddr;

	const int BSP = 0;

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (NULL);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->cookie = VMINIT(vm);

	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu_init(vm, i);
		guest_msrs_init(vm, i);
	}

	maxaddr = vmm_mem_maxaddr();
	vm->iommu = iommu_create_domain(maxaddr);
	vm_activate_cpu(vm, BSP);

	return (vm);
}
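
/*
 * Usage sketch (illustrative): callers pair vm_create() with
 * vm_destroy() and must handle a NULL return, e.g.
 *
 *	struct vm *vm;
 *
 *	vm = vm_create("testvm");
 *	if (vm == NULL)
 *		return (EINVAL);
 *	...
 *	vm_destroy(vm);
 */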

void
vm_destroy(struct vm *vm)
{
	int i;

	ppt_unassign_all(vm);

	for (i = 0; i < vm->num_mem_segs; i++)
		vmm_mem_free(vm->mem_segs[i].hpa, vm->mem_segs[i].len);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(&vm->vcpu[i]);

	iommu_destroy_domain(vm->iommu);

	VMCLEANUP(vm->cookie);

	free(vm, M_VM);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	return (VMMMAP(vm->cookie, gpa, hpa, len, VM_MEMATTR_UNCACHEABLE,
		       VM_PROT_RW, spok));
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	return (VMMMAP(vm->cookie, gpa, 0, len, VM_MEMATTR_UNCACHEABLE,
		       VM_PROT_NONE, spok));
}
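
/*
 * Usage sketch (illustrative): a passthru device's BAR can be exposed
 * to the guest by mapping guest-physical onto host-physical, uncached:
 *
 *	error = vm_map_mmio(vm, gpa, bar_len, bar_hpa);
 *	...
 *	error = vm_unmap_mmio(vm, gpa, bar_len);
 *
 * 'bar_len' and 'bar_hpa' are hypothetical values taken from the BAR.
 */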

int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa)
{
	int error;
	vm_paddr_t hpa;

	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	/*
	 * Find the hpa if it was already vm_malloc'd.
	 */
	hpa = vm_gpa2hpa(vm, gpa, len);
	if (hpa != ((vm_paddr_t)-1))
		goto out;

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	hpa = vmm_mem_alloc(len);
	if (hpa == 0)
		return (ENOMEM);

	error = VMMMAP(vm->cookie, gpa, hpa, len, VM_MEMATTR_WRITE_BACK,
		       VM_PROT_ALL, spok);
	if (error) {
		vmm_mem_free(hpa, len);
		return (error);
	}

	iommu_create_mapping(vm->iommu, gpa, hpa, len);

	vm->mem_segs[vm->num_mem_segs].gpa = gpa;
	vm->mem_segs[vm->num_mem_segs].hpa = hpa;
	vm->mem_segs[vm->num_mem_segs].len = len;
	vm->num_mem_segs++;
out:
	*ret_hpa = hpa;
	return (0);
}
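
/*
 * Usage sketch (illustrative): backing the low 256MB of guest-physical
 * address space; the host-physical base comes back through 'hpa':
 *
 *	vm_paddr_t hpa;
 *
 *	error = vm_malloc(vm, 0, 256 * 1024 * 1024, &hpa);
 *	if (error)
 *		return (error);
 */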

vm_paddr_t
vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int i;
	vm_paddr_t gpabase, gpalimit, hpabase;

	for (i = 0; i < vm->num_mem_segs; i++) {
		hpabase = vm->mem_segs[i].hpa;
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa + len <= gpalimit)
			return ((gpa - gpabase) + hpabase);
	}
	return ((vm_paddr_t)-1);
}
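
/*
 * Usage sketch (illustrative): callers must check for the all-ones
 * sentinel, since the whole [gpa, gpa + len) range has to fall inside
 * a single registered segment:
 *
 *	hpa = vm_gpa2hpa(vm, gpa, PAGE_SIZE);
 *	if (hpa == (vm_paddr_t)-1)
 *		return (EFAULT);
 */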

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
		  struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			*seg = vm->mem_segs[i];
			return (0);
		}
	}
	return (-1);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_get_pinning(struct vm *vm, int vcpuid, int *cpuid)
{

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*cpuid = VCPU_PINCPU(vm, vcpuid);

	return (0);
}

int
vm_set_pinning(struct vm *vm, int vcpuid, int host_cpuid)
{
	struct thread *td;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	td = curthread;		/* XXXSMP only safe when muxing vcpus */

	/* unpin */
	if (host_cpuid < 0) {
		VCPU_UNPIN(vm, vcpuid);
		thread_lock(td);
		sched_unbind(td);
		thread_unlock(td);
		return (0);
	}

	if (CPU_ABSENT(host_cpuid))
		return (EINVAL);

	/*
	 * XXX we should check that 'host_cpuid' has not already been pinned
	 * by another vm.
	 */
	thread_lock(td);
	sched_bind(td, host_cpuid);
	thread_unlock(td);
	VCPU_PIN(vm, vcpuid, host_cpuid);

	return (0);
}
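
/*
 * Usage sketch (illustrative): pin vcpu 0 to host cpu 2, and unpin it
 * later by passing a negative host cpuid:
 *
 *	error = vm_set_pinning(vm, 0, 2);
 *	...
 *	error = vm_set_pinning(vm, 0, -1);
 */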

static void
restore_guest_fpustate(struct vcpu *vcpu)
{
	register_t s;

	s = intr_disable();
	fpu_stop_emulating();
	fxrstor(&vcpu->savefpu);
	fpu_start_emulating();
	intr_restore(s);
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{
	register_t s;

	s = intr_disable();
	fpu_stop_emulating();
	fxsave(&vcpu->savefpu);
	fpu_start_emulating();
	intr_restore(s);
}

int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	critical_enter();

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	pcb->pcb_full_iret = 1;

	vcpu->hostcpu = curcpu;

	fpuexit(curthread);
	restore_guest_msrs(vm, vcpuid);
	restore_guest_fpustate(vcpu);
	error = VMRUN(vm->cookie, vcpuid, vmrun->rip, &vmrun->vm_exit);
	save_guest_fpustate(vcpu);
	restore_host_msrs(vm, vcpuid);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	critical_exit();

	return (error);
}
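
/*
 * Usage sketch (illustrative; 'entry_rip' and 'handle_exit' are
 * hypothetical): the device layer drives a vcpu by calling vm_run()
 * in a loop until an exit must be emulated elsewhere:
 *
 *	struct vm_run vmrun;
 *
 *	vmrun.cpuid = 0;
 *	vmrun.rip = entry_rip;
 *	do {
 *		error = vm_run(vm, &vmrun);
 *	} while (error == 0 && handle_exit(&vmrun.vm_exit));
 */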

int
vm_inject_event(struct vm *vm, int vcpuid, int type,
		int vector, uint32_t code, int code_valid)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (type <= VM_EVENT_NONE || type >= VM_EVENT_MAX)
		return (EINVAL);

	if (vector < 0 || vector > 255)
		return (EINVAL);

	return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
}
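
/*
 * Usage sketch (illustrative; 'exc_type' stands for whichever
 * VM_EVENT_* constant from <machine/vmm.h> the caller wants):
 * injecting vector 13 (#GP) with a valid error code of 0:
 *
 *	error = vm_inject_event(vm, 0, exc_type, 13, 0, 1);
 */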

int
vm_inject_nmi(struct vm *vm, int vcpu)
{
	int error;

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	error = VMNMI(vm->cookie, vcpu);
	vm_interrupt_hostcpu(vm, vcpu);
	return (error);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

uint64_t *
vm_guest_msrs(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].guest_msrs);
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, b, s, f, n;
	char *val, *cp, *cp2;

	/*
	 * setenv pptdevs "1/2/3 4/5/6 7/8/9 10/11/12"
	 */
	found = 0;
	cp = val = getenv("pptdevs");
	while (cp != NULL && *cp != '\0') {
		if ((cp2 = strchr(cp, ' ')) != NULL)
			*cp2 = '\0';

		n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
		if (n == 3 && bus == b && slot == s && func == f) {
			found = 1;
			break;
		}

		if (cp2 != NULL)
			*cp2++ = ' ';

		cp = cp2;
	}
	freeenv(val);
	return (found);
}
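
/*
 * Usage sketch (illustrative): devices reserved for passthru are named
 * in the 'pptdevs' kernel environment variable as bus/slot/function
 * triples, e.g. in /boot/loader.conf:
 *
 *	pptdevs="3/0/0 4/0/0"
 *
 * after which vmm_is_pptdev(3, 0, 0) returns TRUE.
 */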

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

void
vm_set_run_state(struct vm *vm, int vcpuid, int state)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_set_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (state == VCPU_RUNNING) {
		if (vcpu->flags & VCPU_F_RUNNING) {
			panic("vm_set_run_state: %s[%d] is already running",
			      vm_name(vm), vcpuid);
		}
		vcpu->flags |= VCPU_F_RUNNING;
	} else {
		if ((vcpu->flags & VCPU_F_RUNNING) == 0) {
			panic("vm_set_run_state: %s[%d] is already stopped",
			      vm_name(vm), vcpuid);
		}
		vcpu->flags &= ~VCPU_F_RUNNING;
	}
}

int
vm_get_run_state(struct vm *vm, int vcpuid, int *cpuptr)
{
	int retval, hostcpu;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_get_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];
	if (vcpu->flags & VCPU_F_RUNNING) {
		retval = VCPU_RUNNING;
		hostcpu = vcpu->hostcpu;
	} else {
		retval = VCPU_STOPPED;
		hostcpu = -1;
	}

	if (cpuptr)
		*cpuptr = hostcpu;

	return (retval);
}

void
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
		vm->active_cpus |= vcpu_mask(vcpuid);
}

cpumask_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}