vmm.c revision 240894
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>

#include <machine/vm.h>
#include <machine/pcb.h>
#include <x86/apicreg.h>

#include <machine/vmm.h>
#include "vmm_mem.h"
#include "vmm_util.h"
#include <machine/vmm_dev.h>
#include "vlapic.h"
#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

struct vcpu {
	int		flags;
	int		pincpu;		/* host cpuid this vcpu is bound to */
	int		hostcpu;	/* host cpuid this vcpu last ran on */
	uint64_t	guest_msrs[VMM_MSR_NUM];
	struct vlapic	*vlapic;
	int		 vcpuid;
	struct savefpu	*guestfpu;	/* guest fpu state */
	void		*stats;
	struct vm_exit	exitinfo;
};
#define	VCPU_F_PINNED	0x0001
#define	VCPU_F_RUNNING	0x0002

#define	VCPU_PINCPU(vm, vcpuid)	\
    ((vm->vcpu[vcpuid].flags & VCPU_F_PINNED) ? vm->vcpu[vcpuid].pincpu : -1)

#define	VCPU_UNPIN(vm, vcpuid)	(vm->vcpu[vcpuid].flags &= ~VCPU_F_PINNED)

#define	VCPU_PIN(vm, vcpuid, host_cpuid)				\
do {									\
	vm->vcpu[vcpuid].flags |= VCPU_F_PINNED;			\
	vm->vcpu[vcpuid].pincpu = host_cpuid;				\
} while (0)

#define	VM_MAX_MEMORY_SEGMENTS	2

struct vm {
	void		*cookie;	/* processor-specific data */
	void		*iommu;		/* iommu-specific data */
	struct vcpu	vcpu[VM_MAXCPU];
	int		num_mem_segs;
	struct vm_memory_segment mem_segs[VM_MAX_MEMORY_SEGMENTS];
	char		name[VM_MAX_NAMELEN];

	/*
	 * Set of active vcpus.
	 * An active vcpu is one that has been started implicitly (BSP) or
	 * explicitly (AP) by sending it a startup ipi.
	 */
	cpuset_t	active_cpus;
};

static struct vmm_ops *ops;
#define	VMM_INIT()	(ops != NULL ? (*ops->init)() : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)

#define	VMINIT(vm)	(ops != NULL ? (*ops->vminit)(vm) : NULL)
#define	VMRUN(vmi, vcpu, rip) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMMMAP(vmi, gpa, hpa, len, attr, prot, spm)	\
    (ops != NULL ? (*ops->vmmmap)(vmi, gpa, hpa, len, attr, prot, spm) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)		\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMINJECT(vmi, vcpu, type, vec, ec, ecv)	\
	(ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
#define	VMNMI(vmi, vcpu)	\
	(ops != NULL ? (*ops->vmnmi)(vmi, vcpu) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)

#define	fpu_start_emulating()	start_emulating()
#define	fpu_stop_emulating()	stop_emulating()

static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);	/* msr_mask can keep track of up to 64 msrs */

/* statistics */
static VMM_STAT_DEFINE(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

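/*
 * Free the per-vcpu resources allocated by vcpu_init(): the virtual
 * local APIC, the statistics buffer and the guest FPU save area.
 */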
static void
vcpu_cleanup(struct vcpu *vcpu)
{
	vlapic_cleanup(vcpu->vlapic);
	vmm_stat_free(vcpu->stats);
	fpu_save_area_free(vcpu->guestfpu);
}

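/*
 * Initialize a single vcpu slot: create its virtual local APIC, allocate
 * and reset its guest FPU save area and allocate its statistics buffer.
 * 'hostcpu' is set to -1 because the vcpu has not run anywhere yet.
 */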
static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpu_id];

	vcpu->hostcpu = -1;
	vcpu->vcpuid = vcpu_id;
	vcpu->vlapic = vlapic_init(vm, vcpu_id);
	vcpu->guestfpu = fpu_save_area_alloc();
	fpu_save_area_reset(vcpu->guestfpu);
	vcpu->stats = vmm_stat_alloc();
}

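/*
 * Return a pointer to the exit information for the given vcpu. Panics if
 * 'cpuid' is out of range.
 */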
struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

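/*
 * One-time initialization of the vmm module: set up the vmm IPI vector,
 * initialize the memory allocator, select the Intel or AMD backend based
 * on the host CPU, initialize MSR handling and finally call the backend's
 * own init hook.
 */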
static int
vmm_init(void)
{
	int error;

	vmm_ipi_init();

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_msr_init();

	return (VMM_INIT());
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		iommu_init();
		error = vmm_init();
		break;
	case MOD_UNLOAD:
		vmmdev_cleanup();
		iommu_cleanup();
		vmm_ipi_cleanup();
		error = VMM_CLEANUP();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * Execute the module load handler after the pci passthru driver has had
 * a chance to claim devices. We need this information at the time we do
 * iommu initialization.
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_CONFIGURE + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

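/*
 * Create a new virtual machine: copy the name, let the backend allocate
 * its per-vm state, initialize every vcpu, create an iommu domain for
 * passthru devices and mark the BSP (vcpu 0) as active.
 */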
struct vm *
vm_create(const char *name)
{
	int i;
	struct vm *vm;
	vm_paddr_t maxaddr;

	const int BSP = 0;

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (NULL);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->cookie = VMINIT(vm);

	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu_init(vm, i);
		guest_msrs_init(vm, i);
	}

	maxaddr = vmm_mem_maxaddr();
	vm->iommu = iommu_create_domain(maxaddr);
	vm_activate_cpu(vm, BSP);

	return (vm);
}

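/*
 * Tear down a virtual machine: release any passthru devices, free the
 * guest memory segments, clean up the vcpus, destroy the iommu domain
 * and let the backend free its per-vm state.
 */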
void
vm_destroy(struct vm *vm)
{
	int i;

	ppt_unassign_all(vm);

	for (i = 0; i < vm->num_mem_segs; i++)
		vmm_mem_free(vm->mem_segs[i].hpa, vm->mem_segs[i].len);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(&vm->vcpu[i]);

	iommu_destroy_domain(vm->iommu);

	VMCLEANUP(vm->cookie);

	free(vm, M_VM);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	return (VMMMAP(vm->cookie, gpa, hpa, len, VM_MEMATTR_UNCACHEABLE,
		       VM_PROT_RW, spok));
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	return (VMMMAP(vm->cookie, gpa, 0, len, VM_MEMATTR_UNCACHEABLE,
		       VM_PROT_NONE, spok));
}

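/*
 * Allocate 'len' bytes of host memory and map it into the guest physical
 * address space at 'gpa', recording the new memory segment. If the range
 * is already backed by an earlier vm_malloc() the existing host physical
 * address is simply returned.
 */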
int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa)
{
	int error;
	vm_paddr_t hpa;

	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	/*
	 * Find the hpa if it was already vm_malloc'd.
	 */
	hpa = vm_gpa2hpa(vm, gpa, len);
	if (hpa != ((vm_paddr_t)-1))
		goto out;

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	hpa = vmm_mem_alloc(len);
	if (hpa == 0)
		return (ENOMEM);

	error = VMMMAP(vm->cookie, gpa, hpa, len, VM_MEMATTR_WRITE_BACK,
		       VM_PROT_ALL, spok);
	if (error) {
		vmm_mem_free(hpa, len);
		return (error);
	}

	iommu_create_mapping(vm->iommu, gpa, hpa, len);

	vm->mem_segs[vm->num_mem_segs].gpa = gpa;
	vm->mem_segs[vm->num_mem_segs].hpa = hpa;
	vm->mem_segs[vm->num_mem_segs].len = len;
	vm->num_mem_segs++;
out:
	*ret_hpa = hpa;
	return (0);
}

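/*
 * Translate a guest physical address to a host physical address by
 * searching the memory segments. Returns (vm_paddr_t)-1 if the range
 * [gpa, gpa + len) does not fall entirely within a single segment.
 */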
vm_paddr_t
vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int i;
	vm_paddr_t gpabase, gpalimit, hpabase;

	for (i = 0; i < vm->num_mem_segs; i++) {
		hpabase = vm->mem_segs[i].hpa;
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa + len <= gpalimit)
			return ((gpa - gpabase) + hpabase);
	}
	return ((vm_paddr_t)-1);
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
		  struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			*seg = vm->mem_segs[i];
			return (0);
		}
	}
	return (-1);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_get_pinning(struct vm *vm, int vcpuid, int *cpuid)
{

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*cpuid = VCPU_PINCPU(vm, vcpuid);

	return (0);
}

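/*
 * Bind the current thread to 'host_cpuid' and record the pinning, or
 * undo both if 'host_cpuid' is negative. Only safe while a single thread
 * multiplexes the vcpus (see the XXXSMP note below).
 */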
int
vm_set_pinning(struct vm *vm, int vcpuid, int host_cpuid)
{
	struct thread *td;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	td = curthread;		/* XXXSMP only safe when muxing vcpus */

	/* unpin */
	if (host_cpuid < 0) {
		VCPU_UNPIN(vm, vcpuid);
		thread_lock(td);
		sched_unbind(td);
		thread_unlock(td);
		return (0);
	}

	if (CPU_ABSENT(host_cpuid))
		return (EINVAL);

	/*
	 * XXX we should check that 'host_cpuid' has not already been pinned
	 * by another vm.
	 */
	thread_lock(td);
	sched_bind(td, host_cpuid);
	thread_unlock(td);
	VCPU_PIN(vm, vcpuid, host_cpuid);

	return (0);
}

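/*
 * Switch the FPU from host to guest: flush the host FPU state to the
 * pcb, clear CR0.TS so FPU instructions no longer trap and load the
 * guest's save area. save_guest_fpustate() does the reverse.
 */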
static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

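/*
 * Run a vcpu starting at 'vmrun->rip' until it exits. The thread stays in
 * a critical section while the guest MSR and FPU state is loaded, the
 * time spent in the guest is charged to VCPU_TOTAL_RUNTIME and the exit
 * information is copied back into 'vmrun'.
 */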
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	critical_enter();

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	vcpu->hostcpu = curcpu;

	restore_guest_msrs(vm, vcpuid);
	restore_guest_fpustate(vcpu);
	error = VMRUN(vm->cookie, vcpuid, vmrun->rip);
	save_guest_fpustate(vcpu);
	restore_host_msrs(vm, vcpuid);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	/* copy the exit information */
	bcopy(&vcpu->exitinfo, &vmrun->vm_exit, sizeof(struct vm_exit));

	critical_exit();

	return (error);
}

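/*
 * Inject an exception or interrupt into a vcpu. The event type must be a
 * valid VM_EVENT_* value and the vector must fit in 8 bits.
 */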
int
vm_inject_event(struct vm *vm, int vcpuid, int type,
		int vector, uint32_t code, int code_valid)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if ((type > VM_EVENT_NONE && type < VM_EVENT_MAX) == 0)
		return (EINVAL);

	if (vector < 0 || vector > 255)
		return (EINVAL);

	return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
}

int
vm_inject_nmi(struct vm *vm, int vcpu)
{
	int error;

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	error = VMNMI(vm->cookie, vcpu);
	vm_interrupt_hostcpu(vm, vcpu);
	return (error);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

uint64_t *
vm_guest_msrs(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].guest_msrs);
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

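/*
 * Return TRUE if the bus/slot/function tuple is listed in the 'pptdevs'
 * kernel environment variable, i.e. the device is reserved for PCI
 * passthru.
 */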
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, b, s, f, n;
	char *val, *cp, *cp2;

	/*
	 * setenv pptdevs "1/2/3 4/5/6 7/8/9 10/11/12"
	 */
	found = 0;
	cp = val = getenv("pptdevs");
	while (cp != NULL && *cp != '\0') {
		if ((cp2 = strchr(cp, ' ')) != NULL)
			*cp2 = '\0';

		n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
		if (n == 3 && bus == b && slot == s && func == f) {
			found = 1;
			break;
		}

		if (cp2 != NULL)
			*cp2++ = ' ';

		cp = cp2;
	}
	freeenv(val);
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

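/*
 * Mark a vcpu as running or stopped. Panics if the vcpu is already in
 * the requested state.
 */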
void
vm_set_run_state(struct vm *vm, int vcpuid, int state)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_set_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (state == VCPU_RUNNING) {
		if (vcpu->flags & VCPU_F_RUNNING) {
			panic("vm_set_run_state: %s[%d] is already running",
			      vm_name(vm), vcpuid);
		}
		vcpu->flags |= VCPU_F_RUNNING;
	} else {
		if ((vcpu->flags & VCPU_F_RUNNING) == 0) {
			panic("vm_set_run_state: %s[%d] is already stopped",
			      vm_name(vm), vcpuid);
		}
		vcpu->flags &= ~VCPU_F_RUNNING;
	}
}

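/*
 * Return whether a vcpu is running or stopped and, via 'cpuptr', the host
 * cpu it is currently running on (-1 if it is stopped).
 */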
int
vm_get_run_state(struct vm *vm, int vcpuid, int *cpuptr)
{
	int retval, hostcpu;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_get_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];
	if (vcpu->flags & VCPU_F_RUNNING) {
		retval = VCPU_RUNNING;
		hostcpu = vcpu->hostcpu;
	} else {
		retval = VCPU_STOPPED;
		hostcpu = -1;
	}

	if (cpuptr)
		*cpuptr = hostcpu;

	return (retval);
}

void
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
		CPU_SET(vcpuid, &vm->active_cpus);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}
748