vmm.c revision 240922
/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>

#include <machine/vm.h>
#include <machine/pcb.h>
#include <x86/apicreg.h>

#include <machine/vmm.h>
#include "vmm_mem.h"
#include "vmm_util.h"
#include <machine/vmm_dev.h>
#include "vlapic.h"
#include "vmm_msr.h"
#include "vmm_ipi.h"
#include "vmm_stat.h"

#include "io/ppt.h"
#include "io/iommu.h"

struct vlapic;

struct vcpu {
	int		flags;
	int		pincpu;		/* host cpuid this vcpu is bound to */
	int		hostcpu;	/* host cpuid this vcpu last ran on */
	uint64_t	guest_msrs[VMM_MSR_NUM];
	struct vlapic	*vlapic;
	int		vcpuid;
	struct savefpu	*guestfpu;	/* guest fpu state */
	void		*stats;
	struct vm_exit	exitinfo;
	enum x2apic_state x2apic_state;
};
#define	VCPU_F_PINNED	0x0001
#define	VCPU_F_RUNNING	0x0002

#define	VCPU_PINCPU(vm, vcpuid)	\
    ((vm->vcpu[vcpuid].flags & VCPU_F_PINNED) ? vm->vcpu[vcpuid].pincpu : -1)

#define	VCPU_UNPIN(vm, vcpuid)	(vm->vcpu[vcpuid].flags &= ~VCPU_F_PINNED)

#define	VCPU_PIN(vm, vcpuid, host_cpuid)				\
do {									\
	vm->vcpu[vcpuid].flags |= VCPU_F_PINNED;			\
	vm->vcpu[vcpuid].pincpu = host_cpuid;				\
} while (0)

#define	VM_MAX_MEMORY_SEGMENTS	2

struct vm {
	void		*cookie;	/* processor-specific data */
	void		*iommu;		/* iommu-specific data */
	struct vcpu	vcpu[VM_MAXCPU];
	int		num_mem_segs;
	struct vm_memory_segment mem_segs[VM_MAX_MEMORY_SEGMENTS];
	char		name[VM_MAX_NAMELEN];

	/*
	 * Set of active vcpus.
	 * An active vcpu is one that has been started implicitly (BSP) or
	 * explicitly (AP) by sending it a startup ipi.
	 */
	cpuset_t	active_cpus;
};

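/*
 * Dispatch table for the hardware-specific backend.  It is filled in by
 * vmm_init() with either the Intel or AMD implementation; the wrapper
 * macros below fall back to ENXIO when no backend is registered.
 */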
static struct vmm_ops *ops;
#define	VMM_INIT()	(ops != NULL ? (*ops->init)() : 0)
#define	VMM_CLEANUP()	(ops != NULL ? (*ops->cleanup)() : 0)

#define	VMINIT(vm)	(ops != NULL ? (*ops->vminit)(vm) : NULL)
#define	VMRUN(vmi, vcpu, rip) \
	(ops != NULL ? (*ops->vmrun)(vmi, vcpu, rip) : ENXIO)
#define	VMCLEANUP(vmi)	(ops != NULL ? (*ops->vmcleanup)(vmi) : NULL)
#define	VMMMAP(vmi, gpa, hpa, len, attr, prot, spm)	\
    (ops != NULL ? (*ops->vmmmap)(vmi, gpa, hpa, len, attr, prot, spm) : ENXIO)
#define	VMGETREG(vmi, vcpu, num, retval)		\
	(ops != NULL ? (*ops->vmgetreg)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETREG(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetreg)(vmi, vcpu, num, val) : ENXIO)
#define	VMGETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmgetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMSETDESC(vmi, vcpu, num, desc)		\
	(ops != NULL ? (*ops->vmsetdesc)(vmi, vcpu, num, desc) : ENXIO)
#define	VMINJECT(vmi, vcpu, type, vec, ec, ecv)	\
	(ops != NULL ? (*ops->vminject)(vmi, vcpu, type, vec, ec, ecv) : ENXIO)
#define	VMNMI(vmi, vcpu)	\
	(ops != NULL ? (*ops->vmnmi)(vmi, vcpu) : ENXIO)
#define	VMGETCAP(vmi, vcpu, num, retval)	\
	(ops != NULL ? (*ops->vmgetcap)(vmi, vcpu, num, retval) : ENXIO)
#define	VMSETCAP(vmi, vcpu, num, val)		\
	(ops != NULL ? (*ops->vmsetcap)(vmi, vcpu, num, val) : ENXIO)

#define	fpu_start_emulating()	start_emulating()
#define	fpu_stop_emulating()	stop_emulating()

static MALLOC_DEFINE(M_VM, "vm", "vm");
CTASSERT(VMM_MSR_NUM <= 64);	/* msr_mask can keep track of up to 64 msrs */

/* statistics */
static VMM_STAT_DEFINE(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

static void
vcpu_cleanup(struct vcpu *vcpu)
{
	vlapic_cleanup(vcpu->vlapic);
	vmm_stat_free(vcpu->stats);
	fpu_save_area_free(vcpu->guestfpu);
}

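/*
 * Initialize the state of a vcpu: the local apic, the fpu save area and
 * the statistics buffer are allocated here, and x2apic mode is enabled
 * by default.
 */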
static void
vcpu_init(struct vm *vm, uint32_t vcpu_id)
{
	struct vcpu *vcpu;

	vcpu = &vm->vcpu[vcpu_id];

	vcpu->hostcpu = -1;
	vcpu->vcpuid = vcpu_id;
	vcpu->vlapic = vlapic_init(vm, vcpu_id);
	vcpu->guestfpu = fpu_save_area_alloc();
	fpu_save_area_reset(vcpu->guestfpu);
	vcpu->stats = vmm_stat_alloc();
	vcpu->x2apic_state = X2APIC_ENABLED;
}

struct vm_exit *
vm_exitinfo(struct vm *vm, int cpuid)
{
	struct vcpu *vcpu;

	if (cpuid < 0 || cpuid >= VM_MAXCPU)
		panic("vm_exitinfo: invalid cpuid %d", cpuid);

	vcpu = &vm->vcpu[cpuid];

	return (&vcpu->exitinfo);
}

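/*
 * One-time module initialization: set up the IPI vector and the memory
 * allocator, then select the backend that matches the host CPU.
 */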
static int
vmm_init(void)
{
	int error;

	vmm_ipi_init();

	error = vmm_mem_init();
	if (error)
		return (error);

	if (vmm_is_intel())
		ops = &vmm_ops_intel;
	else if (vmm_is_amd())
		ops = &vmm_ops_amd;
	else
		return (ENXIO);

	vmm_msr_init();

	return (VMM_INIT());
}

static int
vmm_handler(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		vmmdev_init();
		iommu_init();
		error = vmm_init();
		break;
	case MOD_UNLOAD:
		vmmdev_cleanup();
		iommu_cleanup();
		vmm_ipi_cleanup();
		error = VMM_CLEANUP();
		break;
	default:
		error = 0;
		break;
	}
	return (error);
}

static moduledata_t vmm_kmod = {
	"vmm",
	vmm_handler,
	NULL
};

/*
 * Execute the module load handler after the pci passthru driver has had
 * a chance to claim devices. We need this information at the time we do
 * iommu initialization.
 */
DECLARE_MODULE(vmm, vmm_kmod, SI_SUB_CONFIGURE + 1, SI_ORDER_ANY);
MODULE_VERSION(vmm, 1);

SYSCTL_NODE(_hw, OID_AUTO, vmm, CTLFLAG_RW, NULL, NULL);

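/*
 * Create a virtual machine.  Every vcpu slot is initialized up front but
 * only the BSP (vcpu 0) is marked active; APs become active when they
 * are sent a startup IPI.
 */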
struct vm *
vm_create(const char *name)
{
	int i;
	struct vm *vm;
	vm_paddr_t maxaddr;

	const int BSP = 0;

	if (name == NULL || strlen(name) >= VM_MAX_NAMELEN)
		return (NULL);

	vm = malloc(sizeof(struct vm), M_VM, M_WAITOK | M_ZERO);
	strcpy(vm->name, name);
	vm->cookie = VMINIT(vm);

	for (i = 0; i < VM_MAXCPU; i++) {
		vcpu_init(vm, i);
		guest_msrs_init(vm, i);
	}

	maxaddr = vmm_mem_maxaddr();
	vm->iommu = iommu_create_domain(maxaddr);
	vm_activate_cpu(vm, BSP);

	return (vm);
}

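/*
 * Tear down a virtual machine in the reverse order of vm_create():
 * release passthru devices, guest memory, per-vcpu state and the iommu
 * domain before freeing the vm itself.
 */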
void
vm_destroy(struct vm *vm)
{
	int i;

	ppt_unassign_all(vm);

	for (i = 0; i < vm->num_mem_segs; i++)
		vmm_mem_free(vm->mem_segs[i].hpa, vm->mem_segs[i].len);

	for (i = 0; i < VM_MAXCPU; i++)
		vcpu_cleanup(&vm->vcpu[i]);

	iommu_destroy_domain(vm->iommu);

	VMCLEANUP(vm->cookie);

	free(vm, M_VM);
}

const char *
vm_name(struct vm *vm)
{
	return (vm->name);
}

int
vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	return (VMMMAP(vm->cookie, gpa, hpa, len, VM_MEMATTR_UNCACHEABLE,
		       VM_PROT_RW, spok));
}

int
vm_unmap_mmio(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	return (VMMMAP(vm->cookie, gpa, 0, len, VM_MEMATTR_UNCACHEABLE,
		       VM_PROT_NONE, spok));
}

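/*
 * Allocate host memory to back the guest range [gpa, gpa + len).  The
 * backing memory is entered into both the processor-specific mapping
 * and the iommu domain, so that passthru devices can DMA into it.
 */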
int
vm_malloc(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t *ret_hpa)
{
	int error;
	vm_paddr_t hpa;

	const boolean_t spok = TRUE;	/* superpage mappings are ok */

	/*
	 * Find the hpa if this range was already vm_malloc'ed.
	 */
	hpa = vm_gpa2hpa(vm, gpa, len);
	if (hpa != ((vm_paddr_t)-1))
		goto out;

	if (vm->num_mem_segs >= VM_MAX_MEMORY_SEGMENTS)
		return (E2BIG);

	hpa = vmm_mem_alloc(len);
	if (hpa == 0)
		return (ENOMEM);

	error = VMMMAP(vm->cookie, gpa, hpa, len, VM_MEMATTR_WRITE_BACK,
		       VM_PROT_ALL, spok);
	if (error) {
		vmm_mem_free(hpa, len);
		return (error);
	}

	iommu_create_mapping(vm->iommu, gpa, hpa, len);

	vm->mem_segs[vm->num_mem_segs].gpa = gpa;
	vm->mem_segs[vm->num_mem_segs].hpa = hpa;
	vm->mem_segs[vm->num_mem_segs].len = len;
	vm->num_mem_segs++;
out:
	*ret_hpa = hpa;
	return (0);
}

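/*
 * Translate a guest physical address to a host physical address by a
 * linear search of the memory segments.  Returns (vm_paddr_t)-1 if the
 * range [gpa, gpa + len) is not contained entirely within one segment.
 */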
vm_paddr_t
vm_gpa2hpa(struct vm *vm, vm_paddr_t gpa, size_t len)
{
	int i;
	vm_paddr_t gpabase, gpalimit, hpabase;

	for (i = 0; i < vm->num_mem_segs; i++) {
		hpabase = vm->mem_segs[i].hpa;
		gpabase = vm->mem_segs[i].gpa;
		gpalimit = gpabase + vm->mem_segs[i].len;
		if (gpa >= gpabase && gpa + len <= gpalimit)
			return ((gpa - gpabase) + hpabase);
	}
	return ((vm_paddr_t)-1);
}

int
vm_gpabase2memseg(struct vm *vm, vm_paddr_t gpabase,
		  struct vm_memory_segment *seg)
{
	int i;

	for (i = 0; i < vm->num_mem_segs; i++) {
		if (gpabase == vm->mem_segs[i].gpa) {
			*seg = vm->mem_segs[i];
			return (0);
		}
	}
	return (-1);
}

int
vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMGETREG(vm->cookie, vcpu, reg, retval));
}

int
vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (reg >= VM_REG_LAST)
		return (EINVAL);

	return (VMSETREG(vm->cookie, vcpu, reg, val));
}

static boolean_t
is_descriptor_table(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_IDTR:
	case VM_REG_GUEST_GDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

static boolean_t
is_segment_register(int reg)
{

	switch (reg) {
	case VM_REG_GUEST_ES:
	case VM_REG_GUEST_CS:
	case VM_REG_GUEST_SS:
	case VM_REG_GUEST_DS:
	case VM_REG_GUEST_FS:
	case VM_REG_GUEST_GS:
	case VM_REG_GUEST_TR:
	case VM_REG_GUEST_LDTR:
		return (TRUE);
	default:
		return (FALSE);
	}
}

int
vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMGETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
		struct seg_desc *desc)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (!is_segment_register(reg) && !is_descriptor_table(reg))
		return (EINVAL);

	return (VMSETDESC(vm->cookie, vcpu, reg, desc));
}

int
vm_get_pinning(struct vm *vm, int vcpuid, int *cpuid)
{

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*cpuid = VCPU_PINCPU(vm, vcpuid);

	return (0);
}

int
vm_set_pinning(struct vm *vm, int vcpuid, int host_cpuid)
{
	struct thread *td;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	td = curthread;		/* XXXSMP only safe when muxing vcpus */

	/* unpin */
	if (host_cpuid < 0) {
		VCPU_UNPIN(vm, vcpuid);
		thread_lock(td);
		sched_unbind(td);
		thread_unlock(td);
		return (0);
	}

	if (CPU_ABSENT(host_cpuid))
		return (EINVAL);

	/*
	 * XXX we should check that 'host_cpuid' has not already been pinned
	 * by another vm.
	 */
	thread_lock(td);
	sched_bind(td, host_cpuid);
	thread_unlock(td);
	VCPU_PIN(vm, vcpuid, host_cpuid);

	return (0);
}

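/*
 * Flush the host FPU state to the pcb and then load the guest state.
 * The "stop emulating" step clears CR0.TS so that the guest state can
 * be restored without trapping.
 */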
static void
restore_guest_fpustate(struct vcpu *vcpu)
{

	/* flush host state to the pcb */
	fpuexit(curthread);
	fpu_stop_emulating();
	fpurestore(vcpu->guestfpu);
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

	fpusave(vcpu->guestfpu);
	fpu_start_emulating();
}

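/*
 * Run a vcpu until it exits back to userspace.  The entire entry/exit
 * is done inside a critical section: guest MSR and FPU state are loaded
 * around the call into the backend, and the elapsed cycles are charged
 * to the vcpu's total runtime statistic.
 */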
int
vm_run(struct vm *vm, struct vm_run *vmrun)
{
	int error, vcpuid;
	struct vcpu *vcpu;
	struct pcb *pcb;
	uint64_t tscval;

	vcpuid = vmrun->cpuid;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	vcpu = &vm->vcpu[vcpuid];

	critical_enter();

	tscval = rdtsc();

	pcb = PCPU_GET(curpcb);
	set_pcb_flags(pcb, PCB_FULL_IRET);

	vcpu->hostcpu = curcpu;

	restore_guest_msrs(vm, vcpuid);
	restore_guest_fpustate(vcpu);
	error = VMRUN(vm->cookie, vcpuid, vmrun->rip);
	save_guest_fpustate(vcpu);
	restore_host_msrs(vm, vcpuid);

	vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);

	/* copy the exit information */
	bcopy(&vcpu->exitinfo, &vmrun->vm_exit, sizeof(struct vm_exit));

	critical_exit();

	return (error);
}

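/*
 * Validate an event injection request and hand it to the backend.  The
 * vector must fit in the 0-255 range of the IDT.
 */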
int
vm_inject_event(struct vm *vm, int vcpuid, int type,
		int vector, uint32_t code, int code_valid)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if ((type > VM_EVENT_NONE && type < VM_EVENT_MAX) == 0)
		return (EINVAL);

	if (vector < 0 || vector > 255)
		return (EINVAL);

	return (VMINJECT(vm->cookie, vcpuid, type, vector, code, code_valid));
}

int
vm_inject_nmi(struct vm *vm, int vcpu)
{
	int error;

	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	error = VMNMI(vm->cookie, vcpu);
	vm_interrupt_hostcpu(vm, vcpu);
	return (error);
}

int
vm_get_capability(struct vm *vm, int vcpu, int type, int *retval)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMGETCAP(vm->cookie, vcpu, type, retval));
}

int
vm_set_capability(struct vm *vm, int vcpu, int type, int val)
{
	if (vcpu < 0 || vcpu >= VM_MAXCPU)
		return (EINVAL);

	if (type < 0 || type >= VM_CAP_MAX)
		return (EINVAL);

	return (VMSETCAP(vm->cookie, vcpu, type, val));
}

uint64_t *
vm_guest_msrs(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].guest_msrs);
}

struct vlapic *
vm_lapic(struct vm *vm, int cpu)
{
	return (vm->vcpu[cpu].vlapic);
}

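/*
 * Return TRUE if the bus/slot/func triple identifies a PCI device that
 * was reserved for passthru via the "pptdevs" kernel environment
 * variable, typically set from the loader as shown in the comment below.
 */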
boolean_t
vmm_is_pptdev(int bus, int slot, int func)
{
	int found, b, s, f, n;
	char *val, *cp, *cp2;

	/*
	 * setenv pptdevs "1/2/3 4/5/6 7/8/9 10/11/12"
	 */
	found = 0;
	cp = val = getenv("pptdevs");
	while (cp != NULL && *cp != '\0') {
		if ((cp2 = strchr(cp, ' ')) != NULL)
			*cp2 = '\0';

		n = sscanf(cp, "%d/%d/%d", &b, &s, &f);
		if (n == 3 && bus == b && slot == s && func == f) {
			found = 1;
			break;
		}

		if (cp2 != NULL)
			*cp2++ = ' ';

		cp = cp2;
	}
	freeenv(val);
	return (found);
}

void *
vm_iommu_domain(struct vm *vm)
{

	return (vm->iommu);
}

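/*
 * Bookkeeping for the vcpu run state.  The panics below are sanity
 * checks: a vcpu must not be marked running twice, nor stopped twice.
 */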
void
vm_set_run_state(struct vm *vm, int vcpuid, int state)
{
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_set_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];

	if (state == VCPU_RUNNING) {
		if (vcpu->flags & VCPU_F_RUNNING) {
			panic("vm_set_run_state: %s[%d] is already running",
			      vm_name(vm), vcpuid);
		}
		vcpu->flags |= VCPU_F_RUNNING;
	} else {
		if ((vcpu->flags & VCPU_F_RUNNING) == 0) {
			panic("vm_set_run_state: %s[%d] is already stopped",
			      vm_name(vm), vcpuid);
		}
		vcpu->flags &= ~VCPU_F_RUNNING;
	}
}

int
vm_get_run_state(struct vm *vm, int vcpuid, int *cpuptr)
{
	int retval, hostcpu;
	struct vcpu *vcpu;

	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		panic("vm_get_run_state: invalid vcpuid %d", vcpuid);

	vcpu = &vm->vcpu[vcpuid];
	if (vcpu->flags & VCPU_F_RUNNING) {
		retval = VCPU_RUNNING;
		hostcpu = vcpu->hostcpu;
	} else {
		retval = VCPU_STOPPED;
		hostcpu = -1;
	}

	if (cpuptr)
		*cpuptr = hostcpu;

	return (retval);
}

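/*
 * Mark a vcpu as active, i.e. eligible to run.  The BSP is activated at
 * vm_create() time; APs are activated when they receive a startup IPI.
 */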
void
vm_activate_cpu(struct vm *vm, int vcpuid)
{

	if (vcpuid >= 0 && vcpuid < VM_MAXCPU)
		CPU_SET(vcpuid, &vm->active_cpus);
}

cpuset_t
vm_active_cpus(struct vm *vm)
{

	return (vm->active_cpus);
}

void *
vcpu_stats(struct vm *vm, int vcpuid)
{

	return (vm->vcpu[vcpuid].stats);
}

int
vm_get_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state *state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	*state = vm->vcpu[vcpuid].x2apic_state;

	return (0);
}

int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
	if (vcpuid < 0 || vcpuid >= VM_MAXCPU)
		return (EINVAL);

	if (state < 0 || state >= X2APIC_STATE_LAST)
		return (EINVAL);

	vm->vcpu[vcpuid].x2apic_state = state;

	return (0);
}