/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/lib/libvmmapi/vmmapi.c 295881 2016-02-22 09:04:36Z skra $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/lib/libvmmapi/vmmapi.c 295881 2016-02-22 09:04:36Z skra $");

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/_iovec.h>
#include <sys/cpuset.h>

#include <x86/segments.h>
#include <machine/specialreg.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

#include <libutil.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include "vmmapi.h"

#define	MB	(1024 * 1024UL)
#define	GB	(1024 * 1024 * 1024UL)

/*
 * Size of the guard region before and after the virtual address space
 * mapping the guest physical memory. This must be a multiple of the
 * superpage size for performance reasons.
 */
#define	VM_MMAP_GUARD_SIZE	(4 * MB)

#define	PROT_RW		(PROT_READ | PROT_WRITE)
#define	PROT_ALL	(PROT_READ | PROT_WRITE | PROT_EXEC)

struct vmctx {
	int	fd;
	uint32_t lowmem_limit;
	int	memflags;
	size_t	lowmem;
	size_t	highmem;
	char	*baseaddr;
	char	*name;
};

#define	CREATE(x)  sysctlbyname("hw.vmm.create", NULL, NULL, (x), strlen((x)))
#define	DESTROY(x) sysctlbyname("hw.vmm.destroy", NULL, NULL, (x), strlen((x)))

static int
vm_device_open(const char *name)
{
	int fd, len;
	char *vmfile;

	len = strlen("/dev/vmm/") + strlen(name) + 1;
	vmfile = malloc(len);
	assert(vmfile != NULL);
	snprintf(vmfile, len, "/dev/vmm/%s", name);

	/* Open the device file */
	fd = open(vmfile, O_RDWR, 0);

	free(vmfile);
	return (fd);
}

int
vm_create(const char *name)
{

	return (CREATE((char *)name));
}

struct vmctx *
vm_open(const char *name)
{
	struct vmctx *vm;

	vm = malloc(sizeof(struct vmctx) + strlen(name) + 1);
	assert(vm != NULL);

	vm->fd = -1;
	vm->memflags = 0;
	vm->lowmem_limit = 3 * GB;
	vm->name = (char *)(vm + 1);
	strcpy(vm->name, name);

	if ((vm->fd = vm_device_open(vm->name)) < 0)
		goto err;

	return (vm);
err:
	vm_destroy(vm);
	return (NULL);
}

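/*
 * Illustrative lifecycle sketch (not part of the library): a hypothetical
 * caller creates the in-kernel VM by name, opens a context for it and
 * eventually tears it down.  The name "testvm" and the err(3) calls are
 * only for the example.
 *
 *	struct vmctx *ctx;
 *
 *	if (vm_create("testvm") != 0)
 *		err(1, "vm_create");
 *	if ((ctx = vm_open("testvm")) == NULL)
 *		err(1, "vm_open");
 *	...
 *	vm_destroy(ctx);
 */
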
void
vm_destroy(struct vmctx *vm)
{
	assert(vm != NULL);

	if (vm->fd >= 0)
		close(vm->fd);
	DESTROY(vm->name);

	free(vm);
}

int
vm_parse_memsize(const char *optarg, size_t *ret_memsize)
{
	char *endptr;
	size_t optval;
	int error;

	optval = strtoul(optarg, &endptr, 0);
	if (*optarg != '\0' && *endptr == '\0') {
		/*
		 * For the sake of backward compatibility, if the memory size
		 * specified on the command line is less than a megabyte then
		 * it is interpreted as being in units of MB.
		 */
		if (optval < MB)
			optval *= MB;
		*ret_memsize = optval;
		error = 0;
	} else
		error = expand_number(optarg, ret_memsize);

	return (error);
}

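/*
 * For example: "512" is scaled by the compatibility rule above and parses
 * as 512MB, while "512M" and "4G" take the expand_number(3) path, and
 * "4294967296" parses as an exact byte count.
 */
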
uint32_t
vm_get_lowmem_limit(struct vmctx *ctx)
{

	return (ctx->lowmem_limit);
}

void
vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
{

	ctx->lowmem_limit = limit;
}

void
vm_set_memflags(struct vmctx *ctx, int flags)
{

	ctx->memflags = flags;
}

int
vm_get_memflags(struct vmctx *ctx)
{

	return (ctx->memflags);
}

/*
 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
 */
int
vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off,
    size_t len, int prot)
{
	struct vm_memmap memmap;
	int error, flags;

	memmap.gpa = gpa;
	memmap.segid = segid;
	memmap.segoff = off;
	memmap.len = len;
	memmap.prot = prot;
	memmap.flags = 0;

	if (ctx->memflags & VM_MEM_F_WIRED)
		memmap.flags |= VM_MEMMAP_F_WIRED;

	/*
	 * If this mapping already exists then don't create it again. This
	 * is the common case for SYSMEM mappings created by bhyveload(8).
	 */
	error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags);
	if (error == 0 && gpa == memmap.gpa) {
		if (segid != memmap.segid || off != memmap.segoff ||
		    prot != memmap.prot || flags != memmap.flags) {
			errno = EEXIST;
			return (-1);
		} else {
			return (0);
		}
	}

	error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap);
	return (error);
}

int
vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid,
    vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
{
	struct vm_memmap memmap;
	int error;

	bzero(&memmap, sizeof(struct vm_memmap));
	memmap.gpa = *gpa;
	error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap);
	if (error == 0) {
		*gpa = memmap.gpa;
		*segid = memmap.segid;
		*segoff = memmap.segoff;
		*len = memmap.len;
		*prot = memmap.prot;
		*flags = memmap.flags;
	}
	return (error);
}

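/*
 * Illustrative sketch (assuming, as the existence check in vm_mmap_memseg()
 * above suggests, that VM_MMAP_GETNEXT returns the first mapping at or above
 * the given address): a caller can walk all mappings by advancing the search
 * address past each one it finds.
 *
 *	vm_paddr_t gpa = 0;
 *	vm_ooffset_t segoff;
 *	size_t maplen;
 *	int segid, prot, flags;
 *
 *	while (vm_mmap_getnext(ctx, &gpa, &segid, &segoff, &maplen,
 *	    &prot, &flags) == 0) {
 *		... process [gpa, gpa + maplen) ...
 *		gpa += maplen;
 *	}
 */
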
/*
 * Return 0 if the segments are identical and non-zero otherwise.
 *
 * This is slightly complicated by the fact that only device memory segments
 * are named.
 */
static int
cmpseg(size_t len, const char *str, size_t len2, const char *str2)
{

	if (len == len2) {
		if ((!str && !str2) || (str && str2 && !strcmp(str, str2)))
			return (0);
	}
	return (-1);
}

static int
vm_alloc_memseg(struct vmctx *ctx, int segid, size_t len, const char *name)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	/*
	 * If the memory segment has already been created then just return.
	 * This is the usual case for the SYSMEM segment created by userspace
	 * loaders like bhyveload(8).
	 */
	error = vm_get_memseg(ctx, segid, &memseg.len, memseg.name,
	    sizeof(memseg.name));
	if (error)
		return (error);

	if (memseg.len != 0) {
		if (cmpseg(len, name, memseg.len, VM_MEMSEG_NAME(&memseg))) {
			errno = EINVAL;
			return (-1);
		} else {
			return (0);
		}
	}

	bzero(&memseg, sizeof(struct vm_memseg));
	memseg.segid = segid;
	memseg.len = len;
	if (name != NULL) {
		n = strlcpy(memseg.name, name, sizeof(memseg.name));
		if (n >= sizeof(memseg.name)) {
			errno = ENAMETOOLONG;
			return (-1);
		}
	}

	error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg);
	return (error);
}

int
vm_get_memseg(struct vmctx *ctx, int segid, size_t *lenp, char *namebuf,
    size_t bufsize)
{
	struct vm_memseg memseg;
	size_t n;
	int error;

	memseg.segid = segid;
	error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg);
	if (error == 0) {
		*lenp = memseg.len;
		n = strlcpy(namebuf, memseg.name, bufsize);
		if (n >= bufsize) {
			errno = ENAMETOOLONG;
			error = -1;
		}
	}
	return (error);
}

static int
setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base)
{
	char *ptr;
	int error, flags;

	/* Map 'len' bytes starting at 'gpa' in the guest address space */
	error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL);
	if (error)
		return (error);

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap into the process address space on the host */
	ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa);
	if (ptr == MAP_FAILED)
		return (-1);

	return (0);
}

int
vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
{
	size_t objsize, len;
	vm_paddr_t gpa;
	char *baseaddr, *ptr;
	int error, flags;

	assert(vms == VM_MMAP_ALL);

	/*
	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
	 * create another 'highmem' segment above 4GB for the remainder.
	 */
	if (memsize > ctx->lowmem_limit) {
		ctx->lowmem = ctx->lowmem_limit;
		ctx->highmem = memsize - ctx->lowmem_limit;
		objsize = 4*GB + ctx->highmem;
	} else {
		ctx->lowmem = memsize;
		ctx->highmem = 0;
		objsize = ctx->lowmem;
	}

	error = vm_alloc_memseg(ctx, VM_SYSMEM, objsize, NULL);
	if (error)
		return (error);

	/*
	 * Stake out a contiguous region covering the guest physical memory
	 * and the adjoining guard regions.
	 */
	len = VM_MMAP_GUARD_SIZE + objsize + VM_MMAP_GUARD_SIZE;
	flags = MAP_PRIVATE | MAP_ANON | MAP_NOCORE | MAP_ALIGNED_SUPER;
	ptr = mmap(NULL, len, PROT_NONE, flags, -1, 0);
	if (ptr == MAP_FAILED)
		return (-1);

	baseaddr = ptr + VM_MMAP_GUARD_SIZE;
	if (ctx->highmem > 0) {
		gpa = 4*GB;
		len = ctx->highmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	if (ctx->lowmem > 0) {
		gpa = 0;
		len = ctx->lowmem;
		error = setup_memory_segment(ctx, gpa, len, baseaddr);
		if (error)
			return (error);
	}

	ctx->baseaddr = baseaddr;

	return (0);
}

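/*
 * The layout constructed above, expressed as offsets from 'baseaddr'
 * (everything not explicitly mapped stays PROT_NONE, so the guard regions
 * and the hole between 'lowmem' and 4GB catch stray accesses):
 *
 *	baseaddr - VM_MMAP_GUARD_SIZE	guard region
 *	baseaddr + 0			lowmem (at most 'lowmem_limit')
 *	baseaddr + lowmem		hole
 *	baseaddr + 4GB			highmem (only if memsize > lowmem_limit)
 *	baseaddr + 4GB + highmem	guard region
 */
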
/*
 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
 * the lowmem or highmem regions.
 *
 * In particular, return NULL if [gaddr, gaddr+len) falls in the guest MMIO
 * region. The instruction emulation code depends on this behavior.
 */
void *
vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
{

	if (ctx->lowmem > 0) {
		if (gaddr < ctx->lowmem && gaddr + len <= ctx->lowmem)
			return (ctx->baseaddr + gaddr);
	}

	if (ctx->highmem > 0) {
		if (gaddr >= 4*GB && gaddr + len <= 4*GB + ctx->highmem)
			return (ctx->baseaddr + gaddr);
	}

	return (NULL);
}

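/*
 * For instance, a device model that wants to access a guest buffer at
 * guest physical address 'gpa' could do the following; a NULL return means
 * the range is not backed by guest RAM ('gpa' and 'buflen' are
 * hypothetical):
 *
 *	char *buf;
 *
 *	buf = vm_map_gpa(ctx, gpa, buflen);
 *	if (buf == NULL)
 *		return (EFAULT);
 */
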
size_t
vm_get_lowmem_size(struct vmctx *ctx)
{

	return (ctx->lowmem);
}

size_t
vm_get_highmem_size(struct vmctx *ctx)
{

	return (ctx->highmem);
}

void *
vm_create_devmem(struct vmctx *ctx, int segid, const char *name, size_t len)
{
	char pathname[MAXPATHLEN];
	size_t len2;
	char *base, *ptr;
	int fd, error, flags;

	fd = -1;
	ptr = MAP_FAILED;
	if (name == NULL || strlen(name) == 0) {
		errno = EINVAL;
		goto done;
	}

	error = vm_alloc_memseg(ctx, segid, len, name);
	if (error)
		goto done;

	strlcpy(pathname, "/dev/vmm.io/", sizeof(pathname));
	strlcat(pathname, ctx->name, sizeof(pathname));
	strlcat(pathname, ".", sizeof(pathname));
	strlcat(pathname, name, sizeof(pathname));

	fd = open(pathname, O_RDWR);
	if (fd < 0)
		goto done;

	/*
	 * Stake out a contiguous region covering the device memory and the
	 * adjoining guard regions.
	 */
	len2 = VM_MMAP_GUARD_SIZE + len + VM_MMAP_GUARD_SIZE;
	flags = MAP_PRIVATE | MAP_ANON | MAP_NOCORE | MAP_ALIGNED_SUPER;
	base = mmap(NULL, len2, PROT_NONE, flags, -1, 0);
	if (base == MAP_FAILED)
		goto done;

	flags = MAP_SHARED | MAP_FIXED;
	if ((ctx->memflags & VM_MEM_F_INCORE) == 0)
		flags |= MAP_NOCORE;

	/* mmap the devmem region in the host address space */
	ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0);
done:
	if (fd >= 0)
		close(fd);
	return (ptr);
}

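/*
 * Illustrative use, modeled on how a boot ROM could be wired up ('rom_gpa'
 * and 'rom_size' are hypothetical): create the named device memory segment,
 * then map it into the guest with vm_mmap_memseg().
 *
 *	char *romptr;
 *
 *	romptr = vm_create_devmem(ctx, VM_BOOTROM, "bootrom", rom_size);
 *	if (romptr == MAP_FAILED)
 *		return (-1);
 *	if (vm_mmap_memseg(ctx, rom_gpa, VM_BOOTROM, 0, rom_size,
 *	    PROT_READ | PROT_EXEC) != 0)
 *		return (-1);
 */
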
int
vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t base, uint32_t limit, uint32_t access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;
	vmsegdesc.desc.base = base;
	vmsegdesc.desc.limit = limit;
	vmsegdesc.desc.access = access;

	error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	return (error);
}

int
vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
	    uint64_t *base, uint32_t *limit, uint32_t *access)
{
	int error;
	struct vm_seg_desc vmsegdesc;

	bzero(&vmsegdesc, sizeof(vmsegdesc));
	vmsegdesc.cpuid = vcpu;
	vmsegdesc.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
	if (error == 0) {
		*base = vmsegdesc.desc.base;
		*limit = vmsegdesc.desc.limit;
		*access = vmsegdesc.desc.access;
	}
	return (error);
}

int
vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *seg_desc)
{
	int error;

	error = vm_get_desc(ctx, vcpu, reg, &seg_desc->base, &seg_desc->limit,
	    &seg_desc->access);
	return (error);
}

int
vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;
	vmreg.regval = val;

	error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
	return (error);
}

int
vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
{
	int error;
	struct vm_register vmreg;

	bzero(&vmreg, sizeof(vmreg));
	vmreg.cpuid = vcpu;
	vmreg.regnum = reg;

	error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
	*ret_val = vmreg.regval;
	return (error);
}

int
vm_run(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
{
	int error;
	struct vm_run vmrun;

	bzero(&vmrun, sizeof(vmrun));
	vmrun.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_RUN, &vmrun);
	bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
	return (error);
}

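/*
 * A minimal vcpu loop sketch (illustrative; real callers dispatch on many
 * more exit codes):
 *
 *	struct vm_exit vmexit;
 *
 *	for (;;) {
 *		if (vm_run(ctx, vcpu, &vmexit) != 0)
 *			break;
 *		switch (vmexit.exitcode) {
 *		case VM_EXITCODE_HLT:
 *			...
 *		}
 *	}
 */
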
int
vm_suspend(struct vmctx *ctx, enum vm_suspend_how how)
{
	struct vm_suspend vmsuspend;

	bzero(&vmsuspend, sizeof(vmsuspend));
	vmsuspend.how = how;
	return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend));
}

int
vm_reinit(struct vmctx *ctx)
{

	return (ioctl(ctx->fd, VM_REINIT, 0));
}

int
vm_inject_exception(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
    uint32_t errcode, int restart_instruction)
{
	struct vm_exception exc;

	exc.cpuid = vcpu;
	exc.vector = vector;
	exc.error_code = errcode;
	exc.error_code_valid = errcode_valid;
	exc.restart_instruction = restart_instruction;

	return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
}

int
vm_apicid2vcpu(struct vmctx *ctx, int apicid)
{
	/*
	 * The apic id associated with the 'vcpu' has the same numerical value
	 * as the 'vcpu' itself.
	 */
	return (apicid);
}

int
vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
}

int
vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
{
	struct vm_lapic_irq vmirq;

	bzero(&vmirq, sizeof(vmirq));
	vmirq.cpuid = vcpu;
	vmirq.vector = vector;

	return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
}

int
vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg)
{
	struct vm_lapic_msi vmmsi;

	bzero(&vmmsi, sizeof(vmmsi));
	vmmsi.addr = addr;
	vmmsi.msg = msg;

	return (ioctl(ctx->fd, VM_LAPIC_MSI, &vmmsi));
}

int
vm_ioapic_assert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_ASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_deassert_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_DEASSERT_IRQ, &ioapic_irq));
}

int
vm_ioapic_pulse_irq(struct vmctx *ctx, int irq)
{
	struct vm_ioapic_irq ioapic_irq;

	bzero(&ioapic_irq, sizeof(struct vm_ioapic_irq));
	ioapic_irq.irq = irq;

	return (ioctl(ctx->fd, VM_IOAPIC_PULSE_IRQ, &ioapic_irq));
}

int
vm_ioapic_pincount(struct vmctx *ctx, int *pincount)
{

	return (ioctl(ctx->fd, VM_IOAPIC_PINCOUNT, pincount));
}

int
vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_ASSERT_IRQ, &isa_irq));
}

int
vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_DEASSERT_IRQ, &isa_irq));
}

int
vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq)
{
	struct vm_isa_irq isa_irq;

	bzero(&isa_irq, sizeof(struct vm_isa_irq));
	isa_irq.atpic_irq = atpic_irq;
	isa_irq.ioapic_irq = ioapic_irq;

	return (ioctl(ctx->fd, VM_ISA_PULSE_IRQ, &isa_irq));
}

int
vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
    enum vm_intr_trigger trigger)
{
	struct vm_isa_irq_trigger isa_irq_trigger;

	bzero(&isa_irq_trigger, sizeof(struct vm_isa_irq_trigger));
	isa_irq_trigger.atpic_irq = atpic_irq;
	isa_irq_trigger.trigger = trigger;

	return (ioctl(ctx->fd, VM_ISA_SET_IRQ_TRIGGER, &isa_irq_trigger));
}

int
vm_inject_nmi(struct vmctx *ctx, int vcpu)
{
	struct vm_nmi vmnmi;

	bzero(&vmnmi, sizeof(vmnmi));
	vmnmi.cpuid = vcpu;

	return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
}

static struct {
	const char	*name;
	int		type;
} capstrmap[] = {
	{ "hlt_exit",		VM_CAP_HALT_EXIT },
	{ "mtrap_exit",		VM_CAP_MTRAP_EXIT },
	{ "pause_exit",		VM_CAP_PAUSE_EXIT },
	{ "unrestricted_guest",	VM_CAP_UNRESTRICTED_GUEST },
	{ "enable_invpcid",	VM_CAP_ENABLE_INVPCID },
	{ 0 }
};

int
vm_capability_name2type(const char *capname)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL && capname != NULL; i++) {
		if (strcmp(capstrmap[i].name, capname) == 0)
			return (capstrmap[i].type);
	}

	return (-1);
}

const char *
vm_capability_type2name(int type)
{
	int i;

	for (i = 0; capstrmap[i].name != NULL; i++) {
		if (capstrmap[i].type == type)
			return (capstrmap[i].name);
	}

	return (NULL);
}

int
vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
		  int *retval)
{
	int error;
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;

	error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
	*retval = vmcap.capval;
	return (error);
}

int
vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
{
	struct vm_capability vmcap;

	bzero(&vmcap, sizeof(vmcap));
	vmcap.cpuid = vcpu;
	vmcap.captype = cap;
	vmcap.capval = val;

	return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
}

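/*
 * For example, asking the hypervisor to exit on guest HLT could look like
 * this (a sketch; a real caller might probe with vm_get_capability() first
 * to see whether the capability is supported):
 *
 *	if (vm_set_capability(ctx, vcpu, VM_CAP_HALT_EXIT, 1) != 0)
 *		return (-1);
 */
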
int
vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_BIND_PPTDEV, &pptdev));
}

int
vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func)
{
	struct vm_pptdev pptdev;

	bzero(&pptdev, sizeof(pptdev));
	pptdev.bus = bus;
	pptdev.slot = slot;
	pptdev.func = func;

	return (ioctl(ctx->fd, VM_UNBIND_PPTDEV, &pptdev));
}

int
vm_map_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
		   vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
{
	struct vm_pptdev_mmio pptmmio;

	bzero(&pptmmio, sizeof(pptmmio));
	pptmmio.bus = bus;
	pptmmio.slot = slot;
	pptmmio.func = func;
	pptmmio.gpa = gpa;
	pptmmio.len = len;
	pptmmio.hpa = hpa;

	return (ioctl(ctx->fd, VM_MAP_PPTDEV_MMIO, &pptmmio));
}

int
vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    uint64_t addr, uint64_t msg, int numvec)
{
	struct vm_pptdev_msi pptmsi;

	bzero(&pptmsi, sizeof(pptmsi));
	pptmsi.vcpu = vcpu;
	pptmsi.bus = bus;
	pptmsi.slot = slot;
	pptmsi.func = func;
	pptmsi.msg = msg;
	pptmsi.addr = addr;
	pptmsi.numvec = numvec;

	return (ioctl(ctx->fd, VM_PPTDEV_MSI, &pptmsi));
}

int
vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
    int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
	struct vm_pptdev_msix pptmsix;

	bzero(&pptmsix, sizeof(pptmsix));
	pptmsix.vcpu = vcpu;
	pptmsix.bus = bus;
	pptmsix.slot = slot;
	pptmsix.func = func;
	pptmsix.idx = idx;
	pptmsix.msg = msg;
	pptmsix.addr = addr;
	pptmsix.vector_control = vector_control;

	return (ioctl(ctx->fd, VM_PPTDEV_MSIX, &pptmsix));
}

uint64_t *
vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
	     int *ret_entries)
{
	int error;

	static struct vm_stats vmstats;

	vmstats.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_STATS, &vmstats);
	if (error == 0) {
		if (ret_entries)
			*ret_entries = vmstats.num_entries;
		if (ret_tv)
			*ret_tv = vmstats.tv;
		return (vmstats.statbuf);
	} else
		return (NULL);
}

const char *
vm_get_stat_desc(struct vmctx *ctx, int index)
{
	static struct vm_stat_desc statdesc;

	statdesc.index = index;
	if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0)
		return (statdesc.desc);
	else
		return (NULL);
}

int
vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;

	error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
	*state = x2apic.state;
	return (error);
}

int
vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
{
	int error;
	struct vm_x2apic x2apic;

	bzero(&x2apic, sizeof(x2apic));
	x2apic.cpuid = vcpu;
	x2apic.state = state;

	error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);

	return (error);
}

/*
 * From Intel Vol 3a:
 * Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
 */
int
vcpu_reset(struct vmctx *vmctx, int vcpu)
{
	int error;
	uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
	uint32_t desc_access, desc_limit;
	uint16_t sel;

	zero = 0;

	rflags = 0x2;
	error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
	if (error)
		goto done;

	rip = 0xfff0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
		goto done;

	cr0 = CR0_NE;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
		goto done;

	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
		goto done;

	cr4 = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
		goto done;

	/*
	 * CS: present, r/w, accessed, 16-bit, byte granularity, usable
	 */
	desc_base = 0xffff0000;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0xf000;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
		goto done;

	/*
	 * SS,DS,ES,FS,GS: present, r/w, accessed, 16-bit, byte granularity
	 */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0093;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
			    desc_base, desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
		goto done;

	/* General purpose registers */
	rdx = 0xf00;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
		goto done;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
		goto done;

	/* GDTR, IDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
			    desc_base, desc_limit, desc_access);
	if (error != 0)
		goto done;

	/* TR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x0000008b;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, desc_base,
			    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
		goto done;

	/* LDTR */
	desc_base = 0;
	desc_limit = 0xffff;
	desc_access = 0x00000082;
	error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
			    desc_limit, desc_access);
	if (error)
		goto done;

	sel = 0;
	if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, sel)) != 0)
		goto done;

	/* XXX cr2, debug registers */

	error = 0;
done:
	return (error);
}

int
vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num)
{
	int error, i;
	struct vm_gpa_pte gpapte;

	bzero(&gpapte, sizeof(gpapte));
	gpapte.gpa = gpa;

	error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte);

	if (error == 0) {
		*num = gpapte.ptenum;
		for (i = 0; i < gpapte.ptenum; i++)
			pte[i] = gpapte.pte[i];
	}

	return (error);
}

int
vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities)
{
	int error;
	struct vm_hpet_cap cap;

	bzero(&cap, sizeof(struct vm_hpet_cap));
	error = ioctl(ctx->fd, VM_GET_HPET_CAPABILITIES, &cap);
	if (capabilities != NULL)
		*capabilities = cap.capabilities;
	return (error);
}

int
vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
	struct vm_gla2gpa gg;
	int error;

	bzero(&gg, sizeof(struct vm_gla2gpa));
	gg.vcpuid = vcpu;
	gg.prot = prot;
	gg.gla = gla;
	gg.paging = *paging;

	error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
	if (error == 0) {
		*fault = gg.fault;
		*gpa = gg.gpa;
	}
	return (error);
}

#ifndef min
#define	min(a,b)	(((a) < (b)) ? (a) : (b))
#endif

int
vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
    uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
    int *fault)
{
	void *va;
	uint64_t gpa;
	int error, i, n, off;

	for (i = 0; i < iovcnt; i++) {
		iov[i].iov_base = 0;
		iov[i].iov_len = 0;
	}

	while (len) {
		assert(iovcnt > 0);
		error = vm_gla2gpa(ctx, vcpu, paging, gla, prot, &gpa, fault);
		if (error || *fault)
			return (error);

		off = gpa & PAGE_MASK;
		n = min(len, PAGE_SIZE - off);

		va = vm_map_gpa(ctx, gpa, n);
		if (va == NULL)
			return (EFAULT);

		iov->iov_base = va;
		iov->iov_len = n;
		iov++;
		iovcnt--;

		gla += n;
		len -= n;
	}
	return (0);
}

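/*
 * Illustrative use: to read 'len' bytes starting at guest linear address
 * 'gla' (possibly spanning several pages) into 'buf', set up the iovec and
 * then copy through it ('paging', 'gla', 'len' and 'buf' are the caller's;
 * the iovec size here is an arbitrary example):
 *
 *	struct iovec iov[8];
 *	int error, fault;
 *
 *	error = vm_copy_setup(ctx, vcpu, &paging, gla, len, PROT_READ,
 *	    iov, nitems(iov), &fault);
 *	if (error == 0 && !fault)
 *		vm_copyin(ctx, vcpu, iov, buf, len);
 */
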
void
vm_copy_teardown(struct vmctx *ctx, int vcpu, struct iovec *iov, int iovcnt)
{

	return;
}

void
vm_copyin(struct vmctx *ctx, int vcpu, struct iovec *iov, void *vp, size_t len)
{
	const char *src;
	char *dst;
	size_t n;

	dst = vp;
	while (len) {
		assert(iov->iov_len);
		n = min(len, iov->iov_len);
		src = iov->iov_base;
		bcopy(src, dst, n);

		iov++;
		dst += n;
		len -= n;
	}
}

void
vm_copyout(struct vmctx *ctx, int vcpu, const void *vp, struct iovec *iov,
    size_t len)
{
	const char *src;
	char *dst;
	size_t n;

	src = vp;
	while (len) {
		assert(iov->iov_len);
		n = min(len, iov->iov_len);
		dst = iov->iov_base;
		bcopy(src, dst, n);

		iov++;
		src += n;
		len -= n;
	}
}

static int
vm_get_cpus(struct vmctx *ctx, int which, cpuset_t *cpus)
{
	struct vm_cpuset vm_cpuset;
	int error;

	bzero(&vm_cpuset, sizeof(struct vm_cpuset));
	vm_cpuset.which = which;
	vm_cpuset.cpusetsize = sizeof(cpuset_t);
	vm_cpuset.cpus = cpus;

	error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset);
	return (error);
}

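/*
 * Example (illustrative): checking whether a particular vcpu has been
 * activated.
 *
 *	cpuset_t active;
 *
 *	if (vm_active_cpus(ctx, &active) == 0 && CPU_ISSET(vcpu, &active))
 *		...
 */
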
int
vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_ACTIVE_CPUS, cpus));
}

int
vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus)
{

	return (vm_get_cpus(ctx, VM_SUSPENDED_CPUS, cpus));
}

int
vm_activate_cpu(struct vmctx *ctx, int vcpu)
{
	struct vm_activate_cpu ac;
	int error;

	bzero(&ac, sizeof(struct vm_activate_cpu));
	ac.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
	return (error);
}

int
vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *info1, uint64_t *info2)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.vcpuid = vcpu;
	error = ioctl(ctx->fd, VM_GET_INTINFO, &vmii);
	if (error == 0) {
		*info1 = vmii.info1;
		*info2 = vmii.info2;
	}
	return (error);
}

int
vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t info1)
{
	struct vm_intinfo vmii;
	int error;

	bzero(&vmii, sizeof(struct vm_intinfo));
	vmii.vcpuid = vcpu;
	vmii.info1 = info1;
	error = ioctl(ctx->fd, VM_SET_INTINFO, &vmii);
	return (error);
}

int
vm_rtc_write(struct vmctx *ctx, int offset, uint8_t value)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	rtcdata.value = value;
	error = ioctl(ctx->fd, VM_RTC_WRITE, &rtcdata);
	return (error);
}

int
vm_rtc_read(struct vmctx *ctx, int offset, uint8_t *retval)
{
	struct vm_rtc_data rtcdata;
	int error;

	bzero(&rtcdata, sizeof(struct vm_rtc_data));
	rtcdata.offset = offset;
	error = ioctl(ctx->fd, VM_RTC_READ, &rtcdata);
	if (error == 0)
		*retval = rtcdata.value;
	return (error);
}

int
vm_rtc_settime(struct vmctx *ctx, time_t secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	rtctime.secs = secs;
	error = ioctl(ctx->fd, VM_RTC_SETTIME, &rtctime);
	return (error);
}

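/*
 * For example, a device model might initialize the virtual RTC from the
 * current host clock at startup (a sketch):
 *
 *	(void)vm_rtc_settime(ctx, time(NULL));
 */
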
int
vm_rtc_gettime(struct vmctx *ctx, time_t *secs)
{
	struct vm_rtc_time rtctime;
	int error;

	bzero(&rtctime, sizeof(struct vm_rtc_time));
	error = ioctl(ctx->fd, VM_RTC_GETTIME, &rtctime);
	if (error == 0)
		*secs = rtctime.secs;
	return (error);
}

int
vm_restart_instruction(void *arg, int vcpu)
{
	struct vmctx *ctx = arg;

	return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &vcpu));
}
1414