vmm_instruction_emul.c revision 252641
/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/amd64/vmm/vmm_instruction_emul.c 252641 2013-07-03 23:05:17Z neel $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/vmm/vmm_instruction_emul.c 252641 2013-07-03 23:05:17Z neel $");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/vmm.h>
#else	/* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>

#include <machine/vmm.h>

#include <vmmapi.h>
#endif	/* _KERNEL */

enum cpu_mode {
	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};

/* struct vie_op.op_type */
enum {
	VIE_OP_TYPE_NONE = 0,
	VIE_OP_TYPE_MOV,
	VIE_OP_TYPE_AND,
	VIE_OP_TYPE_LAST
};

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)	/* immediate operand present */
#define	VIE_OP_F_IMM8		(1 << 1)	/* 8-bit immediate operand */

static const struct vie_op one_byte_opcodes[256] = {
	[0x88] = {
		.op_byte = 0x88,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x89] = {
		.op_byte = 0x89,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x8B] = {
		.op_byte = 0x8B,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0xC7] = {
		.op_byte = 0xC7,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x23] = {
		.op_byte = 0x23,
		.op_type = VIE_OP_TYPE_AND,
	},
	[0x81] = {
		/* XXX Group 1 extended opcode - not just AND */
		.op_byte = 0x81,
		.op_type = VIE_OP_TYPE_AND,
		.op_flags = VIE_OP_F_IMM,
	}
};

/* struct vie.mod */
#define	VIE_MOD_INDIRECT		0
#define	VIE_MOD_INDIRECT_DISP8		1
#define	VIE_MOD_INDIRECT_DISP32		2
#define	VIE_MOD_DIRECT			3

/* struct vie.rm */
#define	VIE_RM_SIB			4
#define	VIE_RM_DISP32			5

#define	GB				(1024 * 1024 * 1024)

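/*
 * Map the 4-bit register encodings used by the ModRM, SIB and REX bytes
 * to the corresponding VMM register identifiers, in hardware encoding
 * order.
 */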
static enum vm_reg_name gpr_map[16] = {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15
};

static uint64_t size2mask[] = {
	[1] = 0xff,
	[2] = 0xffff,
	[4] = 0xffffffff,
	[8] = 0xffffffffffffffff,
};

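/*
 * Read the current value of guest register 'reg' into '*rval'.
 */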
static int
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
{
	int error;

	error = vm_get_register(vm, vcpuid, reg, rval);

	return (error);
}

static int
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
{
	uint64_t val;
	int error, rshift;
	enum vm_reg_name reg;

	rshift = 0;
	reg = gpr_map[vie->reg];

	/*
	 * 64-bit mode imposes limitations on accessing legacy byte registers.
	 *
	 * The legacy high-byte registers cannot be addressed if the REX
	 * prefix is present. In this case the values 4, 5, 6 and 7 of the
	 * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
	 *
	 * If the REX prefix is not present then the values 4, 5, 6 and 7
	 * of the 'ModRM:reg' field address the legacy high-byte registers,
	 * %ah, %ch, %dh and %bh respectively.
	 */
	if (!vie->rex_present) {
		if (vie->reg & 0x4) {
			/*
			 * Obtain the value of %ah by reading %rax and shifting
			 * right by 8 bits (same for %bh, %ch and %dh).
			 */
			rshift = 8;
			reg = gpr_map[vie->reg & 0x3];
		}
	}

	error = vm_get_register(vm, vcpuid, reg, &val);
	*rval = val >> rshift;
	return (error);
}

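/*
 * Write 'val' to guest register 'reg' with operand size 'size', following
 * the x86-64 register update rules: 1- and 2-byte writes leave the upper
 * bits of the destination unchanged, 4-byte writes zero-extend to 64 bits,
 * and 8-byte writes replace the entire register.
 */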
static int
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
		    uint64_t val, int size)
{
	int error;
	uint64_t origval;

	switch (size) {
	case 1:
	case 2:
		error = vie_read_register(vm, vcpuid, reg, &origval);
		if (error)
			return (error);
		val &= size2mask[size];
		val |= origval & ~size2mask[size];
		break;
	case 4:
		val &= 0xffffffffUL;
		break;
	case 8:
		break;
	default:
		return (EINVAL);
	}

	error = vm_set_register(vm, vcpuid, reg, val);
	return (error);
}

/*
 * The following simplifying assumptions are made during emulation:
 *
 * - guest is in 64-bit mode
 *   - default address size is 64-bits
 *   - default operand size is 32-bits
 *
 * - operand size override is not supported
 *
 * - address size override is not supported
 */
static int
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint8_t byte;
	uint64_t val;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x88:
		/*
		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 88/r:	mov r/m8, r8
		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
		 */
		size = 1;
		error = vie_read_bytereg(vm, vcpuid, vie, &byte);
		if (error == 0)
			error = memwrite(vm, vcpuid, gpa, byte, size, arg);
		break;
	case 0x89:
		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:		mov r/m32, r32
		 * REX.W + 89/r:	mov r/m64, r64
		 */
		if (vie->rex_w)
			size = 8;
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0x8B:
		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8B/r:		mov r32, r/m32
		 * REX.W + 8B/r:	mov r64, r/m64
		 */
		if (vie->rex_w)
			size = 8;
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = gpr_map[vie->reg];
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xC7:
		/*
		 * MOV from imm32 to mem (ModRM:r/m)
		 * C7/0:		mov r/m32, imm32
		 * REX.W + C7/0:	mov r/m64, imm32 (sign-extended to 64-bits)
		 */
		val = vie->immediate;		/* already sign-extended */

		if (vie->rex_w)
			size = 8;

		if (size != 8)
			val &= size2mask[size];

		error = memwrite(vm, vcpuid, gpa, val, size, arg);
		break;
	default:
		break;
	}

	return (error);
}

static int
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val1, val2;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x23:
		/*
		 * AND reg (ModRM:reg) with mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 23/r:		and r32, r/m32
		 * REX.W + 23/r:	and r64, r/m64
		 */
		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		val1 &= val2;
		error = vie_update_register(vm, vcpuid, reg, val1, size);
		break;
	case 0x81:
		/*
		 * AND mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81 /4:		and r/m32, imm32
		 * REX.W + 81 /4:	and r/m64, imm32 sign-extended to 64
		 *
		 * Currently, only the AND operation of the 0x81 opcode
		 * is implemented (ModRM:reg = b100).
		 */
		if ((vie->reg & 7) != 4)
			break;

		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 &= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	default:
		break;
	}
	return (error);
}

int
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
			mem_region_read_t memread, mem_region_write_t memwrite,
			void *memarg)
{
	int error;

	if (!vie->decoded)
		return (EINVAL);

	switch (vie->op.op_type) {
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

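/*
 * Example of driving this API end to end from a nested page fault handler
 * (kernel build only, since the fetch and decode stages below are under
 * _KERNEL). A minimal sketch, not code from this file: 'mmio_read',
 * 'mmio_write' and the surrounding fault context (rip, cr3, gla, gpa) are
 * hypothetical stand-ins supplied by the hypervisor.
 *
 *	struct vie vie;
 *	int error;
 *
 *	if (vmm_fetch_instruction(vm, vcpuid, rip, inst_length, cr3, &vie))
 *		return (EFAULT);
 *	if (vmm_decode_instruction(vm, vcpuid, gla, &vie))
 *		return (EINVAL);
 *	error = vmm_emulate_instruction(vm, vcpuid, gpa, &vie,
 *	    mmio_read, mmio_write, memarg);
 */
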
#ifdef _KERNEL
static void
vie_init(struct vie *vie)
{

	bzero(vie, sizeof(struct vie));

	vie->base_register = VM_REG_LAST;
	vie->index_register = VM_REG_LAST;
}

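/*
 * Translate guest linear address 'gla' to a guest physical address by
 * walking the guest's page tables rooted at 'ptpphys' (guest %cr3). On
 * success '*gpa' receives the translation and '*gpaend' the guest physical
 * address just past the containing page; superpage mappings up to 1GB are
 * handled. Returns 0 on success, -1 on any walk failure.
 */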
static int
gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys,
	uint64_t *gpa, uint64_t *gpaend)
{
	vm_paddr_t hpa;
	int nlevels, ptpshift, ptpindex;
	uint64_t *ptpbase, pte, pgsize;

	/*
	 * XXX assumes 64-bit guest with 4 page walk levels
	 */
	nlevels = 4;
	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

		hpa = vm_gpa2hpa(vm, ptpphys, PAGE_SIZE);
		if (hpa == -1)
			goto error;

		ptpbase = (uint64_t *)PHYS_TO_DMAP(hpa);

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		if ((pte & PG_V) == 0)
			goto error;

		if (pte & PG_PS) {
			if (pgsize > 1 * GB)
				goto error;
			else
				break;
		}

		ptpphys = pte;
	}

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));
	*gpaend = pte + pgsize;
	return (0);

error:
	return (-1);
}

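/*
 * Copy 'inst_length' bytes of the instruction starting at guest linear
 * address 'rip' into 'vie', crossing page boundaries if necessary.
 * Returns 0 if the entire instruction was fetched, -1 otherwise.
 */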
int
vmm_fetch_instruction(struct vm *vm, int cpuid, uint64_t rip, int inst_length,
		      uint64_t cr3, struct vie *vie)
{
	int n, err;
	uint64_t hpa, gpa, gpaend, off;

	/*
	 * XXX cache previously fetched instructions using 'rip' as the tag
	 */

	if (inst_length > VIE_INST_SIZE)
		panic("vmm_fetch_instruction: invalid length %d", inst_length);

	vie_init(vie);

	/* Copy the instruction into 'vie' */
	while (vie->num_valid < inst_length) {
		err = gla2gpa(vm, rip, cr3, &gpa, &gpaend);
		if (err)
			break;

		off = gpa & PAGE_MASK;
		n = min(inst_length - vie->num_valid, PAGE_SIZE - off);

		hpa = vm_gpa2hpa(vm, gpa, n);
		if (hpa == -1)
			break;

		bcopy((void *)PHYS_TO_DMAP(hpa), &vie->inst[vie->num_valid], n);

		rip += n;
		vie->num_valid += n;
	}

	if (vie->num_valid == inst_length)
		return (0);
	else
		return (-1);
}

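/*
 * vie_peek() returns the next unconsumed instruction byte in '*x' without
 * advancing; vie_advance() consumes one byte. Together they drive the
 * decode_*() stages below.
 */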
static int
vie_peek(struct vie *vie, uint8_t *x)
{

	if (vie->num_processed < vie->num_valid) {
		*x = vie->inst[vie->num_processed];
		return (0);
	} else
		return (-1);
}

static void
vie_advance(struct vie *vie)
{

	vie->num_processed++;
}

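/*
 * Decode an optional REX prefix (0x40 through 0x4F). The low nibble holds
 * the W (64-bit operand size), R (ModRM:reg extension), X (SIB:index
 * extension) and B (ModRM:r/m or SIB:base extension) bits.
 */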
static int
decode_rex(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	if (x >= 0x40 && x <= 0x4F) {
		vie->rex_present = 1;

		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;

		vie_advance(vie);
	}

	return (0);
}

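/*
 * Look up the opcode byte in the one-byte opcode table. Two-byte (0x0F)
 * opcodes and unimplemented one-byte opcodes fail the decode.
 */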
static int
decode_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = one_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);
	return (0);
}

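/*
 * Decode the ModRM byte: mod in bits 7:6, reg in bits 5:3 and r/m in
 * bits 2:0, with reg and r/m optionally extended by REX.R and REX.B.
 */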
static int
decode_modrm(struct vie *vie)
{
	uint8_t x;
	enum cpu_mode cpu_mode;

	/*
	 * XXX assuming that guest is in IA-32E 64-bit mode
	 */
	cpu_mode = CPU_MODE_64BIT;

	if (vie_peek(vie, &x))
		return (-1);

	vie->mod = (x >> 6) & 0x3;
	vie->rm =  (x >> 0) & 0x7;
	vie->reg = (x >> 3) & 0x7;

	/*
	 * A direct addressing mode makes no sense in the context of an EPT
	 * fault. There has to be a memory access involved to cause the
	 * EPT fault.
	 */
	if (vie->mod == VIE_MOD_DIRECT)
		return (-1);

	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
		/*
		 * Table 2-5: Special Cases of REX Encodings
		 *
		 * mod=0, r/m=5 is used in the compatibility mode to
		 * indicate a disp32 without a base register.
		 *
		 * mod!=3, r/m=4 is used in the compatibility mode to
		 * indicate that the SIB byte is present.
		 *
		 * The 'b' bit in the REX prefix is don't care in
		 * this case.
		 */
	} else {
		vie->rm |= (vie->rex_b << 3);
	}

	vie->reg |= (vie->rex_r << 3);

	/* SIB */
	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
		goto done;

	vie->base_register = gpr_map[vie->rm];

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	case VIE_MOD_INDIRECT:
		if (vie->rm == VIE_RM_DISP32) {
			vie->disp_bytes = 4;
			/*
			 * Table 2-7. RIP-Relative Addressing
			 *
			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
			 * whereas in compatibility mode it just implies disp32.
			 */

			if (cpu_mode == CPU_MODE_64BIT)
				vie->base_register = VM_REG_GUEST_RIP;
			else
				vie->base_register = VM_REG_LAST;

		}
		break;
	}

	/* Figure out immediate operand size (if any) */
	if (vie->op.op_flags & VIE_OP_F_IMM)
		vie->imm_bytes = 4;
	else if (vie->op.op_flags & VIE_OP_F_IMM8)
		vie->imm_bytes = 1;

done:
	vie_advance(vie);

	return (0);
}

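/*
 * Decode the SIB byte, if the ModRM byte indicated one: scale in bits
 * 7:6, index in bits 5:3 and base in bits 2:0, with index and base
 * optionally extended by REX.X and REX.B.
 */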
static int
decode_sib(struct vie *vie)
{
	uint8_t x;

	/* Proceed only if SIB byte is present */
	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	/* De-construct the SIB byte */
	vie->ss = (x >> 6) & 0x3;
	vie->index = (x >> 3) & 0x7;
	vie->base = (x >> 0) & 0x7;

	/* Apply the REX prefix modifiers */
	vie->index |= vie->rex_x << 3;
	vie->base |= vie->rex_b << 3;

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	}

	if (vie->mod == VIE_MOD_INDIRECT &&
	    (vie->base == 5 || vie->base == 13)) {
		/*
		 * Special case when base register is unused if mod = 0
		 * and base = %rbp or %r13.
		 *
		 * Documented in:
		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
		 * Table 2-5: Special Cases of REX Encodings
		 */
		vie->disp_bytes = 4;
	} else {
		vie->base_register = gpr_map[vie->base];
	}

	/*
	 * All encodings of 'index' are valid except for %rsp (4).
	 *
	 * Documented in:
	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
	 * Table 2-5: Special Cases of REX Encodings
	 */
	if (vie->index != 4)
		vie->index_register = gpr_map[vie->index];

	/* 'scale' makes sense only in the context of an index register */
	if (vie->index_register < VM_REG_LAST)
		vie->scale = 1 << vie->ss;

	vie_advance(vie);

	return (0);
}

static int
decode_displacement(struct vie *vie)
{
	int n, i;
	uint8_t x;

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->disp_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_displacement: invalid disp_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->displacement = u.signed8;		/* sign-extended */
	else
		vie->displacement = u.signed32;		/* sign-extended */

	return (0);
}

static int
decode_immediate(struct vie *vie)
{
	int i, n;
	uint8_t x;
	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->imm_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_immediate: invalid imm_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->immediate = u.signed8;		/* sign-extended */
	else
		vie->immediate = u.signed32;		/* sign-extended */

	return (0);
}

/*
 * Verify that all the bytes in the instruction buffer were consumed.
 */
static int
verify_inst_length(struct vie *vie)
{

	if (vie->num_processed == vie->num_valid)
		return (0);
	else
		return (-1);
}

/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches with our instruction decoding.
 */
static int
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{
	int error;
	uint64_t base, idx;

	/* Skip 'gla' verification */
	if (gla == VIE_INVALID_GLA)
		return (0);

	base = 0;
	if (vie->base_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->base_register, &base);
		if (error) {
			printf("verify_gla: error %d getting base reg %d\n",
				error, vie->base_register);
			return (-1);
		}

		/*
		 * RIP-relative addressing starts from the following
		 * instruction
		 */
		if (vie->base_register == VM_REG_GUEST_RIP)
			base += vie->num_valid;
	}

	idx = 0;
	if (vie->index_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
		if (error) {
			printf("verify_gla: error %d getting index reg %d\n",
				error, vie->index_register);
			return (-1);
		}
	}

	if (base + vie->scale * idx + vie->displacement != gla) {
		printf("verify_gla mismatch: "
		       "base(0x%0lx), scale(%d), index(0x%0lx), "
		       "disp(0x%0lx), gla(0x%0lx)\n",
		       base, vie->scale, idx, vie->displacement, gla);
		return (-1);
	}

	return (0);
}

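/*
 * Decode the previously fetched instruction in 'vie' by running it through
 * each stage in turn: REX prefix, opcode, ModRM, SIB, displacement and
 * immediate, then cross-check the decoded effective address against the
 * 'gla' reported by the nested page fault.
 */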
int
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{

	if (decode_rex(vie))
		return (-1);

	if (decode_opcode(vie))
		return (-1);

	if (decode_modrm(vie))
		return (-1);

	if (decode_sib(vie))
		return (-1);

	if (decode_displacement(vie))
		return (-1);

	if (decode_immediate(vie))
		return (-1);

	if (verify_inst_length(vie))
		return (-1);

	if (verify_gla(vm, cpuid, gla, vie))
		return (-1);

	vie->decoded = 1;	/* success */

	return (0);
}
#endif	/* _KERNEL */
