/*-
 * Copyright (c) 2012 Sandvine, Inc.
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/vmm/vmm_instruction_emul.c 262349 2014-02-22 23:34:39Z jhb $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/amd64/vmm/vmm_instruction_emul.c 262349 2014-02-22 23:34:39Z jhb $");

#ifdef _KERNEL
#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/vmparam.h>
#include <machine/vmm.h>
#else	/* !_KERNEL */
#include <sys/types.h>
#include <sys/errno.h>

#include <machine/vmm.h>

#include <vmmapi.h>
#endif	/* _KERNEL */

enum cpu_mode {
	CPU_MODE_COMPATIBILITY,		/* IA-32E mode (CS.L = 0) */
	CPU_MODE_64BIT,			/* IA-32E mode (CS.L = 1) */
};

/* struct vie_op.op_type */
enum {
	VIE_OP_TYPE_NONE = 0,
	VIE_OP_TYPE_MOV,
	VIE_OP_TYPE_AND,
	VIE_OP_TYPE_OR,
	VIE_OP_TYPE_LAST
};

/* struct vie_op.op_flags */
#define	VIE_OP_F_IMM		(1 << 0)	/* immediate operand present */
#define	VIE_OP_F_IMM8		(1 << 1)	/* 8-bit immediate operand */

static const struct vie_op one_byte_opcodes[256] = {
	[0x88] = {
		.op_byte = 0x88,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x89] = {
		.op_byte = 0x89,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x8A] = {
		.op_byte = 0x8A,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0x8B] = {
		.op_byte = 0x8B,
		.op_type = VIE_OP_TYPE_MOV,
	},
	[0xC7] = {
		.op_byte = 0xC7,
		.op_type = VIE_OP_TYPE_MOV,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x23] = {
		.op_byte = 0x23,
		.op_type = VIE_OP_TYPE_AND,
	},
	[0x81] = {
		/* XXX Group 1 extended opcode - not just AND */
		.op_byte = 0x81,
		.op_type = VIE_OP_TYPE_AND,
		.op_flags = VIE_OP_F_IMM,
	},
	[0x83] = {
		/* XXX Group 1 extended opcode - not just OR */
		.op_byte = 0x83,
		.op_type = VIE_OP_TYPE_OR,
		.op_flags = VIE_OP_F_IMM8,
	},
};

/* struct vie.mod */
#define	VIE_MOD_INDIRECT		0
#define	VIE_MOD_INDIRECT_DISP8		1
#define	VIE_MOD_INDIRECT_DISP32		2
#define	VIE_MOD_DIRECT			3

/* struct vie.rm */
#define	VIE_RM_SIB			4
#define	VIE_RM_DISP32			5

#define	GB				(1024 * 1024 * 1024)

static enum vm_reg_name gpr_map[16] = {
	VM_REG_GUEST_RAX,
	VM_REG_GUEST_RCX,
	VM_REG_GUEST_RDX,
	VM_REG_GUEST_RBX,
	VM_REG_GUEST_RSP,
	VM_REG_GUEST_RBP,
	VM_REG_GUEST_RSI,
	VM_REG_GUEST_RDI,
	VM_REG_GUEST_R8,
	VM_REG_GUEST_R9,
	VM_REG_GUEST_R10,
	VM_REG_GUEST_R11,
	VM_REG_GUEST_R12,
	VM_REG_GUEST_R13,
	VM_REG_GUEST_R14,
	VM_REG_GUEST_R15
};

static uint64_t size2mask[] = {
	[1] = 0xff,
	[2] = 0xffff,
	[4] = 0xffffffff,
	[8] = 0xffffffffffffffff,
};

static int
vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
{
	int error;

	error = vm_get_register(vm, vcpuid, reg, rval);

	return (error);
}

static int
vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
{
	uint64_t val;
	int error, rshift;
	enum vm_reg_name reg;

	rshift = 0;
	reg = gpr_map[vie->reg];

	/*
	 * 64-bit mode imposes limitations on accessing legacy byte registers.
	 *
	 * The legacy high-byte registers cannot be addressed if the REX
	 * prefix is present. In this case the values 4, 5, 6 and 7 of the
	 * 'ModRM:reg' field address %spl, %bpl, %sil and %dil respectively.
	 *
	 * If the REX prefix is not present then the values 4, 5, 6 and 7
	 * of the 'ModRM:reg' field address the legacy high-byte registers,
	 * %ah, %ch, %dh and %bh respectively.
	 */
	if (!vie->rex_present) {
		if (vie->reg & 0x4) {
			/*
			 * Obtain the value of %ah by reading %rax and shifting
			 * right by 8 bits (same for %bh, %ch and %dh).
			 */
			rshift = 8;
			reg = gpr_map[vie->reg & 0x3];
		}
	}

	error = vm_get_register(vm, vcpuid, reg, &val);
	*rval = val >> rshift;
	return (error);
}
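
/*
 * Illustrative example (assumed register value, not part of the emulator):
 * with no REX prefix and ModRM:reg = 4, the function above reads %ah by
 * fetching %rax and shifting right by 8:
 *
 *	%rax  = 0x0000000000001234
 *	reg   = gpr_map[4 & 0x3] = VM_REG_GUEST_RAX, rshift = 8
 *	*rval = (uint8_t)(0x1234 >> 8) = 0x12		(the value of %ah)
 *
 * With a REX prefix present, ModRM:reg = 4 would instead select %spl and
 * no shift is applied.
 */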

static int
vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
		    uint64_t val, int size)
{
	int error;
	uint64_t origval;

	switch (size) {
	case 1:
	case 2:
		error = vie_read_register(vm, vcpuid, reg, &origval);
		if (error)
			return (error);
		val &= size2mask[size];
		val |= origval & ~size2mask[size];
		break;
	case 4:
		val &= 0xffffffffUL;
		break;
	case 8:
		break;
	default:
		return (EINVAL);
	}

	error = vm_set_register(vm, vcpuid, reg, val);
	return (error);
}
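
/*
 * Illustrative example (assumed register value, not part of the emulator):
 * the switch above implements the x86-64 partial-register update rules.
 * Assuming the destination register currently holds 0xffffffffffffffff:
 *
 *	size 2, val 0x1234 -> 0xffffffffffff1234	(upper bits preserved)
 *	size 4, val 0x1234 -> 0x0000000000001234	(zero-extended to 64)
 *	size 8, val 0x1234 -> 0x0000000000001234	(written as-is)
 */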

/*
 * The following simplifying assumptions are made during emulation:
 *
 * - guest is in 64-bit mode
 *   - default address size is 64 bits
 *   - default operand size is 32 bits
 *
 * - operand size override is not supported
 *
 * - address size override is not supported
 */
static int
emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint8_t byte;
	uint64_t val;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x88:
		/*
		 * MOV byte from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 88/r:	mov r/m8, r8
		 * REX + 88/r:	mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
		 */
		size = 1;
		error = vie_read_bytereg(vm, vcpuid, vie, &byte);
		if (error == 0)
			error = memwrite(vm, vcpuid, gpa, byte, size, arg);
		break;
	case 0x89:
		/*
		 * MOV from reg (ModRM:reg) to mem (ModRM:r/m)
		 * 89/r:	mov r/m32, r32
		 * REX.W + 89/r:	mov r/m64, r64
		 */
		if (vie->rex_w)
			size = 8;
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val);
		if (error == 0) {
			val &= size2mask[size];
			error = memwrite(vm, vcpuid, gpa, val, size, arg);
		}
		break;
	case 0x8A:
	case 0x8B:
		/*
		 * MOV from mem (ModRM:r/m) to reg (ModRM:reg)
		 * 8A/r:	mov r8, r/m8
		 * REX + 8A/r:	mov r8, r/m8
		 * 8B/r:	mov r32, r/m32
		 * REX.W + 8B/r:	mov r64, r/m64
		 */
		if (vie->op.op_byte == 0x8A)
			size = 1;
		else if (vie->rex_w)
			size = 8;
		error = memread(vm, vcpuid, gpa, &val, size, arg);
		if (error == 0) {
			reg = gpr_map[vie->reg];
			error = vie_update_register(vm, vcpuid, reg, val, size);
		}
		break;
	case 0xC7:
		/*
		 * MOV from imm32 to mem (ModRM:r/m)
		 * C7/0		mov r/m32, imm32
		 * REX.W + C7/0	mov r/m64, imm32 (sign-extended to 64 bits)
		 */
		val = vie->immediate;		/* already sign-extended */

		if (vie->rex_w)
			size = 8;

		if (size != 8)
			val &= size2mask[size];

		error = memwrite(vm, vcpuid, gpa, val, size, arg);
		break;
	default:
		break;
	}

	return (error);
}
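
/*
 * Decoding sketch (assumed guest instruction, for illustration only): a
 * guest executing "mov %ecx,(%rax)" (89 08) against an MMIO address ends
 * up in the 0x89 case above with size = 4; the low 32 bits of %rcx are
 * masked with size2mask[4] and handed to memwrite().  With a REX.W prefix
 * (48 89 08) the same path runs with size = 8.
 */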

static int
emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	enum vm_reg_name reg;
	uint64_t val1, val2;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x23:
		/*
		 * AND reg (ModRM:reg) and mem (ModRM:r/m) and store the
		 * result in reg.
		 *
		 * 23/r		and r32, r/m32
		 * REX.W + 23/r	and r64, r/m64
		 */
		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		reg = gpr_map[vie->reg];
		error = vie_read_register(vm, vcpuid, reg, &val1);
		if (error)
			break;

		/* get the second operand */
		error = memread(vm, vcpuid, gpa, &val2, size, arg);
		if (error)
			break;

		/* perform the operation and write the result */
		val1 &= val2;
		error = vie_update_register(vm, vcpuid, reg, val1, size);
		break;
	case 0x81:
		/*
		 * AND mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 81/4		and r/m32, imm32
		 * REX.W + 81/4	and r/m64, imm32 sign-extended to 64
		 *
		 * Currently, only the AND operation of the 0x81 opcode
		 * is implemented (ModRM:reg = b100).
		 */
		if ((vie->reg & 7) != 4)
			break;

		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 &= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	default:
		break;
	}
	return (error);
}
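
/*
 * Illustrative encoding (not from the source): "andl $0xf,(%rax)" is
 * 81 20 0f 00 00 00.  ModRM 0x20 decodes to mod = 0, reg = 4 (the /4
 * AND extension checked above) and rm = 0, so val1 is read from the
 * guest address held in %rax, ANDed with the sign-extended imm32 0xf
 * and written back with memwrite().
 */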

static int
emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
	    mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
	int error, size;
	uint64_t val1;

	size = 4;
	error = EINVAL;

	switch (vie->op.op_byte) {
	case 0x83:
		/*
		 * OR mem (ModRM:r/m) with immediate and store the
		 * result in mem.
		 *
		 * 83/1		OR r/m32, imm8 sign-extended to 32
		 * REX.W + 83/1	OR r/m64, imm8 sign-extended to 64
		 *
		 * Currently, only the OR operation of the 0x83 opcode
		 * is implemented (ModRM:reg = b001).
		 */
		if ((vie->reg & 7) != 1)
			break;

		if (vie->rex_w)
			size = 8;

		/* get the first operand */
		error = memread(vm, vcpuid, gpa, &val1, size, arg);
		if (error)
			break;

		/*
		 * perform the operation with the pre-fetched immediate
		 * operand and write the result
		 */
		val1 |= vie->immediate;
		error = memwrite(vm, vcpuid, gpa, val1, size, arg);
		break;
	default:
		break;
	}
	return (error);
}
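
/*
 * Illustrative encoding (not from the source): "orl $0x1,(%rax)" is
 * 83 08 01.  ModRM 0x08 decodes to mod = 0, reg = 1 (the /1 OR
 * extension checked above) and rm = 0; the imm8 0x01 is sign-extended
 * by decode_immediate() before being ORed into the memory operand.
 */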

int
vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
			mem_region_read_t memread, mem_region_write_t memwrite,
			void *memarg)
{
	int error;

	if (!vie->decoded)
		return (EINVAL);

	switch (vie->op.op_type) {
	case VIE_OP_TYPE_MOV:
		error = emulate_mov(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_AND:
		error = emulate_and(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	case VIE_OP_TYPE_OR:
		error = emulate_or(vm, vcpuid, gpa, vie,
				    memread, memwrite, memarg);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

#ifdef _KERNEL
void
vie_init(struct vie *vie)
{

	bzero(vie, sizeof(struct vie));

	vie->base_register = VM_REG_LAST;
	vie->index_register = VM_REG_LAST;
}

static int
gla2gpa(struct vm *vm, uint64_t gla, uint64_t ptpphys,
	uint64_t *gpa, uint64_t *gpaend)
{
	int nlevels, ptpshift, ptpindex;
	uint64_t *ptpbase, pte, pgsize;
	void *cookie;

	/*
	 * XXX assumes 64-bit guest with 4 page walk levels
	 */
	nlevels = 4;
	while (--nlevels >= 0) {
		/* Zero out the lower 12 bits and the upper 12 bits */
		ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;

		ptpbase = vm_gpa_hold(vm, ptpphys, PAGE_SIZE, VM_PROT_READ,
				      &cookie);
		if (ptpbase == NULL)
			goto error;

		ptpshift = PAGE_SHIFT + nlevels * 9;
		ptpindex = (gla >> ptpshift) & 0x1FF;
		pgsize = 1UL << ptpshift;

		pte = ptpbase[ptpindex];

		vm_gpa_release(cookie);

		if ((pte & PG_V) == 0)
			goto error;

		if (pte & PG_PS) {
			if (pgsize > 1 * GB)
				goto error;
			else
				break;
		}

		ptpphys = pte;
	}

	/* Zero out the lower 'ptpshift' bits and the upper 12 bits */
	pte >>= ptpshift; pte <<= (ptpshift + 12); pte >>= 12;
	*gpa = pte | (gla & (pgsize - 1));
	*gpaend = pte + pgsize;
	return (0);

error:
	return (-1);
}
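
/*
 * Worked example of the index computation above (assumed 4KB-mapped gla,
 * for illustration only): for gla = 0x00007f7fdeadb000 the walk visits
 *
 *	nlevels = 3: ptpshift = 39, ptpindex = (gla >> 39) & 0x1ff = 0x0fe
 *	nlevels = 2: ptpshift = 30, ptpindex = (gla >> 30) & 0x1ff = 0x1ff
 *	nlevels = 1: ptpshift = 21, ptpindex = (gla >> 21) & 0x1ff = 0x0f5
 *	nlevels = 0: ptpshift = 12, ptpindex = (gla >> 12) & 0x1ff = 0x0db
 *
 * and the final gpa is the 4KB page frame from the leaf pte plus the
 * page offset (gla & 0xfff).
 */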

int
vmm_fetch_instruction(struct vm *vm, int cpuid, uint64_t rip, int inst_length,
		      uint64_t cr3, struct vie *vie)
{
	int n, err, prot;
	uint64_t gpa, gpaend, off;
	void *hpa, *cookie;

	/*
	 * XXX cache previously fetched instructions using 'rip' as the tag
	 */

	prot = VM_PROT_READ | VM_PROT_EXECUTE;
	if (inst_length > VIE_INST_SIZE)
		panic("vmm_fetch_instruction: invalid length %d", inst_length);

	/* Copy the instruction into 'vie' */
	while (vie->num_valid < inst_length) {
		err = gla2gpa(vm, rip, cr3, &gpa, &gpaend);
		if (err)
			break;

		off = gpa & PAGE_MASK;
		n = min(inst_length - vie->num_valid, PAGE_SIZE - off);

		if ((hpa = vm_gpa_hold(vm, gpa, n, prot, &cookie)) == NULL)
			break;

		bcopy(hpa, &vie->inst[vie->num_valid], n);

		vm_gpa_release(cookie);

		rip += n;
		vie->num_valid += n;
	}

	if (vie->num_valid == inst_length)
		return (0);
	else
		return (-1);
}

static int
vie_peek(struct vie *vie, uint8_t *x)
{

	if (vie->num_processed < vie->num_valid) {
		*x = vie->inst[vie->num_processed];
		return (0);
	} else
		return (-1);
}

static void
vie_advance(struct vie *vie)
{

	vie->num_processed++;
}

static int
decode_rex(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	if (x >= 0x40 && x <= 0x4F) {
		vie->rex_present = 1;

		vie->rex_w = x & 0x8 ? 1 : 0;
		vie->rex_r = x & 0x4 ? 1 : 0;
		vie->rex_x = x & 0x2 ? 1 : 0;
		vie->rex_b = x & 0x1 ? 1 : 0;

		vie_advance(vie);
	}

	return (0);
}
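
/*
 * Example (illustration only): the REX prefix 0x48 (0100 1000b) sets
 * rex_present = 1 and rex_w = 1 with rex_r = rex_x = rex_b = 0, which
 * is how emulate_mov() ends up selecting an 8-byte operand size for an
 * encoding such as 48 89 /r.
 */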

static int
decode_opcode(struct vie *vie)
{
	uint8_t x;

	if (vie_peek(vie, &x))
		return (-1);

	vie->op = one_byte_opcodes[x];

	if (vie->op.op_type == VIE_OP_TYPE_NONE)
		return (-1);

	vie_advance(vie);
	return (0);
}

static int
decode_modrm(struct vie *vie)
{
	uint8_t x;
	enum cpu_mode cpu_mode;

	/*
	 * XXX assuming that guest is in IA-32E 64-bit mode
	 */
	cpu_mode = CPU_MODE_64BIT;

	if (vie_peek(vie, &x))
		return (-1);

	vie->mod = (x >> 6) & 0x3;
	vie->rm =  (x >> 0) & 0x7;
	vie->reg = (x >> 3) & 0x7;

	/*
	 * A direct addressing mode makes no sense in the context of an EPT
	 * fault. There has to be a memory access involved to cause the
	 * EPT fault.
	 */
	if (vie->mod == VIE_MOD_DIRECT)
		return (-1);

	if ((vie->mod == VIE_MOD_INDIRECT && vie->rm == VIE_RM_DISP32) ||
	    (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)) {
		/*
		 * Table 2-5: Special Cases of REX Encodings
		 *
		 * mod=0, r/m=5 is used in the compatibility mode to
		 * indicate a disp32 without a base register.
		 *
		 * mod!=3, r/m=4 is used in the compatibility mode to
		 * indicate that the SIB byte is present.
		 *
		 * The 'b' bit in the REX prefix is don't care in
		 * this case.
		 */
	} else {
		vie->rm |= (vie->rex_b << 3);
	}

	vie->reg |= (vie->rex_r << 3);

	/* SIB */
	if (vie->mod != VIE_MOD_DIRECT && vie->rm == VIE_RM_SIB)
		goto done;

	vie->base_register = gpr_map[vie->rm];

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	case VIE_MOD_INDIRECT:
		if (vie->rm == VIE_RM_DISP32) {
			vie->disp_bytes = 4;
			/*
			 * Table 2-7. RIP-Relative Addressing
			 *
			 * In 64-bit mode mod=00 r/m=101 implies [rip] + disp32
			 * whereas in compatibility mode it just implies disp32.
			 */

			if (cpu_mode == CPU_MODE_64BIT)
				vie->base_register = VM_REG_GUEST_RIP;
			else
				vie->base_register = VM_REG_LAST;
		}
		break;
	}

done:
	vie_advance(vie);

	return (0);
}
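
/*
 * Example decode (illustration only): ModRM byte 0x4b (01 001 011b) with
 * no REX prefix yields mod = VIE_MOD_INDIRECT_DISP8, reg = 1 (%rcx) and
 * rm = 3 (%rbx), so base_register = VM_REG_GUEST_RBX and disp_bytes = 1,
 * i.e. an operand of the form disp8(%rbx).
 */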

static int
decode_sib(struct vie *vie)
{
	uint8_t x;

	/* Proceed only if SIB byte is present */
	if (vie->mod == VIE_MOD_DIRECT || vie->rm != VIE_RM_SIB)
		return (0);

	if (vie_peek(vie, &x))
		return (-1);

	/* De-construct the SIB byte */
	vie->ss = (x >> 6) & 0x3;
	vie->index = (x >> 3) & 0x7;
	vie->base = (x >> 0) & 0x7;

	/* Apply the REX prefix modifiers */
	vie->index |= vie->rex_x << 3;
	vie->base |= vie->rex_b << 3;

	switch (vie->mod) {
	case VIE_MOD_INDIRECT_DISP8:
		vie->disp_bytes = 1;
		break;
	case VIE_MOD_INDIRECT_DISP32:
		vie->disp_bytes = 4;
		break;
	}

	if (vie->mod == VIE_MOD_INDIRECT &&
	    (vie->base == 5 || vie->base == 13)) {
		/*
		 * Special case when base register is unused if mod = 0
		 * and base = %rbp or %r13.
		 *
		 * Documented in:
		 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
		 * Table 2-5: Special Cases of REX Encodings
		 */
		vie->disp_bytes = 4;
	} else {
		vie->base_register = gpr_map[vie->base];
	}

	/*
	 * All encodings of 'index' are valid except for %rsp (4).
	 *
	 * Documented in:
	 * Table 2-3: 32-bit Addressing Forms with the SIB Byte
	 * Table 2-5: Special Cases of REX Encodings
	 */
	if (vie->index != 4)
		vie->index_register = gpr_map[vie->index];

	/* 'scale' makes sense only in the context of an index register */
	if (vie->index_register < VM_REG_LAST)
		vie->scale = 1 << vie->ss;

	vie_advance(vie);

	return (0);
}
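
/*
 * Example decode (illustration only): for "mov %ecx,(%rsi,%rbx,4)",
 * encoded 89 0c 9e, the SIB byte 0x9e (10 011 110b) yields ss = 2,
 * index = 3 (%rbx) and base = 6 (%rsi), so scale = 1 << 2 = 4,
 * index_register = VM_REG_GUEST_RBX and base_register = VM_REG_GUEST_RSI.
 */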

static int
decode_displacement(struct vie *vie)
{
	int n, i;
	uint8_t x;

	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	if ((n = vie->disp_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_displacement: invalid disp_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->displacement = u.signed8;		/* sign-extended */
	else
		vie->displacement = u.signed32;		/* sign-extended */

	return (0);
}

static int
decode_immediate(struct vie *vie)
{
	int i, n;
	uint8_t x;
	union {
		char	buf[4];
		int8_t	signed8;
		int32_t	signed32;
	} u;

	/* Figure out immediate operand size (if any) */
	if (vie->op.op_flags & VIE_OP_F_IMM)
		vie->imm_bytes = 4;
	else if (vie->op.op_flags & VIE_OP_F_IMM8)
		vie->imm_bytes = 1;

	if ((n = vie->imm_bytes) == 0)
		return (0);

	if (n != 1 && n != 4)
		panic("decode_immediate: invalid imm_bytes %d", n);

	for (i = 0; i < n; i++) {
		if (vie_peek(vie, &x))
			return (-1);

		u.buf[i] = x;
		vie_advance(vie);
	}

	if (n == 1)
		vie->immediate = u.signed8;		/* sign-extended */
	else
		vie->immediate = u.signed32;		/* sign-extended */

	return (0);
}
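
/*
 * Sign-extension example (illustration only): an imm8 byte of 0xf0 is
 * stored via u.signed8, so vie->immediate becomes 0xfffffffffffffff0
 * (-16).  This is what makes "or r/m64, imm8" behave as the
 * sign-extended form documented in emulate_or().
 */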

/*
 * Verify that all the bytes in the instruction buffer were consumed.
 */
static int
verify_inst_length(struct vie *vie)
{

	if (vie->num_processed == vie->num_valid)
		return (0);
	else
		return (-1);
}

/*
 * Verify that the 'guest linear address' provided as collateral of the nested
 * page table fault matches with our instruction decoding.
 */
static int
verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{
	int error;
	uint64_t base, idx;

	/* Skip 'gla' verification */
	if (gla == VIE_INVALID_GLA)
		return (0);

	base = 0;
	if (vie->base_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->base_register, &base);
		if (error) {
			printf("verify_gla: error %d getting base reg %d\n",
				error, vie->base_register);
			return (-1);
		}

		/*
		 * RIP-relative addressing starts from the following
		 * instruction
		 */
		if (vie->base_register == VM_REG_GUEST_RIP)
			base += vie->num_valid;
	}

	idx = 0;
	if (vie->index_register != VM_REG_LAST) {
		error = vm_get_register(vm, cpuid, vie->index_register, &idx);
		if (error) {
			printf("verify_gla: error %d getting index reg %d\n",
				error, vie->index_register);
			return (-1);
		}
	}

	if (base + vie->scale * idx + vie->displacement != gla) {
		printf("verify_gla mismatch: "
		       "base(0x%0lx), scale(%d), index(0x%0lx), "
		       "disp(0x%0lx), gla(0x%0lx)\n",
		       base, vie->scale, idx, vie->displacement, gla);
		return (-1);
	}

	return (0);
}
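
/*
 * Numeric example of the check above (assumed register values, for
 * illustration only): for disp8(%rbx,%rcx,4) with %rbx = 0x1000,
 * %rcx = 0x2 and displacement = 0x10, the computed address is
 * 0x1000 + 4 * 0x2 + 0x10 = 0x1018, which must equal the gla reported
 * with the nested page table fault.
 */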

int
vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie)
{

	if (decode_rex(vie))
		return (-1);

	if (decode_opcode(vie))
		return (-1);

	if (decode_modrm(vie))
		return (-1);

	if (decode_sib(vie))
		return (-1);

	if (decode_displacement(vie))
		return (-1);

	if (decode_immediate(vie))
		return (-1);

	if (verify_inst_length(vie))
		return (-1);

	if (verify_gla(vm, cpuid, gla, vie))
		return (-1);

	vie->decoded = 1;	/* success */

	return (0);
}
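
/*
 * End-to-end decode sketch (illustration only): the three bytes
 * 48 89 08 ("mov %rcx,(%rax)") pass through the stages above as
 *
 *	decode_rex:		rex_present = 1, rex_w = 1
 *	decode_opcode:		op = one_byte_opcodes[0x89] (MOV)
 *	decode_modrm:		mod = 0, reg = 1 (%rcx), rm = 0 (%rax)
 *	decode_sib/disp/imm:	nothing to consume
 *
 * after which verify_inst_length() sees all 3 bytes processed and
 * verify_gla() checks that %rax matches the faulting address.
 */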
#endif	/* _KERNEL */