vmm_instruction_emul.c (273375 -> 277360)
1/*-
2 * Copyright (c) 2012 Sandvine, Inc.
3 * Copyright (c) 2012 NetApp, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 10 unchanged lines hidden ---

19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: head/sys/amd64/vmm/vmm_instruction_emul.c 273375 2014-10-21 07:10:43Z neel $
27 * $FreeBSD: head/sys/amd64/vmm/vmm_instruction_emul.c 277360 2015-01-19 06:53:31Z neel $
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/amd64/vmm/vmm_instruction_emul.c 273375 2014-10-21 07:10:43Z neel $");
31__FBSDID("$FreeBSD: head/sys/amd64/vmm/vmm_instruction_emul.c 277360 2015-01-19 06:53:31Z neel $");
32
33#ifdef _KERNEL
34#include <sys/param.h>
35#include <sys/pcpu.h>
36#include <sys/systm.h>
37#include <sys/proc.h>
38
39#include <vm/vm.h>

--- 25 unchanged lines hidden ---

65 VIE_OP_TYPE_MOVZX,
66 VIE_OP_TYPE_AND,
67 VIE_OP_TYPE_OR,
68 VIE_OP_TYPE_SUB,
69 VIE_OP_TYPE_TWO_BYTE,
70 VIE_OP_TYPE_PUSH,
71 VIE_OP_TYPE_CMP,
72 VIE_OP_TYPE_POP,
73 VIE_OP_TYPE_MOVS,
74 VIE_OP_TYPE_LAST
75};
76
77/* struct vie_op.op_flags */
78#define VIE_OP_F_IMM (1 << 0) /* 16/32-bit immediate operand */
79#define VIE_OP_F_IMM8 (1 << 1) /* 8-bit immediate operand */
80#define VIE_OP_F_MOFFSET (1 << 2) /* 16/32/64-bit immediate moffset */
81#define VIE_OP_F_NO_MODRM (1 << 3)
82#define VIE_OP_F_NO_GLA_VERIFICATION (1 << 4)
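/*
 * VIE_OP_F_NO_GLA_VERIFICATION is consulted at the end of instruction
 * decoding: opcode entries that set it (the MOVS forms below) are exempted
 * from the verify_gla() cross-check.
 */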
83
84static const struct vie_op two_byte_opcodes[256] = {
85 [0xB6] = {
86 .op_byte = 0xB6,
87 .op_type = VIE_OP_TYPE_MOVZX,
88 },
89 [0xB7] = {
90 .op_byte = 0xB7,

--- 39 unchanged lines hidden ---

130 .op_type = VIE_OP_TYPE_MOV,
131 .op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
132 },
133 [0xA3] = {
134 .op_byte = 0xA3,
135 .op_type = VIE_OP_TYPE_MOV,
136 .op_flags = VIE_OP_F_MOFFSET | VIE_OP_F_NO_MODRM,
137 },
138 [0xA4] = {
139 .op_byte = 0xA4,
140 .op_type = VIE_OP_TYPE_MOVS,
141 .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
142 },
143 [0xA5] = {
144 .op_byte = 0xA5,
145 .op_type = VIE_OP_TYPE_MOVS,
146 .op_flags = VIE_OP_F_NO_MODRM | VIE_OP_F_NO_GLA_VERIFICATION
147 },
148 [0xC6] = {
149 /* XXX Group 11 extended opcode - not just MOV */
150 .op_byte = 0xC6,
151 .op_type = VIE_OP_TYPE_MOV,
152 .op_flags = VIE_OP_F_IMM8,
153 },
154 [0xC7] = {
155 .op_byte = 0xC7,

--- 410 unchanged lines hidden ---

566 error = vie_update_register(vm, vcpuid, reg, val, size);
567 break;
568 default:
569 break;
570 }
571 return (error);
572}
573
574/*
575 * Helper function to calculate and validate a linear address.
576 *
577 * Returns 0 on success and 1 if an exception was injected into the guest.
578 */
562static int
579static int
580get_gla(void *vm, int vcpuid, struct vie *vie, struct vm_guest_paging *paging,
581 int opsize, int addrsize, int prot, enum vm_reg_name seg,
582 enum vm_reg_name gpr, uint64_t *gla)
583{
584 struct seg_desc desc;
585 uint64_t cr0, val, rflags;
586 int error;
587
588 error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
589 KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));
590
591 error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
592 KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
593
594 error = vm_get_seg_desc(vm, vcpuid, seg, &desc);
595 KASSERT(error == 0, ("%s: error %d getting segment descriptor %d",
596 __func__, error, seg));
597
598 error = vie_read_register(vm, vcpuid, gpr, &val);
599 KASSERT(error == 0, ("%s: error %d getting register %d", __func__,
600 error, gpr));
601
602 if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
603 addrsize, prot, gla)) {
604 if (seg == VM_REG_GUEST_SS)
605 vm_inject_ss(vm, vcpuid, 0);
606 else
607 vm_inject_gp(vm, vcpuid);
608 return (1);
609 }
610
611 if (vie_canonical_check(paging->cpu_mode, *gla)) {
612 if (seg == VM_REG_GUEST_SS)
613 vm_inject_ss(vm, vcpuid, 0);
614 else
615 vm_inject_gp(vm, vcpuid);
616 return (1);
617 }
618
619 if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla)) {
620 vm_inject_ac(vm, vcpuid, 0);
621 return (1);
622 }
623
624 return (0);
625}
626
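A rough picture of the address formation that get_gla() delegates to vie_calculate_gla(): in a flat model the linear address is the segment base plus the register value truncated to the instruction's address size. The sketch below is a deliberate simplification (no limit, access-rights or 64-bit %fs/%gs handling) and the function name is illustrative, not part of the source.

static uint64_t
gla_flat_model(uint64_t seg_base, uint64_t gpr, int addrsize)
{
	/* Effective address: the register truncated to the address size. */
	uint64_t offset = gpr & vie_size2mask(addrsize);

	/* Flat-model linear address: segment base plus effective address. */
	return (seg_base + offset);
}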
627static int
628emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
629 struct vm_guest_paging *paging, mem_region_read_t memread,
630 mem_region_write_t memwrite, void *arg)
631{
632#ifdef _KERNEL
633 struct vm_copyinfo copyinfo[2];
634#else
635 struct iovec copyinfo[2];
636#endif
637 uint64_t dstaddr, srcaddr, val;
638 uint64_t rcx, rdi, rsi, rflags;
639 int error, opsize, seg, repeat;
640
641 opsize = (vie->op.op_byte == 0xA4) ? 1 : vie->opsize;
642 val = 0;
643 error = 0;
644
645 /*
646 * XXX although the MOVS instruction is only supposed to be used with
647 * the "rep" prefix some guests like FreeBSD will use "repnz" instead.
648 *
649 * Empirically the "repnz" prefix has identical behavior to "rep"
650 * and the zero flag does not make a difference.
651 */
652 repeat = vie->repz_present | vie->repnz_present;
653
654 if (repeat) {
655 error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
656 KASSERT(!error, ("%s: error %d getting rcx", __func__, error));
657
658 /*
659 * The count register is %rcx, %ecx or %cx depending on the
660 * address size of the instruction.
661 */
662 if ((rcx & vie_size2mask(vie->addrsize)) == 0)
663 return (0);
664 }
665
666 /*
667 * Source Destination Comments
668 * --------------------------------------------
669 * (1) memory memory n/a
670 * (2) memory mmio emulated
671 * (3) mmio memory emulated
672 * (4) mmio mmio not emulated
673 *
674 * At this point we don't have sufficient information to distinguish
675 * between (2), (3) and (4). We use 'vm_copy_setup()' to tease this
676 * out because it will succeed only when operating on regular memory.
677 *
678 * XXX the emulation doesn't properly handle the case where 'gpa'
679 * is straddling the boundary between the normal memory and MMIO.
680 */
681
682 seg = vie->segment_override ? vie->segment_register : VM_REG_GUEST_DS;
683 error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
684 PROT_READ, seg, VM_REG_GUEST_RSI, &srcaddr);
685 if (error)
686 goto done;
687
688 error = vm_copy_setup(vm, vcpuid, paging, srcaddr, opsize, PROT_READ,
689 copyinfo, nitems(copyinfo));
690 if (error == 0) {
691 /*
692 * case (2): read from system memory and write to mmio.
693 */
694 vm_copyin(vm, vcpuid, copyinfo, &val, opsize);
695 vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
696 error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
697 goto done;
698 } else if (error > 0) {
699 /*
700 * Resume guest execution to handle fault.
701 */
702 goto done;
703 } else {
704 /*
705 * 'vm_copy_setup()' is expected to fail for cases (3) and (4)
706 * if 'srcaddr' is in the mmio space.
707 */
708 }
709
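	/*
	 * The destination of MOVS is always ES:(E/R)DI; a segment override
	 * prefix changes only the source segment, which is why
	 * VM_REG_GUEST_ES is used unconditionally here.
	 */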
710 error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
711 PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr);
712 if (error)
713 goto done;
714
715 error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
716 PROT_WRITE, copyinfo, nitems(copyinfo));
717 if (error == 0) {
718 /*
719 * case (3): read from MMIO and write to system memory.
720 *
721 * A MMIO read can have side-effects so we commit to it
722 * only after vm_copy_setup() is successful. If a page-fault
723 * needs to be injected into the guest then it will happen
724 * before the MMIO read is attempted.
725 */
726 error = memread(vm, vcpuid, gpa, &val, opsize, arg);
727 if (error)
728 goto done;
729
730 vm_copyout(vm, vcpuid, &val, copyinfo, opsize);
731 vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
732 } else if (error > 0) {
733 /*
734 * Resume guest execution to handle fault.
735 */
736 goto done;
737 } else {
738 goto done;
739 }
740
741 error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSI, &rsi);
742 KASSERT(error == 0, ("%s: error %d getting rsi", __func__, error));
743
744 error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
745 KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));
746
747 error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
748 KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
749
750 if (rflags & PSL_D) {
751 rsi -= opsize;
752 rdi -= opsize;
753 } else {
754 rsi += opsize;
755 rdi += opsize;
756 }
757
758 error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSI, rsi,
759 vie->addrsize);
760 KASSERT(error == 0, ("%s: error %d updating rsi", __func__, error));
761
762 error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
763 vie->addrsize);
764 KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));
765
766 if (repeat) {
767 rcx = rcx - 1;
768 error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
769 rcx, vie->addrsize);
770 KASSERT(!error, ("%s: error %d updating rcx", __func__, error));
771
772 /*
773 * Repeat the instruction if the count register is not zero.
774 */
775 if ((rcx & vie_size2mask(vie->addrsize)) != 0)
776 vm_restart_instruction(vm, vcpuid);
777 }
778done:
779 if (error < 0)
780 return (EFAULT);
781 else
782 return (0);
783}
784
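For a sense of what this handler deals with, the fragment below shows the kind of guest code that reaches it: a string copy from ordinary memory into an MMIO region, i.e. case (2) in the table above. The function and the framebuffer-style destination are illustrative assumptions rather than anything taken from this change; the point is simply that the faulting access is a "rep movs", which emulate_movs() completes one element at a time.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical guest-side copy into a memory-mapped region. */
static void
guest_mmio_blit(volatile uint8_t *mmio_dst, const uint8_t *src, size_t len)
{
	/* "rep movsb" copies %rcx bytes from %ds:(%rsi) to %es:(%rdi). */
	__asm__ __volatile__("rep movsb"
	    : "+D" (mmio_dst), "+S" (src), "+c" (len)
	    :
	    : "memory");
}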
785static int
786emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
787 mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
788{
789 int error, size;
790 enum vm_reg_name reg;
791 uint64_t result, rflags, rflags2, val1, val2;
792
793 size = vie->opsize;

--- 350 unchanged lines hidden ---

1144 error = memread(vm, vcpuid, mmio_gpa, &val, size, arg);
1145 if (error == 0)
1146 vm_copyout(vm, vcpuid, &val, copyinfo, size);
1147 } else {
1148 vm_copyin(vm, vcpuid, copyinfo, &val, size);
1149 error = memwrite(vm, vcpuid, mmio_gpa, val, size, arg);
1150 rsp += size;
1151 }
929#ifdef _KERNEL
1152 vm_copy_teardown(vm, vcpuid, copyinfo, nitems(copyinfo));
931#endif
1153
1154 if (error == 0) {
1155 error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSP, rsp,
1156 stackaddrsize);
1157 KASSERT(error == 0, ("error %d updating rsp", error));
1158 }
1159 return (error);
1160}

--- 67 unchanged lines hidden ---

1228 error = emulate_mov(vm, vcpuid, gpa, vie,
1229 memread, memwrite, memarg);
1230 break;
1231 case VIE_OP_TYPE_MOVSX:
1232 case VIE_OP_TYPE_MOVZX:
1233 error = emulate_movx(vm, vcpuid, gpa, vie,
1234 memread, memwrite, memarg);
1235 break;
1236 case VIE_OP_TYPE_MOVS:
1237 error = emulate_movs(vm, vcpuid, gpa, vie, paging, memread,
1238 memwrite, memarg);
1239 break;
1240 case VIE_OP_TYPE_AND:
1241 error = emulate_and(vm, vcpuid, gpa, vie,
1242 memread, memwrite, memarg);
1243 break;
1244 case VIE_OP_TYPE_OR:
1245 error = emulate_or(vm, vcpuid, gpa, vie,
1246 memread, memwrite, memarg);
1247 break;

--- 165 unchanged lines hidden ---

1413{
1414 KASSERT(inst_length >= 0 && inst_length <= VIE_INST_SIZE,
1415 ("%s: invalid instruction length (%d)", __func__, inst_length));
1416
1417 bzero(vie, sizeof(struct vie));
1418
1419 vie->base_register = VM_REG_LAST;
1420 vie->index_register = VM_REG_LAST;
1421 vie->segment_register = VM_REG_LAST;
1422
1423 if (inst_length) {
1424 bcopy(inst_bytes, vie->inst, inst_length);
1425 vie->num_valid = inst_length;
1426 }
1427}
1428
1429static int

--- 249 unchanged lines hidden ---

1679
1680static void
1681vie_advance(struct vie *vie)
1682{
1683
1684 vie->num_processed++;
1685}
1686
1687static bool
1688segment_override(uint8_t x, int *seg)
1689{
1690
1691 switch (x) {
1692 case 0x2E:
1693 *seg = VM_REG_GUEST_CS;
1694 break;
1695 case 0x36:
1696 *seg = VM_REG_GUEST_SS;
1697 break;
1698 case 0x3E:
1699 *seg = VM_REG_GUEST_DS;
1700 break;
1701 case 0x26:
1702 *seg = VM_REG_GUEST_ES;
1703 break;
1704 case 0x64:
1705 *seg = VM_REG_GUEST_FS;
1706 break;
1707 case 0x65:
1708 *seg = VM_REG_GUEST_GS;
1709 break;
1710 default:
1711 return (false);
1712 }
1713 return (true);
1714}
1715
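The decode side of the change can be pictured with a concrete byte sequence. The three bytes below encode a REP MOVSB with an %fs segment override (an illustrative sequence, not taken from the source); decode_prefixes() records the 0xF3 and 0x64 prefixes before the 0xA4 opcode byte is looked up.

static const uint8_t rep_fs_movsb[] = {
	0xf3,	/* REP prefix: vie->repz_present = 1 */
	0x64,	/* %fs override: segment_override() yields VM_REG_GUEST_FS */
	0xa4,	/* MOVSB opcode, handled as VIE_OP_TYPE_MOVS */
};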
1716static int
1717decode_prefixes(struct vie *vie, enum vm_cpu_mode cpu_mode, int cs_d)
1718{
1719 uint8_t x;
1720
1721 while (1) {
1722 if (vie_peek(vie, &x))
1723 return (-1);
1724
1725 if (x == 0x66)
1726 vie->opsize_override = 1;
1727 else if (x == 0x67)
1728 vie->addrsize_override = 1;
1729 else if (x == 0xF3)
1730 vie->repz_present = 1;
1731 else if (x == 0xF2)
1732 vie->repnz_present = 1;
1733 else if (segment_override(x, &vie->segment_register))
1734 vie->segment_override = 1;
1735 else
1736 break;
1737
1738 vie_advance(vie);
1739 }
1740
1741 /*
1742 * From section 2.2.1, "REX Prefixes", Intel SDM Vol 2:

--- 436 unchanged lines hidden ---

2179 return (-1);
2180
2181 if (decode_moffset(vie))
2182 return (-1);
2183
2184 if (verify_inst_length(vie))
2185 return (-1);
2186
1926 if (verify_gla(vm, cpuid, gla, vie))
1927 return (-1);
2187 if ((vie->op.op_flags & VIE_OP_F_NO_GLA_VERIFICATION) == 0) {
2188 if (verify_gla(vm, cpuid, gla, vie))
2189 return (-1);
2190 }
2191
2192 vie->decoded = 1; /* success */
2193
2194 return (0);
2195}
2196#endif /* _KERNEL */