Lines matching refs:vcpu (FreeBSD bhyve AMD SVM backend, svm.c)

133 static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val);
327 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
363 svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask)
369 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
374 svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask,
382 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
391 svm_set_dirty(sc, vcpu, VMCB_CACHE_I);
392 VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified "
398 svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
401 svm_set_intercept(sc, vcpu, off, bitmask, 0);
405 svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask)
408 svm_set_intercept(sc, vcpu, off, bitmask, 1);
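
All four helpers above funnel into one read-modify-write of a VMCB intercept vector, with a dirty-bit update so the hardware reloads the vector on the next VMRUN. A minimal user-space sketch of that pattern; struct vmcb_ctrl_model and the vector count are stand-ins, not bhyve's real struct vmcb_ctrl:

    #include <stdint.h>
    #include <stdio.h>

    struct vmcb_ctrl_model {
            uint32_t intercept[5];  /* assumed vectors: CR, DR, EXC, CTRL1, CTRL2 */
    };

    static void
    model_set_intercept(struct vmcb_ctrl_model *ctrl, int idx, uint32_t bitmask,
        int enabled)
    {
            uint32_t oldval = ctrl->intercept[idx];

            if (enabled)
                    ctrl->intercept[idx] |= bitmask;
            else
                    ctrl->intercept[idx] &= ~bitmask;

            if (ctrl->intercept[idx] != oldval) {
                    /* the real helper marks VMCB_CACHE_I dirty here and logs
                       "intercept[%d] modified", as line 392 above shows */
                    printf("intercept[%d] modified from %#x to %#x\n",
                        idx, oldval, ctrl->intercept[idx]);
            }
    }

svm_enable_intercept() and svm_disable_intercept() are this call with enabled pinned to 1 and 0 respectively.
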
412 vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa,
420 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
421 state = svm_get_vmcb_state(sc, vcpu);
437 svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
439 svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask);
447 if (vcpu_trace_exceptions(sc->vm, vcpu)) {
455 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n));
458 svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
462 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
463 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
464 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
465 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
466 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
467 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
468 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
469 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
470 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
472 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
473 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
475 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
476 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
482 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
483 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
484 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
485 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
486 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
487 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
493 svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
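
Most of the intercepts vmcb_init() enables are policy, but the VMRUN intercept is mandatory: the processor refuses VMRUN with VMEXIT_INVALID if the VMCB does not intercept guest VMRUN, which is why it sits unconditionally at the end of the list. The batch of calls reads naturally as a table walk; a sketch reusing model_set_intercept() and struct vmcb_ctrl_model from the previous sketch, with illustrative bit values (the real ones live in vmcb.h):

    #include <stddef.h>

    /* illustrative bit values only */
    #define M_INTCPT_MONITOR  (1u << 10)
    #define M_INTCPT_MWAIT    (1u << 11)
    #define M_INTCPT_VMRUN    (1u << 0)

    static void
    model_init_ctrl2_intercepts(struct vmcb_ctrl_model *ctrl)
    {
            static const uint32_t ctrl2_bits[] = {
                    M_INTCPT_MONITOR, M_INTCPT_MWAIT,
                    /* ...VMLOAD, VMSAVE, STGI, CLGI, SKINIT, ICEBP... */
                    M_INTCPT_VMRUN,         /* never optional */
            };

            for (size_t i = 0; i < sizeof(ctrl2_bits) / sizeof(ctrl2_bits[0]); i++)
                    model_set_intercept(ctrl, 4 /* CTRL2 vector */, ctrl2_bits[i], 1);
    }
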
537 struct svm_vcpu *vcpu;
596 vcpu = svm_get_vcpu(svm_sc, i);
597 vcpu->nextrip = ~0;
598 vcpu->lastcpu = NOCPU;
599 vcpu->vmcb_pa = vtophys(&vcpu->vmcb);
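
The three initializers above are all sentinels or cached values: nextrip starts at ~0 so the first entry can never look like "rip unchanged" (which would wrongly preserve a stale interrupt shadow; see the rip-change check near line 1607), lastcpu starts at NOCPU so the first svm_vmrun() takes the vcpu-migration path and marks the whole VMCB dirty, and vmcb_pa caches the physical address VMRUN takes in RAX. A toy version with stand-in types:

    #include <stdint.h>

    #define MODEL_NOCPU  (-1)       /* stand-in for the kernel's NOCPU */

    struct svm_vcpu_model {
            uint64_t nextrip;       /* rip expected at the next VM entry */
            int      lastcpu;       /* host cpu this vcpu last ran on */
            uint64_t vmcb_pa;       /* physical address handed to VMRUN */
    };

    static void
    model_vcpu_init(struct svm_vcpu_model *vcpu, uint64_t vmcb_pa)
    {
            vcpu->nextrip = ~(uint64_t)0;   /* matches no real rip */
            vcpu->lastcpu = MODEL_NOCPU;    /* forces the migration path once */
            vcpu->vmcb_pa = vmcb_pa;        /* vtophys(&vcpu->vmcb) in-kernel */
    }
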
698 svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1,
711 error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc);
752 svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
761 state = svm_get_vmcb_state(svm_sc, vcpu);
762 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
763 regs = svm_get_guest_regctx(svm_sc, vcpu);
789 svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging);
795 svm_inout_str_seginfo(svm_sc, vcpu, info1,
907 * Inject an event to vcpu as described in section 15.20, "Event injection".
910 svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector,
915 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
940 VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x",
943 VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d",
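
svm_eventinject() builds the VMCB EVENTINJ field described in the section 15.20 comment above: vector in bits 7:0, event type in bits 10:8 (0 external interrupt, 2 NMI, 3 exception, 4 software interrupt), an error-code-valid bit at 11, the valid bit at 31, and the error code itself in the high half. A packing sketch with illustrative macro names:

    #include <stdbool.h>
    #include <stdint.h>

    #define EVINJ_VALID     (1ull << 31)    /* V: injection armed */
    #define EVINJ_EC_VALID  (1ull << 11)    /* EV: push an error code */

    static uint64_t
    model_eventinj(unsigned type, unsigned vector, uint32_t errcode, bool ec_valid)
    {
            uint64_t ev;

            ev = EVINJ_VALID | ((uint64_t)(type & 0x7) << 8) | (vector & 0xffu);
            if (ec_valid)
                    ev |= EVINJ_EC_VALID | ((uint64_t)errcode << 32);
            return (ev);
    }

The two VCPU_CTR lines in the listing are the ec_valid and plain variants of the same trace.
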
949 svm_update_virqinfo(struct svm_softc *sc, int vcpu)
956 vlapic = vm_lapic(vm, vcpu);
957 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
968 svm_save_intinfo(struct svm_softc *svm_sc, int vcpu)
973 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
984 VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n",
986 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1);
987 vm_exit_intinfo(svm_sc->vm, vcpu, intinfo);
992 vintr_intercept_enabled(struct svm_softc *sc, int vcpu)
995 return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
1001 enable_intr_window_exiting(struct svm_softc *sc, int vcpu)
1005 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1009 KASSERT(vintr_intercept_enabled(sc, vcpu),
1014 VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting");
1018 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1019 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1023 disable_intr_window_exiting(struct svm_softc *sc, int vcpu)
1027 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1030 KASSERT(!vintr_intercept_enabled(sc, vcpu),
1035 VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting");
1038 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1039 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
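
enable_intr_window_exiting() is the standard SVM trick for "tell me when the guest can take an interrupt": post a dummy virtual interrupt (V_IRQ, with the TPR check suppressed) and intercept the VINTR that fires the moment the guest becomes interruptible; that #VMEXIT is the open window. A state-only model; the field names are stand-ins for the real vmcb_ctrl fields:

    struct vintr_model {
            int v_irq;              /* request a virtual interrupt */
            int v_ign_tpr;          /* deliver it regardless of guest TPR */
            int vintr_intercept;    /* exit when it would be taken */
    };

    static void
    model_enable_intr_window(struct vintr_model *m)
    {
            m->v_irq = 1;
            m->v_ign_tpr = 1;
            m->vintr_intercept = 1;     /* plus the VMCB_CACHE_TPR dirty bit */
    }

    static void
    model_disable_intr_window(struct vintr_model *m)
    {
            m->v_irq = 0;
            m->v_ign_tpr = 0;
            m->vintr_intercept = 0;
    }
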
1043 svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val)
1048 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1053 VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval);
1059 svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val)
1063 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1071 * to track when the vcpu is done handling the NMI.
1074 nmi_blocked(struct svm_softc *sc, int vcpu)
1078 blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
1084 enable_nmi_blocking(struct svm_softc *sc, int vcpu)
1087 KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked"));
1088 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled");
1089 svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1093 clear_nmi_blocking(struct svm_softc *sc, int vcpu)
1097 KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked"));
1098 VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared");
1100 * When the IRET intercept is cleared the vcpu will attempt to execute
1102 * another NMI into the vcpu before the "iret" has actually executed.
1106 * the vcpu it will be injected into the guest.
1110 svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1116 error = svm_modify_intr_shadow(sc, vcpu, 1);
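
SVM exposes no NMI-blocking bit the host can read, so the code infers it from its own IRET intercept, exactly as the comments around line 1100 describe: blocking begins when an NMI is injected, and the IRET #VMEXIT (which arrives before the iret actually retires) ends it, with a one-instruction interrupt shadow so the next NMI cannot land mid-iret. A small state machine capturing that:

    #include <stdbool.h>

    struct nmi_model {
            bool iret_intercepted;  /* true while a virtual NMI is in service */
            bool intr_shadow;
    };

    static bool
    model_nmi_blocked(const struct nmi_model *m)
    {
            return (m->iret_intercepted);
    }

    static void
    model_enable_nmi_blocking(struct nmi_model *m)      /* after injecting NMI */
    {
            m->iret_intercepted = true;
    }

    static void
    model_clear_nmi_blocking(struct nmi_model *m)       /* on the IRET #VMEXIT */
    {
            m->iret_intercepted = false;
            /* the exit precedes the iret's retirement, so block injection
               for one more instruction, as line 1116 does via intr_shadow */
            m->intr_shadow = true;
    }
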
1123 svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu)
1130 state = svm_get_vmcb_state(sc, vcpu);
1133 VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);
1157 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE))
1166 vme = vm_exitinfo(sc->vm, vcpu);
1173 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR))
1178 if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE))
1182 error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval);
1186 vm_inject_gp(sc->vm, vcpu);
1191 emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val,
1197 error = lapic_wrmsr(sc->vm, vcpu, num, val, retu);
1199 error = svm_write_efer(sc, vcpu, val, retu);
1201 error = svm_wrmsr(sc, vcpu, num, val, retu);
1207 emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu)
1215 error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu);
1217 error = svm_rdmsr(sc, vcpu, num, &result, retu);
1220 state = svm_get_vmcb_state(sc, vcpu);
1221 ctx = svm_get_guest_regctx(sc, vcpu);
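
emulate_wrmsr() and emulate_rdmsr() are thin dispatchers: APIC-range MSRs go to the local APIC emulation, EFER gets dedicated validation (svm_write_efer() checks NXE, FFXSR, and TCE against the guest's CPUID before committing), and everything else falls through to the generic svm_wrmsr()/svm_rdmsr(). A sketch of the wrmsr side; the APIC MSR range shown is the x2APIC one and is an assumption about what lapic_wrmsr() claims:

    #include <stdint.h>

    #define MODEL_MSR_EFER          0xc0000080u
    #define MODEL_MSR_X2APIC_FIRST  0x800u      /* assumed lapic-claimed range */
    #define MODEL_MSR_X2APIC_LAST   0x8ffu

    typedef int (*wrmsr_handler)(uint32_t num, uint64_t val);

    static int
    model_emulate_wrmsr(uint32_t num, uint64_t val, wrmsr_handler lapic_wr,
        wrmsr_handler efer_wr, wrmsr_handler generic_wr)
    {
            if (num >= MODEL_MSR_X2APIC_FIRST && num <= MODEL_MSR_X2APIC_LAST)
                    return (lapic_wr(num, val));    /* lapic_wrmsr() */
            if (num == MODEL_MSR_EFER)
                    return (efer_wr(num, val));     /* svm_write_efer() */
            return (generic_wr(num, val));          /* svm_wrmsr() */
    }
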
1302 svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit)
1313 ctx = svm_get_guest_regctx(svm_sc, vcpu);
1314 vmcb = svm_get_vmcb(svm_sc, vcpu);
1327 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1);
1346 svm_update_virqinfo(svm_sc, vcpu);
1347 svm_save_intinfo(svm_sc, vcpu);
1355 clear_nmi_blocking(svm_sc, vcpu);
1359 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1);
1363 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1);
1370 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1);
1380 VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler");
1384 error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2,
1414 VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d "
1430 VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception "
1432 error = vm_inject_exception(svm_sc->vm, vcpu, idtvec,
1446 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1);
1448 VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx",
1450 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
1461 VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx);
1462 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1);
1463 if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
1475 handled = svm_handle_io(svm_sc, vcpu, vmexit);
1476 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1);
1479 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1);
1480 handled = x86_emulate_cpuid(svm_sc->vm, vcpu,
1487 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
1493 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1);
1498 VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with "
1501 } else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) {
1505 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1);
1506 VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault "
1511 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1);
1512 VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault "
1534 vm_inject_ud(svm_sc->vm, vcpu);
1538 vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1);
1542 VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d",
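
svm_vmexit() has a fixed shape: a common prologue (svm_update_virqinfo() and svm_save_intinfo() run on every exit), then one switch on the hardware exit code where each arm bumps its VMEXIT_* counter and either finishes the exit in the kernel (handled = 1, guest resumes) or leaves vm_exit filled in for userland. A stripped-down sketch of that control flow with stand-in codes:

    enum model_exitcode { MX_CPUID, MX_IO, MX_MSR, MX_HLT, MX_NPF, MX_OTHER };

    static int
    model_vmexit(enum model_exitcode code)
    {
            int handled = 0;

            /* prologue: save V_IRQ/TPR state and any pending exit intinfo */
            switch (code) {
            case MX_CPUID:
                    /* x86_emulate_cpuid() */
                    handled = 1;
                    break;
            case MX_IO:
                    /* in the real code, handled = svm_handle_io(): string or
                       unmapped I/O may still have to go to userland */
                    handled = 1;
                    break;
            case MX_HLT:
            case MX_NPF:
            default:
                    /* exitcode recorded for vm_run()/userland to act on */
                    break;
            }
            return (handled);
    }
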
1568 svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu)
1572 if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo))
1578 svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
1582 vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1);
1583 VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo);
1590 svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic)
1599 state = svm_get_vmcb_state(sc, vcpu);
1600 ctrl = svm_get_vmcb_ctrl(sc, vcpu);
1601 vcpustate = svm_get_vcpu(sc, vcpu);
1607 VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking "
1613 * Inject pending events or exceptions for this vcpu.
1621 svm_inj_intinfo(sc, vcpu);
1624 if (vm_nmi_pending(sc->vm, vcpu)) {
1625 if (nmi_blocked(sc, vcpu)) {
1630 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due "
1634 * Can't inject an NMI if the vcpu is in an intr_shadow.
1636 VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to "
1645 VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to "
1660 vm_nmi_clear(sc->vm, vcpu);
1663 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI,
1667 enable_nmi_blocking(sc, vcpu);
1669 VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI");
1673 extint_pending = vm_extint_pending(sc->vm, vcpu);
1691 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
1698 VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to "
1705 VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to "
1711 svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
1716 vm_extint_clear(sc->vm, vcpu);
1721 * Force a VM-exit as soon as the vcpu is ready to accept another
1742 VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x",
1745 svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR);
1763 enable_intr_window_exiting(sc, vcpu);
1765 disable_intr_window_exiting(sc, vcpu);
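
svm_inj_interrupts() works through a strict priority order, one event per entry: first replay any intinfo saved at the previous exit, then a pending NMI (held off by live vNMI blocking or an interrupt shadow), then a legacy ExtINT, then the vlapic's highest pending vector (the last two also require RFLAGS.IF and no shadow); whatever could not be injected results in an interrupt window being opened instead. A simplified chooser that compresses the several VCPU_CTR'd corner cases into booleans:

    #include <stdbool.h>

    enum model_inj { INJ_NONE, INJ_INTINFO, INJ_NMI, INJ_EXTINT, INJ_VECTOR };

    struct pending_model {
            bool intinfo;           /* event saved at the last #VMEXIT */
            bool nmi, extint;
            int  vector;            /* vlapic vector, or -1 */
            bool nmi_blocked, intr_shadow, rflags_if;
    };

    static enum model_inj
    model_pick_injection(const struct pending_model *p, bool *open_window)
    {
            *open_window = false;
            if (p->intinfo)
                    return (INJ_INTINFO);
            if (p->nmi && !p->nmi_blocked && !p->intr_shadow)
                    return (INJ_NMI);
            if ((p->extint || p->vector >= 0) && p->rflags_if && !p->intr_shadow)
                    return (p->extint ? INJ_EXTINT : INJ_VECTOR);
            if (p->nmi || p->extint || p->vector >= 0)
                    *open_window = true;    /* exit once the guest can accept */
            return (INJ_NONE);
    }
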
1801 * The TLB entries associated with the vcpu's ASID are not valid
1804 * 1. The vcpu's ASID generation is different than the host cpu's
1805 * ASID generation. This happens when the vcpu migrates to a new
1823 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
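
The comment block above (lines 1801 onward) describes the ASID scheme: each host cpu owns a (generation, counter) pair, each vcpu caches the pair it was last assigned, and a generation mismatch means the vcpu's TLB tag can no longer be trusted. A sketch of the allocation step; it deliberately ignores the nested-page-table (eptgen) half of the real check_asid():

    #include <stdbool.h>
    #include <stdint.h>

    struct asid_model {
            uint64_t gen;           /* generation 0 is never valid */
            uint32_t num;
    };

    static bool
    model_check_asid(struct asid_model *hostcpu, struct asid_model *vcpu,
        uint32_t nasid, bool flush_by_asid, bool *flush_all)
    {
            *flush_all = false;
            if (vcpu->gen == hostcpu->gen)
                    return (false);         /* cached ASID still valid */

            if (++hostcpu->num >= nasid) {  /* ran out: recycle */
                    hostcpu->num = 1;       /* ASID 0 belongs to the host */
                    hostcpu->gen++;
                    if (!flush_by_asid)
                            *flush_all = true;  /* can't target one ASID */
            }
            vcpu->gen = hostcpu->gen;
            vcpu->num = hostcpu->num;
            return (true);          /* new ASID: mark VMCB_CACHE_ASID dirty */
    }
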
1958 * Start vcpu with specified RIP.
1961 svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
1979 vcpustate = svm_get_vcpu(svm_sc, vcpu);
1980 state = svm_get_vmcb_state(svm_sc, vcpu);
1981 ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu);
1982 vmexit = vm_exitinfo(vm, vcpu);
1983 vlapic = vm_lapic(vm, vcpu);
1985 gctx = svm_get_guest_regctx(svm_sc, vcpu);
1986 vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
1997 svm_set_dirty(svm_sc, vcpu, 0xffffffff);
2006 * This works for now but any new side-effects of vcpu
2010 vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1);
2013 svm_msr_guest_enter(svm_sc, vcpu);
2030 vm_exit_suspended(vm, vcpu, state->rip);
2036 vm_exit_rendezvous(vm, vcpu, state->rip);
2042 vm_exit_reqidle(vm, vcpu, state->rip);
2047 if (vcpu_should_yield(vm, vcpu)) {
2049 vm_exit_astpending(vm, vcpu, state->rip);
2062 svm_inj_interrupts(svm_sc, vcpu, vlapic);
2069 * ensure that the vcpu does not use stale TLB mappings.
2071 check_asid(svm_sc, vcpu, pmap, curcpu);
2075 VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
2078 VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
2102 handled = svm_vmexit(svm_sc, vcpu, vmexit);
2105 svm_msr_guest_exit(svm_sc, vcpu);
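
The svm_vmrun() lines above trace the entry loop, whose ordering is the load-bearing part: interrupts are disabled before the suspend/rendezvous/reqidle/astpending checks so an event posted after a check still kicks the cpu out of the guest, injection and the ASID check run last before the world switch, and MSR state is swapped once around the whole loop rather than per entry. A skeleton with stubbed-out predicates:

    #include <stdbool.h>

    static bool model_exit_request(void) { return (false); }  /* suspend etc. */
    static bool model_handle_exit(void)  { return (false); }  /* svm_vmexit() */

    static void
    model_run_loop(void)
    {
            bool handled = false;

            /* svm_msr_guest_enter(): switch to guest MSR state once */
            do {
                    /* disable_gintr(): nothing may sneak in past the checks */
                    if (model_exit_request()) {
                            /* vm_exit_suspended()/_rendezvous()/_astpending() */
                            break;
                    }
                    /* svm_inj_interrupts(); check_asid(); */
                    /* svm_launch(vmcb_pa, gctx, ...): the world switch */
                    /* enable_gintr() */
                    handled = model_handle_exit();
            } while (handled);
            /* svm_msr_guest_exit(): restore host MSR state */
    }
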
2167 svm_getreg(void *arg, int vcpu, int ident, uint64_t *val)
2175 return (svm_get_intr_shadow(svm_sc, vcpu, val));
2178 if (vmcb_read(svm_sc, vcpu, ident, val) == 0) {
2182 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
2189 VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident);
2194 svm_setreg(void *arg, int vcpu, int ident, uint64_t val)
2202 return (svm_modify_intr_shadow(svm_sc, vcpu, val));
2207 if (vmcb_write(svm_sc, vcpu, ident, val) == 0) {
2212 reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident);
2221 * vcpu's ASID. This needs to be treated differently depending on
2225 VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident);
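
svm_getreg()/svm_setreg() resolve a register in three steps: synthetic registers first (the interrupt shadow), then state the hardware keeps in the VMCB (vmcb_read()/vmcb_write()), then the general-purpose registers the software world switch saves in the guest register context (swctx_regptr()). A flattened sketch with a three-field stand-in state:

    #include <errno.h>
    #include <stdint.h>

    struct reg_model {
            uint64_t intr_shadow;   /* synthetic, derived from VMCB state */
            uint64_t vmcb_rip;      /* hardware-saved, lives in the VMCB */
            uint64_t swctx_rbx;     /* software-saved across the world switch */
    };

    enum { R_INTR_SHADOW, R_RIP, R_RBX };

    static int
    model_getreg(const struct reg_model *m, int ident, uint64_t *val)
    {
            switch (ident) {
            case R_INTR_SHADOW:     *val = m->intr_shadow;  return (0);
            case R_RIP:             *val = m->vmcb_rip;     return (0);
            case R_RBX:             *val = m->swctx_rbx;    return (0);
            default:                return (EINVAL);    /* line 2189's case */
            }
    }
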
2230 svm_setcap(void *arg, int vcpu, int type, int val)
2239 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2243 svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2259 svm_getcap(void *arg, int vcpu, int type, int *retval)
2269 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
2273 *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT,
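
svm_setcap()/svm_getcap() close the loop: portable capabilities such as VM_CAP_HALT_EXIT and VM_CAP_PAUSE_EXIT are nothing more than the HLT and PAUSE intercept bits toggled through the same svm_set_intercept() path shown at the top of this listing. Reusing model_set_intercept() and struct vmcb_ctrl_model from the first sketch, with illustrative bit values:

    #define M_INTCPT_HLT    (1u << 24)   /* illustrative value */
    #define M_INTCPT_PAUSE  (1u << 23)

    static void
    model_setcap_halt_exit(struct vmcb_ctrl_model *ctrl, int val)
    {
            /* VM_CAP_HALT_EXIT <-> the HLT intercept, CTRL1 vector */
            model_set_intercept(ctrl, 3, M_INTCPT_HLT, val);
    }

    static int
    model_getcap_halt_exit(struct vmcb_ctrl_model *ctrl)
    {
            return ((ctrl->intercept[3] & M_INTCPT_HLT) != 0);
    }
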