Lines Matching refs:vcpu

288 svm_set_tsc_offset(struct svm_vcpu *vcpu, uint64_t offset)
292 ctrl = svm_get_vmcb_ctrl(vcpu);
295 svm_set_dirty(vcpu, VMCB_CACHE_I);
296 SVM_CTR1(vcpu, "tsc offset changed to %#lx", offset);
298 vm_set_tsc_offset(vcpu->vcpu, offset);
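
Aside: on SVM the hardware applies the VMCB tsc_offset to every guest TSC read, which is why changing the offset is just the control-area write plus the dirty marking seen above. A minimal sketch of the relationship (illustrative, not bhyve code):

    #include <stdint.h>

    /* Guest RDTSC/RDTSCP return the host TSC plus the VMCB offset. */
    static inline uint64_t
    guest_tsc(uint64_t host_tsc, uint64_t tsc_offset)
    {
            return (host_tsc + tsc_offset);
    }
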
348 * Allow vcpu to read or write the 'msr' without trapping into the hypervisor.
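
The helper this comment documents clears bits in the SVM MSR permission map (MSRPM). A sketch of the two-bits-per-MSR layout from the AMD APM, a read-intercept bit followed by a write-intercept bit across three 2KB regions; names here are illustrative, not bhyve's:

    #include <stdint.h>

    /* Map an MSR number to its read-intercept bit position in the MSRPM. */
    static int
    msrpm_bitpos(uint32_t msr, int *bitpos)
    {
            if (msr <= 0x1fff)
                    *bitpos = msr * 2;
            else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
                    *bitpos = 0x4000 + (msr - 0xc0000000) * 2;
            else if (msr >= 0xc0010000 && msr <= 0xc0011fff)
                    *bitpos = 0x8000 + (msr - 0xc0010000) * 2;
            else
                    return (-1);    /* MSR not covered by the bitmap */
            return (0);
    }

    /* Clear the read and/or write intercept so access doesn't trap. */
    static void
    msrpm_allow(uint8_t *msrpm, uint32_t msr, int read, int write)
    {
            int b;

            if (msrpm_bitpos(msr, &b) != 0)
                    return;
            if (read)
                    msrpm[b / 8] &= ~(1u << (b % 8));
            if (write)
                    msrpm[(b + 1) / 8] &= ~(1u << ((b + 1) % 8));
    }
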
384 svm_get_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask)
390 ctrl = svm_get_vmcb_ctrl(vcpu);
395 svm_set_intercept(struct svm_vcpu *vcpu, int idx, uint32_t bitmask, int enabled)
402 ctrl = svm_get_vmcb_ctrl(vcpu);
411 svm_set_dirty(vcpu, VMCB_CACHE_I);
412 SVM_CTR3(vcpu, "intercept[%d] modified from %#x to %#x", idx,
418 svm_disable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask)
421 svm_set_intercept(vcpu, off, bitmask, 0);
425 svm_enable_intercept(struct svm_vcpu *vcpu, int off, uint32_t bitmask)
428 svm_set_intercept(vcpu, off, bitmask, 1);
432 vmcb_init(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t iopm_base_pa,
440 ctrl = svm_get_vmcb_ctrl(vcpu);
441 state = svm_get_vmcb_state(vcpu);
457 svm_disable_intercept(vcpu, VMCB_CR_INTCPT, mask);
459 svm_enable_intercept(vcpu, VMCB_CR_INTCPT, mask);
466 if (vcpu_trace_exceptions(vcpu->vcpu)) {
474 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(n));
477 svm_enable_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC));
481 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO);
482 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR);
483 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID);
484 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR);
485 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT);
486 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI);
487 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI);
488 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN);
489 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE);
490 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVD);
491 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INVLPGA);
493 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR);
494 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT);
500 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMLOAD);
501 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMSAVE);
502 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_STGI);
503 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_CLGI);
504 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_SKINIT);
505 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_ICEBP);
506 if (vcpu_trap_wbinvd(vcpu->vcpu)) {
507 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT,
515 svm_enable_intercept(vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN);
611 svm_vcpu_init(void *vmi, struct vcpu *vcpu1, int vcpuid)
614 struct svm_vcpu *vcpu;
616 vcpu = malloc(sizeof(*vcpu), M_SVM, M_WAITOK | M_ZERO);
617 vcpu->sc = sc;
618 vcpu->vcpu = vcpu1;
619 vcpu->vcpuid = vcpuid;
620 vcpu->vmcb = malloc_aligned(sizeof(struct vmcb), PAGE_SIZE, M_SVM,
622 vcpu->nextrip = ~0;
623 vcpu->lastcpu = NOCPU;
624 vcpu->vmcb_pa = vtophys(vcpu->vmcb);
625 vmcb_init(sc, vcpu, vtophys(sc->iopm_bitmap), vtophys(sc->msr_bitmap),
627 svm_msr_guest_init(sc, vcpu);
628 return (vcpu);
723 svm_inout_str_seginfo(struct svm_vcpu *vcpu, int64_t info1, int in,
736 error = svm_getdesc(vcpu, vis->seg_name, &vis->seg_desc);
777 svm_handle_io(struct svm_vcpu *vcpu, struct vm_exit *vmexit)
786 state = svm_get_vmcb_state(vcpu);
787 ctrl = svm_get_vmcb_ctrl(vcpu);
788 regs = svm_get_guest_regctx(vcpu);
814 svm_paging_info(svm_get_vmcb(vcpu), &vis->paging);
820 svm_inout_str_seginfo(vcpu, info1, vmexit->u.inout.in, vis);
931 * Inject an event into the vcpu as described in section 15.20, "Event Injection".
934 svm_eventinject(struct svm_vcpu *vcpu, int intr_type, int vector,
939 ctrl = svm_get_vmcb_ctrl(vcpu);
964 SVM_CTR3(vcpu, "Injecting %s at vector %d errcode %#x",
967 SVM_CTR2(vcpu, "Injecting %s at vector %d",
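
svm_eventinject boils down to encoding the VMCB EVENTINJ field described in APM section 15.20: vector in bits 7:0, event type in bits 10:8, an error-code-valid bit, the valid bit, and the error code in the high dword. A sketch of that encoding (constant names are illustrative):

    #include <stdint.h>

    #define EVENTINJ_VALID          (1ul << 31)
    #define EVENTINJ_EC_VALID       (1ul << 11)
    #define EVENTINJ_TYPE_INTR      0ul     /* external interrupt */
    #define EVENTINJ_TYPE_NMI       2ul
    #define EVENTINJ_TYPE_EXCEPTION 3ul

    static uint64_t
    eventinj_encode(uint64_t type, int vector, uint32_t errcode, int ec_valid)
    {
            uint64_t ev;

            ev = (vector & 0xff) | (type << 8) | EVENTINJ_VALID;
            if (ec_valid)
                    ev |= EVENTINJ_EC_VALID | ((uint64_t)errcode << 32);
            return (ev);
    }
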
973 svm_update_virqinfo(struct svm_vcpu *vcpu)
978 vlapic = vm_lapic(vcpu->vcpu);
979 ctrl = svm_get_vmcb_ctrl(vcpu);
990 svm_save_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
995 ctrl = svm_get_vmcb_ctrl(vcpu);
1006 SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo,
1008 vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1);
1009 vm_exit_intinfo(vcpu->vcpu, intinfo);
1014 vintr_intercept_enabled(struct svm_vcpu *vcpu)
1017 return (svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR));
1022 enable_intr_window_exiting(struct svm_vcpu *vcpu)
1026 ctrl = svm_get_vmcb_ctrl(vcpu);
1030 KASSERT(vintr_intercept_enabled(vcpu),
1035 SVM_CTR0(vcpu, "Enable intr window exiting");
1039 svm_set_dirty(vcpu, VMCB_CACHE_TPR);
1040 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1044 disable_intr_window_exiting(struct svm_vcpu *vcpu)
1048 ctrl = svm_get_vmcb_ctrl(vcpu);
1051 KASSERT(!vintr_intercept_enabled(vcpu),
1056 SVM_CTR0(vcpu, "Disable intr window exiting");
1059 svm_set_dirty(vcpu, VMCB_CACHE_TPR);
1060 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR);
1064 svm_modify_intr_shadow(struct svm_vcpu *vcpu, uint64_t val)
1069 ctrl = svm_get_vmcb_ctrl(vcpu);
1074 SVM_CTR1(vcpu, "Setting intr_shadow to %d", newval);
1080 svm_get_intr_shadow(struct svm_vcpu *vcpu, uint64_t *val)
1084 ctrl = svm_get_vmcb_ctrl(vcpu);
1092 * to track when the vcpu is done handling the NMI.
1095 nmi_blocked(struct svm_vcpu *vcpu)
1099 blocked = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1104 enable_nmi_blocking(struct svm_vcpu *vcpu)
1107 KASSERT(!nmi_blocked(vcpu), ("vNMI already blocked"));
1108 SVM_CTR0(vcpu, "vNMI blocking enabled");
1109 svm_enable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1113 clear_nmi_blocking(struct svm_vcpu *vcpu)
1117 KASSERT(nmi_blocked(vcpu), ("vNMI already unblocked"));
1118 SVM_CTR0(vcpu, "vNMI blocking cleared");
1120 * When the IRET intercept is cleared the vcpu will attempt to execute
1122 * another NMI into the vcpu before the "iret" has actually executed.
1126 * the vcpu it will be injected into the guest.
1130 svm_disable_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET);
1136 error = svm_modify_intr_shadow(vcpu, 1);
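
There is no hardware exit for "the guest finished its NMI handler", so the lines above model NMI blocking with the IRET intercept: injecting an NMI enables the intercept, and the #IRET exit clears it while setting a one-instruction interrupt shadow so the "iret" retires before another NMI goes in. The state machine, as an illustrative sketch:

    #include <stdbool.h>

    struct nmi_track {
            bool iret_intercept;    /* set => vNMI blocked */
            bool intr_shadow;       /* let the pending "iret" retire */
    };

    static void
    nmi_injected(struct nmi_track *t)
    {
            t->iret_intercept = true;
    }

    static void
    iret_exit(struct nmi_track *t)
    {
            t->iret_intercept = false;
            t->intr_shadow = true;  /* cleared once "iret" executes */
    }

    static bool
    nmi_injectable(const struct nmi_track *t)
    {
            return (!t->iret_intercept && !t->intr_shadow);
    }
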
1143 svm_write_efer(struct svm_softc *sc, struct svm_vcpu *vcpu, uint64_t newval,
1151 state = svm_get_vmcb_state(vcpu);
1154 SVM_CTR2(vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval);
1178 if (!vm_cpuid_capability(vcpu->vcpu, VCC_NO_EXECUTE))
1187 vme = vm_exitinfo(vcpu->vcpu);
1194 if (!vm_cpuid_capability(vcpu->vcpu, VCC_FFXSR))
1199 if (!vm_cpuid_capability(vcpu->vcpu, VCC_TCE))
1203 error = svm_setreg(vcpu, VM_REG_GUEST_EFER, newval);
1207 vm_inject_gp(vcpu->vcpu);
1212 emulate_wrmsr(struct svm_softc *sc, struct svm_vcpu *vcpu, u_int num,
1218 error = lapic_wrmsr(vcpu->vcpu, num, val, retu);
1220 error = svm_write_efer(sc, vcpu, val, retu);
1222 error = svm_wrmsr(vcpu, num, val, retu);
1228 emulate_rdmsr(struct svm_vcpu *vcpu, u_int num, bool *retu)
1236 error = lapic_rdmsr(vcpu->vcpu, num, &result, retu);
1238 error = svm_rdmsr(vcpu, num, &result, retu);
1241 state = svm_get_vmcb_state(vcpu);
1242 ctx = svm_get_guest_regctx(vcpu);
1325 svm_vmexit(struct svm_softc *svm_sc, struct svm_vcpu *vcpu,
1337 ctx = svm_get_guest_regctx(vcpu);
1338 vmcb = svm_get_vmcb(vcpu);
1351 vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1);
1370 svm_update_virqinfo(vcpu);
1371 svm_save_intinfo(svm_sc, vcpu);
1379 clear_nmi_blocking(vcpu);
1383 vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1);
1387 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1);
1394 vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
1404 SVM_CTR0(vcpu, "Vectoring to MCE handler");
1408 error = svm_setreg(vcpu, VM_REG_GUEST_CR2, info2);
1432 svm_getreg(vcpu, VM_REG_GUEST_DR6, &dr6);
1434 if (stepped && (vcpu->caps & (1 << VM_CAP_RFLAGS_TF))) {
1439 if (vcpu->dbg.popf_sstep) {
1446 vcpu->dbg.popf_sstep = 0;
1453 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS,
1455 vcpu->dbg.rflags_tf = rflags & PSL_T;
1456 } else if (vcpu->dbg.pushf_sstep) {
1461 vcpu->dbg.pushf_sstep = 0;
1471 vcpu->dbg.rflags_tf;
1472 svm_paging_info(svm_get_vmcb(vcpu),
1478 error = svm_setreg(vcpu, VM_REG_GUEST_DR6, dr6);
1505 SVM_CTR2(vcpu, "Reset inst_length from %d "
1522 SVM_CTR2(vcpu, "Reflecting exception "
1524 error = vm_inject_exception(vcpu->vcpu, idtvec,
1538 vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
1540 SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val);
1541 if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
1552 SVM_CTR1(vcpu, "rdmsr %#x", ecx);
1553 vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
1554 if (emulate_rdmsr(vcpu, ecx, &retu)) {
1566 handled = svm_handle_io(vcpu, vmexit);
1567 vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1);
1570 vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
1571 handled = x86_emulate_cpuid(vcpu->vcpu,
1576 vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
1582 vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1);
1587 SVM_CTR2(vcpu, "nested page fault with "
1590 } else if (vm_mem_allocated(vcpu->vcpu, info2)) {
1594 vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1);
1595 SVM_CTR3(vcpu, "nested page fault "
1600 vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1);
1601 SVM_CTR3(vcpu, "inst_emul fault "
1613 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
1616 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
1620 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
1623 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T));
1626 vcpu->dbg.pushf_sstep = 1;
1632 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
1635 svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
1639 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
1642 svm_setreg(vcpu, VM_REG_GUEST_RFLAGS, (rflags | PSL_T));
1643 vcpu->dbg.popf_sstep = 1;
1658 vm_inject_ud(vcpu->vcpu);
1667 vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
1671 SVM_CTR4(vcpu, "%s %s vmexit at %#lx/%d",
1697 svm_inj_intinfo(struct svm_softc *svm_sc, struct svm_vcpu *vcpu)
1701 if (!vm_entry_intinfo(vcpu->vcpu, &intinfo))
1707 svm_eventinject(vcpu, VMCB_EXITINTINFO_TYPE(intinfo),
1711 vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1);
1712 SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo);
1719 svm_inj_interrupts(struct svm_softc *sc, struct svm_vcpu *vcpu,
1728 if (vcpu->caps & (1 << VM_CAP_MASK_HWINTR)) {
1732 state = svm_get_vmcb_state(vcpu);
1733 ctrl = svm_get_vmcb_ctrl(vcpu);
1737 if (vcpu->nextrip != state->rip) {
1739 SVM_CTR2(vcpu, "Guest interrupt blocking "
1741 vcpu->nextrip, state->rip);
1745 * Inject pending events or exceptions for this vcpu.
1753 svm_inj_intinfo(sc, vcpu);
1756 if (vm_nmi_pending(vcpu->vcpu)) {
1757 if (nmi_blocked(vcpu)) {
1762 SVM_CTR0(vcpu, "Cannot inject NMI due "
1766 * Can't inject an NMI if the vcpu is in an intr_shadow.
1768 SVM_CTR0(vcpu, "Cannot inject NMI due to "
1777 SVM_CTR1(vcpu, "Cannot inject NMI due to "
1792 vm_nmi_clear(vcpu->vcpu);
1795 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_NMI,
1799 enable_nmi_blocking(vcpu);
1801 SVM_CTR0(vcpu, "Injecting vNMI");
1805 extint_pending = vm_extint_pending(vcpu->vcpu);
1823 SVM_CTR2(vcpu, "Cannot inject vector %d due to "
1830 SVM_CTR1(vcpu, "Cannot inject vector %d due to "
1837 SVM_CTR2(vcpu, "Cannot inject vector %d due to "
1843 svm_eventinject(vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false);
1848 vm_extint_clear(vcpu->vcpu);
1853 * Force a VM-exit as soon as the vcpu is ready to accept another
1874 SVM_CTR2(vcpu, "VMCB V_TPR changed from %#x to %#x",
1877 svm_set_dirty(vcpu, VMCB_CACHE_TPR);
1895 enable_intr_window_exiting(vcpu);
1897 disable_intr_window_exiting(vcpu);
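
enable_intr_window_exiting uses the usual SVM trick for "exit as soon as the vcpu can take an interrupt" (line 1853): post a dummy virtual interrupt with V_IGN_TPR set and intercept #VINTR. The exit fires exactly when the window opens, and the real vector is injected then. A sketch over an illustrative control-area struct, not the real VMCB layout:

    struct vintr_ctrl {
            unsigned v_irq : 1;     /* virtual interrupt pending */
            unsigned v_ign_tpr : 1; /* deliver regardless of V_TPR */
            unsigned v_intr_vector : 8;
            unsigned vintr_intercept : 1;
    };

    static void
    intr_window_enable(struct vintr_ctrl *c)
    {
            c->v_irq = 1;
            c->v_ign_tpr = 1;
            c->v_intr_vector = 0;   /* dummy; never actually delivered */
            c->vintr_intercept = 1; /* #VINTR exit == window is open */
    }

    static void
    intr_window_disable(struct vintr_ctrl *c)
    {
            c->v_irq = 0;
            c->v_ign_tpr = 0;
            c->vintr_intercept = 0;
    }
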
1919 svm_pmap_activate(struct svm_vcpu *vcpu, pmap_t pmap)
1930 ctrl = svm_get_vmcb_ctrl(vcpu);
1933 * The TLB entries associated with the vcpu's ASID are not valid
1936 * 1. The vcpu's ASID generation is different than the host cpu's
1937 * ASID generation. This happens when the vcpu migrates to a new
1955 * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is
1971 if (vcpu->asid.gen != asid[cpu].gen) {
1973 } else if (vcpu->eptgen != eptgen) {
2001 vcpu->asid.gen = asid[cpu].gen;
2002 vcpu->asid.num = asid[cpu].num;
2004 ctrl->asid = vcpu->asid.num;
2005 svm_set_dirty(vcpu, VMCB_CACHE_ASID);
2014 vcpu->eptgen = eptgen;
2017 KASSERT(ctrl->asid == vcpu->asid.num,
2018 ("ASID mismatch: %u/%u", ctrl->asid, vcpu->asid.num));
2097 * Start vcpu with specified RIP.
2104 struct svm_vcpu *vcpu;
2113 vcpu = vcpui;
2114 svm_sc = vcpu->sc;
2115 state = svm_get_vmcb_state(vcpu);
2116 ctrl = svm_get_vmcb_ctrl(vcpu);
2117 vmexit = vm_exitinfo(vcpu->vcpu);
2118 vlapic = vm_lapic(vcpu->vcpu);
2120 gctx = svm_get_guest_regctx(vcpu);
2121 vmcb_pa = vcpu->vmcb_pa;
2123 if (vcpu->lastcpu != curcpu) {
2127 vcpu->asid.gen = 0;
2132 svm_set_dirty(vcpu, 0xffffffff);
2136 * Setting 'vcpu->lastcpu' here is a bit premature because
2141 * This works for now but any new side-effects of vcpu
2144 vcpu->lastcpu = curcpu;
2145 vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
2148 svm_msr_guest_enter(vcpu);
2165 vm_exit_suspended(vcpu->vcpu, state->rip);
2169 if (vcpu_rendezvous_pending(vcpu->vcpu, evinfo)) {
2171 vm_exit_rendezvous(vcpu->vcpu, state->rip);
2177 vm_exit_reqidle(vcpu->vcpu, state->rip);
2182 if (vcpu_should_yield(vcpu->vcpu)) {
2184 vm_exit_astpending(vcpu->vcpu, state->rip);
2188 if (vcpu_debugged(vcpu->vcpu)) {
2190 vm_exit_debug(vcpu->vcpu, state->rip);
2203 svm_inj_interrupts(svm_sc, vcpu, vlapic);
2207 * ensure that the vcpu does not use stale TLB mappings.
2209 svm_pmap_activate(vcpu, pmap);
2211 ctrl->vmcb_clean = vmcb_clean & ~vcpu->dirty;
2212 vcpu->dirty = 0;
2213 SVM_CTR1(vcpu, "vmcb clean %#x", ctrl->vmcb_clean);
2216 SVM_CTR1(vcpu, "Resume execution at %#lx", state->rip);
2237 vcpu->nextrip = state->rip;
2240 handled = svm_vmexit(svm_sc, vcpu, vmexit);
2243 svm_msr_guest_exit(vcpu);
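
The svm_run fragments above (2097-2243) trace one iteration shape: bail out to userland on pending suspend/rendezvous/reqidle/AST/debug events, inject interrupts, activate the pmap and ASID so no stale TLB mappings are used, clear the VMCB dirty bits, VMRUN, record nextrip, then loop while the exit was handled in-kernel. A stubbed-out skeleton; the helper names are placeholders, not bhyve functions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Placeholder hooks standing in for the checks and work in svm_run. */
    extern bool host_event_pending(void);   /* suspend/rendezvous/AST/... */
    extern void inject_interrupts(void);
    extern void activate_pmap_asid(void);   /* avoid stale TLB mappings */
    extern uint64_t vmrun(void);            /* enter guest; returns exit RIP */
    extern bool handle_exit(void);          /* true: handled in-kernel */

    static void
    run_loop(uint64_t *nextrip)
    {
            bool handled;

            do {
                    if (host_event_pending())
                            break;          /* return to userland instead */
                    inject_interrupts();
                    activate_pmap_asid();
                    *nextrip = vmrun();
                    handled = handle_exit();
            } while (handled);
    }
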
2251 struct svm_vcpu *vcpu = vcpui;
2253 free(vcpu->vmcb, M_SVM);
2254 free(vcpu, M_SVM);
2316 struct svm_vcpu *vcpu;
2319 vcpu = vcpui;
2322 return (svm_get_intr_shadow(vcpu, val));
2325 if (vmcb_read(vcpu, ident, val) == 0) {
2329 reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);
2336 SVM_CTR1(vcpu, "svm_getreg: unknown register %#x", ident);
2343 struct svm_vcpu *vcpu;
2346 vcpu = vcpui;
2349 return (svm_modify_intr_shadow(vcpu, val));
2354 if (vmcb_write(vcpu, ident, val) == 0) {
2359 reg = swctx_regptr(svm_get_guest_regctx(vcpu), ident);
2373 * vcpu's ASID. This needs to be treated differently depending on
2377 SVM_CTR1(vcpu, "svm_setreg: unknown register %#x", ident);
2425 struct svm_vcpu *vcpu;
2429 vcpu = vcpui;
2434 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
2438 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT,
2447 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP), val);
2450 vlapic = vm_lapic(vcpu->vcpu);
2454 vcpu->caps &= ~(1 << VM_CAP_MASK_HWINTR);
2455 vcpu->caps |= (val << VM_CAP_MASK_HWINTR);
2461 if (svm_getreg(vcpu, VM_REG_GUEST_RFLAGS, &rflags)) {
2467 vcpu->dbg.rflags_tf = rflags & PSL_T;
2469 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS,
2474 vcpu->caps |= (1 << VM_CAP_RFLAGS_TF);
2480 if (vcpu->caps & (1 << VM_CAP_RFLAGS_TF)) {
2482 rflags |= vcpu->dbg.rflags_tf;
2483 vcpu->dbg.rflags_tf = 0;
2485 if (svm_setreg(vcpu, VM_REG_GUEST_RFLAGS,
2490 vcpu->caps &= ~(1 << VM_CAP_RFLAGS_TF);
2494 svm_set_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_DB), val);
2495 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_POPF,
2497 svm_set_intercept(vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PUSHF,
2511 struct svm_vcpu *vcpu;
2515 vcpu = vcpui;
2520 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
2524 *retval = svm_get_intercept(vcpu, VMCB_CTRL1_INTCPT,
2531 *retval = svm_get_intercept(vcpu, VMCB_EXC_INTCPT, BIT(IDT_BP));
2534 vlapic = vm_lapic(vcpu->vcpu);
2538 *retval = !!(vcpu->caps & (1 << VM_CAP_RFLAGS_TF));
2541 *retval = !!(vcpu->caps & (1 << VM_CAP_MASK_HWINTR));
2565 struct svm_vcpu *vcpu;
2568 vcpu = vcpui;
2570 vlapic->vm = vcpu->sc->vm;
2571 vlapic->vcpu = vcpu->vcpu;
2572 vlapic->vcpuid = vcpu->vcpuid;
2594 struct svm_vcpu *vcpu;
2597 vcpu = vcpui;
2600 running = vcpu_is_running(vcpu->vcpu, &hostcpu);
2602 printf("%s: %s%d is running", __func__, vm_name(vcpu->sc->vm),
2603 vcpu->vcpuid);
2607 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR0, meta);
2608 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR2, meta);
2609 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR3, meta);
2610 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CR4, meta);
2612 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR6, meta);
2613 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DR7, meta);
2615 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RAX, meta);
2617 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RSP, meta);
2618 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RIP, meta);
2619 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_RFLAGS, meta);
2623 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_ES, meta);
2624 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_ES, meta);
2627 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_CS, meta);
2628 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_CS, meta);
2631 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_SS, meta);
2632 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_SS, meta);
2635 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_DS, meta);
2636 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_DS, meta);
2639 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_FS, meta);
2640 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_FS, meta);
2643 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_GS, meta);
2644 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GS, meta);
2647 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_TR, meta);
2648 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_TR, meta);
2651 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_LDTR, meta);
2652 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_LDTR, meta);
2655 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_EFER, meta);
2658 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_IDTR, meta);
2659 err += vmcb_snapshot_desc(vcpu, VM_REG_GUEST_GDTR, meta);
2662 err += svm_snapshot_reg(vcpu, VM_REG_GUEST_INTR_SHADOW, meta);
2664 err += vmcb_snapshot_any(vcpu,
2666 err += vmcb_snapshot_any(vcpu,
2668 err += vmcb_snapshot_any(vcpu,
2670 err += vmcb_snapshot_any(vcpu,
2672 err += vmcb_snapshot_any(vcpu,
2675 err += vmcb_snapshot_any(vcpu,
2677 err += vmcb_snapshot_any(vcpu,
2680 err += vmcb_snapshot_any(vcpu,
2683 err += vmcb_snapshot_any(vcpu,
2686 err += vmcb_snapshot_any(vcpu,
2689 err += vmcb_snapshot_any(vcpu,
2691 err += vmcb_snapshot_any(vcpu,
2693 err += vmcb_snapshot_any(vcpu,
2695 err += vmcb_snapshot_any(vcpu,
2698 err += vmcb_snapshot_any(vcpu,
2701 err += vmcb_snapshot_any(vcpu,
2703 err += vmcb_snapshot_any(vcpu,
2705 err += vmcb_snapshot_any(vcpu,
2707 err += vmcb_snapshot_any(vcpu,
2710 err += vmcb_snapshot_any(vcpu,
2713 err += vmcb_snapshot_any(vcpu,
2715 err += vmcb_snapshot_any(vcpu,
2717 err += vmcb_snapshot_any(vcpu,
2720 err += vmcb_snapshot_any(vcpu,
2723 err += vmcb_snapshot_any(vcpu,
2726 err += vmcb_snapshot_any(vcpu,
2728 err += vmcb_snapshot_any(vcpu,
2730 err += vmcb_snapshot_any(vcpu,
2733 err += vmcb_snapshot_any(vcpu,
2736 err += vmcb_snapshot_any(vcpu,
2738 err += vmcb_snapshot_any(vcpu,
2740 err += vmcb_snapshot_any(vcpu,
2742 err += vmcb_snapshot_any(vcpu,
2744 err += vmcb_snapshot_any(vcpu,
2750 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbp, meta, err, done);
2751 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rbx, meta, err, done);
2752 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rcx, meta, err, done);
2753 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdx, meta, err, done);
2754 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rdi, meta, err, done);
2755 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_rsi, meta, err, done);
2756 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r8, meta, err, done);
2757 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r9, meta, err, done);
2758 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r10, meta, err, done);
2759 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r11, meta, err, done);
2760 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r12, meta, err, done);
2761 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r13, meta, err, done);
2762 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r14, meta, err, done);
2763 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_r15, meta, err, done);
2764 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr0, meta, err, done);
2765 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr1, meta, err, done);
2766 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr2, meta, err, done);
2767 SNAPSHOT_VAR_OR_LEAVE(vcpu->swctx.sctx_dr3, meta, err, done);
2772 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, err, done);
2775 SNAPSHOT_VAR_OR_LEAVE(vcpu->lastcpu, meta, err, done);
2776 SNAPSHOT_VAR_OR_LEAVE(vcpu->dirty, meta, err, done);
2779 SNAPSHOT_VAR_OR_LEAVE(vcpu->eptgen, meta, err, done);
2781 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.gen, meta, err, done);
2782 SNAPSHOT_VAR_OR_LEAVE(vcpu->asid.num, meta, err, done);
2784 SNAPSHOT_BUF_OR_LEAVE(&vcpu->mtrr, sizeof(vcpu->mtrr), meta, err, done);
2788 svm_set_dirty(vcpu, 0xffffffff);
2797 struct svm_vcpu *vcpu = vcpui;
2799 svm_set_tsc_offset(vcpu, offset);