Lines Matching refs:vcpu

96 * (a) allocated when vcpu is created
97 * (i) initialized when vcpu is created and when it is reinitialized
98 * (o) initialized the first time the vcpu is created
101 struct vcpu {
103 enum vcpu_state state; /* (o) vcpu state */
105 int hostcpu; /* (o) vcpu's host cpu */
106 int reqidle; /* (i) request vcpu to idle */
127 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
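
Only the init macro matches the search, but the companion wrappers it implies (vcpu_lock_destroy, vcpu_lock, vcpu_unlock and vcpu_assert_locked are all used further down this listing) are presumably thin wrappers over the mutex(9) spin-mutex KPI, along these lines:

    #define vcpu_lock_destroy(v)   mtx_destroy(&((v)->mtx))
    #define vcpu_lock(v)           mtx_lock_spin(&((v)->mtx))
    #define vcpu_unlock(v)         mtx_unlock_spin(&((v)->mtx))
    #define vcpu_assert_locked(v)  mtx_assert(&((v)->mtx), MA_OWNED)
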
159 * [v] reads require one frozen vcpu, writes require freezing all vcpus
186 struct vcpu **vcpu; /* (o) guest vcpus */
196 #define VMM_CTR0(vcpu, format) \
197 VCPU_CTR0((vcpu)->vm, (vcpu)->vcpuid, format)
199 #define VMM_CTR1(vcpu, format, p1) \
200 VCPU_CTR1((vcpu)->vm, (vcpu)->vcpuid, format, p1)
202 #define VMM_CTR2(vcpu, format, p1, p2) \
203 VCPU_CTR2((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2)
205 #define VMM_CTR3(vcpu, format, p1, p2, p3) \
206 VCPU_CTR3((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3)
208 #define VMM_CTR4(vcpu, format, p1, p2, p3, p4) \
209 VCPU_CTR4((vcpu)->vm, (vcpu)->vcpuid, format, p1, p2, p3, p4)
239 DEFINE_VMMOPS_IFUNC(void *, vcpu_init, (void *vmi, struct vcpu *vcpu,
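
DEFINE_VMMOPS_IFUNC presumably emits an ifunc resolver that binds each vmmops_* entry point to the Intel (vmx) or AMD (svm) backend once at load time, so later calls are direct rather than through a per-call function table. A sketch of the expansion for this entry, with the helper and backend names (vmm_is_intel, vmx_vcpu_init, svm_vcpu_init) assumed:

    DEFINE_IFUNC(static, void *, vmmops_vcpu_init,
        (void *vmi, struct vcpu *vcpu, int vcpuid))
    {
            /* Resolver runs once; returns the vendor backend to call. */
            if (vmm_is_intel())
                    return (vmx_vcpu_init);
            return (svm_vcpu_init);
    }
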
264 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
280 "IPI vector used for vcpu notifications");
297 static void vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr);
328 vcpu_cleanup(struct vcpu *vcpu, bool destroy)
330 vmmops_vlapic_cleanup(vcpu->vlapic);
331 vmmops_vcpu_cleanup(vcpu->cookie);
332 vcpu->cookie = NULL;
334 vmm_stat_free(vcpu->stats);
335 fpu_save_area_free(vcpu->guestfpu);
336 vcpu_lock_destroy(vcpu);
337 free(vcpu, M_VM);
341 static struct vcpu *
344 struct vcpu *vcpu;
347 ("vcpu_init: invalid vcpu %d", vcpu_id));
349 vcpu = malloc(sizeof(*vcpu), M_VM, M_WAITOK | M_ZERO);
350 vcpu_lock_init(vcpu);
351 vcpu->state = VCPU_IDLE;
352 vcpu->hostcpu = NOCPU;
353 vcpu->vcpuid = vcpu_id;
354 vcpu->vm = vm;
355 vcpu->guestfpu = fpu_save_area_alloc();
356 vcpu->stats = vmm_stat_alloc();
357 vcpu->tsc_offset = 0;
358 return (vcpu);
362 vcpu_init(struct vcpu *vcpu)
364 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid);
365 vcpu->vlapic = vmmops_vlapic_init(vcpu->cookie);
366 vm_set_x2apic_state(vcpu, X2APIC_DISABLED);
367 vcpu->reqidle = 0;
368 vcpu->exitintinfo = 0;
369 vcpu->nmi_pending = 0;
370 vcpu->extint_pending = 0;
371 vcpu->exception_pending = 0;
372 vcpu->guest_xcr0 = XFEATURE_ENABLED_X87;
373 fpu_save_area_reset(vcpu->guestfpu);
374 vmm_stat_init(vcpu->stats);
378 vcpu_trace_exceptions(struct vcpu *vcpu)
385 vcpu_trap_wbinvd(struct vcpu *vcpu)
391 vm_exitinfo(struct vcpu *vcpu)
393 return (&vcpu->exitinfo);
397 vm_exitinfo_cpuset(struct vcpu *vcpu)
399 return (&vcpu->exitinfo_cpuset);
516 if (vm->vcpu[i] != NULL)
517 vcpu_init(vm->vcpu[i]);
530 struct vcpu *
533 struct vcpu *vcpu;
538 vcpu = atomic_load_ptr(&vm->vcpu[vcpuid]);
539 if (__predict_true(vcpu != NULL))
540 return (vcpu);
543 vcpu = vm->vcpu[vcpuid];
544 if (vcpu == NULL && !vm->dying) {
545 vcpu = vcpu_alloc(vm, vcpuid);
546 vcpu_init(vcpu);
552 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid],
553 (uintptr_t)vcpu);
556 return (vcpu);
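
Lines 538-553 are a double-checked publication pattern: a lock-free fast path reads the slot with atomic_load_ptr, and the slow path re-checks under a lock before allocating, publishing the fully constructed vcpu with a release store so unlocked readers never see a half-initialized one. The serializing lock does not reference 'vcpu' and so is elided here; a minimal sketch of the presumed whole (function and lock names assumed):

    struct vcpu *
    vm_alloc_vcpu(struct vm *vm, int vcpuid)
    {
            struct vcpu *vcpu;

            if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm))
                    return (NULL);

            /* Fast path: already published, no lock required. */
            vcpu = atomic_load_ptr(&vm->vcpu[vcpuid]);
            if (__predict_true(vcpu != NULL))
                    return (vcpu);

            /* Slow path: serialize creators, re-check under the lock. */
            sx_xlock(&vm->vcpus_init_lock);         /* lock name assumed */
            vcpu = vm->vcpu[vcpuid];
            if (vcpu == NULL && !vm->dying) {
                    vcpu = vcpu_alloc(vm, vcpuid);
                    vcpu_init(vcpu);
                    /* Release store pairs with the unlocked load above. */
                    atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid],
                        (uintptr_t)vcpu);
            }
            sx_xunlock(&vm->vcpus_init_lock);
            return (vcpu);
    }
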
604 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm_maxcpu, M_VM, M_WAITOK | M_ZERO);
672 if (vm->vcpu[i] != NULL)
673 vcpu_cleanup(vm->vcpu[i], destroy);
700 free(vm->vcpu, M_VM);
779 * This function is called in the context of a running vcpu which acts as an implicit lock on 'vm->mem_maps[]'.
783 vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa)
785 struct vm *vm = vcpu->vm;
791 state = vcpu_get_state(vcpu, &hostcpu);
793 ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
1191 vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
1196 * The current vcpu should be frozen to ensure 'vm_memmap[]' stability.
1199 int state = vcpu_get_state(vcpu, NULL);
1200 KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
1203 return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie));
1223 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
1229 return (vmmops_getreg(vcpu->cookie, reg, retval));
1233 vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
1240 error = vmmops_setreg(vcpu->cookie, reg, val);
1245 VMM_CTR1(vcpu, "Setting nextrip to %#lx", val);
1246 vcpu->nextrip = val;
1283 vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
1289 return (vmmops_getdesc(vcpu->cookie, reg, desc));
1293 vm_set_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
1299 return (vmmops_setdesc(vcpu->cookie, reg, desc));
1303 restore_guest_fpustate(struct vcpu *vcpu)
1311 fpurestore(vcpu->guestfpu);
1315 load_xcr(0, vcpu->guest_xcr0);
1325 save_guest_fpustate(struct vcpu *vcpu)
1333 vcpu->guest_xcr0 = rxcr(0);
1339 fpusave(vcpu->guestfpu);
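
The matched lines show the guest/host hand-off of the FPU and %xcr0; the elided scaffolding is presumably the usual amd64 pattern of flushing host FPU state first and gating the XCR0 swap on the host actually running with CR4.XSAVE. A sketch of the restore side:

    static void
    restore_guest_fpustate(struct vcpu *vcpu)
    {
            /* Flush host FPU state to the PCB before loading the guest's. */
            fpuexit(curthread);

            fpu_stop_emulating();
            fpurestore(vcpu->guestfpu);

            /* Swap in the guest's %xcr0 only if the host enabled XSAVE. */
            if (rcr4() & CR4_XSAVE)
                    load_xcr(0, vcpu->guest_xcr0);

            /* Trap any host FPU use while the guest's state is live. */
            fpu_start_emulating();
    }
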
1343 static VMM_STAT(VCPU_IDLE_TICKS, "number of ticks vcpu was idle");
1346 vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
1351 vcpu_assert_locked(vcpu);
1356 * ioctl() operating on a vcpu at any point.
1359 while (vcpu->state != VCPU_IDLE) {
1360 vcpu->reqidle = 1;
1361 vcpu_notify_event_locked(vcpu, false);
1362 VMM_CTR1(vcpu, "vcpu state change from %s to "
1363 "idle requested", vcpu_state2str(vcpu->state));
1364 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
1367 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
1368 "vcpu idle state"));
1371 if (vcpu->state == VCPU_RUNNING) {
1372 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
1373 "mismatch for running vcpu", curcpu, vcpu->hostcpu));
1375 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
1376 "vcpu that is not running", vcpu->hostcpu));
1385 switch (vcpu->state) {
1402 VMM_CTR2(vcpu, "vcpu state changed from %s to %s",
1403 vcpu_state2str(vcpu->state), vcpu_state2str(newstate));
1405 vcpu->state = newstate;
1407 vcpu->hostcpu = curcpu;
1409 vcpu->hostcpu = NOCPU;
1412 wakeup(&vcpu->state);
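
The body of the switch at line 1385 contains no 'vcpu' reference and is elided, but the KASSERTs around it pin down the state machine: every change passes through FROZEN (IDLE -> FROZEN -> IDLE, FROZEN -> RUNNING -> FROZEN, FROZEN -> SLEEPING -> FROZEN). A sketch of the check the switch presumably performs:

    switch (vcpu->state) {
    case VCPU_IDLE:
    case VCPU_RUNNING:
    case VCPU_SLEEPING:
            error = (newstate != VCPU_FROZEN);  /* may only freeze */
            break;
    case VCPU_FROZEN:
            error = (newstate == VCPU_FROZEN);  /* may go anywhere else */
            break;
    default:
            error = 1;
            break;
    }
    if (error)
            return (EBUSY);
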
1418 vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate)
1422 if ((error = vcpu_set_state(vcpu, newstate, false)) != 0)
1427 vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
1431 if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
1436 vm_handle_rendezvous(struct vcpu *vcpu)
1438 struct vm *vm = vcpu->vm;
1443 vcpuid = vcpu->vcpuid;
1452 VMM_CTR0(vcpu, "Calling rendezvous func");
1453 (*vm->rendezvous_func)(vcpu, vm->rendezvous_arg);
1458 VMM_CTR0(vcpu, "Rendezvous completed");
1464 VMM_CTR0(vcpu, "Wait for rendezvous completion");
1480 * Emulate a guest 'hlt' by sleeping until the vcpu is ready to run.
1483 vm_handle_hlt(struct vcpu *vcpu, bool intr_disabled, bool *retu)
1485 struct vm *vm = vcpu->vm;
1490 vcpuid = vcpu->vcpuid;
1496 KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
1498 vcpu_lock(vcpu);
1503 * software events that would cause this vcpu to wakeup.
1506 * vcpu returned from vmmops_run() and before it acquired the
1507 * vcpu lock above.
1509 if (vm->rendezvous_func != NULL || vm->suspend || vcpu->reqidle)
1511 if (vm_nmi_pending(vcpu))
1514 if (vm_extint_pending(vcpu) ||
1515 vlapic_pending_intr(vcpu->vlapic, NULL)) {
1520 /* Don't go to sleep if the vcpu thread needs to yield */
1521 if (vcpu_should_yield(vcpu))
1524 if (vcpu_debugged(vcpu))
1535 VMM_CTR0(vcpu, "Halted");
1549 vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
1554 msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
1555 vcpu_require_state_locked(vcpu, VCPU_FROZEN);
1556 vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t);
1558 vcpu_unlock(vcpu);
1567 vcpu_lock(vcpu);
1574 vcpu_unlock(vcpu);
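
Between the wakeup checks and the sleep loop, vm_handle_hlt also has to handle guests that "halt" by parking every vcpu in HLT with interrupts disabled: such a vcpu can never be woken by an interrupt, so once all active vcpus are in this state the whole VM is suspended. The bookkeeping around the elided lines presumably looks like:

    if (intr_disabled) {
            wmesg = "vmhalt";
            CPU_SET_ATOMIC(vcpuid, &vm->halted_cpus);
            /* All active vcpus halted with interrupts off: give up. */
            if (CPU_CMP(&vm->halted_cpus, &vm->active_cpus) == 0)
                    vm_suspend(vm, VM_SUSPEND_HALT);
    } else {
            wmesg = "vmidle";
    }
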
1583 vm_handle_paging(struct vcpu *vcpu, bool *retu)
1585 struct vm *vm = vcpu->vm;
1590 vme = &vcpu->exitinfo;
1604 VMM_CTR2(vcpu, "%s bit emulation for gpa %#lx",
1614 VMM_CTR3(vcpu, "vm_handle_paging rv = %d, gpa = %#lx, "
1624 vm_handle_inst_emul(struct vcpu *vcpu, bool *retu)
1635 vme = &vcpu->exitinfo;
1648 VMM_CTR1(vcpu, "inst_emul fault accessing gpa %#lx", gpa);
1652 error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base,
1663 if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) {
1664 VMM_CTR1(vcpu, "Error decoding instruction at %#lx",
1674 vcpu->nextrip += vie->num_processed;
1675 VMM_CTR1(vcpu, "nextrip updated to %#lx after instruction decoding",
1676 vcpu->nextrip);
1693 error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite,
1700 vm_handle_suspend(struct vcpu *vcpu, bool *retu)
1702 struct vm *vm = vcpu->vm;
1709 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus);
1718 vcpu_lock(vcpu);
1721 VMM_CTR0(vcpu, "All vcpus suspended");
1726 VMM_CTR0(vcpu, "Sleeping during suspend");
1727 vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
1728 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
1729 vcpu_require_state_locked(vcpu, VCPU_FROZEN);
1731 vcpu_unlock(vcpu);
1733 vcpu_lock(vcpu);
1736 VMM_CTR0(vcpu, "Rendezvous during suspend");
1737 vcpu_unlock(vcpu);
1738 error = vm_handle_rendezvous(vcpu);
1739 vcpu_lock(vcpu);
1742 vcpu_unlock(vcpu);
1758 vm_handle_reqidle(struct vcpu *vcpu, bool *retu)
1760 vcpu_lock(vcpu);
1761 KASSERT(vcpu->reqidle, ("invalid vcpu reqidle %d", vcpu->reqidle));
1762 vcpu->reqidle = 0;
1763 vcpu_unlock(vcpu);
1769 vm_handle_db(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
1781 vm_get_register(vcpu, VM_REG_GUEST_RSP, &rsp);
1782 error = vm_copy_setup(vcpu, &vme->u.dbg.paging, rsp, sizeof(uint64_t),
1830 vm_exit_suspended(struct vcpu *vcpu, uint64_t rip)
1832 struct vm *vm = vcpu->vm;
1838 vmexit = vm_exitinfo(vcpu);
1846 vm_exit_debug(struct vcpu *vcpu, uint64_t rip)
1850 vmexit = vm_exitinfo(vcpu);
1857 vm_exit_rendezvous(struct vcpu *vcpu, uint64_t rip)
1861 vmexit = vm_exitinfo(vcpu);
1865 vmm_stat_incr(vcpu, VMEXIT_RENDEZVOUS, 1);
1869 vm_exit_reqidle(struct vcpu *vcpu, uint64_t rip)
1873 vmexit = vm_exitinfo(vcpu);
1877 vmm_stat_incr(vcpu, VMEXIT_REQIDLE, 1);
1881 vm_exit_astpending(struct vcpu *vcpu, uint64_t rip)
1885 vmexit = vm_exitinfo(vcpu);
1889 vmm_stat_incr(vcpu, VMEXIT_ASTPENDING, 1);
1893 vm_run(struct vcpu *vcpu)
1895 struct vm *vm = vcpu->vm;
1904 vcpuid = vcpu->vcpuid;
1913 vme = &vcpu->exitinfo;
1916 evinfo.iptr = &vcpu->reqidle;
1928 restore_guest_fpustate(vcpu);
1930 vcpu_require_state(vcpu, VCPU_RUNNING);
1931 error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
1932 vcpu_require_state(vcpu, VCPU_FROZEN);
1934 save_guest_fpustate(vcpu);
1936 vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
1942 vcpu->nextrip = vme->rip + vme->inst_length;
1945 error = vm_handle_reqidle(vcpu, &retu);
1948 error = vm_handle_suspend(vcpu, &retu);
1954 error = vm_handle_rendezvous(vcpu);
1958 error = vm_handle_hlt(vcpu, intr_disabled, &retu);
1961 error = vm_handle_paging(vcpu, &retu);
1964 error = vm_handle_inst_emul(vcpu, &retu);
1968 error = vm_handle_inout(vcpu, vme, &retu);
1971 error = vm_handle_db(vcpu, vme, &retu);
1976 vm_inject_ud(vcpu);
1989 error = vm_handle_ipi(vcpu, vme, &retu);
1994 vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1);
1995 VMM_CTR2(vcpu, "retu %d/%d", error, vme->exitcode);
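
The dispatch above sits inside vm_run's retry loop: each pass enters the guest at vcpu->nextrip, classifies the exit, and re-enters unless a handler failed or set retu to bounce the exit out to userspace. A sketch of the driver around the matched lines:

    restart:
            critical_enter();
            restore_guest_fpustate(vcpu);
            vcpu_require_state(vcpu, VCPU_RUNNING);
            error = vmmops_run(vcpu->cookie, vcpu->nextrip, pmap, &evinfo);
            vcpu_require_state(vcpu, VCPU_FROZEN);
            save_guest_fpustate(vcpu);
            critical_exit();

            if (error == 0) {
                    retu = false;
                    vcpu->nextrip = vme->rip + vme->inst_length;
                    switch (vme->exitcode) {
                    /* ... the handlers above set error and retu ... */
                    }
            }
            if (error == 0 && retu == false)
                    goto restart;
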
2001 vm_restart_instruction(struct vcpu *vcpu)
2007 state = vcpu_get_state(vcpu, NULL);
2010 * When a vcpu is "running" the next instruction is determined
2011 * by adding 'rip' and 'inst_length' in the vcpu's 'exitinfo'.
2015 vcpu->exitinfo.inst_length = 0;
2016 VMM_CTR1(vcpu, "restarting instruction at %#lx by "
2017 "setting inst_length to zero", vcpu->exitinfo.rip);
2020 * When a vcpu is "frozen" it is outside the critical section
2023 * 'nextrip' to the vcpu's %rip.
2025 error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
2027 VMM_CTR2(vcpu, "restarting instruction by updating "
2028 "nextrip from %#lx to %#lx", vcpu->nextrip, rip);
2029 vcpu->nextrip = rip;
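
Put together, the two cases give vm_restart_instruction roughly this shape (a sketch). The RUNNING case works because the exit path computes nextrip as rip + inst_length, so zeroing inst_length re-executes the faulting instruction:

    int
    vm_restart_instruction(struct vcpu *vcpu)
    {
            enum vcpu_state state;
            uint64_t rip;
            int error;

            state = vcpu_get_state(vcpu, NULL);
            if (state == VCPU_RUNNING) {
                    vcpu->exitinfo.inst_length = 0;
            } else if (state == VCPU_FROZEN) {
                    error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
                    KASSERT(error == 0, ("%s: error %d getting rip",
                        __func__, error));
                    vcpu->nextrip = rip;
            } else {
                    panic("%s: invalid state %d", __func__, state);
            }
            return (0);
    }
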
2037 vm_exit_intinfo(struct vcpu *vcpu, uint64_t info)
2053 VMM_CTR2(vcpu, "%s: info1(%#lx)", __func__, info);
2054 vcpu->exitintinfo = info;
2113 nested_fault(struct vcpu *vcpu, uint64_t info1, uint64_t info2,
2129 VMM_CTR2(vcpu, "triple fault: info1(%#lx), info2(%#lx)",
2131 vm_suspend(vcpu->vm, VM_SUSPEND_TRIPLEFAULT);
2155 vcpu_exception_intinfo(struct vcpu *vcpu)
2159 if (vcpu->exception_pending) {
2160 info = vcpu->exc_vector & 0xff;
2162 if (vcpu->exc_errcode_valid) {
2164 info |= (uint64_t)vcpu->exc_errcode << 32;
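
Lines 2160-2164 pack the pending exception into the 64-bit intinfo word, which mirrors the hardware event-injection layout: vector in bits 7:0, error code in bits 63:32, with type and validity flags set on the intervening lines that did not match the search. A sketch using the VM_INTINFO_* names from vmm.h:

    uint64_t info = 0;

    if (vcpu->exception_pending) {
            info = vcpu->exc_vector & 0xff;             /* bits 7:0 */
            info |= VM_INTINFO_VALID | VM_INTINFO_HWEXCEPTION;
            if (vcpu->exc_errcode_valid) {
                    info |= VM_INTINFO_DEL_ERRCODE;
                    info |= (uint64_t)vcpu->exc_errcode << 32;
            }
    }
    return (info);
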
2171 vm_entry_intinfo(struct vcpu *vcpu, uint64_t *retinfo)
2176 info1 = vcpu->exitintinfo;
2177 vcpu->exitintinfo = 0;
2180 if (vcpu->exception_pending) {
2181 info2 = vcpu_exception_intinfo(vcpu);
2182 vcpu->exception_pending = 0;
2183 VMM_CTR2(vcpu, "Exception %d delivered: %#lx",
2184 vcpu->exc_vector, info2);
2188 valid = nested_fault(vcpu, info1, info2, retinfo);
2200 VMM_CTR4(vcpu, "%s: info1(%#lx), info2(%#lx), "
2208 vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
2210 *info1 = vcpu->exitintinfo;
2211 *info2 = vcpu_exception_intinfo(vcpu);
2216 vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
2233 if (vcpu->exception_pending) {
2234 VMM_CTR2(vcpu, "Unable to inject exception %d due to "
2235 "pending exception %d", vector, vcpu->exc_vector);
2243 error = vm_get_register(vcpu, VM_REG_GUEST_CR0, &regval);
2255 error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 0);
2260 vm_restart_instruction(vcpu);
2262 vcpu->exception_pending = 1;
2263 vcpu->exc_vector = vector;
2264 vcpu->exc_errcode = errcode;
2265 vcpu->exc_errcode_valid = errcode_valid;
2266 VMM_CTR1(vcpu, "Exception %d pending", vector);
2271 vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, int errcode)
2277 error = vm_inject_exception(vcpu, vector, errcode_valid,
2283 vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2)
2287 VMM_CTR2(vcpu, "Injecting page fault: error_code %#x, cr2 %#lx",
2290 error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2);
2293 vm_inject_fault(vcpu, IDT_PF, 1, error_code);
2296 static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
2299 vm_inject_nmi(struct vcpu *vcpu)
2302 vcpu->nmi_pending = 1;
2303 vcpu_notify_event(vcpu, false);
2308 vm_nmi_pending(struct vcpu *vcpu)
2310 return (vcpu->nmi_pending);
2314 vm_nmi_clear(struct vcpu *vcpu)
2316 if (vcpu->nmi_pending == 0)
2319 vcpu->nmi_pending = 0;
2320 vmm_stat_incr(vcpu, VCPU_NMI_COUNT, 1);
2323 static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
2326 vm_inject_extint(struct vcpu *vcpu)
2329 vcpu->extint_pending = 1;
2330 vcpu_notify_event(vcpu, false);
2335 vm_extint_pending(struct vcpu *vcpu)
2337 return (vcpu->extint_pending);
2341 vm_extint_clear(struct vcpu *vcpu)
2343 if (vcpu->extint_pending == 0)
2346 vcpu->extint_pending = 0;
2347 vmm_stat_incr(vcpu, VCPU_EXTINT_COUNT, 1);
2351 vm_get_capability(struct vcpu *vcpu, int type, int *retval)
2356 return (vmmops_getcap(vcpu->cookie, type, retval));
2360 vm_set_capability(struct vcpu *vcpu, int type, int val)
2365 return (vmmops_setcap(vcpu->cookie, type, val));
2369 vcpu_vm(struct vcpu *vcpu)
2371 return (vcpu->vm);
2375 vcpu_vcpuid(struct vcpu *vcpu)
2377 return (vcpu->vcpuid);
2380 struct vcpu *
2383 return (vm->vcpu[vcpuid]);
2387 vm_lapic(struct vcpu *vcpu)
2389 return (vcpu->vlapic);
2456 vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle)
2460 vcpu_lock(vcpu);
2461 error = vcpu_set_state_locked(vcpu, newstate, from_idle);
2462 vcpu_unlock(vcpu);
2468 vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
2472 vcpu_lock(vcpu);
2473 state = vcpu->state;
2475 *hostcpu = vcpu->hostcpu;
2476 vcpu_unlock(vcpu);
2482 vm_activate_cpu(struct vcpu *vcpu)
2484 struct vm *vm = vcpu->vm;
2486 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
2489 VMM_CTR0(vcpu, "activated");
2490 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus);
2495 vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
2497 if (vcpu == NULL) {
2504 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
2507 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
2508 vcpu_notify_event(vcpu, false);
2514 vm_resume_cpu(struct vm *vm, struct vcpu *vcpu)
2517 if (vcpu == NULL) {
2520 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus))
2523 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
2529 vcpu_debugged(struct vcpu *vcpu)
2532 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
2581 vcpu_stats(struct vcpu *vcpu)
2584 return (vcpu->stats);
2588 vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
2590 *state = vcpu->x2apic_state;
2596 vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
2601 vcpu->x2apic_state = state;
2603 vlapic_set_x2apic_state(vcpu, state);
2609 * This function is called to ensure that a vcpu "sees" a pending event as soon as possible:
2611 * - If the vcpu thread is sleeping then it is woken up.
2612 * - If the vcpu is running on a different host_cpu then an IPI will be directed
2613 * to the host_cpu to cause the vcpu to trap into the hypervisor.
2616 vcpu_notify_event_locked(struct vcpu *vcpu, bool lapic_intr)
2620 hostcpu = vcpu->hostcpu;
2621 if (vcpu->state == VCPU_RUNNING) {
2622 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
2625 vlapic_post_intr(vcpu->vlapic, hostcpu,
2632 * If the 'vcpu' is running on 'curcpu' then it must
2634 * The pending event will be picked up when the vcpu
2639 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
2640 "with hostcpu %d", vcpu->state, hostcpu));
2641 if (vcpu->state == VCPU_SLEEPING)
2642 wakeup_one(vcpu);
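
The elided branches of vcpu_notify_event_locked pick the mechanism by state: a vcpu RUNNING on another host cpu is kicked with an IPI on vmm_ipinum (posted through the vlapic when the event is a local APIC interrupt, letting hardware deliver it without forcing a full exit), a SLEEPING vcpu is woken, and a vcpu notifying itself needs nothing because it re-checks pending events before guest re-entry. A sketch:

    hostcpu = vcpu->hostcpu;
    if (vcpu->state == VCPU_RUNNING) {
            if (hostcpu != curcpu) {
                    if (lapic_intr)
                            vlapic_post_intr(vcpu->vlapic, hostcpu,
                                vmm_ipinum);
                    else
                            ipi_cpu(hostcpu, vmm_ipinum);
            }
            /* else: self-notify; picked up before re-entering the guest */
    } else if (vcpu->state == VCPU_SLEEPING) {
            wakeup_one(vcpu);
    }
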
2647 vcpu_notify_event(struct vcpu *vcpu, bool lapic_intr)
2649 vcpu_lock(vcpu);
2650 vcpu_notify_event_locked(vcpu, lapic_intr);
2651 vcpu_unlock(vcpu);
2665 * XXX apic id is assumed to be numerically identical to vcpu id
2671 vm_smp_rendezvous(struct vcpu *vcpu, cpuset_t dest,
2674 struct vm *vm = vcpu->vm;
2687 * call the rendezvous handler in case this 'vcpu' is one
2690 VMM_CTR0(vcpu, "Rendezvous already in progress");
2692 error = vm_handle_rendezvous(vcpu);
2700 VMM_CTR0(vcpu, "Initiating rendezvous");
2716 return (vm_handle_rendezvous(vcpu));
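
Once vm_smp_rendezvous has dealt with an already-active rendezvous (lines 2687-2692), initiating a new one amounts to publishing the target set and handler, kicking the targets so they trap out, and then joining in as an ordinary participant. A sketch of the presumed setup (field names for the request/done sets assumed):

    vm->rendezvous_req_cpus = dest;
    CPU_ZERO(&vm->rendezvous_done_cpus);
    vm->rendezvous_arg = arg;
    vm->rendezvous_func = func;
    mtx_unlock(&vm->rendezvous_mtx);

    /* Kick every target vcpu so it exits and runs the handler. */
    for (i = 0; i < vm->maxcpus; i++) {
            if (CPU_ISSET(i, &dest))
                    vcpu_notify_event(vm_vcpu(vm, i), false);
    }
    return (vm_handle_rendezvous(vcpu));
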
2775 vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
2790 error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
2803 hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa,
2860 vm_get_rescnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
2863 if (vcpu->vcpuid == 0) {
2864 vmm_stat_set(vcpu, VMM_MEM_RESIDENT, PAGE_SIZE *
2865 vmspace_resident_count(vcpu->vm->vmspace));
2870 vm_get_wiredcnt(struct vcpu *vcpu, struct vmm_stat_type *stat)
2873 if (vcpu->vcpuid == 0) {
2874 vmm_stat_set(vcpu, VMM_MEM_WIRED, PAGE_SIZE *
2875 pmap_wired_count(vmspace_pmap(vcpu->vm->vmspace)));
2888 struct vcpu *vcpu;
2894 vcpu = vm->vcpu[i];
2895 if (vcpu == NULL)
2898 SNAPSHOT_VAR_OR_LEAVE(vcpu->x2apic_state, meta, ret, done);
2899 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitintinfo, meta, ret, done);
2900 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_vector, meta, ret, done);
2901 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode_valid, meta, ret, done);
2902 SNAPSHOT_VAR_OR_LEAVE(vcpu->exc_errcode, meta, ret, done);
2903 SNAPSHOT_VAR_OR_LEAVE(vcpu->guest_xcr0, meta, ret, done);
2904 SNAPSHOT_VAR_OR_LEAVE(vcpu->exitinfo, meta, ret, done);
2905 SNAPSHOT_VAR_OR_LEAVE(vcpu->nextrip, meta, ret, done);
2913 tsc = now + vcpu->tsc_offset;
2916 vcpu->tsc_offset = tsc;
2941 struct vcpu *vcpu;
2948 vcpu = vm->vcpu[i];
2949 if (vcpu == NULL)
2952 error = vmmops_vcpu_snapshot(vcpu->cookie, meta);
3009 vm_set_tsc_offset(struct vcpu *vcpu, uint64_t offset)
3011 vcpu->tsc_offset = offset;
3019 struct vcpu *vcpu;
3030 vcpu = vm->vcpu[i];
3031 if (vcpu == NULL)
3034 error = vmmops_restore_tsc(vcpu->cookie,
3035 vcpu->tsc_offset - now);
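
The arithmetic above follows from guest_tsc = host_tsc + tsc_offset. At snapshot time the saved value is the guest's current TSC (now + tsc_offset); at restore, passing vmmops_restore_tsc() the saved guest TSC minus the new host's TSC yields the offset that makes the guest clock resume where it stopped. A worked example with hypothetical values:

    /*
     * Save on host A:    now = 1000, tsc_offset = 500  -> saved tsc = 1500
     * Restore on host B: now = 200
     *   new offset = saved tsc - now = 1500 - 200 = 1300
     *   guest then reads 200 + 1300 = 1500: the clock is continuous.
     */
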