Lines Matching refs:vcpu

Cross-reference listing: each entry below carries the matched line's number in the source file (FreeBSD's arm64 vmm, going by the vgic/vfp/smccc symbols), followed by the line itself. Matches are single lines, so multi-line statements and comments appear truncated.
76 struct vcpu {
80 int hostcpu; /* host cpuid this vcpu last ran on */
91 #define vcpu_lock_init(v) mtx_init(&((v)->mtx), "vcpu lock", 0, MTX_SPIN)
151 struct vcpu **vcpu; /* (i) guest vcpus */
166 static int vm_handle_wfi(struct vcpu *vcpu,
172 static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");
178 "IPI vector used for vcpu notifications");
240 static void vcpu_notify_event_locked(struct vcpu *vcpu);
273 vcpu_cleanup(struct vcpu *vcpu, bool destroy)
275 vmmops_vcpu_cleanup(vcpu->cookie);
276 vcpu->cookie = NULL;
278 vmm_stat_free(vcpu->stats);
279 fpu_save_area_free(vcpu->guestfpu);
280 vcpu_lock_destroy(vcpu);
284 static struct vcpu *
287 struct vcpu *vcpu;
290 ("vcpu_alloc: invalid vcpu %d", vcpu_id));
292 vcpu = malloc(sizeof(*vcpu), M_VMM, M_WAITOK | M_ZERO);
293 vcpu_lock_init(vcpu);
294 vcpu->state = VCPU_IDLE;
295 vcpu->hostcpu = NOCPU;
296 vcpu->vcpuid = vcpu_id;
297 vcpu->vm = vm;
298 vcpu->guestfpu = fpu_save_area_alloc();
299 vcpu->stats = vmm_stat_alloc();
300 return (vcpu);
304 vcpu_init(struct vcpu *vcpu)
306 vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid);
307 MPASS(vcpu->cookie != NULL);
308 fpu_save_area_reset(vcpu->guestfpu);
309 vmm_stat_init(vcpu->stats);
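The vcpu_alloc/vcpu_init/vcpu_cleanup fragments above show a two-phase lifecycle: generic allocation first (zeroed struct, spin lock, FPU save area, stats), then backend initialization through vmmops_vcpu_init, with teardown mirroring it in reverse. A minimal user-space sketch of that pattern follows; backend_vcpu_init/backend_vcpu_cleanup and the buffer sizes are hypothetical stand-ins, not the vmmops API.

#include <assert.h>
#include <stdlib.h>

struct vcpu {
    int   vcpuid;
    void *cookie;    /* backend-private state, set at init time */
    void *guestfpu;  /* guest FP/SIMD save area */
};

/* Hypothetical stand-ins for the vmmops_vcpu_init/_cleanup backend hooks. */
static void *backend_vcpu_init(struct vcpu *vcpu) { return (malloc(64)); }
static void  backend_vcpu_cleanup(void *cookie) { free(cookie); }

static struct vcpu *
vcpu_alloc(int vcpu_id)
{
    struct vcpu *vcpu;

    vcpu = calloc(1, sizeof(*vcpu));    /* M_WAITOK | M_ZERO analogue */
    vcpu->vcpuid = vcpu_id;
    vcpu->guestfpu = malloc(512);       /* fpu_save_area_alloc() analogue */
    return (vcpu);
}

static void
vcpu_init(struct vcpu *vcpu)
{
    vcpu->cookie = backend_vcpu_init(vcpu);
    assert(vcpu->cookie != NULL);       /* MPASS() analogue */
}

static void
vcpu_cleanup(struct vcpu *vcpu)
{
    backend_vcpu_cleanup(vcpu->cookie); /* backend state first */
    vcpu->cookie = NULL;
    free(vcpu->guestfpu);               /* then generic resources; note the
                                           struct itself is freed separately,
                                           at VM destroy time */
}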
313 vm_exitinfo(struct vcpu *vcpu)
315 return (&vcpu->exitinfo);
403 if (vm->vcpu[i] != NULL)
404 vcpu_init(vm->vcpu[i]);
417 struct vcpu *
420 struct vcpu *vcpu;
429 vcpu = atomic_load_ptr(&vm->vcpu[vcpuid]);
430 if (__predict_true(vcpu != NULL))
431 return (vcpu);
434 vcpu = vm->vcpu[vcpuid];
435 if (vcpu == NULL && !vm->dying) {
436 vcpu = vcpu_alloc(vm, vcpuid);
437 vcpu_init(vcpu);
443 atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid],
444 (uintptr_t)vcpu);
447 return (vcpu);
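The vm_vcpu() fragments above are a classic double-checked lazy publication: an unlocked atomic load on the fast path, a re-check after taking the init lock (the lock itself is not among the matched lines), and an atomic_store_rel_ptr so the fully initialized vcpu becomes visible before the pointer does. A runnable C11 analogue, with a pthread mutex standing in for the kernel lock; acquire ordering on the fast-path load is the conservative portable choice:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct vcpu { int vcpuid; };

static _Atomic(struct vcpu *) vcpu_slot[16];
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

struct vcpu *
vm_vcpu(int vcpuid)
{
    struct vcpu *vcpu;

    /* Fast path: already published, no lock taken. */
    vcpu = atomic_load_explicit(&vcpu_slot[vcpuid], memory_order_acquire);
    if (vcpu != NULL)
        return (vcpu);

    pthread_mutex_lock(&init_lock);
    /* Re-check under the lock: another thread may have won the race. */
    vcpu = atomic_load_explicit(&vcpu_slot[vcpuid], memory_order_relaxed);
    if (vcpu == NULL) {
        vcpu = calloc(1, sizeof(*vcpu));
        vcpu->vcpuid = vcpuid;
        /*
         * Release store: everything written to *vcpu above becomes
         * visible before any reader can observe the non-NULL pointer.
         */
        atomic_store_explicit(&vcpu_slot[vcpuid], vcpu,
            memory_order_release);
    }
    pthread_mutex_unlock(&init_lock);
    return (vcpu);
}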
493 vm->vcpu = malloc(sizeof(*vm->vcpu) * vm->maxcpus, M_VMM,
551 if (vm->vcpu[i] != NULL)
552 vcpu_cleanup(vm->vcpu[i], destroy);
581 free(vm->vcpu[i], M_VMM);
582 free(vm->vcpu, M_VMM);
641 * This function is called in the context of a running vcpu which acts as
645 vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa)
647 struct vm *vm = vcpu->vm;
653 state = vcpu_get_state(vcpu, &hostcpu);
655 ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
898 vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
902 vmmops_gla2gpa(vcpu->cookie, paging, gla, prot, gpa, is_fault);
907 vmm_reg_raz(struct vcpu *vcpu, uint64_t *rval, void *arg)
914 vmm_reg_read_arg(struct vcpu *vcpu, uint64_t *rval, void *arg)
921 vmm_reg_wi(struct vcpu *vcpu, uint64_t wval, void *arg)
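vmm_reg_raz, vmm_reg_read_arg, and vmm_reg_wi are the stock handlers for registers emulated as RAZ/WI (read-as-zero, write-ignored) or as a fixed value. The matcher elides their bodies; the reconstructions below assume the usual RAZ/WI semantics and that arg points at the fixed value to return.

#include <stdint.h>

struct vcpu;    /* opaque here; these handlers don't touch it */

/* Read-as-zero: the guest sees 0 regardless of any real state. */
static int
vmm_reg_raz(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
    *rval = 0;
    return (0);
}

/* Return a fixed value supplied when the handler was registered. */
static int
vmm_reg_read_arg(struct vcpu *vcpu, uint64_t *rval, void *arg)
{
    *rval = *(uint64_t *)arg;
    return (0);
}

/* Write-ignored: accept and discard the guest's write. */
static int
vmm_reg_wi(struct vcpu *vcpu, uint64_t wval, void *arg)
{
    return (0);
}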
1025 vm_handle_reg_emul(struct vcpu *vcpu, bool *retu)
1032 vm = vcpu->vm;
1033 vme = &vcpu->exitinfo;
1043 rv = vmm_emulate_register(vcpu, vre,
1056 rv = vmm_emulate_register(vcpu, vre,
1111 vm_handle_inst_emul(struct vcpu *vcpu, bool *retu)
1122 vm = vcpu->vm;
1127 vme = &vcpu->exitinfo;
1144 error = vmm_emulate_instruction(vcpu, fault_ipa, vie, paging,
1181 vm_exit_suspended(struct vcpu *vcpu, uint64_t pc)
1183 struct vm *vm = vcpu->vm;
1189 vmexit = vm_exitinfo(vcpu);
1197 vm_exit_debug(struct vcpu *vcpu, uint64_t pc)
1201 vmexit = vm_exitinfo(vcpu);
1208 vm_activate_cpu(struct vcpu *vcpu)
1210 struct vm *vm = vcpu->vm;
1212 if (CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
1215 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->active_cpus);
1221 vm_suspend_cpu(struct vm *vm, struct vcpu *vcpu)
1223 if (vcpu == NULL) {
1230 if (!CPU_ISSET(vcpu->vcpuid, &vm->active_cpus))
1233 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
1234 vcpu_notify_event(vcpu);
1240 vm_resume_cpu(struct vm *vm, struct vcpu *vcpu)
1243 if (vcpu == NULL) {
1246 if (!CPU_ISSET(vcpu->vcpuid, &vm->debug_cpus))
1249 CPU_CLR_ATOMIC(vcpu->vcpuid, &vm->debug_cpus);
1255 vcpu_debugged(struct vcpu *vcpu)
1258 return (CPU_ISSET(vcpu->vcpuid, &vcpu->vm->debug_cpus));
1284 vcpu_stats(struct vcpu *vcpu)
1287 return (vcpu->stats);
1291 * This function is called to ensure that a vcpu "sees" a pending event
1293 * - If the vcpu thread is sleeping then it is woken up.
1294 * - If the vcpu is running on a different host_cpu then an IPI will be directed
1295 * to the host_cpu to cause the vcpu to trap into the hypervisor.
1298 vcpu_notify_event_locked(struct vcpu *vcpu)
1302 hostcpu = vcpu->hostcpu;
1303 if (vcpu->state == VCPU_RUNNING) {
1304 KASSERT(hostcpu != NOCPU, ("vcpu running on invalid hostcpu"));
1309 * If the 'vcpu' is running on 'curcpu' then it must
1311 * The pending event will be picked up when the vcpu
1316 KASSERT(hostcpu == NOCPU, ("vcpu state %d not consistent "
1317 "with hostcpu %d", vcpu->state, hostcpu));
1318 if (vcpu->state == VCPU_SLEEPING)
1319 wakeup_one(vcpu);
1324 vcpu_notify_event(struct vcpu *vcpu)
1326 vcpu_lock(vcpu);
1327 vcpu_notify_event_locked(vcpu);
1328 vcpu_unlock(vcpu);
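Per the comment above, notification has two cases: a sleeping vcpu is woken, and a vcpu running on another host CPU is sent an IPI so it traps back into the hypervisor. A runnable pthread analogue of the sleeping case follows; the IPI case has no user-space equivalent and is noted inline. Names beyond the states shown above are hypothetical.

#include <pthread.h>

enum vcpu_state { VCPU_IDLE, VCPU_FROZEN, VCPU_RUNNING, VCPU_SLEEPING };

struct vcpu {
    pthread_mutex_t mtx;
    pthread_cond_t  wake;       /* wakeup_one() analogue */
    enum vcpu_state state;
    int             hostcpu;
};

static void
vcpu_notify_event_locked(struct vcpu *vcpu)
{
    if (vcpu->state == VCPU_RUNNING) {
        /*
         * In the kernel: if vcpu->hostcpu != curcpu, send an IPI so the
         * guest traps into the hypervisor and sees the pending event.
         * If it is curcpu, we are already in the hypervisor and the
         * event is picked up on the way back into the guest.
         */
    } else if (vcpu->state == VCPU_SLEEPING) {
        pthread_cond_signal(&vcpu->wake);
    }
}

static void
vcpu_notify_event(struct vcpu *vcpu)
{
    pthread_mutex_lock(&vcpu->mtx);
    vcpu_notify_event_locked(vcpu);
    pthread_mutex_unlock(&vcpu->mtx);
}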
1332 restore_guest_fpustate(struct vcpu *vcpu)
1342 vfp_restore(vcpu->guestfpu);
1352 save_guest_fpustate(struct vcpu *vcpu)
1360 vfp_store(vcpu->guestfpu);
1367 vcpu_set_state_locked(struct vcpu *vcpu, enum vcpu_state newstate,
1372 vcpu_assert_locked(vcpu);
1377 * ioctl() operating on a vcpu at any point.
1380 while (vcpu->state != VCPU_IDLE) {
1381 vcpu_notify_event_locked(vcpu);
1382 msleep_spin(&vcpu->state, &vcpu->mtx, "vmstat", hz);
1385 KASSERT(vcpu->state != VCPU_IDLE, ("invalid transition from "
1386 "vcpu idle state"));
1389 if (vcpu->state == VCPU_RUNNING) {
1390 KASSERT(vcpu->hostcpu == curcpu, ("curcpu %d and hostcpu %d "
1391 "mismatch for running vcpu", curcpu, vcpu->hostcpu));
1393 KASSERT(vcpu->hostcpu == NOCPU, ("Invalid hostcpu %d for a "
1394 "vcpu that is not running", vcpu->hostcpu));
1403 switch (vcpu->state) {
1420 vcpu->state = newstate;
1422 vcpu->hostcpu = curcpu;
1424 vcpu->hostcpu = NOCPU;
1427 wakeup(&vcpu->state);
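The vcpu_set_state_locked() fragments show the state-machine discipline: a caller coming from idle nudges the vcpu once a second until it returns to VCPU_IDLE, every transition is validated, hostcpu is recorded only while VCPU_RUNNING, and waiters are woken on each change. The switch body itself is not among the matched lines; the sketch below reconstructs the usual vmm discipline, in which VCPU_FROZEN is the hub every other state passes through, so treat it as an assumption rather than a verbatim copy.

#include <stdbool.h>

enum vcpu_state { VCPU_IDLE, VCPU_FROZEN, VCPU_RUNNING, VCPU_SLEEPING };

static bool
vcpu_transition_ok(enum vcpu_state cur, enum vcpu_state newstate)
{
    switch (cur) {
    case VCPU_IDLE:
        /* An idle vcpu must be frozen before anything else happens. */
        return (newstate == VCPU_FROZEN);
    case VCPU_FROZEN:
        /* Frozen is the hub: back to idle, or on to run/sleep. */
        return (newstate == VCPU_IDLE || newstate == VCPU_RUNNING ||
            newstate == VCPU_SLEEPING);
    case VCPU_RUNNING:
    case VCPU_SLEEPING:
        /* Running and sleeping vcpus can only return to frozen. */
        return (newstate == VCPU_FROZEN);
    }
    return (false);
}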
1433 vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate)
1437 if ((error = vcpu_set_state(vcpu, newstate, false)) != 0)
1442 vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
1446 if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
1451 vm_get_capability(struct vcpu *vcpu, int type, int *retval)
1456 return (vmmops_getcap(vcpu->cookie, type, retval));
1460 vm_set_capability(struct vcpu *vcpu, int type, int val)
1465 return (vmmops_setcap(vcpu->cookie, type, val));
1469 vcpu_vm(struct vcpu *vcpu)
1471 return (vcpu->vm);
1475 vcpu_vcpuid(struct vcpu *vcpu)
1477 return (vcpu->vcpuid);
1481 vcpu_get_cookie(struct vcpu *vcpu)
1483 return (vcpu->cookie);
1486 struct vcpu *
1489 return (vm->vcpu[vcpuid]);
1493 vcpu_set_state(struct vcpu *vcpu, enum vcpu_state newstate, bool from_idle)
1497 vcpu_lock(vcpu);
1498 error = vcpu_set_state_locked(vcpu, newstate, from_idle);
1499 vcpu_unlock(vcpu);
1505 vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
1509 vcpu_lock(vcpu);
1510 state = vcpu->state;
1512 *hostcpu = vcpu->hostcpu;
1513 vcpu_unlock(vcpu);
1551 vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
1556 * The current vcpu should be frozen to ensure 'vm_memmap[]'
1559 int state = vcpu_get_state(vcpu, NULL);
1560 KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
1563 return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie));
1583 vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
1589 return (vmmops_getreg(vcpu->cookie, reg, retval));
1593 vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
1599 error = vmmops_setreg(vcpu->cookie, reg, val);
1603 vcpu->nextpc = val;
1615 vm_inject_exception(struct vcpu *vcpu, uint64_t esr, uint64_t far)
1617 return (vmmops_exception(vcpu->cookie, esr, far));
1647 vm_handle_smccc_call(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
1652 hypctx = vcpu_get_cookie(vcpu);
1667 vm_handle_wfi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
1669 vcpu_lock(vcpu);
1671 if (vgic_has_pending_irq(vcpu->cookie))
1674 if (vcpu_should_yield(vcpu))
1677 vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
1682 msleep_spin(vcpu, &vcpu->mtx, "vmidle", hz);
1683 vcpu_require_state_locked(vcpu, VCPU_FROZEN);
1685 vcpu_unlock(vcpu);
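vm_handle_wfi() parks the vcpu until the vgic has a pending interrupt or the scheduler wants the thread back, napping in hz-length slices in VCPU_SLEEPING and returning to VCPU_FROZEN around each nap. A runnable pthread analogue, where irq_pending()/should_yield() are hypothetical stand-ins for vgic_has_pending_irq()/vcpu_should_yield():

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;

/* Stubs; real callers would wire these to interrupt/scheduler state. */
static bool irq_pending(void) { return (true); }
static bool should_yield(void) { return (false); }

static void
handle_wfi(void)
{
    pthread_mutex_lock(&mtx);
    for (;;) {
        if (irq_pending() || should_yield())
            break;
        /* state = SLEEPING; notifiers signal 'wake' (see notify sketch). */
        struct timespec ts;
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 1;                 /* msleep_spin(..., hz) analogue */
        pthread_cond_timedwait(&wake, &mtx, &ts);
        /* state = FROZEN again before re-checking the wake conditions. */
    }
    pthread_mutex_unlock(&mtx);
}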
1692 vm_handle_paging(struct vcpu *vcpu, bool *retu)
1694 struct vm *vm = vcpu->vm;
1701 vme = &vcpu->exitinfo;
1703 pmap = vmspace_pmap(vcpu->vm->vmspace);
1729 vm_handle_suspend(struct vcpu *vcpu, bool *retu)
1731 struct vm *vm = vcpu->vm;
1738 CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus);
1747 vcpu_lock(vcpu);
1752 vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
1753 msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
1754 vcpu_require_state_locked(vcpu, VCPU_FROZEN);
1756 vcpu_unlock(vcpu);
1758 vcpu_lock(vcpu);
1761 vcpu_unlock(vcpu);
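vm_handle_suspend() is a rendezvous: each vcpu marks itself in suspended_cpus, then naps in one-second slices until every active vcpu has done the same. The loop's exit condition is not among the matched lines, so the runnable analogue below reconstructs it with a counter in place of the CPU sets.

#include <pthread.h>
#include <time.h>

static pthread_mutex_t smtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  scv = PTHREAD_COND_INITIALIZER;
static int nactive;     /* number of active vcpus */
static int nsuspended;  /* how many have reached the rendezvous */

static void
handle_suspend(void)
{
    pthread_mutex_lock(&smtx);
    nsuspended++;                       /* CPU_SET_ATOMIC(...) analogue */
    pthread_cond_broadcast(&scv);
    while (nsuspended < nactive) {      /* wait for the stragglers */
        struct timespec ts;
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 1;                 /* "vmsusp" hz-slice analogue */
        pthread_cond_timedwait(&scv, &smtx, &ts);
    }
    pthread_mutex_unlock(&smtx);
}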
1777 vm_run(struct vcpu *vcpu)
1779 struct vm *vm = vcpu->vm;
1786 vcpuid = vcpu->vcpuid;
1795 vme = &vcpu->exitinfo;
1802 restore_guest_fpustate(vcpu);
1804 vcpu_require_state(vcpu, VCPU_RUNNING);
1805 error = vmmops_run(vcpu->cookie, vcpu->nextpc, pmap, &evinfo);
1806 vcpu_require_state(vcpu, VCPU_FROZEN);
1808 save_guest_fpustate(vcpu);
1816 vcpu->nextpc = vme->pc + vme->inst_length;
1817 error = vm_handle_inst_emul(vcpu, &retu);
1821 vcpu->nextpc = vme->pc + vme->inst_length;
1822 error = vm_handle_reg_emul(vcpu, &retu);
1830 vcpu->nextpc = vme->pc;
1835 error = vm_handle_smccc_call(vcpu, vme, &retu);
1839 vcpu->nextpc = vme->pc + vme->inst_length;
1840 error = vm_handle_wfi(vcpu, vme, &retu);
1844 vcpu->nextpc = vme->pc;
1845 error = vm_handle_paging(vcpu, &retu);
1849 vcpu->nextpc = vme->pc;
1850 error = vm_handle_suspend(vcpu, &retu);
1855 vcpu->nextpc = vme->pc;
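The vm_run() dispatch above follows one rule worth calling out: exit reasons whose handling completes the trapped instruction (register and instruction emulation, WFI) resume at vme->pc + vme->inst_length, while those that must re-execute it (paging faults, suspend) resume at vme->pc. A condensed, runnable sketch of just that decision, with abbreviated exit-code names and the handlers elided:

#include <stdint.h>

enum exitcode { INST_EMUL, REG_EMUL, WFI, PAGING, SUSPENDED };

struct vm_exit {
    enum exitcode code;
    uint64_t      pc;           /* guest pc at the time of the exit */
    uint32_t      inst_length;  /* length of the trapped instruction */
};

static uint64_t
next_pc(const struct vm_exit *vme)
{
    switch (vme->code) {
    case INST_EMUL:
    case REG_EMUL:
    case WFI:
        /* The instruction was emulated on the guest's behalf: skip it. */
        return (vme->pc + vme->inst_length);
    case PAGING:
    case SUSPENDED:
    default:
        /* Fault fixed up (or VM going away): re-execute the instruction. */
        return (vme->pc);
    }
}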