Lines Matching refs:vcpu
(The leading number on each entry is that line's number in the source file; only lines that reference the identifier are listed, so multi-line comments and function bodies appear truncated.)

19 #include <arch/object/vcpu.h>
115 vcpu_t *vcpu = ARCH_NODE_STATE(x86KSCurrentVCPU);
116 if (vcpu) {
117 vmclear(vcpu);
118 vcpu->launched = false;
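
The matches at 115-118 are the body of the helper that drops whatever VCPU is currently loaded on this core (the enclosing function's name is not among the matching lines). Reassembled from just these lines, with non-matching lines marked as elided; note that launched has to go back to false because after a VMCLEAR the next entry into that guest must use VMLAUNCH rather than VMRESUME:

    vcpu_t *vcpu = ARCH_NODE_STATE(x86KSCurrentVCPU);
    if (vcpu) {
        /* flush and deactivate the VMCS that is current on this core */
        vmclear(vcpu);
        /* a cleared VMCS must be re-entered with VMLAUNCH, not VMRESUME */
        vcpu->launched = false;
        /* ... non-matching lines elided ... */
    }
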
140 static void switchVCPU(vcpu_t *vcpu)
143 if (vcpu->last_cpu != getCurrentCPUIndex() && ARCH_NODE_STATE_ON_CORE(x86KSCurrentVCPU, vcpu->last_cpu) == vcpu) {
144 /* vcpu is currently loaded on another core, need to do vmclear on that core */
145 doRemoteClearCurrentVCPU(vcpu->last_cpu);
149 vmptrld(vcpu);
151 if (vcpu->last_cpu != getCurrentCPUIndex()) {
159 vcpu->last_cpu = getCurrentCPUIndex();
161 ARCH_NODE_STATE(x86KSCurrentVCPU) = vcpu;
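
Reassembled from the matching lines only, switchVCPU at 140-161 is the cross-core migration path: a VMCS may be active on at most one logical processor, so if this VCPU was last current on another core it is VMCLEARed there (via IPI) before VMPTRLD makes it current here. The kernel-internal types and helpers are assumed from the surrounding file, and the nesting of the elided lines is inferred from the numbering:

    static void switchVCPU(vcpu_t *vcpu)
    {
        /* ... */
        if (vcpu->last_cpu != getCurrentCPUIndex() &&
            ARCH_NODE_STATE_ON_CORE(x86KSCurrentVCPU, vcpu->last_cpu) == vcpu) {
            /* vcpu is currently loaded on another core, need to do vmclear on that core */
            doRemoteClearCurrentVCPU(vcpu->last_cpu);
        }
        /* ... */
        vmptrld(vcpu);
        /* ... */
        if (vcpu->last_cpu != getCurrentCPUIndex()) {
            /* ... non-matching lines 152-158 elided ... */
        }
        vcpu->last_cpu = getCurrentCPUIndex();
        /* ... */
        ARCH_NODE_STATE(x86KSCurrentVCPU) = vcpu;
    }
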
397 void vcpu_init(vcpu_t *vcpu)
399 vcpu->vcpuTCB = NULL;
400 vcpu->launched = false;
402 memcpy(vcpu->vmcs, &vmcs_revision, 4);
404 switchVCPU(vcpu);
406 vcpu->cr0 = cr0_high & cr0_low;
407 vcpu->cr0_shadow = 0;
408 vcpu->cr0_mask = 0;
409 vcpu->exception_bitmap = 0;
410 vcpu->vpid = VPID_INVALID;
412 vcpu->last_cpu = getCurrentCPUIndex();
437 vmwrite(VMX_HOST_RSP, (word_t)&vcpu->gp_registers[n_vcpu_gp_register]);
460 vmwrite(VMX_GUEST_CR0, vcpu->cr0);
466 memset(vcpu->io, ~(word_t)0, VCPU_IOBITMAP_SIZE);
467 vmwrite(VMX_CONTROL_IOA_ADDRESS, pptr_to_paddr(vcpu->io));
468 vmwrite(VMX_CONTROL_IOB_ADDRESS, pptr_to_paddr((char *)vcpu->io + (VCPU_IOBITMAP_SIZE / 2)));
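
One detail in vcpu_init (397-468) worth calling out: before a VMCS region may ever be made active, software must write the VMCS revision identifier (reported by the IA32_VMX_BASIC MSR) into its first 32 bits, which is what the memcpy at 402 does before switchVCPU at 404 issues the first VMPTRLD for this region. A minimal standalone illustration of that layout requirement; the helper name and the example revision value are illustrative, not from the source:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* A VMCS region is 4 KiB; its first 32 bits hold the revision identifier,
     * the rest of the layout is implementation-specific and never touched
     * directly by software. */
    static void prepare_vmcs_region(uint8_t vmcs[4096], uint32_t vmcs_revision)
    {
        memset(vmcs, 0, 4096);
        memcpy(vmcs, &vmcs_revision, 4);   /* mirrors the memcpy at line 402 */
    }

    int main(void)
    {
        static uint8_t vmcs[4096];
        uint32_t rev;
        prepare_vmcs_region(vmcs, 0x12345);   /* real value would come from IA32_VMX_BASIC */
        memcpy(&rev, vmcs, 4);
        printf("revision id in region: %#x\n", rev);
        return 0;
    }
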
471 static void dissociateVcpuTcb(tcb_t *tcb, vcpu_t *vcpu)
473 assert(tcb->tcbArch.tcbVCPU == vcpu);
474 assert(vcpu->vcpuTCB == tcb);
476 vcpu->vcpuTCB = NULL;
479 void vcpu_finalise(vcpu_t *vcpu)
481 if (vcpu->vcpuTCB) {
482 dissociateVcpuTcb(vcpu->vcpuTCB, vcpu);
484 if (ARCH_NODE_STATE_ON_CORE(x86KSCurrentVCPU, vcpu->last_cpu) == vcpu) {
486 if (vcpu->last_cpu != getCurrentCPUIndex()) {
487 doRemoteClearCurrentVCPU(vcpu->last_cpu);
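
vcpu_finalise (479-487) has to undo both relationships a live VCPU can hold: its one-to-one binding to a TCB and, if it is the currently loaded VMCS on some core, that residency. Reassembled from the matching lines, with elided lines marked (the handling of the local-core case falls in the elided part):

    void vcpu_finalise(vcpu_t *vcpu)
    {
        /* ... */
        if (vcpu->vcpuTCB) {
            dissociateVcpuTcb(vcpu->vcpuTCB, vcpu);
        }
        /* ... */
        if (ARCH_NODE_STATE_ON_CORE(x86KSCurrentVCPU, vcpu->last_cpu) == vcpu) {
            /* ... */
            if (vcpu->last_cpu != getCurrentCPUIndex()) {
                /* current on another core: VMCLEAR it there via IPI */
                doRemoteClearCurrentVCPU(vcpu->last_cpu);
            }
            /* ... non-matching lines elided ... */
        }
    }
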
496 static void associateVcpuTcb(tcb_t *tcb, vcpu_t *vcpu)
501 if (vcpu->vcpuTCB) {
502 dissociateVcpuTcb(vcpu->vcpuTCB, vcpu);
504 vcpu->vcpuTCB = tcb;
505 tcb->tcbArch.tcbVCPU = vcpu;
508 static exception_t invokeVCPUWriteRegisters(vcpu_t *vcpu, word_t *buffer)
512 vcpu->gp_registers[i] = getSyscallArg(i, buffer);
528 static exception_t invokeEnableIOPort(vcpu_t *vcpu, cte_t *slot, cap_t cap, uint16_t low, uint16_t high)
534 /* update the assigned vpid. If the vcpu does not have a valid vpid then
535 * this is fine as whilst the cap will not point to the vcpu, the vcpu
537 cap = cap_io_port_cap_set_capIOPortVPID(cap, vcpu->vpid);
539 setIOPortMask(vcpu->io, low, high, false);
545 vcpu_t *vcpu;
579 vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));
582 return invokeEnableIOPort(vcpu, ioSlot, ioCap, low, high);
585 static exception_t invokeDisableIOPort(vcpu_t *vcpu, uint16_t low, uint16_t high)
587 setIOPortMask(vcpu->io, low, high, true);
594 vcpu_t *vcpu;
606 vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));
608 return invokeDisableIOPort(vcpu, low, high);
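
Both I/O port invocations come down to setIOPortMask over the VCPU's I/O bitmap: EnableIOPort clears bits (539), DisableIOPort sets them (587). VMX consults two 4 KiB bitmaps with one bit per port in 0x0000-0xFFFF, and a set bit forces a VM exit on access, which is why vcpu_init memsets the whole bitmap to ones (466-468): every port traps until a range is explicitly enabled. A standalone model of that range set/clear using plain C types; the constant and function names here are the model's, not the kernel's (the source uses VCPU_IOBITMAP_SIZE and setIOPortMask, whose definitions are not among the matches):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define IO_BITMAP_BYTES (8 * 1024)   /* two 4 KiB VMX I/O bitmaps = 64Ki ports */

    /* set (trap) or clear (pass through) the bit for every port in [low, high] */
    static void set_io_port_mask(uint8_t *bitmap, uint16_t low, uint16_t high, int set)
    {
        for (uint32_t port = low; port <= high; port++) {
            if (set) {
                bitmap[port / 8] |= (uint8_t)(1u << (port % 8));
            } else {
                bitmap[port / 8] &= (uint8_t)~(1u << (port % 8));
            }
        }
    }

    int main(void)
    {
        static uint8_t io[IO_BITMAP_BYTES];
        memset(io, ~0, sizeof io);              /* default: every port access exits */
        set_io_port_mask(io, 0x3F8, 0x3FF, 0);  /* grant the guest COM1 */
        printf("port 0x3F8 traps: %d\n", (io[0x3F8 / 8] >> (0x3F8 % 8)) & 1);
        return 0;
    }
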
611 static exception_t invokeWriteVMCS(vcpu_t *vcpu, word_t *buffer, word_t field, word_t value)
615 if (ARCH_NODE_STATE(x86KSCurrentVCPU) != vcpu) {
616 switchVCPU(vcpu);
620 vcpu->exception_bitmap = vcpu->cached_exception_bitmap = value;
623 vcpu->cr0 = vcpu->cached_cr0 = value;
626 vcpu->cr0_mask = vcpu->cached_cr0_mask = value;
629 vcpu->cr0_shadow = vcpu->cached_cr0_shadow = value;
743 static word_t readVMCSField(vcpu_t *vcpu, word_t field)
747 return vcpu->exception_bitmap;
749 return vcpu->cr0;
751 return vcpu->cr0_mask;
753 return vcpu->cr0_shadow;
755 if (ARCH_NODE_STATE(x86KSCurrentVCPU) != vcpu) {
756 switchVCPU(vcpu);
761 static exception_t invokeReadVMCS(vcpu_t *vcpu, word_t field, word_t *buffer)
766 setMR(thread, buffer, 0, readVMCSField(vcpu, field));
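
readVMCSField (743-756) mirrors the write path at 611-629: the four fields the kernel virtualises itself (exception bitmap, CR0, CR0 mask, CR0 read shadow) are answered straight from the vcpu struct, and only other fields require making this VCPU's VMCS current so a real vmread can be issued. Reassembled from the matching lines; the case labels and the final vmread are assumptions inferred from the struct field names, not lines present in the listing:

    static word_t readVMCSField(vcpu_t *vcpu, word_t field)
    {
        switch (field) {
        case VMX_CONTROL_EXCEPTION_BITMAP:      /* constant names assumed */
            return vcpu->exception_bitmap;
        case VMX_GUEST_CR0:
            return vcpu->cr0;
        case VMX_CONTROL_CR0_MASK:
            return vcpu->cr0_mask;
        case VMX_CONTROL_CR0_READ_SHADOW:
            return vcpu->cr0_shadow;
        }
        if (ARCH_NODE_STATE(x86KSCurrentVCPU) != vcpu) {
            switchVCPU(vcpu);
        }
        return vmread(field);                   /* assumed; not among the matches */
    }
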
872 static exception_t invokeSetTCB(vcpu_t *vcpu, tcb_t *tcb)
874 associateVcpuTcb(tcb, vcpu);
899 void vcpu_update_state_sysvmenter(vcpu_t *vcpu)
902 if (ARCH_NODE_STATE(x86KSCurrentVCPU) != vcpu) {
903 switchVCPU(vcpu);
919 vcpu_t *vcpu;
922 vcpu = tcb->tcbArch.tcbVCPU;
924 assert(vcpu);
926 if (ARCH_NODE_STATE(x86KSCurrentVCPU) != vcpu) {
927 switchVCPU(vcpu);
1102 vcpu_t *vcpu = ARCH_NODE_STATE(x86KSCurrentVCPU);
1103 assert(vcpu == NODE_STATE(ksCurThread)->tcbArch.tcbVCPU);
1104 vcpu->launched = true;
1107 vcpu->cached_cr0 = vmread(VMX_GUEST_CR0);
1109 /* If the vcpu owns the fpu then we did not modify the active cr0 to anything different
1112 vcpu->cr0 = vcpu->cached_cr0;
1114 /* If the vcpu does not own the fpu then we will have forced the task switched flag
1121 vcpu->cr0 = (vcpu->cached_cr0 & ~CR0_TASK_SWITCH) | (NODE_STATE(ksCurThread)->tcbArch.tcbVCPU->cr0 & CR0_TASK_SWITCH);
1151 /* since this vcpu does not currently own the fpu state, check if the kernel should
1153 * exception AND the owner of this vcpu has not requested that these exceptions be forwarded
1313 static void setEPTRoot(cap_t vmxSpace, vcpu_t *vcpu)
1331 if (ept_root != vcpu->last_ept_root) {
1332 vcpu->last_ept_root = ept_root;
1340 assert(vcpu->vpid != VPID_INVALID);
1342 invvpid_context(vcpu->vpid);
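
setEPTRoot (1313-1342) avoids touching the VMCS when the guest's EPT root is unchanged; when it does change, the new root is recorded, written to the VMCS in the elided lines, and the VCPU's VPID is flushed with invvpid_context so that stale translations tagged with that VPID do not survive the address-space change. Reassembled from the matching lines, with the nesting of the elided lines inferred from the numbering:

    static void setEPTRoot(cap_t vmxSpace, vcpu_t *vcpu)
    {
        /* ... resolve ept_root from the EPT capability (elided) ... */
        if (ept_root != vcpu->last_ept_root) {
            vcpu->last_ept_root = ept_root;
            /* ... write the new EPT pointer into the VMCS (elided) ... */
            assert(vcpu->vpid != VPID_INVALID);
            invvpid_context(vcpu->vpid);   /* drop translations tagged with this VPID */
        }
    }
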
1349 vcpu_t *vcpu = NODE_STATE(ksCurThread)->tcbArch.tcbVCPU;
1350 word_t cr0 = vcpu->cr0;
1351 word_t exception_bitmap = vcpu->exception_bitmap;
1352 word_t cr0_mask = vcpu->cr0_mask;
1353 word_t cr0_shadow = vcpu->cr0_shadow;
1354 /* if the vcpu actually owns the fpu then we do not need to change any bits
1357 /* when the vcpu doesn't own the fpu we need to force the task switched flag
1376 if (!(vcpu->cr0_mask & CR0_TASK_SWITCH)) {
1378 cr0_shadow |= vcpu->cr0 & CR0_TASK_SWITCH;
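
The cr0/cr0_mask/cr0_shadow trio at 1350-1378 relies on VMX's CR0 guest/host mask and read shadow: for every bit set in the mask the guest's reads of CR0 return the read shadow's value, while bits clear in the mask read back from the real guest CR0. That is why, when the kernel forces CR0.TS for lazy FPU switching (1357 onwards), it also claims TS in the mask and copies the guest's own TS value into the shadow (1376-1378), so the guest cannot observe the substitution. A small standalone model of the read semantics; the helper name and example values are illustrative, only CR0_TASK_SWITCH is taken from the source:

    #include <stdint.h>
    #include <stdio.h>

    #define CR0_TASK_SWITCH (1ull << 3)   /* CR0.TS */

    /* What a guest read of CR0 returns under VMX: host-owned (masked) bits come
     * from the read shadow, all other bits from the real guest CR0. */
    static uint64_t guest_visible_cr0(uint64_t guest_cr0, uint64_t mask, uint64_t shadow)
    {
        return (guest_cr0 & ~mask) | (shadow & mask);
    }

    int main(void)
    {
        uint64_t real   = 0x80000031 | CR0_TASK_SWITCH;  /* TS forced on by the host */
        uint64_t mask   = CR0_TASK_SWITCH;               /* host owns TS */
        uint64_t shadow = 0;                             /* guest believes TS is clear */
        printf("guest sees CR0 = %#llx\n",
               (unsigned long long)guest_visible_cr0(real, mask, shadow));
        return 0;
    }
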
1381 if (cr0 != vcpu->cached_cr0) {
1383 vcpu->cached_cr0 = cr0;
1385 if (exception_bitmap != vcpu->cached_exception_bitmap) {
1387 vcpu->cached_exception_bitmap = exception_bitmap;
1389 if (cr0_mask != vcpu->cached_cr0_mask) {
1391 vcpu->cached_cr0_mask = cr0_mask;
1393 if (cr0_shadow != vcpu->cached_cr0_shadow) {
1395 vcpu->cached_cr0_shadow = cr0_shadow;
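
Lines 1381-1395 show why each of the virtualised fields carries a cached_ twin: on the entry path a vmwrite is issued only when the wanted value differs from what was last written to the hardware VMCS, so unchanged fields cost nothing per entry. A standalone sketch of that compare-before-write pattern with a stand-in vmwrite; the struct and field encoding are simplified stand-ins, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t word_t;

    /* stand-in for the real VMWRITE wrapper */
    static void vmwrite(word_t field, word_t value)
    {
        printf("vmwrite(field=%#llx, value=%#llx)\n",
               (unsigned long long)field, (unsigned long long)value);
    }

    struct cached_field {
        word_t value;    /* what the invocation/exit paths last decided on */
        word_t cached;   /* what is currently in the hardware VMCS */
    };

    /* flush one field to the VMCS only if it changed since the last entry */
    static void flush_field(struct cached_field *f, word_t field)
    {
        if (f->value != f->cached) {
            vmwrite(field, f->value);
            f->cached = f->value;
        }
    }

    int main(void)
    {
        struct cached_field cr0 = { .value = 0x31, .cached = 0x31 };
        flush_field(&cr0, 0x6800);   /* unchanged: no vmwrite issued */
        cr0.value |= 1u << 3;        /* force CR0.TS for lazy FPU switching */
        flush_field(&cr0, 0x6800);   /* changed: exactly one vmwrite issued */
        return 0;
    }
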
1404 vcpu_t *vcpu = x86KSVPIDTable[vpid];
1405 if (vcpu == NULL) {
1408 assert(vcpu->vpid == vpid);
1409 setIOPortMask(vcpu->io, first, last, true);
1423 vcpu_t *vcpu = x86KSVPIDTable[vpid];
1426 memset(vcpu->io, ~0, sizeof(vcpu->io));
1456 static void storeVPID(vcpu_t *vcpu, vpid_t vpid)
1459 assert(vcpu->vpid == VPID_INVALID);
1460 x86KSVPIDTable[vpid] = vcpu;
1461 vcpu->vpid = vpid;
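
storeVPID (1456-1461) records the assignment in both directions: the VCPU learns its VPID and x86KSVPIDTable maps the VPID back to the VCPU. The reverse map is what the path at 1404-1409 uses to look a VCPU up by VPID and re-mask a port range in its I/O bitmap, presumably when the I/O port capability that granted the range goes away (the capability stores the VPID, see 537). Reassembled from the matching lines, elisions marked:

    static void storeVPID(vcpu_t *vcpu, vpid_t vpid)
    {
        /* ... non-matching lines elided ... */
        assert(vcpu->vpid == VPID_INVALID);
        x86KSVPIDTable[vpid] = vcpu;
        vcpu->vpid = vpid;
    }
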