Lines matching refs:vcpu in seL4's src/arch/arm/object/vcpu.c

11 #include <arch/object/vcpu.h>
12 #include <armv/vcpu.h>
32 static void vcpu_save(vcpu_t *vcpu, bool_t active)
37 assert(vcpu);
42 vcpu_save_reg(vcpu, seL4_VCPUReg_SCTLR);
43 vcpu->vgic.hcr = get_gic_vcpu_ctrl_hcr();
44 save_virt_timer(vcpu);
48 vcpu->vgic.vmcr = get_gic_vcpu_ctrl_vmcr();
49 vcpu->vgic.apr = get_gic_vcpu_ctrl_apr();
52 vcpu->vgic.lr[i] = get_gic_vcpu_ctrl_lr(i);
54 armv_vcpu_save(vcpu, active);
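
Read together, the vcpu_save matches at lines 32-54 suggest the shape below. This is a hedged reconstruction rather than verbatim source: the dsb() barrier, the gic_vcpu_num_list_regs loop bound, and the active-only guard around the SCTLR/HCR/timer save are assumptions inferred from the matched lines.

    static void vcpu_save(vcpu_t *vcpu, bool_t active)
    {
        assert(vcpu);
        dsb();
        /* If the VCPU is not active, this state was already stored
         * when it was disabled. */
        if (active) {
            vcpu_save_reg(vcpu, seL4_VCPUReg_SCTLR);
            vcpu->vgic.hcr = get_gic_vcpu_ctrl_hcr();
            save_virt_timer(vcpu);
        }

        /* Save the GIC virtual-interface state. */
        vcpu->vgic.vmcr = get_gic_vcpu_ctrl_vmcr();
        vcpu->vgic.apr = get_gic_vcpu_ctrl_apr();
        for (word_t i = 0; i < gic_vcpu_num_list_regs; i++) {  /* assumed bound */
            vcpu->vgic.lr[i] = get_gic_vcpu_ctrl_lr(i);
        }
        armv_vcpu_save(vcpu, active);  /* architecture-specific tail */
    }
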
58 static word_t readVCPUReg(vcpu_t *vcpu, word_t field)
60 if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
64 * the vcpu, not when we switch vcpus */
68 return vcpu_read_reg(vcpu, field);
74 return vcpu_read_reg(vcpu, field);
78 static void writeVCPUReg(vcpu_t *vcpu, word_t field, word_t value)
80 if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
86 vcpu_write_reg(vcpu, field, value);
93 vcpu_write_reg(vcpu, field, value);
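
The paired matches at lines 58-93 show that register reads and writes take a fast path against hardware when the target is the VCPU currently loaded on this core, and otherwise fall back to the saved in-memory context. A minimal sketch of the read side, assuming the vcpu_reg_saved_when_disabled()/armHSVCPUActive test implied by the comment at line 64 and a vcpu_hw_read_reg() hardware accessor; writeVCPUReg at lines 78-93 mirrors this structure on the write side:

    static word_t readVCPUReg(vcpu_t *vcpu, word_t field)
    {
        if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
            if (vcpu_reg_saved_when_disabled(field) &&
                !ARCH_NODE_STATE(armHSVCPUActive)) {
                /* Registers of this class are stored on vcpu_disable of
                 * the vcpu, not when we switch vcpus, so the saved copy
                 * is authoritative even for the current VCPU. */
                return vcpu_read_reg(vcpu, field);
            }
            return vcpu_hw_read_reg(field);  /* assumed hardware accessor */
        }
        /* Not loaded on this core: the memory context is current. */
        return vcpu_read_reg(vcpu, field);
    }
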
97 void vcpu_restore(vcpu_t *vcpu)
99 assert(vcpu);
107 set_gic_vcpu_ctrl_vmcr(vcpu->vgic.vmcr);
108 set_gic_vcpu_ctrl_apr(vcpu->vgic.apr);
111 set_gic_vcpu_ctrl_lr(i, vcpu->vgic.lr[i]);
116 vcpu_restore_reg_range(vcpu, seL4_VCPUReg_TTBR0, seL4_VCPUReg_SPSR_EL1);
118 vcpu_restore_reg_range(vcpu, seL4_VCPUReg_ACTLR, seL4_VCPUReg_SPSRfiq);
120 vcpu_enable(vcpu);
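
The restore path at lines 97-120 mirrors vcpu_save. A plausible reconstruction, in which the initial VGIC disable, the isb() barrier, and the CONFIG_ARCH_AARCH64 split between the TTBR0..SPSR_EL1 and ACTLR..SPSRfiq register ranges are assumptions inferred from lines 116 and 118:

    void vcpu_restore(vcpu_t *vcpu)
    {
        assert(vcpu);
        /* Disable the VGIC while its state is being rewritten. */
        set_gic_vcpu_ctrl_hcr(0);
        isb();

        set_gic_vcpu_ctrl_vmcr(vcpu->vgic.vmcr);
        set_gic_vcpu_ctrl_apr(vcpu->vgic.apr);
        for (word_t i = 0; i < gic_vcpu_num_list_regs; i++) {
            set_gic_vcpu_ctrl_lr(i, vcpu->vgic.lr[i]);
        }

    #ifdef CONFIG_ARCH_AARCH64
        vcpu_restore_reg_range(vcpu, seL4_VCPUReg_TTBR0, seL4_VCPUReg_SPSR_EL1);
    #else
        vcpu_restore_reg_range(vcpu, seL4_VCPUReg_ACTLR, seL4_VCPUReg_SPSRfiq);
    #endif
        vcpu_enable(vcpu);  /* re-enables the VGIC with the restored HCR */
    }
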
213 void vcpu_init(vcpu_t *vcpu)
215 armv_vcpu_init(vcpu);
217 vcpu->vgic.hcr = VGIC_HCR_EN;
220 vcpu->virtTimer.last_pcount = 0;
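
Initialisation at lines 213-220 delegates architecture-specific setup and then seeds defaults. A short sketch; the CONFIG_VTIMER_UPDATE_VOFFSET guard around the timer field is an assumption:

    void vcpu_init(vcpu_t *vcpu)
    {
        armv_vcpu_init(vcpu);
        /* Start with the virtual GIC interface enabled for the guest. */
        vcpu->vgic.hcr = VGIC_HCR_EN;
    #ifdef CONFIG_VTIMER_UPDATE_VOFFSET
        /* No physical counter ticks observed yet. */
        vcpu->virtTimer.last_pcount = 0;
    #endif
    }
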
258 void vcpu_finalise(vcpu_t *vcpu)
260 if (vcpu->vcpuTCB) {
261 dissociateVCPUTCB(vcpu, vcpu->vcpuTCB);
265 void associateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb)
270 if (vcpu->vcpuTCB) {
271 dissociateVCPUTCB(vcpu, vcpu->vcpuTCB);
273 tcb->tcbArch.tcbVCPU = vcpu;
274 vcpu->vcpuTCB = tcb;
277 void dissociateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb)
279 if (tcb->tcbArch.tcbVCPU != vcpu || vcpu->vcpuTCB != tcb) {
282 if (vcpu == ARCH_NODE_STATE(armHSCurVCPU)) {
286 vcpu->vcpuTCB = NULL;
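
Lines 258-286 maintain the bidirectional TCB/VCPU binding: finalisation and re-association both funnel through dissociateVCPUTCB, which refuses to break a pairing that does not hold in both directions. A sketch of the lifecycle, assuming the fail() message text and a vcpu_invalidate_active() helper that drops the currently loaded VCPU:

    void vcpu_finalise(vcpu_t *vcpu)
    {
        if (vcpu->vcpuTCB) {
            dissociateVCPUTCB(vcpu, vcpu->vcpuTCB);
        }
    }

    void associateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb)
    {
        /* Break any existing bindings on either side first (assumed). */
        if (tcb->tcbArch.tcbVCPU) {
            dissociateVCPUTCB(tcb->tcbArch.tcbVCPU, tcb);
        }
        if (vcpu->vcpuTCB) {
            dissociateVCPUTCB(vcpu, vcpu->vcpuTCB);
        }
        tcb->tcbArch.tcbVCPU = vcpu;
        vcpu->vcpuTCB = tcb;
    }

    void dissociateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb)
    {
        if (tcb->tcbArch.tcbVCPU != vcpu || vcpu->vcpuTCB != tcb) {
            fail("TCB and VCPU not associated.");  /* assumed message */
        }
        if (vcpu == ARCH_NODE_STATE(armHSCurVCPU)) {
            vcpu_invalidate_active();  /* assumed helper */
        }
        tcb->tcbArch.tcbVCPU = NULL;
        vcpu->vcpuTCB = NULL;
    }
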
299 exception_t invokeVCPUWriteReg(vcpu_t *vcpu, word_t field, word_t value)
301 writeVCPUReg(vcpu, field, value);
326 exception_t invokeVCPUReadReg(vcpu_t *vcpu, word_t field, bool_t call)
330 word_t value = readVCPUReg(vcpu, field);
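
The invocation wrappers at lines 299-330 are thin. A sketch assuming the usual seL4 invocation convention: the decoder has already validated the field, the write returns immediately, and a read performed as a Call delivers the value in the first message register:

    exception_t invokeVCPUWriteReg(vcpu_t *vcpu, word_t field, word_t value)
    {
        writeVCPUReg(vcpu, field, value);
        return EXCEPTION_NONE;
    }

    exception_t invokeVCPUReadReg(vcpu_t *vcpu, word_t field, bool_t call)
    {
        tcb_t *thread = NODE_STATE(ksCurThread);
        word_t value = readVCPUReg(vcpu, field);

        if (call) {
            /* Reply with the value in MR0 (assumed delivery path). */
            word_t *ipcBuffer = lookupIPCBuffer(true, thread);
            setRegister(thread, badgeRegister, 0);
            unsigned int length = setMR(thread, ipcBuffer, 0, value);
            setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                            seL4_MessageInfo_new(0, 0, 0, length)));
        }
        setThreadState(thread, ThreadState_Running);
        return EXCEPTION_NONE;
    }
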
364 exception_t invokeVCPUInjectIRQ(vcpu_t *vcpu, unsigned long index, virq_t virq)
366 if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
369 } else if (vcpu->vcpuTCB->tcbAffinity != getCurrentCPUIndex()) {
370 doRemoteOp3Arg(IpiRemoteCall_VCPUInjectInterrupt, (word_t)vcpu, index, virq.words[0], vcpu->vcpuTCB->tcbAffinity);
373 vcpu->vgic.lr[index] = virq;
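
Lines 364-373 give injection three cases: the VCPU is loaded on this core (write the hardware list register directly), loaded on another core (forward by IPI), or not loaded (stash in the saved context). The ENABLE_SMP_SUPPORT guard around the remote branch is an assumption:

    exception_t invokeVCPUInjectIRQ(vcpu_t *vcpu, unsigned long index, virq_t virq)
    {
        if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
            set_gic_vcpu_ctrl_lr(index, virq);
    #ifdef ENABLE_SMP_SUPPORT
        } else if (vcpu->vcpuTCB->tcbAffinity != getCurrentCPUIndex()) {
            doRemoteOp3Arg(IpiRemoteCall_VCPUInjectInterrupt, (word_t)vcpu,
                           index, virq.words[0], vcpu->vcpuTCB->tcbAffinity);
    #endif
        } else {
            vcpu->vgic.lr[index] = virq;
        }
        return EXCEPTION_NONE;
    }
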
382 vcpu_t *vcpu;
386 vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));
401 vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));
451 if (virq_get_virqType(vcpu->vgic.lr[index]) == virq_virq_active) {
459 return invokeVCPUInjectIRQ(vcpu, index, virq);
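
The decode matches at lines 382-459 extract the VCPU from the capability, validate the request, and refuse to overwrite a list register whose vIRQ is still active (line 451) before invoking. A sketch of that tail; the userError text and the seL4_DeleteFirst error code are assumptions:

    if (virq_get_virqType(vcpu->vgic.lr[index]) == virq_virq_active) {
        /* The slot still holds an active vIRQ; the caller must wait. */
        userError("VGIC list register in use.");        /* assumed text */
        current_syscall_error.type = seL4_DeleteFirst;  /* assumed code */
        return EXCEPTION_SYSCALL_ERROR;
    }
    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUInjectIRQ(vcpu, index, virq);
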
493 vcpu_t *vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));
516 return invokeVCPUAckVPPI(vcpu, vppi);
519 exception_t invokeVCPUAckVPPI(vcpu_t *vcpu, VPPIEventIRQ_t vppi)
521 vcpu->vppi_masked[vppi] = false;
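
Acknowledging a virtual PPI (lines 493-521) amounts to clearing the per-VCPU masked flag, which is assumed to have been set when the event was delivered, so the event can fire again. A minimal sketch:

    exception_t invokeVCPUAckVPPI(vcpu_t *vcpu, VPPIEventIRQ_t vppi)
    {
        /* Unmask the event so it can be delivered again. */
        vcpu->vppi_masked[vppi] = false;
        return EXCEPTION_NONE;
    }
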
545 exception_t invokeVCPUSetTCB(vcpu_t *vcpu, tcb_t *tcb)
547 associateVCPUTCB(vcpu, tcb);
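
SetTCB (lines 545-547) is a thin wrapper over the association logic above; a sketch under the same invocation convention:

    exception_t invokeVCPUSetTCB(vcpu_t *vcpu, tcb_t *tcb)
    {
        associateVCPUTCB(vcpu, tcb);
        return EXCEPTION_NONE;
    }
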
568 void handleVCPUInjectInterruptIPI(vcpu_t *vcpu, unsigned long index, virq_t virq)
570 if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
573 vcpu->vgic.lr[index] = virq;
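
The IPI handler at lines 568-573 is the receiving end of the remote branch in invokeVCPUInjectIRQ and repeats its local two-way split; presumably it is only compiled under ENABLE_SMP_SUPPORT:

    void handleVCPUInjectInterruptIPI(vcpu_t *vcpu, unsigned long index, virq_t virq)
    {
        if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
            /* Target VCPU is loaded here: write the list register. */
            set_gic_vcpu_ctrl_lr(index, virq);
        } else {
            /* It was unloaded in the meantime: fall back to memory. */
            vcpu->vgic.lr[index] = virq;
        }
    }
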