/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

#include <arch/object/vcpu.h>
#include <armv/vcpu.h>
#include <arch/machine/debug.h> /* Arch_debug[A/Di]ssociateVCPUTCB() */
#include <arch/machine/debug_conf.h>
#include <arch/machine/gic_v2.h>
#include <drivers/timer/arm_generic.h>

BOOT_CODE void vcpu_boot_init(void)
{
    armv_vcpu_boot_init();
    gic_vcpu_num_list_regs = VGIC_VTR_NLISTREGS(get_gic_vcpu_ctrl_vtr());
    if (gic_vcpu_num_list_regs > GIC_VCPU_MAX_NUM_LR) {
        printf("Warning: VGIC is reporting more list registers than we support. Truncating\n");
        gic_vcpu_num_list_regs = GIC_VCPU_MAX_NUM_LR;
    }
    vcpu_disable(NULL);
    ARCH_NODE_STATE(armHSCurVCPU) = NULL;
    ARCH_NODE_STATE(armHSVCPUActive) = false;
}

static void vcpu_save(vcpu_t *vcpu, bool_t active)
{
    word_t i;
    unsigned int lr_num;

    assert(vcpu);
    dsb();
    /* If we aren't active then this state was already stored when we
     * were disabled */
    if (active) {
        vcpu_save_reg(vcpu, seL4_VCPUReg_SCTLR);
        vcpu->vgic.hcr = get_gic_vcpu_ctrl_hcr();
        save_virt_timer(vcpu);
    }

    /* Store GIC VCPU control state */
    vcpu->vgic.vmcr = get_gic_vcpu_ctrl_vmcr();
    vcpu->vgic.apr = get_gic_vcpu_ctrl_apr();
    lr_num = gic_vcpu_num_list_regs;
    for (i = 0; i < lr_num; i++) {
        vcpu->vgic.lr[i] = get_gic_vcpu_ctrl_lr(i);
    }

    armv_vcpu_save(vcpu, active);
}

static word_t readVCPUReg(vcpu_t *vcpu, word_t field)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
        switch (field) {
        case seL4_VCPUReg_SCTLR:
            /* The SCTLR value is switched to/from hardware when we enable/disable
             * the vcpu, not when we switch vcpus */
            if (ARCH_NODE_STATE(armHSVCPUActive)) {
                return getSCTLR();
            } else {
                return vcpu_read_reg(vcpu, field);
            }
        default:
            return vcpu_hw_read_reg(field);
        }
    } else {
        return vcpu_read_reg(vcpu, field);
    }
}

static void writeVCPUReg(vcpu_t *vcpu, word_t field, word_t value)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
        switch (field) {
        case seL4_VCPUReg_SCTLR:
            if (ARCH_NODE_STATE(armHSVCPUActive)) {
                setSCTLR(value);
            } else {
                vcpu_write_reg(vcpu, field, value);
            }
            break;
        default:
            vcpu_hw_write_reg(field, value);
        }
    } else {
        vcpu_write_reg(vcpu, field, value);
    }
}

void vcpu_restore(vcpu_t *vcpu)
{
    assert(vcpu);
    word_t i;
    unsigned int lr_num;

    /* Turn off the VGIC */
    set_gic_vcpu_ctrl_hcr(0);
    isb();

    /* Restore GIC VCPU control state */
    set_gic_vcpu_ctrl_vmcr(vcpu->vgic.vmcr);
    set_gic_vcpu_ctrl_apr(vcpu->vgic.apr);
    lr_num = gic_vcpu_num_list_regs;
    for (i = 0; i < lr_num; i++) {
        set_gic_vcpu_ctrl_lr(i, vcpu->vgic.lr[i]);
    }

    /* restore registers */
#ifdef CONFIG_ARCH_AARCH64
    vcpu_restore_reg_range(vcpu, seL4_VCPUReg_TTBR0, seL4_VCPUReg_SPSR_EL1);
#else
    vcpu_restore_reg_range(vcpu, seL4_VCPUReg_ACTLR, seL4_VCPUReg_SPSRfiq);
#endif
    vcpu_enable(vcpu);
}

void VPPIEvent(irq_t irq)
{
    if (ARCH_NODE_STATE(armHSVCPUActive)) {
        maskInterrupt(true, irq);
        assert(irqVPPIEventIndex(irq) != VPPIEventIRQ_invalid);
        ARCH_NODE_STATE(armHSCurVCPU)->vppi_masked[irqVPPIEventIndex(irq)] = true;
        current_fault = seL4_Fault_VPPIEvent_new(IRQT_TO_IRQ(irq));
        /* The current VCPU being active should indicate that the current
         * thread is runnable. At present, verification cannot establish
         * this, so we perform an extra check. */
        assert(isRunnable(NODE_STATE(ksCurThread)));
        if (isRunnable(NODE_STATE(ksCurThread))) {
            handleFault(NODE_STATE(ksCurThread));
        }
    }
}
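/*
 * Illustrative sketch (not kernel code): the routing in readVCPUReg() and
 * writeVCPUReg() above means a caller always sees one coherent SCTLR value,
 * wherever that value currently lives:
 *
 *     word_t sctlr = readVCPUReg(vcpu, seL4_VCPUReg_SCTLR);
 *     // vcpu current and enabled  -> getSCTLR() reads the live hardware copy
 *     // vcpu current but disabled -> shadow copy saved when it was disabled
 *     // vcpu not current          -> shadow copy saved by vcpu_save()
 */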
void VGICMaintenance(void)
{
    uint32_t eisr0, eisr1;
    uint32_t flags;

    /* We shouldn't get a VGICMaintenance interrupt while a VCPU isn't active,
     * but if one becomes pending before the VGIC is disabled we might get one
     * when returning to userlevel after disabling the current VCPU. In this
     * case we simply return and rely on the interrupt being raised again when
     * the VCPU is re-enabled. */
    if (!ARCH_NODE_STATE(armHSVCPUActive)) {
        printf("Received VGIC maintenance without active VCPU!\n");
        return;
    }

    eisr0 = get_gic_vcpu_ctrl_eisr0();
    eisr1 = get_gic_vcpu_ctrl_eisr1();
    flags = get_gic_vcpu_ctrl_misr();

    if (flags & VGIC_MISR_EOI) {
        int irq_idx;
        if (eisr0) {
            irq_idx = ctzl(eisr0);
        } else if (eisr1) {
            irq_idx = ctzl(eisr1) + 32;
        } else {
            irq_idx = -1;
        }

        /* The hardware should never give us an invalid index, but we don't
         * want to trust it that far */
        if (irq_idx == -1 || irq_idx >= gic_vcpu_num_list_regs) {
            current_fault = seL4_Fault_VGICMaintenance_new(0, 0);
        } else {
            virq_t virq = get_gic_vcpu_ctrl_lr(irq_idx);
            switch (virq_get_virqType(virq)) {
            case virq_virq_active:
                virq = virq_virq_active_set_virqEOIIRQEN(virq, 0);
                break;
            case virq_virq_pending:
                virq = virq_virq_pending_set_virqEOIIRQEN(virq, 0);
                break;
            case virq_virq_invalid:
                virq = virq_virq_invalid_set_virqEOIIRQEN(virq, 0);
                break;
            }
            set_gic_vcpu_ctrl_lr(irq_idx, virq);
            /* decodeVCPUInjectIRQ below checks the vgic.lr register, so we
             * should also sync the shadow data structure */
            assert(ARCH_NODE_STATE(armHSCurVCPU) != NULL && ARCH_NODE_STATE(armHSVCPUActive));
            if (ARCH_NODE_STATE(armHSCurVCPU) != NULL && ARCH_NODE_STATE(armHSVCPUActive)) {
                ARCH_NODE_STATE(armHSCurVCPU)->vgic.lr[irq_idx] = virq;
            } else {
                /* FIXME This should not happen */
            }
            current_fault = seL4_Fault_VGICMaintenance_new(irq_idx, 1);
        }
    } else {
        /* Assume that it was an EOI for an LR that was not present */
        current_fault = seL4_Fault_VGICMaintenance_new(0, 0);
    }

    /* The current VCPU being active should indicate that the current thread
     * is runnable. At present, verification cannot establish this, so we
     * perform an extra check. */
    assert(isRunnable(NODE_STATE(ksCurThread)));
    if (isRunnable(NODE_STATE(ksCurThread))) {
        handleFault(NODE_STATE(ksCurThread));
    }
}
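/*
 * Worked example of the EISR decode above (a sketch, not kernel code):
 * EISR0 and EISR1 together form a bitmap over the list registers whose EOI
 * maintenance bit has fired. With eisr0 == 0x0 and eisr1 == 0x2, the first
 * set bit is bit 1 of the upper word, so irq_idx = ctzl(0x2) + 32 = 33.
 * Only indices below gic_vcpu_num_list_regs are trusted; anything else is
 * reported to userlevel as a zero-index, invalid maintenance fault.
 */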
void vcpu_init(vcpu_t *vcpu)
{
    armv_vcpu_init(vcpu);
    /* GICH VCPU interface control */
    vcpu->vgic.hcr = VGIC_HCR_EN;
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
    /* Virtual Timer interface */
    vcpu->virtTimer.last_pcount = 0;
#endif
}

void vcpu_switch(vcpu_t *new)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) != new)) {
        if (unlikely(new != NULL)) {
            if (unlikely(ARCH_NODE_STATE(armHSCurVCPU) != NULL)) {
                vcpu_save(ARCH_NODE_STATE(armHSCurVCPU), ARCH_NODE_STATE(armHSVCPUActive));
            }
            vcpu_restore(new);
            ARCH_NODE_STATE(armHSCurVCPU) = new;
            ARCH_NODE_STATE(armHSVCPUActive) = true;
        } else if (unlikely(ARCH_NODE_STATE(armHSVCPUActive))) {
            /* leave the current VCPU state loaded, but disable vgic and mmu */
#ifdef ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS
            saveAllBreakpointState(ARCH_NODE_STATE(armHSCurVCPU)->vcpuTCB);
#endif
            vcpu_disable(ARCH_NODE_STATE(armHSCurVCPU));
            ARCH_NODE_STATE(armHSVCPUActive) = false;
        }
    } else if (likely(!ARCH_NODE_STATE(armHSVCPUActive) && new != NULL)) {
        isb();
        vcpu_enable(new);
        ARCH_NODE_STATE(armHSVCPUActive) = true;
    }
}

static void vcpu_invalidate_active(void)
{
    if (ARCH_NODE_STATE(armHSVCPUActive)) {
        vcpu_disable(NULL);
        ARCH_NODE_STATE(armHSVCPUActive) = false;
    }
    ARCH_NODE_STATE(armHSCurVCPU) = NULL;
}

void vcpu_finalise(vcpu_t *vcpu)
{
    if (vcpu->vcpuTCB) {
        dissociateVCPUTCB(vcpu, vcpu->vcpuTCB);
    }
}

void associateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb)
{
    if (tcb->tcbArch.tcbVCPU) {
        dissociateVCPUTCB(tcb->tcbArch.tcbVCPU, tcb);
    }
    if (vcpu->vcpuTCB) {
        dissociateVCPUTCB(vcpu, vcpu->vcpuTCB);
    }
    tcb->tcbArch.tcbVCPU = vcpu;
    vcpu->vcpuTCB = tcb;
}

void dissociateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb)
{
    if (tcb->tcbArch.tcbVCPU != vcpu || vcpu->vcpuTCB != tcb) {
        fail("TCB and VCPU not associated.");
    }
    if (vcpu == ARCH_NODE_STATE(armHSCurVCPU)) {
        vcpu_invalidate_active();
    }
    tcb->tcbArch.tcbVCPU = NULL;
    vcpu->vcpuTCB = NULL;
#ifdef ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS
    Arch_debugDissociateVCPUTCB(tcb);
#endif

    /* sanitise the CPSR, as without a VCPU a thread should only be in user mode */
#ifdef CONFIG_ARCH_AARCH64
    setRegister(tcb, SPSR_EL1, sanitiseRegister(SPSR_EL1, getRegister(tcb, SPSR_EL1), false));
#else
    setRegister(tcb, CPSR, sanitiseRegister(CPSR, getRegister(tcb, CPSR), false));
#endif
}

exception_t invokeVCPUWriteReg(vcpu_t *vcpu, word_t field, word_t value)
{
    writeVCPUReg(vcpu, field, value);
    return EXCEPTION_NONE;
}

exception_t decodeVCPUWriteReg(cap_t cap, unsigned int length, word_t *buffer)
{
    word_t field;
    word_t value;
    if (length < 2) {
        userError("VCPUWriteReg: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }
    field = getSyscallArg(0, buffer);
    value = getSyscallArg(1, buffer);
    if (field >= seL4_VCPUReg_Num) {
        userError("VCPUWriteReg: Invalid field 0x%lx.", (long)field);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }
    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUWriteReg(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)), field, value);
}
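/*
 * For reference, a user-level caller reaches the register-access decode
 * paths (decodeVCPUWriteReg above, decodeVCPUReadReg below) through the
 * libsel4 stubs. A sketch, where `vcpu` is a hypothetical seL4_ARM_VCPU cap:
 *
 *     seL4_Error err = seL4_ARM_VCPU_WriteRegs(vcpu, seL4_VCPUReg_SCTLR, value);
 *     seL4_ARM_VCPU_ReadRegs_t res = seL4_ARM_VCPU_ReadRegs(vcpu, seL4_VCPUReg_SCTLR);
 *     // res.error is an seL4_Error; res.value holds the register contents.
 */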
exception_t invokeVCPUReadReg(vcpu_t *vcpu, word_t field, bool_t call)
{
    tcb_t *thread;
    thread = NODE_STATE(ksCurThread);
    word_t value = readVCPUReg(vcpu, field);
    if (call) {
        word_t *ipcBuffer = lookupIPCBuffer(true, thread);
        setRegister(thread, badgeRegister, 0);
        unsigned int length = setMR(thread, ipcBuffer, 0, value);
        setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                        seL4_MessageInfo_new(0, 0, 0, length)));
    }
    setThreadState(NODE_STATE(ksCurThread), ThreadState_Running);
    return EXCEPTION_NONE;
}

exception_t decodeVCPUReadReg(cap_t cap, unsigned int length, bool_t call, word_t *buffer)
{
    word_t field;
    if (length < 1) {
        userError("VCPUReadReg: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    field = getSyscallArg(0, buffer);

    if (field >= seL4_VCPUReg_Num) {
        userError("VCPUReadReg: Invalid field 0x%lx.", (long)field);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUReadReg(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)), field, call);
}

exception_t invokeVCPUInjectIRQ(vcpu_t *vcpu, unsigned long index, virq_t virq)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
        set_gic_vcpu_ctrl_lr(index, virq);
#ifdef ENABLE_SMP_SUPPORT
    } else if (vcpu->vcpuTCB->tcbAffinity != getCurrentCPUIndex()) {
        doRemoteOp3Arg(IpiRemoteCall_VCPUInjectInterrupt,
                       (word_t)vcpu, index, virq.words[0], vcpu->vcpuTCB->tcbAffinity);
#endif /* ENABLE_SMP_SUPPORT */
    } else {
        vcpu->vgic.lr[index] = virq;
    }

    return EXCEPTION_NONE;
}

exception_t decodeVCPUInjectIRQ(cap_t cap, unsigned int length, word_t *buffer)
{
    word_t vid, priority, group, index;
    vcpu_t *vcpu;
#ifdef CONFIG_ARCH_AARCH64
    word_t mr0;

    vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));

    if (length < 1) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    mr0 = getSyscallArg(0, buffer);
    vid = mr0 & 0xffff;
    priority = (mr0 >> 16) & 0xff;
    group = (mr0 >> 24) & 0xff;
    index = (mr0 >> 32) & 0xff;
#else
    uint32_t mr0, mr1;

    vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));

    if (length < 2) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    mr0 = getSyscallArg(0, buffer);
    mr1 = getSyscallArg(1, buffer);
    vid = mr0 & 0xffff;
    priority = (mr0 >> 16) & 0xff;
    group = (mr0 >> 24) & 0xff;
    index = mr1 & 0xff;
#endif

    /* Check IRQ parameters */
    if (vid > (1U << 10) - 1) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = (1U << 10) - 1;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (priority > 31) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = 31;
        current_syscall_error.invalidArgumentNumber = 2;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (group > 1) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = 1;
        current_syscall_error.invalidArgumentNumber = 3;
        return EXCEPTION_SYSCALL_ERROR;
    }
    /* LR index out of range */
    if (index >= gic_vcpu_num_list_regs) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = gic_vcpu_num_list_regs - 1;
        current_syscall_error.invalidArgumentNumber = 4;
        return EXCEPTION_SYSCALL_ERROR;
    }
    /* LR index is in use */
    if (virq_get_virqType(vcpu->vgic.lr[index]) == virq_virq_active) {
        userError("VGIC list register in use.");
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    virq_t virq = virq_virq_pending_new(group, priority, 1, vid);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUInjectIRQ(vcpu, index, virq);
}
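/*
 * Worked example of the AArch64 argument packing decoded above (a sketch;
 * the values are hypothetical): injecting virtual IRQ 27 at priority 0,
 * group 0, into list register 1 packs the single message register as
 *
 *     mr0 = 27 | (0ul << 16) | (0ul << 24) | (1ul << 32);
 *
 * which is what the libsel4 stub seL4_ARM_VCPU_InjectIRQ(vcpu, 27, 0, 0, 1)
 * builds on the caller's behalf.
 */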
exception_t decodeARMVCPUInvocation(
    word_t label, unsigned int length, cptr_t cptr,
    cte_t *slot, cap_t cap, extra_caps_t extraCaps,
    bool_t call, word_t *buffer
)
{
    switch (label) {
    case ARMVCPUSetTCB:
        return decodeVCPUSetTCB(cap, extraCaps);
    case ARMVCPUReadReg:
        return decodeVCPUReadReg(cap, length, call, buffer);
    case ARMVCPUWriteReg:
        return decodeVCPUWriteReg(cap, length, buffer);
    case ARMVCPUInjectIRQ:
        return decodeVCPUInjectIRQ(cap, length, buffer);
    case ARMVCPUAckVPPI:
        return decodeVCPUAckVPPI(cap, length, buffer);
    default:
        userError("VCPU: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}

exception_t decodeVCPUAckVPPI(cap_t cap, unsigned int length, word_t *buffer)
{
    vcpu_t *vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));

    if (length < 1) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    word_t irq_w = getSyscallArg(0, buffer);
    irq_t irq = (irq_t) CORE_IRQ_TO_IRQT(CURRENT_CPU_INDEX(), irq_w);
    exception_t status = Arch_checkIRQ(irq_w);
    if (status != EXCEPTION_NONE) {
        return status;
    }

    VPPIEventIRQ_t vppi = irqVPPIEventIndex(irq);
    if (vppi == VPPIEventIRQ_invalid) {
        userError("VCPUAckVPPI: Invalid irq number.");
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUAckVPPI(vcpu, vppi);
}

exception_t invokeVCPUAckVPPI(vcpu_t *vcpu, VPPIEventIRQ_t vppi)
{
    vcpu->vppi_masked[vppi] = false;
    return EXCEPTION_NONE;
}

exception_t decodeVCPUSetTCB(cap_t cap, extra_caps_t extraCaps)
{
    cap_t tcbCap;

    if (extraCaps.excaprefs[0] == NULL) {
        userError("VCPU SetTCB: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }
    tcbCap = extraCaps.excaprefs[0]->cap;

    if (cap_get_capType(tcbCap) != cap_thread_cap) {
        userError("TCB cap is not a TCB cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUSetTCB(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)),
                            TCB_PTR(cap_thread_cap_get_capTCBPtr(tcbCap)));
}

exception_t invokeVCPUSetTCB(vcpu_t *vcpu, tcb_t *tcb)
{
    associateVCPUTCB(vcpu, tcb);
    return EXCEPTION_NONE;
}

void handleVCPUFault(word_t hsr)
{
    MCS_DO_IF_BUDGET({
        if (armv_handleVCPUFault(hsr)) {
            return;
        }
        current_fault = seL4_Fault_VCPUFault_new(hsr);
        handleFault(NODE_STATE(ksCurThread));
    })
    schedule();
    activateThread();
}

#ifdef ENABLE_SMP_SUPPORT
void handleVCPUInjectInterruptIPI(vcpu_t *vcpu, unsigned long index, virq_t virq)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
        set_gic_vcpu_ctrl_lr(index, virq);
    } else {
        vcpu->vgic.lr[index] = virq;
    }
}
#endif /* ENABLE_SMP_SUPPORT */

#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
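/*
 * End-to-end usage sketch (user level, not kernel code; all caps are
 * hypothetical): after creating a VCPU object, a VMM binds it to a thread
 * and then drives it through the invocations decoded above:
 *
 *     seL4_ARM_VCPU_SetTCB(vcpu, vm_thread);       // associateVCPUTCB()
 *     seL4_ARM_VCPU_WriteRegs(vcpu, seL4_VCPUReg_SCTLR, sctlr);
 *     seL4_ARM_VCPU_InjectIRQ(vcpu, 27, 0, 0, 0);  // e.g. the virtual timer PPI
 *     seL4_ARM_VCPU_AckVPPI(vcpu, 27);             // after handling a VPPI fault
 */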