/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

#include <arch/object/vcpu.h>
#include <armv/vcpu.h>
#include <arch/machine/debug.h> /* Arch_debug[A/Di]ssociateVCPUTCB() */
#include <arch/machine/debug_conf.h>
#include <arch/machine/gic_v2.h>
#include <drivers/timer/arm_generic.h>

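/* Initialise the virtualisation extensions and the GIC virtual CPU interface at
 * boot, then leave the platform with no VCPU loaded and none active. */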
BOOT_CODE void vcpu_boot_init(void)
{
    armv_vcpu_boot_init();
    gic_vcpu_num_list_regs = VGIC_VTR_NLISTREGS(get_gic_vcpu_ctrl_vtr());
    if (gic_vcpu_num_list_regs > GIC_VCPU_MAX_NUM_LR) {
        printf("Warning: VGIC is reporting more list registers than we support. Truncating\n");
        gic_vcpu_num_list_regs = GIC_VCPU_MAX_NUM_LR;
    }
    vcpu_disable(NULL);
    ARCH_NODE_STATE(armHSCurVCPU) = NULL;
    ARCH_NODE_STATE(armHSVCPUActive) = false;
}

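/* Save the hardware state of the currently loaded VCPU into its vcpu_t structure.
 * If the VCPU is active, also capture the state that is only switched on
 * enable/disable: SCTLR, the VGIC HCR and the virtual timer. */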
static void vcpu_save(vcpu_t *vcpu, bool_t active)
{
    word_t i;
    unsigned int lr_num;

    assert(vcpu);
    dsb();
    /* If we aren't active then this state already got stored when
     * we were disabled */
    if (active) {
        vcpu_save_reg(vcpu, seL4_VCPUReg_SCTLR);
        vcpu->vgic.hcr = get_gic_vcpu_ctrl_hcr();
        save_virt_timer(vcpu);
    }

    /* Store GIC VCPU control state */
    vcpu->vgic.vmcr = get_gic_vcpu_ctrl_vmcr();
    vcpu->vgic.apr = get_gic_vcpu_ctrl_apr();
    lr_num = gic_vcpu_num_list_regs;
    for (i = 0; i < lr_num; i++) {
        vcpu->vgic.lr[i] = get_gic_vcpu_ctrl_lr(i);
    }
    armv_vcpu_save(vcpu, active);
}

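/* Read a VCPU register, preferring the value currently held in hardware when the
 * given VCPU is loaded, and falling back to the saved context otherwise. */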
static word_t readVCPUReg(vcpu_t *vcpu, word_t field)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
        switch (field) {
        case seL4_VCPUReg_SCTLR:
            /* The SCTLR value is switched to/from hardware when we enable/disable
             * the vcpu, not when we switch vcpus */
            if (ARCH_NODE_STATE(armHSVCPUActive)) {
                return getSCTLR();
            } else {
                return vcpu_read_reg(vcpu, field);
            }
        default:
            return vcpu_hw_read_reg(field);
        }
    } else {
        return vcpu_read_reg(vcpu, field);
    }
}

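/* Write a VCPU register, updating hardware when the given VCPU is loaded and the
 * register is live, and the saved context otherwise. */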
static void writeVCPUReg(vcpu_t *vcpu, word_t field, word_t value)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
        switch (field) {
        case seL4_VCPUReg_SCTLR:
            if (ARCH_NODE_STATE(armHSVCPUActive)) {
                setSCTLR(value);
            } else {
                vcpu_write_reg(vcpu, field, value);
            }
            break;
        default:
            vcpu_hw_write_reg(field, value);
        }
    } else {
        vcpu_write_reg(vcpu, field, value);
    }
}

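/* Load a VCPU's saved state into hardware: the GIC virtual interface control
 * registers, the list registers and the banked system registers, then enable
 * the VCPU. */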
void vcpu_restore(vcpu_t *vcpu)
{
    assert(vcpu);
    word_t i;
    unsigned int lr_num;
    /* Turn off the VGIC */
    set_gic_vcpu_ctrl_hcr(0);
    isb();

    /* Restore GIC VCPU control state */
    set_gic_vcpu_ctrl_vmcr(vcpu->vgic.vmcr);
    set_gic_vcpu_ctrl_apr(vcpu->vgic.apr);
    lr_num = gic_vcpu_num_list_regs;
    for (i = 0; i < lr_num; i++) {
        set_gic_vcpu_ctrl_lr(i, vcpu->vgic.lr[i]);
    }

    /* restore registers */
#ifdef CONFIG_ARCH_AARCH64
    vcpu_restore_reg_range(vcpu, seL4_VCPUReg_TTBR0, seL4_VCPUReg_SPSR_EL1);
#else
    vcpu_restore_reg_range(vcpu, seL4_VCPUReg_ACTLR, seL4_VCPUReg_SPSRfiq);
#endif
    vcpu_enable(vcpu);
}

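/* Handle a virtual PPI event interrupt: mask it, record it as masked in the
 * current VCPU and deliver a VPPIEvent fault to the current thread. */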
void VPPIEvent(irq_t irq)
{
    if (ARCH_NODE_STATE(armHSVCPUActive)) {
        maskInterrupt(true, irq);
        assert(irqVPPIEventIndex(irq) != VPPIEventIRQ_invalid);
        ARCH_NODE_STATE(armHSCurVCPU)->vppi_masked[irqVPPIEventIndex(irq)] = true;
        current_fault = seL4_Fault_VPPIEvent_new(IRQT_TO_IRQ(irq));
        /* Current VCPU being active should indicate that the current thread
         * is runnable. At present, verification cannot establish this so we
         * perform an extra check. */
        assert(isRunnable(NODE_STATE(ksCurThread)));
        if (isRunnable(NODE_STATE(ksCurThread))) {
            handleFault(NODE_STATE(ksCurThread));
        }
    }
}

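/* Handle a GIC maintenance interrupt: determine which list register caused the
 * EOI, clear its EOI-interrupt-enable bit and deliver a VGICMaintenance fault
 * to the current thread. */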
void VGICMaintenance(void)
{
    uint32_t eisr0, eisr1;
    uint32_t flags;

    /* We shouldn't get a VGICMaintenance interrupt while a VCPU isn't active,
     * but if one becomes pending before the VGIC is disabled we might get one
     * when returning to userlevel after disabling the current VCPU. In this
     * case we simply return and rely on the interrupt being raised again when
     * the VCPU is reenabled.
     */
    if (!ARCH_NODE_STATE(armHSVCPUActive)) {
        printf("Received VGIC maintenance without active VCPU!\n");
        return;
    }

    eisr0 = get_gic_vcpu_ctrl_eisr0();
    eisr1 = get_gic_vcpu_ctrl_eisr1();
    flags = get_gic_vcpu_ctrl_misr();

    if (flags & VGIC_MISR_EOI) {
        int irq_idx;
        if (eisr0) {
            irq_idx = ctzl(eisr0);
        } else if (eisr1) {
            irq_idx = ctzl(eisr1) + 32;
        } else {
            irq_idx = -1;
        }

        /* the hardware should never give us an invalid index, but we don't
         * want to trust it that far */
        if (irq_idx == -1 || irq_idx >= gic_vcpu_num_list_regs) {
            current_fault = seL4_Fault_VGICMaintenance_new(0, 0);
        } else {
            virq_t virq = get_gic_vcpu_ctrl_lr(irq_idx);
            switch (virq_get_virqType(virq)) {
            case virq_virq_active:
                virq = virq_virq_active_set_virqEOIIRQEN(virq, 0);
                break;
            case virq_virq_pending:
                virq = virq_virq_pending_set_virqEOIIRQEN(virq, 0);
                break;
            case virq_virq_invalid:
                virq = virq_virq_invalid_set_virqEOIIRQEN(virq, 0);
                break;
            }
            set_gic_vcpu_ctrl_lr(irq_idx, virq);
            /* decodeVCPUInjectIRQ below checks the vgic.lr register,
             * so we should keep the shadow data structure in sync as well */
            assert(ARCH_NODE_STATE(armHSCurVCPU) != NULL && ARCH_NODE_STATE(armHSVCPUActive));
            if (ARCH_NODE_STATE(armHSCurVCPU) != NULL && ARCH_NODE_STATE(armHSVCPUActive)) {
                ARCH_NODE_STATE(armHSCurVCPU)->vgic.lr[irq_idx] = virq;
            } else {
                /* FIXME This should not happen */
            }
            current_fault = seL4_Fault_VGICMaintenance_new(irq_idx, 1);
        }
    } else {
        /* Assume that it was an EOI for a LR that was not present */
        current_fault = seL4_Fault_VGICMaintenance_new(0, 0);
    }

    /* Current VCPU being active should indicate that the current thread
     * is runnable. At present, verification cannot establish this so we
     * perform an extra check. */
    assert(isRunnable(NODE_STATE(ksCurThread)));
    if (isRunnable(NODE_STATE(ksCurThread))) {
        handleFault(NODE_STATE(ksCurThread));
    }
}

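/* Initialise a new VCPU object with its default register and VGIC state. */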
void vcpu_init(vcpu_t *vcpu)
{
    armv_vcpu_init(vcpu);
    /* GICH VCPU interface control */
    vcpu->vgic.hcr = VGIC_HCR_EN;
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
    /* Virtual Timer interface */
    vcpu->virtTimer.last_pcount = 0;
#endif
}

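/* Switch the loaded VCPU: save the currently loaded one (if any) and restore the
 * new one. When switching to a non-VCPU thread, the current VCPU state is left
 * in hardware but disabled; switching back to the same VCPU merely re-enables it. */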
void vcpu_switch(vcpu_t *new)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) != new)) {
        if (unlikely(new != NULL)) {
            if (unlikely(ARCH_NODE_STATE(armHSCurVCPU) != NULL)) {
                vcpu_save(ARCH_NODE_STATE(armHSCurVCPU), ARCH_NODE_STATE(armHSVCPUActive));
            }
            vcpu_restore(new);
            ARCH_NODE_STATE(armHSCurVCPU) = new;
            ARCH_NODE_STATE(armHSVCPUActive) = true;
        } else if (unlikely(ARCH_NODE_STATE(armHSVCPUActive))) {
            /* leave the current VCPU state loaded, but disable vgic and mmu */
#ifdef ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS
            saveAllBreakpointState(ARCH_NODE_STATE(armHSCurVCPU)->vcpuTCB);
#endif
            vcpu_disable(ARCH_NODE_STATE(armHSCurVCPU));
            ARCH_NODE_STATE(armHSVCPUActive) = false;
        }
    } else if (likely(!ARCH_NODE_STATE(armHSVCPUActive) && new != NULL)) {
        isb();
        vcpu_enable(new);
        ARCH_NODE_STATE(armHSVCPUActive) = true;
    }
}

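/* Disable the currently loaded VCPU (if active) and mark no VCPU as loaded,
 * without saving its state back to the vcpu_t structure. */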
static void vcpu_invalidate_active(void)
{
    if (ARCH_NODE_STATE(armHSVCPUActive)) {
        vcpu_disable(NULL);
        ARCH_NODE_STATE(armHSVCPUActive) = false;
    }
    ARCH_NODE_STATE(armHSCurVCPU) = NULL;
}

void vcpu_finalise(vcpu_t *vcpu)
{
    if (vcpu->vcpuTCB) {
        dissociateVCPUTCB(vcpu, vcpu->vcpuTCB);
    }
}

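/* Bind a VCPU to a TCB, first breaking any existing association on either side. */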
void associateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb)
{
    if (tcb->tcbArch.tcbVCPU) {
        dissociateVCPUTCB(tcb->tcbArch.tcbVCPU, tcb);
    }
    if (vcpu->vcpuTCB) {
        dissociateVCPUTCB(vcpu, vcpu->vcpuTCB);
    }
    tcb->tcbArch.tcbVCPU = vcpu;
    vcpu->vcpuTCB = tcb;
}

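/* Break the association between a VCPU and its TCB, unloading the VCPU if it is
 * currently loaded and sanitising the thread's saved status register so it can
 * only resume in user mode. */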
void dissociateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb)
{
    if (tcb->tcbArch.tcbVCPU != vcpu || vcpu->vcpuTCB != tcb) {
        fail("TCB and VCPU not associated.");
    }
    if (vcpu == ARCH_NODE_STATE(armHSCurVCPU)) {
        vcpu_invalidate_active();
    }
    tcb->tcbArch.tcbVCPU = NULL;
    vcpu->vcpuTCB = NULL;
#ifdef ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS
    Arch_debugDissociateVCPUTCB(tcb);
#endif

    /* sanitise the CPSR as without a VCPU a thread should only be in user mode */
#ifdef CONFIG_ARCH_AARCH64
    setRegister(tcb, SPSR_EL1, sanitiseRegister(SPSR_EL1, getRegister(tcb, SPSR_EL1), false));
#else
    setRegister(tcb, CPSR, sanitiseRegister(CPSR, getRegister(tcb, CPSR), false));
#endif
}

exception_t invokeVCPUWriteReg(vcpu_t *vcpu, word_t field, word_t value)
{
    writeVCPUReg(vcpu, field, value);
    return EXCEPTION_NONE;
}

exception_t decodeVCPUWriteReg(cap_t cap, unsigned int length, word_t *buffer)
{
    word_t field;
    word_t value;
    if (length < 2) {
        userError("VCPUWriteReg: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }
    field = getSyscallArg(0, buffer);
    value = getSyscallArg(1, buffer);
    if (field >= seL4_VCPUReg_Num) {
        userError("VCPUWriteReg: Invalid field 0x%lx.", (long)field);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }
    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUWriteReg(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)), field, value);
}

exception_t invokeVCPUReadReg(vcpu_t *vcpu, word_t field, bool_t call)
{
    tcb_t *thread;
    thread = NODE_STATE(ksCurThread);
    word_t value = readVCPUReg(vcpu, field);
    if (call) {
        word_t *ipcBuffer = lookupIPCBuffer(true, thread);
        setRegister(thread, badgeRegister, 0);
        unsigned int length = setMR(thread, ipcBuffer, 0, value);
        setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                        seL4_MessageInfo_new(0, 0, 0, length)));
    }
    setThreadState(NODE_STATE(ksCurThread), ThreadState_Running);
    return EXCEPTION_NONE;
}

exception_t decodeVCPUReadReg(cap_t cap, unsigned int length, bool_t call, word_t *buffer)
{
    word_t field;
    if (length < 1) {
        userError("VCPUReadReg: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    field = getSyscallArg(0, buffer);

    if (field >= seL4_VCPUReg_Num) {
        userError("VCPUReadReg: Invalid field 0x%lx.", (long)field);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUReadReg(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)), field, call);
}

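/* Install a virtual IRQ in the given list register: directly in hardware if the
 * VCPU is loaded on this core, via an IPI if its TCB resides on another core
 * (SMP only), or in the VCPU's shadow copy otherwise. */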
exception_t invokeVCPUInjectIRQ(vcpu_t *vcpu, unsigned long index, virq_t virq)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
        set_gic_vcpu_ctrl_lr(index, virq);
#ifdef ENABLE_SMP_SUPPORT
    } else if (vcpu->vcpuTCB->tcbAffinity != getCurrentCPUIndex()) {
        doRemoteOp3Arg(IpiRemoteCall_VCPUInjectInterrupt, (word_t)vcpu, index, virq.words[0], vcpu->vcpuTCB->tcbAffinity);
#endif /* ENABLE_SMP_SUPPORT */
    } else {
        vcpu->vgic.lr[index] = virq;
    }

    return EXCEPTION_NONE;
}

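/* Decode an ARMVCPUInjectIRQ invocation: unpack the virtual IRQ id, priority,
 * group and list-register index from the message registers, validate them
 * against the limits of the virtual GIC, then inject a pending virq. */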
exception_t decodeVCPUInjectIRQ(cap_t cap, unsigned int length, word_t *buffer)
{
    word_t vid, priority, group, index;
    vcpu_t *vcpu;
#ifdef CONFIG_ARCH_AARCH64
    word_t mr0;

    vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));

    if (length < 1) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    mr0 = getSyscallArg(0, buffer);
    vid = mr0 & 0xffff;
    priority = (mr0 >> 16) & 0xff;
    group = (mr0 >> 24) & 0xff;
    index = (mr0 >> 32) & 0xff;
#else
    uint32_t mr0, mr1;

    vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));

    if (length < 2) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    mr0 = getSyscallArg(0, buffer);
    mr1 = getSyscallArg(1, buffer);
    vid = mr0 & 0xffff;
    priority = (mr0 >> 16) & 0xff;
    group = (mr0 >> 24) & 0xff;
    index = mr1 & 0xff;
#endif

    /* Check IRQ parameters */
    if (vid > (1U << 10) - 1) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = (1U << 10) - 1;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (priority > 31) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = 31;
        current_syscall_error.invalidArgumentNumber = 2;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (group > 1) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = 1;
        current_syscall_error.invalidArgumentNumber = 3;
        return EXCEPTION_SYSCALL_ERROR;
    }
    /* LR index out of range */
    if (index >= gic_vcpu_num_list_regs) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = gic_vcpu_num_list_regs - 1;
        current_syscall_error.invalidArgumentNumber = 4;
        return EXCEPTION_SYSCALL_ERROR;
    }
    /* LR index is in use */
    if (virq_get_virqType(vcpu->vgic.lr[index]) == virq_virq_active) {
        userError("VGIC List register in use.");
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }
    virq_t virq = virq_virq_pending_new(group, priority, 1, vid);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUInjectIRQ(vcpu, index, virq);
}

exception_t decodeARMVCPUInvocation(
    word_t label,
    unsigned int length,
    cptr_t cptr,
    cte_t *slot,
    cap_t cap,
    extra_caps_t extraCaps,
    bool_t call,
    word_t *buffer
)
{
    switch (label) {
    case ARMVCPUSetTCB:
        return decodeVCPUSetTCB(cap, extraCaps);
    case ARMVCPUReadReg:
        return decodeVCPUReadReg(cap, length, call, buffer);
    case ARMVCPUWriteReg:
        return decodeVCPUWriteReg(cap, length, buffer);
    case ARMVCPUInjectIRQ:
        return decodeVCPUInjectIRQ(cap, length, buffer);
    case ARMVCPUAckVPPI:
        return decodeVCPUAckVPPI(cap, length, buffer);
    default:
        userError("VCPU: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}

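/* Decode an ARMVCPUAckVPPI invocation: check that the IRQ is a valid VPPI event
 * and unmask it for the VCPU. */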
exception_t decodeVCPUAckVPPI(cap_t cap, unsigned int length, word_t *buffer)
{
    vcpu_t *vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));

    if (length < 1) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    word_t irq_w = getSyscallArg(0, buffer);
    irq_t irq = (irq_t) CORE_IRQ_TO_IRQT(CURRENT_CPU_INDEX(), irq_w);
    exception_t status = Arch_checkIRQ(irq_w);
    if (status != EXCEPTION_NONE) {
        return status;
    }

    VPPIEventIRQ_t vppi = irqVPPIEventIndex(irq);
    if (vppi == VPPIEventIRQ_invalid) {
        userError("VCPUAckVPPI: Invalid irq number.");
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUAckVPPI(vcpu, vppi);
}

exception_t invokeVCPUAckVPPI(vcpu_t *vcpu, VPPIEventIRQ_t vppi)
{
    vcpu->vppi_masked[vppi] = false;
    return EXCEPTION_NONE;
}

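/* Decode an ARMVCPUSetTCB invocation: verify that the extra capability is a TCB
 * cap and bind the VCPU to that thread. */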
exception_t decodeVCPUSetTCB(cap_t cap, extra_caps_t extraCaps)
{
    cap_t tcbCap;
    if (extraCaps.excaprefs[0] == NULL) {
        userError("VCPU SetTCB: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }
    tcbCap = extraCaps.excaprefs[0]->cap;

    if (cap_get_capType(tcbCap) != cap_thread_cap) {
        userError("TCB cap is not a TCB cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUSetTCB(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)), TCB_PTR(cap_thread_cap_get_capTCBPtr(tcbCap)));
}

exception_t invokeVCPUSetTCB(vcpu_t *vcpu, tcb_t *tcb)
{
    associateVCPUTCB(vcpu, tcb);

    return EXCEPTION_NONE;
}

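/* Handle a hypervisor trap (HSR) taken on behalf of the current VCPU thread: let
 * the architecture-specific handler deal with it first, otherwise deliver a
 * VCPU fault to the current thread, then schedule. */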
void handleVCPUFault(word_t hsr)
{
    MCS_DO_IF_BUDGET({
        if (armv_handleVCPUFault(hsr))
        {
            return;
        }
        current_fault = seL4_Fault_VCPUFault_new(hsr);
        handleFault(NODE_STATE(ksCurThread));
    })
    schedule();
    activateThread();
}

#ifdef ENABLE_SMP_SUPPORT
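/* IPI handler for cross-core IRQ injection: write the virq directly to the
 * hardware list register if the target VCPU is loaded on this core, otherwise
 * to its shadow copy. */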
void handleVCPUInjectInterruptIPI(vcpu_t *vcpu, unsigned long index, virq_t virq)
{
    if (likely(ARCH_NODE_STATE(armHSCurVCPU) == vcpu)) {
        set_gic_vcpu_ctrl_lr(index, virq);
    } else {
        vcpu->vgic.lr[index] = virq;
    }
}
#endif /* ENABLE_SMP_SUPPORT */

#endif