/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(GD_GPL)
 */

#include <config.h>

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

#include <arch/object/vcpu.h>
#include <armv/vcpu.h>
#include <plat/machine/devices.h>
#include <arch/machine/debug.h> /* Arch_debug[A/Di]ssociateVCPUTCB() */
#include <arch/machine/debug_conf.h>
#include <arch/machine/gic_pl390.h>

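/* Helpers for accessing VCPU registers. Each accessor bounds-checks the
 * register index and the VCPU pointer, then either mirrors a value between
 * the saved context in vcpu->regs and the hardware (vcpu_save_reg /
 * vcpu_restore_reg and their _range variants) or accesses the saved context
 * only (vcpu_read_reg / vcpu_write_reg).
 */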
static inline void
vcpu_save_reg(vcpu_t *vcpu, word_t reg)
{
    if (reg >= seL4_VCPUReg_Num || vcpu == NULL) {
        fail("ARM/HYP: Invalid register index or NULL VCPU");
        return;
    }
    vcpu->regs[reg] = vcpu_hw_read_reg(reg);
}

static inline void
vcpu_save_reg_range(vcpu_t *vcpu, word_t start, word_t end)
{
    for (word_t i = start; i <= end; i++) {
        vcpu_save_reg(vcpu, i);
    }
}

static inline void
vcpu_restore_reg(vcpu_t *vcpu, word_t reg)
{
    if (reg >= seL4_VCPUReg_Num || vcpu == NULL) {
        fail("ARM/HYP: Invalid register index or NULL VCPU");
        return;
    }
    vcpu_hw_write_reg(reg, vcpu->regs[reg]);
}

static inline void
vcpu_restore_reg_range(vcpu_t *vcpu, word_t start, word_t end)
{
    for (word_t i = start; i <= end; i++) {
        vcpu_restore_reg(vcpu, i);
    }
}

static inline word_t
vcpu_read_reg(vcpu_t *vcpu, word_t reg)
{
    if (reg >= seL4_VCPUReg_Num || vcpu == NULL) {
        fail("ARM/HYP: Invalid register index or NULL VCPU");
        return 0;
    }
    return vcpu->regs[reg];
}

static inline void
vcpu_write_reg(vcpu_t *vcpu, word_t reg, word_t value)
{
    if (reg >= seL4_VCPUReg_Num || vcpu == NULL) {
        fail("ARM/HYP: Invalid register index or NULL VCPU");
        return;
    }
    vcpu->regs[reg] = value;
}

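/* Read or write the guest FPEXC: with write == true the value saved in
 * vcpu->regs is loaded into the hardware FPEXC register, otherwise the
 * hardware FPEXC is copied into the saved VCPU context. FPU instructions are
 * temporarily enabled in Hyp mode if they were being trapped, and the trap is
 * re-armed afterwards.
 */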
#ifdef CONFIG_HAVE_FPU
static inline void
access_fpexc(vcpu_t *vcpu, bool_t write)
{
    /* save a copy of the current status, since enableFpuInstInHyp()
     * modifies armHSFPUEnabled
     */
    bool_t flag = armHSFPUEnabled;
    if (!flag) {
        enableFpuInstInHyp();
    }
    if (write) {
        MCR(FPEXC, vcpu_read_reg(vcpu, seL4_VCPUReg_FPEXC));
    } else {
        word_t fpexc;
        MRC(FPEXC, fpexc);
        vcpu_write_reg(vcpu, seL4_VCPUReg_FPEXC, fpexc);
    }
    /* restore the trap status */
    if (!flag) {
        trapFpuInstToHyp();
    }
}
#endif

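/* Put the hardware into a state where the given VCPU's guest context is in
 * effect. On AArch32 this restores the saved SCTLR, sets HCR for guest
 * execution, turns the virtual GIC interface back on and, depending on the
 * configuration, restores debug and FPEXC state; on AArch64 the
 * architecture-specific work is delegated to armv_vcpu_enable().
 */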
static void
vcpu_enable(vcpu_t *vcpu)
{
#ifdef CONFIG_ARCH_AARCH64
    armv_vcpu_enable(vcpu);
#else
    vcpu_restore_reg(vcpu, seL4_VCPUReg_SCTLR);
    setHCR(HCR_VCPU);
    isb();

    /* Turn on the VGIC */
    set_gic_vcpu_ctrl_hcr(vcpu->vgic.hcr);

#if !defined(ARM_CP14_SAVE_AND_RESTORE_NATIVE_THREADS) && defined(ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS)
    /* This is guarded by the negation of ARM_CP14_SAVE_AND_RESTORE_NATIVE_THREADS
     * because otherwise we would call restore_user_debug_context twice on a
     * debug-API build; recall that restore_user_debug_context is already called
     * in restore_user_context.
     *
     * We call restore_user_debug_context here because vcpu_restore calls this
     * function (vcpu_enable). Embedding the restore_user_debug_context call
     * here, rather than in the outer caller (vcpu_switch), makes it less likely
     * that the debug register context restore will be omitted if the structure
     * of this VCPU code changes later on.
     */
    restore_user_debug_context(vcpu->vcpuTCB);
#endif
#if defined(ARM_HYP_TRAP_CP14_IN_NATIVE_USER_THREADS)
    /* Disable debug exception trapping and let the PL1 Guest VM handle all
     * of its own debug faults.
     */
    setHDCRTrapDebugExceptionState(false);
#endif
#ifdef CONFIG_HAVE_FPU
    /* We need to restore the FPEXC value early, for the following reason:
     *
     * When an application inside a VM tries to execute an FPU instruction
     * while the EN bit of FPEXC is disabled, an undefined instruction
     * exception is sent to the guest Linux kernel instead of to seL4. When
     * the Linux kernel examines the EN bit of the FPEXC to determine whether
     * the exception is FPU related, a VCPU trap is sent to the seL4 kernel.
     * However, it can be too late to restore the value of the saved FPEXC in
     * the VCPU trap handler: if the EN bit of the saved FPEXC is enabled,
     * the Linux kernel thinks the FPU is enabled and thus refuses to handle
     * the exception. The result is that the application is killed for an
     * illegal instruction.
     *
     * Note that we restore the FPEXC here, but the current FPU owner
     * can be a different thread. Thus, it seems that we are modifying
     * another thread's FPEXC. However, the modification is OK:
     *
     * 1: If the other thread is a native thread, even if the EN bit of
     * the FPEXC is enabled, a trap to HYP mode will be triggered when
     * the thread tries to use the FPU.
     *
     * 2: If the other thread has a VCPU, the FPEXC has already been saved
     * in the VCPU's vcpu->fpexc when the VCPU was saved or disabled.
     *
     * We also overwrite the fpuState.fpexc with the value saved in
     * vcpu->fpexc, because the following scenario can happen:
     *
     * VM0 (the FPU owner) -> VM1 (update the FPEXC in vcpu_enable) ->
     * switchLocalFpuOwner (save VM0 with modified FPEXC) ->
     * VM1 (the new FPU owner)
     *
     * In the case above, the fpuState.fpexc of VM0 saves the value written
     * by VM1, but the vcpu->fpexc of VM0 still contains the correct
     * value when VM0 is disabled (vcpu_disable) or saved (vcpu_save).
     */
    vcpu->vcpuTCB->tcbArch.tcbContext.fpuState.fpexc = vcpu_read_reg(vcpu, seL4_VCPUReg_FPEXC);
    access_fpexc(vcpu, true);
#endif
#endif
}

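/* Take the guest state of the given VCPU (if any) off the hardware and return
 * the CPU to a configuration suitable for native threads: the virtual GIC
 * interface is turned off, stage 1 translation is disabled and HCR is set
 * back to HCR_NATIVE. Passing NULL skips the save and only resets the
 * hardware.
 */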
static void
vcpu_disable(vcpu_t *vcpu)
{
#ifdef CONFIG_ARCH_AARCH64
    armv_vcpu_disable(vcpu);
#else
    uint32_t hcr;
    dsb();
    if (likely(vcpu)) {
        hcr = get_gic_vcpu_ctrl_hcr();
        vcpu->vgic.hcr = hcr;
        vcpu_save_reg(vcpu, seL4_VCPUReg_SCTLR);
        isb();
#ifdef CONFIG_HAVE_FPU
        if (nativeThreadUsingFPU(vcpu->vcpuTCB)) {
            access_fpexc(vcpu, false);
        }
#endif
    }
    /* Turn off the VGIC */
    set_gic_vcpu_ctrl_hcr(0);
    isb();

    /* Stage 1 MMU off */
    setSCTLR(SCTLR_DEFAULT);
    setHCR(HCR_NATIVE);

#if defined(ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS)
    /* Disable all breakpoint registers from triggering their
     * respective events, so that when we switch from a guest VM
     * to a native thread, the native thread won't trigger events
     * that were caused by things the guest VM did.
     */
    loadAllDisabledBreakpointState();
#endif
#if defined(ARM_HYP_TRAP_CP14_IN_NATIVE_USER_THREADS)
    /* Enable debug exception trapping and let seL4 trap all PL0 (user) native
     * seL4 threads' debug exceptions, so it can deliver them as fault messages.
     */
    setHDCRTrapDebugExceptionState(true);
#endif
    isb();
#endif
}

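/* One-time hypervisor VCPU initialisation at boot: probe the number of VGIC
 * list registers the hardware provides, clamp it to what the kernel supports,
 * put the CPU into the native (no VCPU loaded) configuration and, when CP14
 * trapping is configured, initialise HDCR accordingly.
 */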
BOOT_CODE void
vcpu_boot_init(void)
{
#ifdef CONFIG_ARCH_AARCH64
    armv_vcpu_boot_init();
#endif
    gic_vcpu_num_list_regs = VGIC_VTR_NLISTREGS(get_gic_vcpu_ctrl_vtr());
    if (gic_vcpu_num_list_regs > GIC_VCPU_MAX_NUM_LR) {
        printf("Warning: VGIC is reporting more list registers than we support. Truncating\n");
        gic_vcpu_num_list_regs = GIC_VCPU_MAX_NUM_LR;
    }
    vcpu_disable(NULL);
    armHSCurVCPU = NULL;
    armHSVCPUActive = false;

#if defined(ARM_HYP_TRAP_CP14_IN_VCPU_THREADS) || defined(ARM_HYP_TRAP_CP14_IN_NATIVE_USER_THREADS)
    /* On the verified build, we have implemented a workaround that ensures
     * that we don't need to save and restore the debug coprocessor's state
     * (and therefore don't have to expose the CP14 registers to verification).
     *
     * This workaround is simple: we trap and intercept all Guest VM accesses
     * to the debug coprocessor, and deliver them as VMFault messages to the
     * VM Monitor. The VM Monitor can then choose either to kill the Guest VM,
     * or to silently step over the Guest VM's accesses to the debug
     * coprocessor, thereby eliminating the communication channel between
     * Guest VMs (the debug coprocessor acts as a communication channel
     * unless we save/restore its state between VM switches).
     *
     * In essence, this workaround delegates responsibility for the
     * communication channel from the kernel to the VM Monitor.
     */
    initHDCR();
#endif
}

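/* Save the current hardware state of a VCPU into its in-memory context: the
 * VGIC control, mask and list registers, the banked system registers, and
 * (configuration dependent) CP14 debug and FPEXC state. 'active' indicates
 * whether the VCPU is currently enabled; if it is not, SCTLR and the VGIC HCR
 * were already captured by vcpu_disable.
 */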
static void
vcpu_save(vcpu_t *vcpu, bool_t active)
{
    word_t i;
    unsigned int lr_num;

    assert(vcpu);
    dsb();
    /* If we aren't active then this state was already stored when
     * we were disabled */
    if (active) {
        vcpu_save_reg(vcpu, seL4_VCPUReg_SCTLR);
        vcpu->vgic.hcr = get_gic_vcpu_ctrl_hcr();
    }

    /* Store GIC VCPU control state */
    vcpu->vgic.vmcr = get_gic_vcpu_ctrl_vmcr();
    vcpu->vgic.apr = get_gic_vcpu_ctrl_apr();
    lr_num = gic_vcpu_num_list_regs;
    for (i = 0; i < lr_num; i++) {
        vcpu->vgic.lr[i] = get_gic_vcpu_ctrl_lr(i);
    }

#ifdef CONFIG_ARCH_AARCH64
    vcpu_save_reg_range(vcpu, seL4_VCPUReg_TTBR0, seL4_VCPUReg_SPSR_EL1);
#else
    /* save registers */
    vcpu_save_reg_range(vcpu, seL4_VCPUReg_ACTLR, seL4_VCPUReg_SPSRfiq);

#ifdef ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS
    /* This is done when we are asked to save and restore the CP14 debug context
     * of VCPU threads; the register context is saved into the underlying TCB.
     */
    saveAllBreakpointState(vcpu->vcpuTCB);
#endif
    isb();
#ifdef CONFIG_HAVE_FPU
    /* Other FPU registers are still lazily saved and restored when
     * handleFPUFault is called. See the comments in vcpu_enable
     * for more information.
     */
    if (active && nativeThreadUsingFPU(vcpu->vcpuTCB)) {
        access_fpexc(vcpu, false);
    }
#endif
#endif
}

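/* readVCPUReg and writeVCPUReg access a VCPU register wherever it currently
 * lives: if the VCPU is the one loaded on the hardware, most registers are
 * read from or written to the hardware directly, while SCTLR is only in
 * hardware while the VCPU is active (it is switched by vcpu_enable and
 * vcpu_disable). For any other VCPU the saved copy in vcpu->regs is used.
 */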
static word_t
readVCPUReg(vcpu_t *vcpu, word_t field)
{
    if (likely(armHSCurVCPU == vcpu)) {
        switch (field) {
        case seL4_VCPUReg_SCTLR:
            /* The SCTLR value is switched to/from hardware when we enable/disable
             * the vcpu, not when we switch vcpus */
            if (armHSVCPUActive) {
                return getSCTLR();
            } else {
                return vcpu_read_reg(vcpu, field);
            }
        default:
            return vcpu_hw_read_reg(field);
        }
    } else {
        return vcpu_read_reg(vcpu, field);
    }
}

static void
writeVCPUReg(vcpu_t *vcpu, word_t field, word_t value)
{
    if (likely(armHSCurVCPU == vcpu)) {
        switch (field) {
        case seL4_VCPUReg_SCTLR:
            if (armHSVCPUActive) {
                setSCTLR(value);
            } else {
                vcpu_write_reg(vcpu, field, value);
            }
            break;
        default:
            vcpu_hw_write_reg(field, value);
        }
    } else {
        vcpu_write_reg(vcpu, field, value);
    }
}

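/* Load a VCPU's saved context back onto the hardware: the VGIC interface is
 * quiesced first, then its control, mask and list registers are restored,
 * followed by the banked system registers, and finally the VCPU is enabled.
 */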
void
vcpu_restore(vcpu_t *vcpu)
{
    assert(vcpu);
    word_t i;
    unsigned int lr_num;
    /* Turn off the VGIC */
    set_gic_vcpu_ctrl_hcr(0);
    isb();

    /* Restore GIC VCPU control state */
    set_gic_vcpu_ctrl_vmcr(vcpu->vgic.vmcr);
    set_gic_vcpu_ctrl_apr(vcpu->vgic.apr);
    lr_num = gic_vcpu_num_list_regs;
    for (i = 0; i < lr_num; i++) {
        set_gic_vcpu_ctrl_lr(i, vcpu->vgic.lr[i]);
    }

    /* restore registers */
#ifdef CONFIG_ARCH_AARCH64
    vcpu_restore_reg_range(vcpu, seL4_VCPUReg_TTBR0, seL4_VCPUReg_SPSR_EL1);
#else
    vcpu_restore_reg_range(vcpu, seL4_VCPUReg_ACTLR, seL4_VCPUReg_SPSRfiq);
#endif
    vcpu_enable(vcpu);
}

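/* Handle a VGIC maintenance interrupt: work out which list register raised an
 * end-of-interrupt event (if any), clear that list register's EOI
 * interrupt-enable bit, keep the shadow copy in vgic.lr in sync, and deliver
 * the event to user level as a VGICMaintenance fault on the current thread.
 */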
void
VGICMaintenance(void)
{
    uint32_t eisr0, eisr1;
    uint32_t flags;

    /* The current thread must be runnable at this point, as we can only get
     * a VGIC maintenance interrupt while we are actively running a thread
     * with an associated VCPU. For the moment we keep a redundant check here,
     * for the benefit of the proof, that this is indeed the case. */
    if (!isRunnable(NODE_STATE(ksCurThread))) {
        printf("Received VGIC maintenance on non-runnable thread!\n");
        return;
    }

    eisr0 = get_gic_vcpu_ctrl_eisr0();
    eisr1 = get_gic_vcpu_ctrl_eisr1();
    flags = get_gic_vcpu_ctrl_misr();

    if (flags & VGIC_MISR_EOI) {
        int irq_idx;
        if (eisr0) {
            irq_idx = ctzl(eisr0);
        } else if (eisr1) {
            irq_idx = ctzl(eisr1) + 32;
        } else {
            irq_idx = -1;
        }

        /* the hardware should never give us an invalid index, but we don't
         * want to trust it that far */
        if (irq_idx == -1 || irq_idx >= gic_vcpu_num_list_regs) {
            current_fault = seL4_Fault_VGICMaintenance_new(0, 0);
        } else {
            virq_t virq = get_gic_vcpu_ctrl_lr(irq_idx);
            switch (virq_get_virqType(virq)) {
            case virq_virq_active:
                virq = virq_virq_active_set_virqEOIIRQEN(virq, 0);
                break;
            case virq_virq_pending:
                virq = virq_virq_pending_set_virqEOIIRQEN(virq, 0);
                break;
            case virq_virq_invalid:
                virq = virq_virq_invalid_set_virqEOIIRQEN(virq, 0);
                break;
            }
            set_gic_vcpu_ctrl_lr(irq_idx, virq);
            /* decodeVCPUInjectIRQ below checks the vgic.lr register,
             * so we should keep the shadow data structure in sync */
            assert(armHSCurVCPU != NULL && armHSVCPUActive);
            if (armHSCurVCPU != NULL && armHSVCPUActive) {
                armHSCurVCPU->vgic.lr[irq_idx] = virq;
            } else {
                /* FIXME This should not happen */
            }
            current_fault = seL4_Fault_VGICMaintenance_new(irq_idx, 1);
        }

    } else {
        /* Assume that it was an EOI for a LR that was not present */
        current_fault = seL4_Fault_VGICMaintenance_new(0, 0);
    }

    handleFault(NODE_STATE(ksCurThread));
}

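/* Initialise a freshly created VCPU object with sane defaults: the guest's
 * control registers are set to their reset values and the virtual GIC
 * interface is marked as enabled.
 */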
void
vcpu_init(vcpu_t *vcpu)
{
#ifdef CONFIG_ARCH_AARCH64
    armv_vcpu_init(vcpu);
#else
    vcpu_write_reg(vcpu, seL4_VCPUReg_SCTLR, SCTLR_DEFAULT);
    vcpu_write_reg(vcpu, seL4_VCPUReg_ACTLR, ACTLR_DEFAULT);
#endif
    /* GICH VCPU interface control */
    vcpu->vgic.hcr = VGIC_HCR_EN;
}

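/* Switch the hardware to the VCPU associated with the thread we are about to
 * run. Three cases are handled: switching to a different VCPU (save the old
 * one, if any, and restore the new one), switching to a native thread
 * (new == NULL: leave the current VCPU state loaded but disable it), and
 * re-entering the same VCPU after it was disabled (just enable it again).
 */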
void
vcpu_switch(vcpu_t *new)
{
    if (likely(armHSCurVCPU != new)) {
        if (unlikely(new != NULL)) {
            if (unlikely(armHSCurVCPU != NULL)) {
                vcpu_save(armHSCurVCPU, armHSVCPUActive);
            }
            vcpu_restore(new);
            armHSCurVCPU = new;
            armHSVCPUActive = true;
        } else if (unlikely(armHSVCPUActive)) {
            /* leave the current VCPU state loaded, but disable vgic and mmu */
#ifdef ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS
            saveAllBreakpointState(armHSCurVCPU->vcpuTCB);
#endif
            vcpu_disable(armHSCurVCPU);
            armHSVCPUActive = false;
        }
    } else if (likely(!armHSVCPUActive && new != NULL)) {
        isb();
        vcpu_enable(new);
        armHSVCPUActive = true;
    }
}

static void
vcpu_invalidate_active(void)
{
    if (armHSVCPUActive) {
        vcpu_disable(NULL);
        armHSVCPUActive = false;
    }
    armHSCurVCPU = NULL;
}

void
vcpu_finalise(vcpu_t *vcpu)
{
    if (vcpu->vcpuTCB) {
        dissociateVCPUTCB(vcpu, vcpu->vcpuTCB);
    }
}

void
associateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb)
{
    if (tcb->tcbArch.tcbVCPU) {
        dissociateVCPUTCB(tcb->tcbArch.tcbVCPU, tcb);
    }
    if (vcpu->vcpuTCB) {
        dissociateVCPUTCB(vcpu, vcpu->vcpuTCB);
    }
    tcb->tcbArch.tcbVCPU = vcpu;
    vcpu->vcpuTCB = tcb;
}

void
dissociateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb)
{
    if (tcb->tcbArch.tcbVCPU != vcpu || vcpu->vcpuTCB != tcb) {
        fail("TCB and VCPU not associated.");
    }
    if (vcpu == armHSCurVCPU) {
        vcpu_invalidate_active();
    }
    tcb->tcbArch.tcbVCPU = NULL;
    vcpu->vcpuTCB = NULL;
#ifdef ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS
    Arch_debugDissociateVCPUTCB(tcb);
#endif

    /* sanitize the CPSR as without a VCPU a thread should only be in user mode */
#ifdef CONFIG_ARCH_AARCH64
    setRegister(tcb, SPSR_EL1, sanitiseRegister(SPSR_EL1, getRegister(tcb, SPSR_EL1), false));
#else
    setRegister(tcb, CPSR, sanitiseRegister(CPSR, getRegister(tcb, CPSR), false));
#endif
}

exception_t
invokeVCPUWriteReg(vcpu_t *vcpu, word_t field, word_t value)
{
    writeVCPUReg(vcpu, field, value);
    return EXCEPTION_NONE;
}

exception_t
decodeVCPUWriteReg(cap_t cap, unsigned int length, word_t* buffer)
{
    word_t field;
    word_t value;
    if (length < 2) {
        userError("VCPUWriteReg: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }
    field = getSyscallArg(0, buffer);
    value = getSyscallArg(1, buffer);
    if (field >= seL4_VCPUReg_Num) {
        userError("VCPUWriteReg: Invalid field 0x%lx.", (long)field);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }
    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUWriteReg(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)), field, value);
}

exception_t
invokeVCPUReadReg(vcpu_t *vcpu, word_t field, bool_t call)
{
    tcb_t *thread;
    thread = NODE_STATE(ksCurThread);
    word_t value = readVCPUReg(vcpu, field);
    if (call) {
        word_t *ipcBuffer = lookupIPCBuffer(true, thread);
        setRegister(thread, badgeRegister, 0);
        unsigned int length = setMR(thread, ipcBuffer, 0, value);
        setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                        seL4_MessageInfo_new(0, 0, 0, length)));
    }
    setThreadState(NODE_STATE(ksCurThread), ThreadState_Running);
    return EXCEPTION_NONE;
}

exception_t
decodeVCPUReadReg(cap_t cap, unsigned int length, bool_t call, word_t* buffer)
{
    word_t field;
    if (length < 1) {
        userError("VCPUReadReg: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    field = getSyscallArg(0, buffer);

    if (field >= seL4_VCPUReg_Num) {
        userError("VCPUReadReg: Invalid field 0x%lx.", (long)field);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUReadReg(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)), field, call);
}

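/* VCPU_InjectIRQ packs its arguments into the message registers: bits 0-15 of
 * mr0 carry the virtual IRQ number, bits 16-23 the priority and bits 24-31
 * the group. The target list register index comes from bits 32 and up of mr0
 * on AArch64, or from mr1 on AArch32. If the target VCPU is currently loaded
 * the list register is written directly, otherwise the shadow copy is
 * updated.
 */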
exception_t
invokeVCPUInjectIRQ(vcpu_t* vcpu, unsigned long index, virq_t virq)
{
    if (likely(armHSCurVCPU == vcpu)) {
        set_gic_vcpu_ctrl_lr(index, virq);
    } else {
        vcpu->vgic.lr[index] = virq;
    }

    return EXCEPTION_NONE;
}

exception_t
decodeVCPUInjectIRQ(cap_t cap, unsigned int length, word_t* buffer)
{
    word_t vid, priority, group, index;
    vcpu_t *vcpu;
#ifdef CONFIG_ARCH_AARCH64
    word_t mr0;

    vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));

    if (length < 1) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    mr0 = getSyscallArg(0, buffer);
    vid = mr0 & 0xffff;
    priority = (mr0 >> 16) & 0xff;
    group = (mr0 >> 24) & 0xff;
    index = (mr0 >> 32) & 0xff;
#else
    uint32_t mr0, mr1;

    vcpu = VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap));

    if (length < 2) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    mr0 = getSyscallArg(0, buffer);
    mr1 = getSyscallArg(1, buffer);
    vid = mr0 & 0xffff;
    priority = (mr0 >> 16) & 0xff;
    group = (mr0 >> 24) & 0xff;
    index = mr1 & 0xff;
#endif

    /* Check IRQ parameters */
    if (vid > (1U << 10) - 1) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = (1U << 10) - 1;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (priority > 31) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = 31;
        current_syscall_error.invalidArgumentNumber = 2;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (group > 1) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = 1;
        current_syscall_error.invalidArgumentNumber = 3;
        return EXCEPTION_SYSCALL_ERROR;
    }
    /* LR index out of range */
    if (index >= gic_vcpu_num_list_regs) {
        current_syscall_error.type = seL4_RangeError;
        current_syscall_error.rangeErrorMin = 0;
        current_syscall_error.rangeErrorMax = gic_vcpu_num_list_regs - 1;
        current_syscall_error.invalidArgumentNumber = 4;
        return EXCEPTION_SYSCALL_ERROR;
    }
    /* LR index is in use */
    if (virq_get_virqType(vcpu->vgic.lr[index]) == virq_virq_active) {
        userError("VGIC List register in use.");
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }
    virq_t virq = virq_virq_pending_new(group, priority, 1, vid);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUInjectIRQ(vcpu, index, virq);
}

exception_t decodeARMVCPUInvocation(
    word_t label,
    unsigned int length,
    cptr_t cptr,
    cte_t* slot,
    cap_t cap,
    extra_caps_t extraCaps,
    bool_t call,
    word_t* buffer
)
{
    switch (label) {
    case ARMVCPUSetTCB:
        return decodeVCPUSetTCB(cap, extraCaps);
    case ARMVCPUReadReg:
        return decodeVCPUReadReg(cap, length, call, buffer);
    case ARMVCPUWriteReg:
        return decodeVCPUWriteReg(cap, length, buffer);
    case ARMVCPUInjectIRQ:
        return decodeVCPUInjectIRQ(cap, length, buffer);
    default:
        userError("VCPU: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}
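/* For reference, these invocations are normally reached from user level via
 * the libsel4 system call wrappers, roughly as sketched below (illustrative
 * only; the exact wrapper names and signatures are generated by libsel4 and
 * may differ between configurations):
 *
 *     seL4_ARM_VCPU_SetTCB(vcpu_cap, tcb_cap);
 *     seL4_ARM_VCPU_ReadRegs_t res = seL4_ARM_VCPU_ReadRegs(vcpu_cap, seL4_VCPUReg_SCTLR);
 *     seL4_ARM_VCPU_InjectIRQ(vcpu_cap, virq, priority, group, index);
 */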

exception_t
decodeVCPUSetTCB(cap_t cap, extra_caps_t extraCaps)
{
    cap_t tcbCap;
    if (extraCaps.excaprefs[0] == NULL) {
        userError("VCPU SetTCB: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }
    tcbCap = extraCaps.excaprefs[0]->cap;

    if (cap_get_capType(tcbCap) != cap_thread_cap) {
        userError("TCB cap is not a TCB cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeVCPUSetTCB(VCPU_PTR(cap_vcpu_cap_get_capVCPUPtr(cap)), TCB_PTR(cap_thread_cap_get_capTCBPtr(tcbCap)));
}

exception_t
invokeVCPUSetTCB(vcpu_t *vcpu, tcb_t *tcb)
{
    associateVCPUTCB(vcpu, tcb);

    return EXCEPTION_NONE;
}

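/* HSR values observed when a guest FPU/Advanced SIMD access traps to Hyp
 * mode; these appear to correspond to HSR exception class 0x07 (trapped
 * CP10/CP11 access). handleVCPUFault treats them as lazy-FPU faults and
 * forwards any unhandled trap to user level as a VCPUFault.
 */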
#define HSR_FPU_FAULT   (0x1fe0000a)
#define HSR_TASE_FAULT  (0x1fe00020)

void
handleVCPUFault(word_t hsr)
{
#ifdef CONFIG_ARCH_AARCH64
    if (armv_handleVCPUFault(hsr)) {
        return;
    }
#endif
#ifdef CONFIG_HAVE_FPU
    if (hsr == HSR_FPU_FAULT || hsr == HSR_TASE_FAULT) {
        assert(!isFpuEnable());
        handleFPUFault();
        setNextPC(NODE_STATE(ksCurThread), getRestartPC(NODE_STATE(ksCurThread)));
        return;
    }
#endif
    current_fault = seL4_Fault_VCPUFault_new(hsr);
    handleFault(NODE_STATE(ksCurThread));
    schedule();
    activateThread();
}

#endif