1/*
2 * Copyright 2014, General Dynamics C4 Systems
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 */
6
7#pragma once
8
9#include <config.h>
10
11#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
12
13#include <api/failures.h>
14#include <linker.h>
15
16#define HCR_RW       BIT(31)     /* Execution state control        */
17#define HCR_TRVM     BIT(30)     /* trap reads of VM controls      */
18#define HCR_HCD      BIT(29)     /* Disable HVC                    */
19#define HCR_TDZ      BIT(28)     /* trap DC ZVA AArch64 only       */
20#define HCR_TGE      BIT(27)     /* Trap general exceptions        */
21#define HCR_TVM      BIT(26)     /* Trap MMU access                */
22#define HCR_TTLB     BIT(25)     /* Trap TLB operations            */
23#define HCR_TPU      BIT(24)     /* Trap cache maintenance         */
24#define HCR_TPC      BIT(23)     /* Trap cache maintenance PoC     */
25#define HCR_TSW      BIT(22)     /* Trap cache maintenance set/way */
26#define HCR_TCACHE   (HCR_TPU | HCR_TPC | HCR_TSW)
27#define HCR_TAC      BIT(21)     /* Trap ACTLR access              */
28#define HCR_TIDCP    BIT(20)     /* Trap lockdown                  */
29#define HCR_TSC      BIT(19)     /* Trap SMC instructions          */
30#define HCR_TID3     BIT(18)     /* Trap ID register 3             */
31#define HCR_TID2     BIT(17)     /* Trap ID register 2             */
32#define HCR_TID1     BIT(16)     /* Trap ID register 1             */
33#define HCR_TID0     BIT(15)     /* Trap ID register 0             */
34#define HCR_TID      (HCR_TID0 | HCR_TID1 | HCR_TID2 | HCR_TID3)
35#define HCR_TWE      BIT(14)     /* Trap WFE                       */
36#define HCR_TWI      BIT(13)     /* Trap WFI                       */
37#define HCR_DC       BIT(12)     /* Default cacheable              */
#define HCR_BSU(x)   ((x) << 10) /* Barrier shareability upgrade   */
39#define HCR_FB       BIT( 9)     /* Force broadcast                */
40#define HCR_VA       BIT( 8)     /* Virtual async abort            */
41#define HCR_VI       BIT( 7)     /* Virtual IRQ                    */
#define HCR_VF       BIT( 6)     /* Virtual FIQ                    */
43#define HCR_AMO      BIT( 5)     /* CPSR.A override enable         */
44#define HCR_IMO      BIT( 4)     /* CPSR.I override enable         */
45#define HCR_FMO      BIT( 3)     /* CPSR.F override enable         */
46#define HCR_PTW      BIT( 2)     /* Protected table walk           */
47#define HCR_SWIO     BIT( 1)     /* set/way invalidate override    */
48#define HCR_VM       BIT( 0)     /* Virtualization MMU enable      */
49
50#define GIC_VCPU_MAX_NUM_LR 64
51
/* Saved state of the GIC virtual CPU interface, kept per VCPU so it can be
 * written back to the hardware when the VCPU is switched in.
 * NOTE(review): field names mirror the GIC hypervisor interface registers
 * (HCR, VMCR, APR, LRs) — confirm exact mapping against the VGIC driver. */
struct gicVCpuIface {
    uint32_t hcr;                   /* interface hypervisor control */
    uint32_t vmcr;                  /* virtual machine control */
    uint32_t apr;                   /* active priorities */
    virq_t lr[GIC_VCPU_MAX_NUM_LR]; /* virtual interrupt list registers */
};
58
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
/* Per-VCPU virtual timer bookkeeping. last_pcount is a 64-bit counter
 * snapshot (presumably the physical counter at the last VCPU save, used to
 * adjust the virtual offset on restore — verify against vcpu save/restore). */
struct vTimer {
    uint64_t last_pcount;
};
#endif
64
/* Indices of the virtual PPI events tracked per VCPU (currently only the
 * virtual timer). n_VPPIEventIRQ is the count of valid entries and doubles
 * as the invalid/sentinel value. */
enum VPPIEventIRQ {
    VPPIEventIRQ_VTimer,
    n_VPPIEventIRQ,
    VPPIEventIRQ_invalid = n_VPPIEventIRQ,
};
typedef word_t VPPIEventIRQ_t;
71
/* Kernel object backing a VCPU capability: the saved register file and
 * virtual interrupt state for one guest virtual CPU. */
struct vcpu {
    /* TCB associated with this VCPU. */
    struct tcb *vcpuTCB;
    /* Saved GIC virtual CPU interface state. */
    struct gicVCpuIface vgic;
    /* Saved register values, indexed by seL4_VCPUReg_*. */
    word_t regs[seL4_VCPUReg_Num];
    /* Mask state per virtual PPI event, indexed by VPPIEventIRQ. */
    bool_t vppi_masked[n_VPPIEventIRQ];
#ifdef CONFIG_VTIMER_UPDATE_VOFFSET
    /* vTimer is 8-bytes wide and has same alignment requirement.
     * To keep the struct packed on 32-bit platforms when accompanied by an
     * odd number of 32-bit words, we need to add a padding word.
     * */
    word_t vcpu_padding;
    struct vTimer virtTimer;
#endif
};
typedef struct vcpu vcpu_t;
/* The object must fit within the size advertised for VCPU allocation. */
compile_assert(vcpu_size_correct, sizeof(struct vcpu) <= BIT(VCPU_SIZE_BITS))
89
/* Event entry points: VGIC maintenance interrupts, hypervisor-trapped
 * faults (hsr is the syndrome value), and virtual PPI interrupts. */
void VGICMaintenance(void);
void handleVCPUFault(word_t hsr);
void VPPIEvent(irq_t irq);

/* Initialise a newly created VCPU object. */
void vcpu_init(vcpu_t *vcpu);

/* Performs one off initialization of VCPU state and structures. Should be
 * called in boot code before any other VCPU functions */
BOOT_CODE void vcpu_boot_init(void);

/* Clean up a VCPU object prior to deletion. */
void vcpu_finalise(vcpu_t *vcpu);

/* Bind a VCPU to a TCB (the VCPU records the TCB in vcpuTCB). */
void associateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb);

/* Undo an earlier association between a VCPU and a TCB. */
void dissociateVCPUTCB(vcpu_t *vcpu, tcb_t *tcb);

/* Top-level decoder for VCPU capability invocations; dispatches on label
 * to the per-operation decoders below. */
exception_t decodeARMVCPUInvocation(
    word_t label,
    unsigned int length,
    cptr_t cptr,
    cte_t *slot,
    cap_t cap,
    extra_caps_t extraCaps,
    bool_t call,
    word_t *buffer
);

void vcpu_restore(vcpu_t *cpu);
void vcpu_switch(vcpu_t *cpu);
#ifdef ENABLE_SMP_SUPPORT
/* Handle a virtual IRQ injection request forwarded from another core. */
void handleVCPUInjectInterruptIPI(vcpu_t *vcpu, unsigned long index, virq_t virq);
#endif /* ENABLE_SMP_SUPPORT */

/* Per-operation decoders: validate user-supplied arguments (message
 * length, IPC buffer contents) before performing the invocation. */
exception_t decodeVCPUWriteReg(cap_t cap, unsigned int length, word_t *buffer);
exception_t decodeVCPUReadReg(cap_t cap, unsigned int length, bool_t call, word_t *buffer);
exception_t decodeVCPUInjectIRQ(cap_t cap, unsigned int length, word_t *buffer);
exception_t decodeVCPUSetTCB(cap_t cap, extra_caps_t extraCaps);
exception_t decodeVCPUAckVPPI(cap_t cap, unsigned int length, word_t *buffer);

/* Invocation implementations, called with already-validated arguments. */
exception_t invokeVCPUWriteReg(vcpu_t *vcpu, word_t field, word_t value);
exception_t invokeVCPUReadReg(vcpu_t *vcpu, word_t field, bool_t call);
exception_t invokeVCPUInjectIRQ(vcpu_t *vcpu, unsigned long index, virq_t virq);
exception_t invokeVCPUSetTCB(vcpu_t *vcpu, tcb_t *tcb);
exception_t invokeVCPUAckVPPI(vcpu_t *vcpu, VPPIEventIRQ_t vppi);
/* Arch-specific hardware register accessors; definitions are provided
 * elsewhere (per-architecture), hence the bare static declarations here. */
static word_t vcpu_hw_read_reg(word_t reg_index);
static void vcpu_hw_write_reg(word_t reg_index, word_t reg);
136
137static inline void vcpu_save_reg(vcpu_t *vcpu, word_t reg)
138{
139    if (reg >= seL4_VCPUReg_Num || vcpu == NULL) {
140        fail("ARM/HYP: Invalid register index or NULL VCPU");
141        return;
142    }
143    vcpu->regs[reg] = vcpu_hw_read_reg(reg);
144}
145
146static inline void vcpu_save_reg_range(vcpu_t *vcpu, word_t start, word_t end)
147{
148    for (word_t i = start; i <= end; i++) {
149        vcpu_save_reg(vcpu, i);
150    }
151}
152
153static inline void vcpu_restore_reg(vcpu_t *vcpu, word_t reg)
154{
155    if (reg >= seL4_VCPUReg_Num || vcpu == NULL) {
156        fail("ARM/HYP: Invalid register index or NULL VCPU");
157        return;
158    }
159    vcpu_hw_write_reg(reg, vcpu->regs[reg]);
160}
161
162static inline void vcpu_restore_reg_range(vcpu_t *vcpu, word_t start, word_t end)
163{
164    for (word_t i = start; i <= end; i++) {
165        vcpu_restore_reg(vcpu, i);
166    }
167}
168
169static inline word_t vcpu_read_reg(vcpu_t *vcpu, word_t reg)
170{
171    if (reg >= seL4_VCPUReg_Num || vcpu == NULL) {
172        fail("ARM/HYP: Invalid register index or NULL VCPU");
173        return 0;
174    }
175    return vcpu->regs[reg];
176}
177
178static inline void vcpu_write_reg(vcpu_t *vcpu, word_t reg, word_t value)
179{
180    if (reg >= seL4_VCPUReg_Num || vcpu == NULL) {
181        fail("ARM/HYP: Invalid register index or NULL VCPU");
182        return;
183    }
184    vcpu->regs[reg] = value;
185}
186
187static inline VPPIEventIRQ_t irqVPPIEventIndex(irq_t irq)
188{
189    switch (IRQT_TO_IRQ(irq)) {
190    case INTERRUPT_VTIMER_EVENT:
191        return VPPIEventIRQ_VTimer;
192
193    default:
194        return VPPIEventIRQ_invalid;
195    }
196}
197
198#else /* end of CONFIG_ARM_HYPERVISOR_SUPPORT */
199
/* used in boot.c with a guard, use a macro to avoid exposing vcpu_t */
#define vcpu_boot_init() do {} while(0)
#define vcpu_switch(x) do {} while(0)
/* Empty stub so callers need no #ifdef guard when hyp support is off. */
static inline void VGICMaintenance(void) {}
204
205#endif /* end of !CONFIG_ARM_HYPERVISOR_SUPPORT */
206
207