/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 * Copyright 2015, 2016 Hesham Almatary <heshamelmatary@gmail.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <types.h>
#include <machine/registerset.h>
#include <machine/timer.h>
#include <arch/machine.h>
#include <arch/smp/ipi.h>

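/* Bit positions in the sip/sie CSRs, and the matching scause exception codes,
 * for the supervisor software (SSIP/SSIE), timer (STIP/STIE) and external
 * (SEIP/SEIE) interrupts, as defined by the RISC-V privileged specification. */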
#define SIPI_IP   1
#define SIPI_IE   1
#define STIMER_IP 5
#define STIMER_IE 5
#define STIMER_CAUSE 5
#define SEXTERNAL_IP 9
#define SEXTERNAL_IE 9
#define SEXTERNAL_CAUSE 9

#ifndef CONFIG_KERNEL_MCS
#define RESET_CYCLES ((TIMER_CLOCK_HZ / MS_IN_S) * CONFIG_TIMER_TICK_MS)
#endif /* !CONFIG_KERNEL_MCS */

#define IS_IRQ_VALID(X) (((X) <= maxIRQ) && ((X) != irqInvalid))

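/* getRestartPC returns the PC of the instruction that trapped (FaultIP);
 * setNextPC sets the PC (NextIP) at which the thread will resume when it is
 * next restored. */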
word_t PURE getRestartPC(tcb_t *thread)
{
    return getRegister(thread, FaultIP);
}

void setNextPC(tcb_t *thread, word_t v)
{
    setRegister(thread, NextIP, v);
}

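/* avail_p_regs is the platform-provided table of physical memory regions
 * available for use; these accessors expose its size and contents to the
 * boot code. */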
BOOT_CODE int get_num_avail_p_regs(void)
{
    return sizeof(avail_p_regs) / sizeof(p_region_t);
}

BOOT_CODE p_region_t *get_avail_p_regs(void)
{
    return (p_region_t *) avail_p_regs;
}

BOOT_CODE void map_kernel_devices(void)
{
    if (kernel_devices == NULL) {
        return;
    }

    for (int i = 0; i < (sizeof(kernel_devices) / sizeof(kernel_frame_t)); i++) {
        map_kernel_frame(kernel_devices[i].paddr, kernel_devices[i].pptr,
                         VMKernelOnly);
        if (!kernel_devices[i].userAvailable) {
            p_region_t reg = {
                .start = kernel_devices[i].paddr,
                .end = kernel_devices[i].paddr + (1 << seL4_LargePageBits),
            };
            reserve_region(reg);
        }
    }
}

/*
 * The following assumes familiarity with RISC-V interrupt delivery and the PLIC.
 * See the RISC-V privileged specification v1.10 and the comment in
 * include/plat/spike/plat/machine.h for more information.
 * RISC-V IRQ handling on seL4 works as follows:
 *
 * On other architectures the kernel masks interrupts between delivering them to
 * user level and receiving the acknowledgement invocation. This strategy doesn't
 * work on RISC-V as an IRQ is implicitly masked when it is claimed, until the
 * claim is acknowledged. If we mask and unmask the interrupt at the PLIC while
 * a claim is in progress, we sometimes see IRQ sources that are not masked and
 * unmasked as expected. Because of this, we don't mask and unmask IRQs that are
 * for user level, and we call plic_complete_claim for seL4_IRQHandler_Ack
 * instead.
 */

/**
 * Gets the new active irq from the PLIC or the timer.
 *
 * getNewActiveIRQ is only called by getActiveIRQ and checks for a pending IRQ.
 * We read sip and, if the SEIP bit is set, claim an IRQ from the PLIC. If STIP
 * is set then it is a kernel timer interrupt. Otherwise we return irqInvalid.
 * It is possible to receive irqInvalid from the PLIC if another HART context
 * has claimed the IRQ before us. This function is not idempotent, as it calls
 * plic_get_claim, which accepts an IRQ message from the PLIC and will claim
 * different IRQs on subsequent calls.
 *
 * @return     The new active irq.
 */
static irq_t getNewActiveIRQ(void)
{
    uint64_t sip = read_sip();
    /* Interrupt priority (high to low): external -> software -> timer */
    if (sip & BIT(SEXTERNAL_IP)) {
        return plic_get_claim();
#ifdef ENABLE_SMP_SUPPORT
    } else if (sip & BIT(SIPI_IP)) {
        sbi_clear_ipi();
        return ipi_get_irq();
#endif
    } else if (sip & BIT(STIMER_IP)) {
        return INTERRUPT_CORE_TIMER;
    }

    return irqInvalid;
}

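/* Per-hart cache of the IRQ returned by getNewActiveIRQ, held until the kernel
 * acknowledges it in ackInterrupt. */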
static uint32_t active_irq[CONFIG_MAX_NUM_NODES] = { irqInvalid };

/**
 * Gets the active irq. Returns the same irq if called again before ackInterrupt.
 *
 * getActiveIRQ is used to return a currently pending IRQ. This function can be
 * called multiple times and needs to return the same IRQ until ackInterrupt is
 * called. getActiveIRQ returns irqInvalid if no interrupt is pending. It is
 * assumed that if isIRQPending is true, then getActiveIRQ will not return
 * irqInvalid. getActiveIRQ will call getNewActiveIRQ and cache its result until
 * ackInterrupt is called.
 *
 * @return     The active irq.
 */
static inline irq_t getActiveIRQ(void)
{
    uint32_t irq;
    if (!IS_IRQ_VALID(active_irq[CURRENT_CPU_INDEX()])) {
        active_irq[CURRENT_CPU_INDEX()] = getNewActiveIRQ();
    }

    if (IS_IRQ_VALID(active_irq[CURRENT_CPU_INDEX()])) {
        irq = active_irq[CURRENT_CPU_INDEX()];
    } else {
        irq = irqInvalid;
    }

    return irq;
}

#ifdef HAVE_SET_TRIGGER
/**
 * Sets the irq trigger.
 *
 * setIRQTrigger can change the trigger between edge and level at the PLIC for
 * external interrupts. Whether the PLIC supports this operation is
 * implementation specific.
 *
 * @param[in]  irq             The irq
 * @param[in]  edge_triggered  edge triggered otherwise level triggered
 */
void setIRQTrigger(irq_t irq, bool_t edge_triggered)
{
    plic_irq_set_trigger(irq, edge_triggered);
}
#endif

/* isIRQPending is used to determine whether to preempt long running
 * operations at various preemption points throughout the kernel. If this
 * returns true, it means that if the kernel were to return to user mode, it
 * would then immediately take an interrupt. We check the sip register to see
 * whether either a timer interrupt (STIP) or an external interrupt (SEIP) is
 * pending. We don't check software-generated interrupts. These are used to
 * perform cross-core signalling, which isn't currently supported.
 * TODO: Add SSIP check when SMP support is added.
 */
static inline bool_t isIRQPending(void)
{
    word_t sip = read_sip();
    return (sip & (BIT(STIMER_IP) | BIT(SEXTERNAL_IP)));
}

/**
 * Disable or enable IRQs.
 *
 * maskInterrupt disables and enables IRQs. When an IRQ is disabled, it should
 * not raise an interrupt on the kernel's HART context. This either masks the
 * core timer via the sie register or masks an external IRQ at the PLIC.
 *
 * @param[in]  disable  True to mask (disable) the IRQ, false to unmask it
 * @param[in]  irq      The irq
 */
static inline void maskInterrupt(bool_t disable, irq_t irq)
{
    assert(IS_IRQ_VALID(irq));
    if (irq == INTERRUPT_CORE_TIMER) {
        if (disable) {
            clear_sie_mask(BIT(STIMER_IE));
        } else {
            set_sie_mask(BIT(STIMER_IE));
        }
#ifdef ENABLE_SMP_SUPPORT
    } else if (irq == irq_reschedule_ipi || irq == irq_remote_call_ipi) {
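        /* IPIs are delivered as software interrupts and are not masked
         * per-IRQ; there is nothing to do here. */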
        return;
#endif
    } else {
        plic_mask_irq(disable, irq);
    }
}

/**
 * The kernel has dealt with the pending interrupt; getActiveIRQ can return the
 * next IRQ.
 *
 * ackInterrupt is used by the kernel to indicate it has processed the interrupt
 * delivery and getActiveIRQ is now able to return a different IRQ number. Note
 * that this is called after a notification has been signalled to user level,
 * but before user level has handled the cause.
 *
 * @param[in]  irq   The irq
 */
static inline void ackInterrupt(irq_t irq)
{
    assert(IS_IRQ_VALID(irq));
    active_irq[CURRENT_CPU_INDEX()] = irqInvalid;

    if (irq == INTERRUPT_CORE_TIMER) {
        /* Reprogramming the timer has cleared the interrupt. */
        return;
    }
#ifdef ENABLE_SMP_SUPPORT
    if (irq == irq_reschedule_ipi || irq == irq_remote_call_ipi) {
        ipi_clear_irq(irq);
    }
#endif
}

#ifndef CONFIG_KERNEL_MCS
void resetTimer(void)
{
    uint64_t target;
    /* Repeatedly try to set the timer in a loop, as otherwise there is a race
     * and we may set a timeout in the past, resulting in it never getting
     * triggered. */
    do {
        target = riscv_read_time() + RESET_CYCLES;
        sbi_set_timer(target);
    } while (riscv_read_time() > target);
}

/**
   DONT_TRANSLATE
 */
BOOT_CODE void initTimer(void)
{
    sbi_set_timer(riscv_read_time() + RESET_CYCLES);
}
#endif /* !CONFIG_KERNEL_MCS */

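/* The kernel performs no L2 cache maintenance on RISC-V platforms, so these
 * hooks are no-ops. */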
void plat_cleanL2Range(paddr_t start, paddr_t end)
{
}

void plat_invalidateL2Range(paddr_t start, paddr_t end)
{
}

void plat_cleanInvalidateL2Range(paddr_t start, paddr_t end)
{
}

BOOT_CODE void initL2Cache(void)
{
}

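/* Per-hart interrupt setup: initialise this hart's PLIC context where required
 * and enable external, timer and (on SMP configurations) software interrupts
 * in the sie CSR. */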
BOOT_CODE void initLocalIRQController(void)
{
    printf("Init local IRQ\n");

#ifdef CONFIG_PLAT_HIFIVE
    /* Init per-hart PLIC */
    plic_init_hart();
#endif

    word_t sie = 0;
    sie |= BIT(SEXTERNAL_IE);
    sie |= BIT(STIMER_IE);

#ifdef ENABLE_SMP_SUPPORT
    /* enable the software-generated interrupts */
    sie |= BIT(SIPI_IE);
#endif

    set_sie_mask(sie);
}

BOOT_CODE void initIRQController(void)
{
    printf("Initializing PLIC...\n");

    plic_init_controller();
}

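/* Called when an interrupt trap is taken but no pending IRQ can be identified;
 * just log it. */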
static inline void handleSpuriousIRQ(void)
{
    printf("Spurious IRQ! SIP %lx\n", read_sip());
}