1/*
2 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
3 * Copyright 2015, 2016 Hesham Almatary <heshamelmatary@gmail.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0-only
6 */
7
8#include <config.h>
9#include <model/statedata.h>
10#include <arch/fastpath/fastpath.h>
11#include <arch/kernel/traps.h>
12#include <machine/debug.h>
13#include <api/syscall.h>
14#include <util.h>
15#include <arch/machine/hardware.h>
16#include <machine/fpu.h>
17
18#include <benchmark/benchmark_track.h>
19#include <benchmark/benchmark_utilisation.h>
20
21/** DONT_TRANSLATE */
/** DONT_TRANSLATE */
/*
 * Restore the current thread's saved user register context and enter user
 * mode via `sret`. Never returns to the caller.
 *
 * The saved context is the `registers` array inside the current TCB's arch
 * context; `t0` is loaded with its base address and used to index every
 * word_t-sized slot. Statement order in the asm is load-bearing: `t0` must
 * be reloaded last (slot 4) because it is the base pointer, and `t1` (slot 5)
 * second-to-last because it is used as scratch for the CSR writes.
 */
void VISIBLE NORETURN restore_user_context(void)
{
    /* Base address of the current thread's saved register array. */
    word_t cur_thread_reg = (word_t) NODE_STATE(ksCurThread)->tcbArch.tcbContext.registers;
    c_exit_hook();
    NODE_UNLOCK_IF_HELD;

#ifdef ENABLE_SMP_SUPPORT
    /* On SMP, sscratch holds a per-core kernel stack/TLS pointer rather than
     * the thread context, so stash the context pointer one word below it for
     * the trap-entry path to pick up. NOTE(review): assumes the asm trap
     * entry reads back this stack slot — confirm against the entry stub. */
    word_t sp;
    asm volatile("csrr %0, sscratch" : "=r"(sp));
    sp -= sizeof(word_t);
    *((word_t *)sp) = cur_thread_reg;
#endif


#ifdef CONFIG_HAVE_FPU
    /* Lazily restore FPU state, then record the resulting FPU enable state
     * in the thread's saved status so it is preserved across the switch. */
    lazyFPURestore(NODE_STATE(ksCurThread));
    set_tcb_fs_state(NODE_STATE(ksCurThread), isFpuEnable());
#endif

    /* Reload all user GPRs from the saved context, then sepc (slot 34) and
     * sstatus (slot 32), finishing with t1 and t0 themselves. */
    asm volatile(
        "mv t0, %[cur_thread]       \n"
        LOAD_S " ra, (0*%[REGSIZE])(t0)  \n"
        LOAD_S "  sp, (1*%[REGSIZE])(t0)  \n"
        LOAD_S "  gp, (2*%[REGSIZE])(t0)  \n"
        /* skip tp */
        /* skip x5/t0 */
        /* no-op store conditional to clear monitor state */
        /* this may succeed in implementations with very large reservations, but the saved ra is dead */
        "sc.w zero, zero, (t0)\n"
        LOAD_S "  t2, (6*%[REGSIZE])(t0)  \n"
        LOAD_S "  s0, (7*%[REGSIZE])(t0)  \n"
        LOAD_S "  s1, (8*%[REGSIZE])(t0)  \n"
        LOAD_S "  a0, (9*%[REGSIZE])(t0) \n"
        LOAD_S "  a1, (10*%[REGSIZE])(t0) \n"
        LOAD_S "  a2, (11*%[REGSIZE])(t0) \n"
        LOAD_S "  a3, (12*%[REGSIZE])(t0) \n"
        LOAD_S "  a4, (13*%[REGSIZE])(t0) \n"
        LOAD_S "  a5, (14*%[REGSIZE])(t0) \n"
        LOAD_S "  a6, (15*%[REGSIZE])(t0) \n"
        LOAD_S "  a7, (16*%[REGSIZE])(t0) \n"
        LOAD_S "  s2, (17*%[REGSIZE])(t0) \n"
        LOAD_S "  s3, (18*%[REGSIZE])(t0) \n"
        LOAD_S "  s4, (19*%[REGSIZE])(t0) \n"
        LOAD_S "  s5, (20*%[REGSIZE])(t0) \n"
        LOAD_S "  s6, (21*%[REGSIZE])(t0) \n"
        LOAD_S "  s7, (22*%[REGSIZE])(t0) \n"
        LOAD_S "  s8, (23*%[REGSIZE])(t0) \n"
        LOAD_S "  s9, (24*%[REGSIZE])(t0) \n"
        LOAD_S "  s10, (25*%[REGSIZE])(t0)\n"
        LOAD_S "  s11, (26*%[REGSIZE])(t0)\n"
        LOAD_S "  t3, (27*%[REGSIZE])(t0) \n"
        LOAD_S "  t4, (28*%[REGSIZE])(t0) \n"
        LOAD_S "  t5, (29*%[REGSIZE])(t0) \n"
        LOAD_S "  t6, (30*%[REGSIZE])(t0) \n"
        /* Get next restored tp */
        LOAD_S "  t1, (3*%[REGSIZE])(t0)  \n"
        /* get restored tp */
        "add tp, t1, x0  \n"
        /* get sepc */
        LOAD_S "  t1, (34*%[REGSIZE])(t0)\n"
        "csrw sepc, t1  \n"
#ifndef ENABLE_SMP_SUPPORT
        /* Write back sscratch with cur_thread_reg to get it back on the next trap entry */
        "csrw sscratch, t0         \n"
#endif
        /* restore the saved sstatus (slot 32) */
        LOAD_S "  t1, (32*%[REGSIZE])(t0) \n"
        "csrw sstatus, t1\n"

        /* t1 and t0 last: t0 is the context base pointer until here */
        LOAD_S "  t1, (5*%[REGSIZE])(t0) \n"
        LOAD_S "  t0, (4*%[REGSIZE])(t0) \n"
        "sret"
        : /* no output */
        : [REGSIZE] "i"(sizeof(word_t)),
        [cur_thread] "r"(cur_thread_reg)
        : "memory"
    );

    UNREACHABLE();
}
101
/*
 * C entry point for interrupt traps. Takes the kernel lock (unless the
 * active IRQ is the remote-call IPI), dispatches the interrupt, and resumes
 * the current thread. Never returns.
 */
void VISIBLE NORETURN c_handle_interrupt(void)
{
    /* Skip the lock for the remote-call IPI; presumably the sending core
     * holds the lock while waiting, so taking it here would deadlock —
     * NOTE(review): confirm against the IPI/lock implementation. */
    NODE_LOCK_IRQ_IF(getActiveIRQ() != irq_remote_call_ipi);

    c_entry_hook();

    handleInterruptEntry();

    restore_user_context();
    UNREACHABLE();
}
113
114void VISIBLE NORETURN c_handle_exception(void)
115{
116    NODE_LOCK_SYS;
117
118    c_entry_hook();
119
120    word_t scause = read_scause();
121    switch (scause) {
122    case RISCVInstructionAccessFault:
123    case RISCVLoadAccessFault:
124    case RISCVStoreAccessFault:
125    case RISCVLoadPageFault:
126    case RISCVStorePageFault:
127    case RISCVInstructionPageFault:
128        handleVMFaultEvent(scause);
129        break;
130    default:
131#ifdef CONFIG_HAVE_FPU
132        if (!isFpuEnable()) {
133            /* we assume the illegal instruction is caused by FPU first */
134            handleFPUFault();
135            setNextPC(NODE_STATE(ksCurThread), getRestartPC(NODE_STATE(ksCurThread)));
136            break;
137        }
138#endif
139        handleUserLevelFault(scause, 0);
140        break;
141    }
142
143    restore_user_context();
144    UNREACHABLE();
145}
146
147void NORETURN slowpath(syscall_t syscall)
148{
149    /* check for undefined syscall */
150    if (unlikely(syscall < SYSCALL_MIN || syscall > SYSCALL_MAX)) {
151        handleUnknownSyscall(syscall);
152    } else {
153        handleSyscall(syscall);
154    }
155
156    restore_user_context();
157    UNREACHABLE();
158}
159
160void VISIBLE NORETURN c_handle_syscall(word_t cptr, word_t msgInfo, word_t unused1, word_t unused2, word_t unused3,
161                                       word_t unused4, word_t reply, syscall_t syscall)
162{
163    NODE_LOCK_SYS;
164
165    c_entry_hook();
166
167#ifdef CONFIG_FASTPATH
168    if (syscall == (syscall_t)SysCall) {
169        fastpath_call(cptr, msgInfo);
170        UNREACHABLE();
171    } else if (syscall == (syscall_t)SysReplyRecv) {
172#ifdef CONFIG_KERNEL_MCS
173        fastpath_reply_recv(cptr, msgInfo, reply);
174#else
175        fastpath_reply_recv(cptr, msgInfo);
176#endif
177        UNREACHABLE();
178    }
179#endif /* CONFIG_FASTPATH */
180    slowpath(syscall);
181    UNREACHABLE();
182}
183