/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#pragma once

#include <config.h>
#include <util.h>
#include <linker.h>
#include <api/types.h>
#include <api/syscall.h>
#include <armv/context_switch.h>
#include <mode/model/statedata.h>
#include <arch/object/vcpu.h>
#include <machine/fpu.h>
#include <smp/lock.h>

/* When building the fastpath the assembler in traps.S makes these
 * assumptions. Because compile_asserts are hard to do in assembler,
 * we place them here */
compile_assert(SysCall_Minus1, SysCall == -1)
compile_assert(SysReplyRecv_Minus2, SysReplyRecv == -2)

/* Use macros to not break verification */
#define endpoint_ptr_get_epQueue_tail_fp(ep_ptr) TCB_PTR(endpoint_ptr_get_epQueue_tail(ep_ptr))
#define cap_vtable_cap_get_vspace_root_fp(vtable_cap) cap_vtable_root_get_basePtr(vtable_cap)

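/* Fastpath version of a thread switch: switch the VCPU when hypervisor
 * support is enabled, install the new vspace root under the ASID carried in
 * the low 16 bits of stored_hw_asid, and make the thread the current one. */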
static inline void FORCE_INLINE switchToThread_fp(tcb_t *thread, vspace_root_t *vroot, pde_t stored_hw_asid)
{
    asid_t asid;

    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        vcpu_switch(thread->tcbArch.tcbVCPU);
    }
    asid = (asid_t)(stored_hw_asid.words[0] & 0xffff);
    armv_contextSwitch(vroot, asid);

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_switch(NODE_STATE(ksCurThread), thread);
#endif

    NODE_STATE(ksCurThread) = thread;
}

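/* These helpers write whole mdb_node words directly; they assume the
 * generated bitfield layout keeps mdbPrev alone in words[0] and packs
 * words[1] as mdbNext in the upper bits, mdbRevocable in bit 1 and
 * mdbFirstBadged in bit 0. */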
static inline void mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
    mdb_node_t *node_ptr, word_t mdbNext,
    word_t mdbRevocable, word_t mdbFirstBadged)
{
    node_ptr->words[1] = mdbNext | (mdbRevocable << 1) | mdbFirstBadged;
}

static inline void mdb_node_ptr_set_mdbPrev_np(mdb_node_t *node_ptr, word_t mdbPrev)
{
    node_ptr->words[0] = mdbPrev;
}

static inline bool_t isValidVTableRoot_fp(cap_t vspace_root_cap)
{
    return cap_capType_equals(vspace_root_cap, cap_vtable_root_cap)
           && cap_vtable_root_isMapped(vspace_root_cap);
}

/* This is an accelerated check that msgLength, which appears
   in the bottom of the msgInfo word, is <= 4 and that msgExtraCaps
   which appears above it is zero. We are assuming that n_msgRegisters == 4
   for this check to be useful. Because msgExtraCaps sits immediately above
   the length bits, any non-zero msgExtraCaps makes the masked value exceed
   4, so a single comparison covers both conditions. */
compile_assert(n_msgRegisters_eq_4, n_msgRegisters == 4)
static inline int fastpath_mi_check(word_t msgInfo)
{
    return (msgInfo & MASK(seL4_MsgLengthBits + seL4_MsgExtraCapBits)) > 4;
}
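/* Worked example (assuming seL4_MsgLengthBits == 7 and seL4_MsgExtraCapBits == 2):
 * a length of 4 with no extra caps masks to 4, which is not > 4, so the fastpath
 * is taken; a length of 5 masks to 5, and a single extra cap sets bit 7 giving a
 * value of at least 128, so either case falls through to the slow path. */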

static inline void fastpath_copy_mrs(word_t length, tcb_t *src, tcb_t *dest)
{
    word_t i;
    register_t reg;

    /* assuming that length < n_msgRegisters */
    for (i = 0; i < length; i++) {
        /* assuming that the message registers (x2..x5 on AArch64) have
         * consecutive register numbers */
        reg = msgRegisters[0] + i;
        setRegister(dest, reg, getRegister(src, reg));
    }
}

#ifndef CONFIG_KERNEL_MCS
static inline int fastpath_reply_cap_check(cap_t cap)
{
    return cap_capType_equals(cap, cap_reply_cap);
}
#endif

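/* Return to user level along the fastpath: release the node lock, run the C
 * exit hook and (when configured) the lazy FPU restore, then point sp at the
 * thread's saved register array and reload SP_EL0, ELR, SPSR and the general
 * purpose registers from it, leaving the badge in x0 and msgInfo in x1 before
 * the final eret. */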
/** DONT_TRANSLATE */
static inline void NORETURN FORCE_INLINE fastpath_restore(word_t badge, word_t msgInfo, tcb_t *cur_thread)
{
    NODE_UNLOCK;

    c_exit_hook();

#ifdef CONFIG_HAVE_FPU
    lazyFPURestore(NODE_STATE(ksCurThread));
#endif /* CONFIG_HAVE_FPU */

    register word_t badge_reg asm("x0") = badge;
    register word_t msgInfo_reg asm("x1") = msgInfo;
    register word_t cur_thread_reg asm("x2") = (word_t)cur_thread->tcbArch.tcbContext.registers;

    asm volatile(
        "mov     sp, x2                     \n"

        /* Restore the thread's SP_EL0, ELR (return address), and SPSR */
        "ldp     x21, x22, [sp, %[SP_EL0]]  \n"
        "ldr     x23, [sp, %[SPSR_EL1]]     \n"
        "msr     sp_el0, x21                \n"
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        "msr     elr_el2, x22               \n"
        "msr     spsr_el2, x23              \n"
#else
        "msr     elr_el1, x22               \n"
        "msr     spsr_el1, x23              \n"
#endif

        /* Restore remaining registers */
        "ldp     x2,  x3,  [sp, #16 * 1]    \n"
        "ldp     x4,  x5,  [sp, #16 * 2]    \n"
        "ldp     x6,  x7,  [sp, #16 * 3]    \n"
        "ldp     x8,  x9,  [sp, #16 * 4]    \n"
        "ldp     x10, x11, [sp, #16 * 5]    \n"
        "ldp     x12, x13, [sp, #16 * 6]    \n"
        "ldp     x14, x15, [sp, #16 * 7]    \n"
        "ldp     x16, x17, [sp, #16 * 8]    \n"
        "ldp     x18, x19, [sp, #16 * 9]    \n"
        "ldp     x20, x21, [sp, #16 * 10]   \n"
        "ldp     x22, x23, [sp, #16 * 11]   \n"
        "ldp     x24, x25, [sp, #16 * 12]   \n"
        "ldp     x26, x27, [sp, #16 * 13]   \n"
        "ldp     x28, x29, [sp, #16 * 14]   \n"
        "ldr     x30, [sp, %[LR]]           \n"
        "eret                                 "
        :
        : "r"(badge_reg), "r"(msgInfo_reg), "r"(cur_thread_reg),
        [SP_EL0] "i"(PT_SP_EL0), [SPSR_EL1] "i"(PT_SPSR_EL1), [LR] "i"(PT_LR)
        : "memory"
    );

    UNREACHABLE();
}