/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#pragma once

#include <config.h>
#include <util.h>
#include <linker.h>
#include <api/types.h>
#include <api/syscall.h>
#include <armv/context_switch.h>
#include <arch/machine/debug.h>
#include <smp/lock.h>
#include <machine/fpu.h>
/* When building the fastpath, the assembly code in traps.S makes these
 * assumptions. Because compile_asserts are hard to do in assembly,
 * we place them here. */
compile_assert(SysCall_Minus1, SysCall == -1)
compile_assert(SysReplyRecv_Minus2, SysReplyRecv == -2)

/* Use macros to avoid breaking verification */
#define endpoint_ptr_get_epQueue_tail_fp(ep_ptr) TCB_PTR(endpoint_ptr_get_epQueue_tail(ep_ptr))
#define cap_vtable_cap_get_vspace_root_fp(vtable_cap) PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(vtable_cap))

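/* Clearing the local exclusive monitor on a context switch prevents a stale
 * LDREX issued by the previous thread from pairing with a later STREX in the
 * new thread. A dummy STREX, as below (to a kernel stack variable whose
 * value is never used), is enough to clear the monitor. */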
/** MODIFIES: [*] */
/** DONT_TRANSLATE */
static inline void clearExMonitor_fp(void)
{
    word_t temp1 = 0;
    word_t temp2;
    asm volatile(
        "strex %[output], %[mem], [%[mem]]"
        : [output]"+r"(temp1)
        : [mem]"r"(&temp2)
    );
}

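/* stored_hw_asid is the page directory entry in which the kernel caches the
 * hardware ASID assigned to this address space, letting the fastpath switch
 * without looking the ASID up again. */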
static inline void FORCE_INLINE switchToThread_fp(tcb_t *thread, pde_t *cap_pd, pde_t stored_hw_asid)
{
    hw_asid_t hw_asid;

    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        vcpu_switch(thread->tcbArch.tcbVCPU);
    }
    hw_asid = pde_pde_invalid_get_stored_hw_asid(stored_hw_asid);
    armv_contextSwitch_HWASID(cap_pd, hw_asid);

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_switch(NODE_STATE(ksCurThread), thread);
#endif

    NODE_STATE(ksCurThread) = thread;
    clearExMonitor_fp();
}

#ifndef CONFIG_KERNEL_MCS
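/* These helpers write the MDB node words directly, collapsing what would
 * otherwise be several generated bitfield setter calls (each a
 * read-modify-write) into a single store. The packing assumed here
 * (mdbNext, mdbRevocable and mdbFirstBadged in word 1, mdbPrev in word 0)
 * must match the generated mdb_node_t layout. */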
static inline void mdb_node_ptr_mset_mdbNext_mdbRevocable_mdbFirstBadged(
    mdb_node_t *node_ptr, word_t mdbNext,
    word_t mdbRevocable, word_t mdbFirstBadged)
{
    node_ptr->words[1] = mdbNext | (mdbRevocable << 1) | mdbFirstBadged;
}

static inline void mdb_node_ptr_set_mdbPrev_np(mdb_node_t *node_ptr, word_t mdbPrev)
{
    node_ptr->words[0] = mdbPrev;
}
#endif

static inline bool_t isValidVTableRoot_fp(cap_t pd_cap)
{
    return (pd_cap.words[0] & MASK(5)) ==
           (BIT(4) | cap_page_directory_cap);
}
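/* The single comparison above folds two checks together, assuming the
 * generated bitfield layout places capType in the low four bits of word 0
 * with capPDIsMapped at bit 4: the cap must be a page directory cap, and
 * that page directory must be mapped. */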

/* This is an accelerated check that msgLength, which appears
   in the bottom of the msgInfo word, is <= 4 and that msgExtraCaps
   which appears above it is zero. We are assuming that n_msgRegisters == 4
   for this check to be useful. By masking out the bottom 3 bits, we are
   really checking that n + 3 <= MASK(3), i.e. n + 3 <= 7, or n <= 4. */
compile_assert(n_msgRegisters_eq_4, n_msgRegisters == 4)
static inline int fastpath_mi_check(word_t msgInfo)
{
    return ((msgInfo & MASK(seL4_MsgLengthBits + seL4_MsgExtraCapBits))
            + 3) & ~MASK(3);
}
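/* Worked example (illustrative, assuming the usual seL4 values
 * seL4_MsgLengthBits == 7 and seL4_MsgExtraCapBits == 2, so the mask
 * covers the low 9 bits):
 *   length 4, no extra caps: (4 + 3) & ~7 == 0,  so stay on the fastpath
 *   length 5, no extra caps: (5 + 3) & ~7 == 8,  so fall back to the slowpath
 *   length 2, one extra cap: (((1 << 7) | 2) + 3) & ~7 == 128, slowpath again
 */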

static inline void fastpath_copy_mrs(word_t length, tcb_t *src, tcb_t *dest)
{
    word_t i;
    register_t reg;

    /* assuming that length <= n_msgRegisters */
    for (i = 0; i < length; i++) {
        /* assuming that the message registers simply increment */
        reg = msgRegisters[0] + i;
        setRegister(dest, reg, getRegister(src, reg));
    }
}
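/* On AArch32 the message registers are expected to be the contiguous run
 * r2..r5, which is what lets the loop above compute each register as
 * msgRegisters[0] + i rather than indexing the msgRegisters array. */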

#ifndef CONFIG_KERNEL_MCS
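/* As with isValidVTableRoot_fp, one masked comparison checks both that the
 * capType field (assumed to occupy the low four bits) is cap_reply_cap and
 * that the adjacent flag bit is clear, presumably capReplyMaster: the
 * fastpath only handles ordinary, non-master reply caps. */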
static inline int fastpath_reply_cap_check(cap_t cap)
{
    return (cap.words[0] & MASK(5)) == cap_reply_cap;
}
#endif

/** DONT_TRANSLATE */
static inline void NORETURN FORCE_INLINE fastpath_restore(word_t badge, word_t msgInfo, tcb_t *cur_thread)
{
    NODE_UNLOCK;

    c_exit_hook();

#ifdef CONFIG_ARM_CP14_SAVE_AND_RESTORE_NATIVE_THREADS
    restore_user_debug_context(NODE_STATE(ksCurThread));
#endif

#ifdef CONFIG_HAVE_FPU
    lazyFPURestore(NODE_STATE(ksCurThread));
#endif /* CONFIG_HAVE_FPU */

    register word_t badge_reg asm("r0") = badge;
    register word_t msgInfo_reg asm("r1") = msgInfo;
    register word_t cur_thread_reg asm("r2") = (word_t)cur_thread;
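
    /* badge and msgInfo are pinned to r0 and r1 because those are the
     * registers the user-level caller reads them from after the exception
     * return; r2 carries the address of the thread's saved register
     * context, which the assembly below uses as its stack pointer. */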

    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        asm volatile( /* r0 and r1 should be preserved */
            "mov sp, r2         \n"
            /* Pop user registers, preserving r0 and r1 */
            "add sp, sp, #8     \n"
            "pop {r2-r12}       \n"
            /* Restore the user stack pointer */
            "pop {lr}           \n"
            "msr sp_usr, lr     \n"
            /* prepare the exception return lr */
            "ldr lr, [sp, #4]   \n"
            "msr elr_hyp, lr    \n"
            /* prepare the user status register */
            "ldr lr, [sp, #8]   \n"
            "msr spsr_hyp, lr   \n"
            /* Finally, pop our LR */
            "pop {lr}           \n"
            /* Return to user */
            "eret"
            :
            : [badge]"r"(badge_reg),
            [msginfo]"r"(msgInfo_reg),
            [cur_thread]"r"(cur_thread_reg)
            : "memory"
        );
    } else {
        asm volatile(
            /* Use the saved register context as our stack */
            "mov sp, r2                    \n"
            /* Point sp at the saved NextIP/CPSR pair at the end of the frame */
            "add sp, sp, %[LR_SVC_OFFSET]  \n"
            /* Restore the user-mode registers r2-r14 from below that point;
             * r0 and r1 already hold the badge and message info */
            "ldmdb sp, {r2-lr}^            \n"
            /* Return to user: load pc from NextIP and cpsr from CPSR */
            "rfeia sp"
            :
            : [badge]"r"(badge_reg),
            [msginfo]"r"(msgInfo_reg),
            [cur_thread]"r"(cur_thread_reg),
            [LR_SVC_OFFSET]"i"(NextIP * sizeof(word_t))
            : "memory"
        );
    }
    UNREACHABLE();
}