/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(GD_GPL)
 */

#ifndef __ARCH_FASTPATH_32_H
#define __ARCH_FASTPATH_32_H

#include <config.h>
#include <util.h>
#include <linker.h>
#include <api/types.h>
#include <api/syscall.h>
#include <armv/context_switch.h>
#include <arch/machine/debug.h>
#include <smp/lock.h>
#include <machine/fpu.h>

/* When building the fastpath, the assembler in traps.S makes these
 * assumptions. Because compile_asserts are hard to do in assembler,
 * we place them here. */
compile_assert(SysCall_Minus1, SysCall == -1)
compile_assert(SysReplyRecv_Minus2, SysReplyRecv == -2)
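
/* Illustrative only (not the literal traps.S code): because the two
 * fastpath-eligible syscall numbers are adjacent at -1 and -2, a single
 * unsigned range check suffices to select the fastpath, e.g. in C terms:
 *
 *   (word_t)(syscall - SysReplyRecv) <= 1
 */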

/* Use macros so as not to break verification */
#define endpoint_ptr_get_epQueue_tail_fp(ep_ptr) TCB_PTR(endpoint_ptr_get_epQueue_tail(ep_ptr))
#define cap_vtable_cap_get_vspace_root_fp(vtable_cap) PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(vtable_cap))

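/* Clear the local exclusive monitor with a dummy STREX, so that a user-level
 * LDREX/STREX pair cannot spuriously succeed across the context switch. */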
/** MODIFIES: [*] */
/** DONT_TRANSLATE */
static inline void
clearExMonitor_fp(void)
{
    word_t temp1 = 0;
    word_t temp2;
    asm volatile (
        "strex %[output], %[mem], [%[mem]]"
        : [output]"+r"(temp1)
        : [mem]"r"(&temp2)
    );
}

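/* Fastpath-specialised context switch: install the new address space using
 * the hardware ASID already stored in the page directory, switch the VCPU
 * when hypervisor support is enabled, update the current-thread (and, where
 * configured, globals-frame) state, and clear the exclusive monitor. */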
static inline void FORCE_INLINE
switchToThread_fp(tcb_t *thread, pde_t *cap_pd, pde_t stored_hw_asid)
{
    hw_asid_t hw_asid;

    hw_asid = pde_pde_invalid_get_stored_hw_asid(stored_hw_asid);
    armv_contextSwitch_HWASID(cap_pd, hw_asid);
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        vcpu_switch(thread->tcbArch.tcbVCPU);
    }

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_switch(NODE_STATE(ksCurThread), thread);
#endif

#if defined(CONFIG_IPC_BUF_GLOBALS_FRAME)
    *armKSGlobalsFrame = thread->tcbIPCBuffer;
    armKSGlobalsFrame[1] = getRegister(thread, TLS_BASE);
#endif
    NODE_STATE(ksCurThread) = thread;
    clearExMonitor_fp();
}

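/* One-mask validity check: the low 4 bits of the cap word must encode
 * cap_page_directory_cap and the bit directly above them must be set. This
 * assumes the generated bitfield layout places the capPDIsMapped flag at
 * bit 4, immediately above the capType field. */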
static inline bool_t
isValidVTableRoot_fp(cap_t pd_cap)
{
    return (pd_cap.words[0] & MASK(5)) ==
           (BIT(4) | cap_page_directory_cap);
}

/* This is an accelerated check that msgLength, which appears
   in the bottom of the msgInfo word, is <= 4 and that msgExtraCaps,
   which appears above it, is zero. We are assuming that n_msgRegisters == 4
   for this check to be useful. By masking out the bottom 3 bits, we are
   really checking that n + 3 <= MASK(3), i.e. n + 3 <= 7 or n <= 4. */
compile_assert(n_msgRegisters_eq_4, n_msgRegisters == 4)
static inline int
fastpath_mi_check(word_t msgInfo)
{
    return ((msgInfo & MASK(seL4_MsgLengthBits + seL4_MsgExtraCapBits))
            + 3) & ~MASK(3);
}
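
/* Worked example (illustrative): with extraCaps == 0 and msgLength == 4 the
 * masked value is 4, 4 + 3 == 7, and 7 & ~MASK(3) == 0, so the check passes;
 * with msgLength == 5 the sum is 8, which survives the mask, so the slowpath
 * is taken. Any non-zero extraCaps likewise pushes the sum past 7. */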

static inline void
fastpath_copy_mrs(word_t length, tcb_t *src, tcb_t *dest)
{
    word_t i;
    register_t reg;

    /* assuming that length <= n_msgRegisters */
    for (i = 0; i < length; i++) {
        /* assuming that the message registers simply increment */
        reg = msgRegisters[0] + i;
        setRegister(dest, reg, getRegister(src, reg));
    }
}

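/* Return control to the user thread: hand the badge and message info over in
 * r0/r1 and restore the remaining register context directly from the thread's
 * saved register frame, finishing with an exception return. */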
/** DONT_TRANSLATE */
static inline void NORETURN
fastpath_restore(word_t badge, word_t msgInfo, tcb_t *cur_thread)
{
    NODE_UNLOCK;

    c_exit_hook();

#ifdef CONFIG_ARM_CP14_SAVE_AND_RESTORE_NATIVE_THREADS
    restore_user_debug_context(NODE_STATE(ksCurThread));
#endif

#ifndef CONFIG_ARCH_ARM_V6
    writeTPIDRURW(getRegister(NODE_STATE(ksCurThread), TPIDRURW));
    writeTPIDRURO(getRegister(NODE_STATE(ksCurThread), TLS_BASE));
#endif

#ifdef CONFIG_HAVE_FPU
    lazyFPURestore(NODE_STATE(ksCurThread));
#endif /* CONFIG_HAVE_FPU */

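    /* Pin the badge, the message info and the current thread pointer to
     * r0-r2; the inline assembly below relies on exactly these registers. */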
    register word_t badge_reg asm("r0") = badge;
    register word_t msgInfo_reg asm("r1") = msgInfo;
    register word_t cur_thread_reg asm("r2") = (word_t)cur_thread;

    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        asm volatile( /* r0 and r1 should be preserved */
            "mov sp, r2         \n"
            /* Pop user registers, preserving r0 and r1 */
            "add sp, sp, #8     \n"
            "pop {r2-r12}       \n"
            /* Restore the user stack pointer */
            "pop {lr}           \n"
            "msr sp_usr, lr     \n"
            /* Prepare the exception return lr */
            "ldr lr, [sp, #4]   \n"
            "msr elr_hyp, lr    \n"
            /* Prepare the user status register */
            "ldr lr, [sp, #8]   \n"
            "msr spsr_hyp, lr   \n"
            /* Finally, pop our LR */
            "pop {lr}           \n"
            /* Return to user */
            "eret"
            :
            : [badge] "r" (badge_reg),
            [msginfo]"r"(msgInfo_reg),
            [cur_thread]"r"(cur_thread_reg)
            : "memory"
        );
    } else {
        asm volatile("mov sp, r2 \n\
                  add sp, sp, %[LR_SVC_OFFSET] \n\
                  ldmdb sp, {r2-lr}^ \n\
                  rfeia sp"
                     :
                     : [badge]"r"(badge_reg),
                     [msginfo]"r"(msgInfo_reg),
                     [cur_thread]"r"(cur_thread_reg),
                     [LR_SVC_OFFSET]"i"(LR_svc * sizeof(word_t))
                     : "memory"
                    );
    }
    UNREACHABLE();
}

#endif /* __ARCH_FASTPATH_32_H */