/**
 * \file
 * \brief Dispatcher architecture-specific implementation.
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/dispatch.h>
#include <barrelfish/dispatcher_arch.h>
#include <barrelfish/curdispatcher_arch.h>
#include <barrelfish/syscalls.h>
#include "threads_priv.h"
#include <arch/ldt.h>

/* entry points defined in assembler code */
extern void run_entry(void);
extern void pagefault_entry(void);
extern void disabled_pagefault_entry(void);
extern void trap_entry(void);
extern void lrpc_entry(void);
void __attribute__ ((visibility ("hidden"))) disp_resume_end(void);

/**
 * \brief Architecture-specific dispatcher initialisation
 */
void disp_arch_init(dispatcher_handle_t handle)
{
    struct dispatcher_shared_x86_64 *disp =
        get_dispatcher_shared_x86_64(handle);

    /* Set entry points */
    disp->d.dispatcher_run = (lvaddr_t)run_entry;
    disp->d.dispatcher_lrpc = (lvaddr_t)lrpc_entry;
    disp->d.dispatcher_pagefault = (lvaddr_t)pagefault_entry;
    disp->d.dispatcher_pagefault_disabled = (lvaddr_t)disabled_pagefault_entry;
    disp->d.dispatcher_trap = (lvaddr_t)trap_entry;

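    /* Mark the critical section of disp_resume(): if the kernel interrupts
     * this dispatcher while its program counter lies in this range, it
     * treats the dispatcher as disabled, so the partially-restored register
     * state is not corrupted by an upcall. */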
    disp->crit_pc_low = (lvaddr_t)disp_resume;
    disp->crit_pc_high = (lvaddr_t)disp_resume_end;

    /* Set up the LDT */
    ldt_init_disabled(handle);
}

/**
 * \brief Resume execution of a given register state
 *
 * This function resumes execution of the given register state on the
 * current dispatcher. It may only be called while the dispatcher is
 * disabled.
 *
 * \param handle   Handle to the current dispatcher
 * \param archregs Register state snapshot to resume
 */
void disp_resume(dispatcher_handle_t handle, arch_registers_state_t *archregs)
{
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);

    assert_disabled(disp->disabled);
    assert_disabled(disp->haswork);

    /* sanity check user state */
    struct registers_x86_64 *regs = archregs;
    assert_disabled(regs->rip > BASE_PAGE_SIZE);
    assert_disabled((regs->eflags & USER_EFLAGS) == USER_EFLAGS); // flags

#ifdef CONFIG_DEBUG_DEADLOCKS
    ((struct disp_priv *)disp)->yieldcount = 0;
#endif

    // Re-enable the dispatcher
    disp->disabled = false; // doesn't take effect while we're in disp_resume()

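    /* The hard-coded N*8 offsets in the asm below assume the field order of
     * struct registers_x86_64: rax, rbx, rcx, rdx, rsi, rdi, rbp, rsp,
     * r8-r15, then rip at 16*8 and eflags at 17*8. A sketch of compile-time
     * checks that would pin this layout down (the field names are an
     * assumption about the struct definition):
     *
     *   _Static_assert(offsetof(struct registers_x86_64, rsp) ==  7*8, "");
     *   _Static_assert(offsetof(struct registers_x86_64, rip) == 16*8, "");
     */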
    // Resume program execution
    __asm volatile ("mov        %[fs], %%ax             \n\t"
                    "mov        %%ax, %%fs              \n\t"
                    "mov        %[gs], %%ax             \n\t"
                    "mov        %%ax, %%gs              \n\t"
                    "fxrstor    %[fxsave_area]          \n\t"
                    "movq        0*8(%[regs]), %%rax    \n\t"
                    "movq        2*8(%[regs]), %%rcx    \n\t"
                    "movq        3*8(%[regs]), %%rdx    \n\t"
                    "movq        4*8(%[regs]), %%rsi    \n\t"
                    "movq        5*8(%[regs]), %%rdi    \n\t"
                    "movq        6*8(%[regs]), %%rbp    \n\t"
                    "movq        8*8(%[regs]), %%r8     \n\t"
                    "movq        9*8(%[regs]), %%r9     \n\t"
                    "movq       10*8(%[regs]), %%r10    \n\t"
                    "movq       11*8(%[regs]), %%r11    \n\t"
                    "movq       12*8(%[regs]), %%r12    \n\t"
                    "movq       13*8(%[regs]), %%r13    \n\t"
                    "movq       14*8(%[regs]), %%r14    \n\t"
                    "movq       15*8(%[regs]), %%r15    \n\t"
                    "pushq      %[ss]                   \n\t"   // SS
                    "pushq       7*8(%[regs])           \n\t"   // RSP
                    "pushq      17*8(%[regs])           \n\t"   // RFLAGS
                    "pushq      %[cs]                   \n\t"   // CS
                    "pushq      16*8(%[regs])           \n\t"   // RIP
                    "movq        1*8(%[regs]), %%rbx    \n\t"   // RBX was base register
                    "iretq                              \n\t"
                    : /* No output */
                    :
                    [regs] "b" (regs),
                    [ss] "i" (USER_SS),
                    [cs] "i" (USER_CS),
                    [fs] "m" (regs->fs),
                    [gs] "m" (regs->gs),
                    [fxsave_area] "m" (regs->fxsave_area)
                    );

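    /* Define the disp_resume_end label referenced by crit_pc_high above.
     * Control never reaches this point: the iretq above has already left
     * the function. */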
    __asm volatile ("disp_resume_end:");
}

/**
 * \brief Switch execution between two register states, and turn off
 * disabled activations.
 *
 * This function saves as much as necessary of the current register state
 * (which, when resumed, will return to the caller), and switches execution
 * by resuming the given register state. It may only be called while the
 * dispatcher is disabled. As a side effect, activations are re-enabled.
 * Note that the thread context saved is a voluntary save, so only
 * callee-save registers need to be saved, but we don't currently provide
 * any way to optimise the corresponding resume.
 *
 * \param handle     Handle to the current dispatcher
 * \param from_state Location to save the current register state
 * \param to_state   Location from which to resume the new register state
 */
// XXX: Needs to be compiled with -O2, otherwise we use too many
// registers. Have to think about how to circumvent this without needing
// -O2.
void
#if defined(__GNUC__) && !defined(__clang__) && !defined(__ICC)
__attribute__((optimize(2)))
#endif
disp_switch(dispatcher_handle_t handle, arch_registers_state_t *from_state,
            arch_registers_state_t *to_state)
{
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    assert_disabled(disp->disabled);
    assert_disabled(disp->haswork);

    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);

    struct registers_x86_64 *from_regs = (struct registers_x86_64*)from_state;
    struct registers_x86_64 *to_regs   = (struct registers_x86_64*)to_state;
    assert_disabled(to_regs != NULL);

    // Save the resume IP, stack and control registers, then switch stacks
    // to the dispatcher stack and call disp_resume (defined above) to
    // switch context.
    /*
     * NB: we shouldn't have to save RBP here, rather just list it as clobbered.
     * However, GCC without optimisations uses RBP as a frame pointer and won't
     * let us do that, so instead we manually save and restore it. This is
     * usually redundant (without optimisation, GCC saves and restores it; with
     * optimisation the register is used and thus GCC saves it anyway).
     */
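    /* The "D" and "S" constraints below pin disp and to_regs into %rdi and
     * %rsi, the first two System V AMD64 argument registers, so that the
     * final callq invokes disp_resume(disp, to_regs) on the new stack
     * (disp aliases the dispatcher handle). */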
    __asm volatile ("movq       %%rbp,  6*8(%[regs])    \n\t"
                    "movq       %%rsp,  7*8(%[regs])    \n\t"
                    "lea        switch_resume(%%rip), %%rcx\n\t"
                    "movq       %%rcx, 16*8(%[regs])    \n\t"   // RIP
                    "pushfq                             \n\t"
                    "popq       17*8(%[regs])           \n\t"   // RFLAGS
                    "mov        %%fs, %%bx              \n\t"
                    "mov        %%bx, %[fs]             \n\t"
                    "mov        %%gs, %%bx              \n\t"
                    "mov        %%bx, %[gs]             \n\t"
                    "movq       %[stack], %%rsp         \n\t"   // Switch stack
                    "callq      disp_resume             \n\t"
                    :
                    : [regs] "a" (from_regs),
                      [fs] "m" (from_regs->fs),
                      [gs] "m" (from_regs->gs),
                      [stack] "d" ((lvaddr_t)&disp_gen->stack[DISPATCHER_STACK_WORDS]),
                      [disp] "D" (disp),
                      [to_regs] "S" (to_regs)
                    : "rbx", "rcx", "rsp",
                      "r8", "r9", "r10", "r12", "r13", "r14", "r15"
                    );

    __asm volatile ("switch_resume:");
}
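/*
 * Usage sketch (hypothetical caller, not part of this file): a disabled
 * thread scheduler switching from thread `me` to thread `next` might do:
 *
 *   disp_switch(handle, &me->regs, &next->regs);   // field names assumed
 *   // execution continues here (at switch_resume) when `me` is next resumed
 */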

/**
 * \brief Save the current register state and optionally yield the CPU
 *
 * This function saves as much as necessary of the current register state
 * (which, when resumed, will return to the caller), and then either
 * re-enters the thread scheduler or yields the CPU.
 * It may only be called while the dispatcher is disabled.
 * Note that the thread context saved is a voluntary save, so only
 * callee-save registers need to be saved, but we don't currently provide
 * any way to optimise the corresponding resume.
 *
 * \param handle   Handle to the current dispatcher
 * \param state    Location to save the current register state
 * \param yield    If true, yield the CPU to the kernel; otherwise re-run the
 *                 thread scheduler
 * \param yield_to Endpoint capability of the dispatcher to which we yield
 */
// XXX: Needs to be compiled with -O2, otherwise we use too many
// registers. Have to think about how to circumvent this without needing
// -O2.
void
#if defined(__GNUC__) && !defined(__clang__) && !defined(__ICC)
__attribute__((optimize(2)))
#endif
disp_save(dispatcher_handle_t handle, arch_registers_state_t *state,
          bool yield, capaddr_t yield_to)
{
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    assert_disabled(disp->disabled);

    struct registers_x86_64 *regs = state;

    // Save the resume IP, stack and control registers.
    // See disp_switch above for details.
    // XXX: Using the clobber list here to make the compiler save only
    // used registers. Be very careful when changing the code below
    // this asm block! If registers in the clobber list are
    // subsequently used, they won't be restored at save_resume.
    __asm volatile ("movq       %%rbp,  6*8(%[regs])    \n\t"
                    "movq       %%rsp,  7*8(%[regs])    \n\t"
                    "lea        save_resume(%%rip), %%rcx\n\t"
                    "movq       %%rcx, 16*8(%[regs])    \n\t"   // RIP
                    "pushfq                             \n\t"
                    "popq       17*8(%[regs])           \n\t"   // RFLAGS
                    "mov        %%fs, %%bx              \n\t"
                    "mov        %%bx, %[fs]             \n\t"
                    "mov        %%gs, %%bx              \n\t"
                    "mov        %%bx, %[gs]             \n\t"
                    "fxsave     %[fxsave_area]          \n\t"
                    :
                    : [regs] "a" (regs),
                      [fs] "m" (regs->fs),
                      [fxsave_area] "m" (regs->fxsave_area),
                      [gs] "m" (regs->gs)
                    : "rbx", "rcx", "rdx", "rsi", "rdi",
                      "r8", "r9", "r10", "r12", "r13", "r14", "r15"
                    );

    if (yield) {
//        trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_SYS_YIELD, 1);

        sys_yield(yield_to);
        // may fail if the target doesn't exist; if so, just fall through
    }
    // this code won't run if the yield succeeded

    // re-enter the thread scheduler; this doesn't return, and will call
    // disp_yield if there's nothing to do
    thread_run_disabled(handle);

    __asm volatile ("save_resume:");
}
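/*
 * Usage sketch (hypothetical caller, not part of this file): a directed
 * yield to another dispatcher, saving the current thread's state first:
 *
 *   disp_save(handle, &me->regs, true, yield_to_ep);   // names assumed
 *   // execution continues here (at save_resume) once `me` is rescheduled
 */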
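/**
 * \brief Save the current register state and suspend the core, removing
 *        this domain's KCB.
 *
 * Analogous to disp_save() above, but instead of yielding it calls
 * sys_suspend(false), which removes this domain's kernel control block
 * without halting the core; execution later resumes at save_rm_kcb_resume.
 * Unlike disp_save(), this function disables the dispatcher itself, so it
 * must be called while the dispatcher is enabled.
 */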
void
#if defined(__GNUC__) && !defined(__clang__) && !defined(__ICC)
__attribute__((optimize(2)))
#endif
disp_save_rm_kcb(void)
{
    dispatcher_handle_t handle = disp_disable();
    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    arch_registers_state_t *state =
        dispatcher_get_enabled_save_area(handle);

    assert_disabled(disp_gen->runq != NULL);
    assert_disabled(disp->haswork);

    struct registers_x86_64 *regs = state;

    // Save the resume IP, stack and control registers.
    // See disp_switch above for details.
    // XXX: Using the clobber list here to make the compiler save only
    // used registers. Be very careful when changing the code below
    // this asm block! If registers in the clobber list are
    // subsequently used, they won't be restored at save_rm_kcb_resume.
    __asm volatile ("movq       %%rbp,  6*8(%[regs])    \n\t"
                    "movq       %%rsp,  7*8(%[regs])    \n\t"
                    "lea        save_rm_kcb_resume(%%rip), %%rcx\n\t"
                    "movq       %%rcx, 16*8(%[regs])    \n\t"   // RIP
                    "pushfq                             \n\t"
                    "popq       17*8(%[regs])           \n\t"   // RFLAGS
                    "mov        %%fs, %%bx              \n\t"
                    "mov        %%bx, %[fs]             \n\t"
                    "mov        %%gs, %%bx              \n\t"
                    "mov        %%bx, %[gs]             \n\t"
                    :
                    : [regs] "a" (regs),
                      [fs] "m" (regs->fs),
                      [gs] "m" (regs->gs)
                    : "rbx", "rcx", "rdx", "rsi", "rdi",
                      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
                    );

    // don't halt, but remove the KCB of this domain
    sys_suspend(false);
    //assert_disabled(!"This code won't run if the suspend succeeded.");

    // Instead, execution resumes here directly
    __asm volatile ("save_rm_kcb_resume:");
}