/**
 * \file
 * \brief Dispatcher architecture-specific implementation.
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/dispatch.h>
#include <barrelfish/dispatcher_arch.h>
#include <barrelfish/curdispatcher_arch.h>
#include <barrelfish/syscalls.h>
#include "threads_priv.h"

/* entry points defined in assembler code */
extern void run_entry(void);
extern void pagefault_entry(void);
extern void disabled_pagefault_entry(void);
extern void trap_entry(void);

void __attribute__ ((visibility ("hidden"))) disp_resume_end(void);

/**
 * \brief Architecture-specific dispatcher initialisation
 */
void disp_arch_init(dispatcher_handle_t handle)
{
    struct dispatcher_shared_x86_32 *disp =
        get_dispatcher_shared_x86_32(handle);
    struct dispatcher_x86_32 *disp_priv = get_dispatcher_x86_32(handle);

    disp->d.dispatcher_run = (lvaddr_t)run_entry;
    disp->d.dispatcher_pagefault = (lvaddr_t)pagefault_entry;
    disp->d.dispatcher_pagefault_disabled = (lvaddr_t)disabled_pagefault_entry;
    disp->d.dispatcher_trap = (lvaddr_t)trap_entry;

    disp->crit_pc_low = (lvaddr_t)disp_resume;
    disp->crit_pc_high = (lvaddr_t)disp_resume_end;
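    // The kernel treats this dispatcher as disabled whenever it is preempted
    // with EIP in [crit_pc_low, crit_pc_high), which is what lets disp_resume()
    // below clear the disabled flag before its final iret.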

    // Setup GS register
    // XXX: Set this to the kernel-provided GDT dispatcher entry, until we have
    // LDT setup code for x86-32
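    // 0x33 decodes as GDT index 6, TI = 0 (GDT), RPL = 3 (user mode).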
    disp_priv->disp_seg_selector = 0x33;

    /* load this segment to GS */
    __asm volatile("mov %%ax, %%gs"
                   : /* No outputs */
                   : "a" (disp_priv->disp_seg_selector));
}
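
/*
 * The inline asm in the functions below addresses the register save area by
 * 32-bit word index (0*4 = EAX, ..., 11*4 = SS). A compile-time check of that
 * assumption could look like the following sketch (requires C11; the field
 * names are assumed from the comments in the asm blocks, so verify them
 * against the struct registers_x86_32 definition before enabling this):
 */
#if 0
#include <stddef.h>
#define CHECK_REG_OFFSET(field, idx) \
    _Static_assert(offsetof(struct registers_x86_32, field) == (idx) * 4, \
                   "asm word index out of sync with struct registers_x86_32")
CHECK_REG_OFFSET(eax, 0);  CHECK_REG_OFFSET(ebx, 1);
CHECK_REG_OFFSET(ecx, 2);  CHECK_REG_OFFSET(edx, 3);
CHECK_REG_OFFSET(esi, 4);  CHECK_REG_OFFSET(edi, 5);
CHECK_REG_OFFSET(ebp, 6);  CHECK_REG_OFFSET(esp, 7);
CHECK_REG_OFFSET(eip, 8);  CHECK_REG_OFFSET(eflags, 9);
CHECK_REG_OFFSET(cs, 10);  CHECK_REG_OFFSET(ss, 11);
#endif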

/**
 * \brief Resume execution of a given register state
 *
 * This function resumes execution of the given register state on the
 * current dispatcher. It may only be called while the dispatcher is
 * disabled, and it does not return to the caller.
 *
 * \param handle   Handle to the current dispatcher
 * \param archregs Register state snapshot to resume
 */
void disp_resume(dispatcher_handle_t handle, arch_registers_state_t *archregs)
{
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);

    assert_disabled(disp->disabled);
    assert_disabled(disp->haswork);

    /* sanity check user state */
    struct registers_x86_32* regs = archregs;
    assert_disabled(regs->eip > BASE_PAGE_SIZE);
    assert_disabled((regs->eflags & USER_EFLAGS) == USER_EFLAGS); // flags
    assert_disabled(regs->cs == USER_CS);
    assert_disabled(regs->ss == USER_SS);

#ifdef CONFIG_DEBUG_DEADLOCKS
    ((struct disp_priv *)disp)->yieldcount = 0;
#endif

    // Re-enable dispatcher
    disp->disabled = 0; // doesn't take effect while we're in disp_resume()

    // Resume program execution
    __asm volatile ("mov        %[fs], %%fs             \n\t"
                    "mov        %[gs], %%gs             \n\t"
                    "movl        1*4(%[regs]), %%ebx    \n\t"
                    "movl        2*4(%[regs]), %%ecx    \n\t"
                    "movl        3*4(%[regs]), %%edx    \n\t"
                    "movl        4*4(%[regs]), %%esi    \n\t"
                    "movl        5*4(%[regs]), %%edi    \n\t"
                    "movl        6*4(%[regs]), %%ebp    \n\t"
                    // XXX: The iret frame below is pushed on the thread's
                    // stack; unclear whether protected mode offers a better
                    // way to do this.
                    // XXX: Assuming SS is always the same
                    "mov        11*4(%[regs]), %%ss     \n\t"
                    "movl        7*4(%[regs]), %%esp    \n\t"
                    "pushl       9*4(%[regs])           \n\t"   // EFLAGS
                    "pushl      10*4(%[regs])           \n\t"   // CS
                    "pushl       8*4(%[regs])           \n\t"   // EIP
                    "movl        0*4(%[regs]), %%eax    \n\t"   // restore EAX last: it was the base register
                    "iretl                              \n\t"
                    : /* No output */
                    :
                    [regs] "a" (regs),
                    [fs] "m" (regs->fs),
                    [gs] "m" (regs->gs)
                    );

    __asm volatile ("disp_resume_end:");
}
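
/*
 * Usage sketch (illustrative only, not part of this file): resuming the
 * enabled save area from C might look like this:
 *
 *   dispatcher_handle_t handle = disp_disable();
 *   arch_registers_state_t *enabled_area =
 *       dispatcher_get_enabled_save_area(handle);
 *   disp_resume(handle, enabled_area);   // does not return
 */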

/**
 * \brief Switch execution between two register states
 *
 * This function saves as much as necessary of the current register state
 * (which, when resumed, will return to the caller), and switches execution
 * by resuming the given register state. It may only be called while the
 * dispatcher is disabled.
 *
 * \param handle     Handle to the current dispatcher
 * \param from_state Location in which to save the current register state
 * \param to_state   Location from which to resume the new register state
 */
void disp_switch(dispatcher_handle_t handle, arch_registers_state_t *from_state,
                 arch_registers_state_t *to_state)
{
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    assert_disabled(disp->disabled);
    assert_disabled(disp->haswork);

    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);

    struct registers_x86_32 *from_regs = (struct registers_x86_32*)from_state;
    struct registers_x86_32 *to_regs   = (struct registers_x86_32*)to_state;
    assert_disabled(to_regs != NULL);

    // Save resume IP, stack and control registers, ...
    // then switch stacks to dispatcher, and call resume to switch context
    /*
     * NB: we shouldn't have to save EBP here, rather just list it as clobbered.
     * However, GCC without optimisations uses EBP as a frame pointer and won't
     * let us do that, so instead we manually save and restore it. This is
     * usually redundant (without optimisation, GCC saves and restores it; with
     * optimisation the register is used and thus GCC saves it anyway).
     */
    __asm volatile ("movl       %%ebp,  6*4(%[regs])    \n\t"
                    "movl       %%ebx,  1*4(%[regs])    \n\t"
                    "movl       %%esp,  7*4(%[regs])    \n\t"
                    // XXX: This is not PIC! - Need to fix
                    "lea        switch_resume, %%ecx    \n\t"
                    "movl       %%ecx,  8*4(%[regs])    \n\t"   // EIP
                    "pushfl                             \n\t"
                    "popl       9*4(%[regs])            \n\t"   // EFLAGS
                    "movl       %%cs, %%ecx             \n\t"
                    "movl       %%ecx, 10*4(%[regs])    \n\t"   // CS
                    "movl       %%ss, %%ecx             \n\t"
                    "movl       %%ecx, 11*4(%[regs])    \n\t"   // SS
                    "mov        %%fs, %[fs]             \n\t"
                    "mov        %%gs, %[gs]             \n\t"
                    "movl       %[stack], %%esp         \n\t"   // Switch stack
                    // Call disp_resume on new stack
                    "movl       %[to_regs], %%eax       \n\t"
                    "pushl      %%eax                   \n\t"
                    "movl       %[disp], %%eax          \n\t"
                    "pushl      %%eax                   \n\t"
                    "calll      disp_resume             \n\t"
                    :
                    : [regs] "a" (from_regs),
                      [fs] "m" (from_regs->fs),
                      [gs] "m" (from_regs->gs),
                      [stack] "d" ((lvaddr_t)&disp_gen->stack[DISPATCHER_STACK_WORDS]),
                      [disp] "m" (disp),
                      [to_regs] "m" (to_regs)
                    : "ecx", "esp", "esi", "edi"
                    );

    __asm volatile ("switch_resume:");
}
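
/*
 * A PIC-friendly alternative to the absolute "lea switch_resume, %%ecx"
 * above (sketch only, untested here) would compute the label address
 * relative to the current EIP:
 *
 *   "call       1f                              \n\t"
 *   "1: popl    %%ecx                           \n\t"  // ecx = address of 1:
 *   "addl       $(switch_resume - 1b), %%ecx    \n\t"  // ecx = &switch_resume
 */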

/**
 * \brief Save the current register state and optionally yield the CPU
 *
 * This function saves as much as necessary of the current register state
 * (which, when resumed, will return to the caller), and then either
 * re-enters the thread scheduler or yields the CPU.
 * It may only be called while the dispatcher is disabled.
 *
 * \param handle   Handle to the current dispatcher
 * \param state    Location in which to save the current register state
 * \param yield    If true, yield the CPU to the kernel; otherwise re-run the
 *                 thread scheduler
 * \param yield_to Endpoint capability for the dispatcher to which we want to
 *                 yield
 */
void disp_save(dispatcher_handle_t handle, arch_registers_state_t *state,
               bool yield, capaddr_t yield_to)
{
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    assert_disabled(disp->disabled);

    struct registers_x86_32 *regs = state;

    // Save resume IP, stack and control registers
    // See disp_switch above for details
    // XXX: Using the clobber list here to make the compiler save only
    // used registers. Be very careful when changing the code below
    // this asm block! If registers in the clobber list are
    // subsequently used, they won't be restored at save_resume.
    __asm volatile ("movl       %%ebp,  6*4(%[regs])    \n\t"
                    "movl       %%esp,  7*4(%[regs])    \n\t"
                    // XXX: This is not PIC! - Need to fix
                    "lea        save_resume, %%ecx      \n\t"
                    "movl       %%ecx,  8*4(%[regs])    \n\t"   // EIP
                    "pushfl                             \n\t"
                    "popl       9*4(%[regs])            \n\t"   // EFLAGS
                    "movl       %%cs, %%ecx             \n\t"
                    "movl       %%ecx, 10*4(%[regs])    \n\t"   // CS
                    "movl       %%ss, %%ecx             \n\t"
                    "movl       %%ecx, 11*4(%[regs])    \n\t"   // SS
                    "mov        %%fs, %[fs]             \n\t"
                    "mov        %%gs, %[gs]             \n\t"
                    :
                    : [regs] "a" (regs),
                      [fs] "m" (regs->fs),
                      [gs] "m" (regs->gs)
                    : "ecx", "edx", "esi", "edi"
                    );

    if (yield) {
        sys_yield(yield_to);
        // may fail if target doesn't exist; if so, just fall through
    }
    // this code won't run if the yield succeeded

    // enter thread scheduler again
    // this doesn't return, and will call disp_yield if there's nothing to do
    thread_run_disabled(handle);

    __asm volatile ("save_resume:");
}
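
/*
 * Usage sketch (illustrative only): save the enabled register state and try
 * to yield the CPU to another dispatcher; 'target' is a hypothetical endpoint
 * capability address. Execution continues at save_resume when this
 * dispatcher next runs:
 *
 *   disp_save(handle, dispatcher_get_enabled_save_area(handle), true, target);
 */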


/**
 * \brief Save the current register state and remove this domain's KCB from
 *        the current CPU.
 *
 * Unlike the functions above, this is called with the dispatcher enabled;
 * it disables the dispatcher itself.
 */
void disp_save_rm_kcb(void)
{
    dispatcher_handle_t handle = disp_disable();
    struct dispatcher_shared_generic *disp =
        get_dispatcher_shared_generic(handle);
    arch_registers_state_t *state =
        dispatcher_get_enabled_save_area(handle);
    assert_disabled(disp->disabled);

    struct registers_x86_32 *regs = state;

    // Save resume IP, stack and control registers
    // See disp_switch above for details
    // XXX: Using the clobber list here to make the compiler save only
    // used registers. Be very careful when changing the code below
    // this asm block! If registers in the clobber list are
    // subsequently used, they won't be restored at save_rm_kcb_resume.
    __asm volatile ("movl       %%ebp,  6*4(%[regs])    \n\t"
                    "movl       %%esp,  7*4(%[regs])    \n\t"
                    // XXX: This is not PIC! - Need to fix
                    "lea        save_rm_kcb_resume, %%ecx \n\t"
                    "movl       %%ecx,  8*4(%[regs])    \n\t"   // EIP
                    "pushfl                             \n\t"
                    "popl       9*4(%[regs])            \n\t"   // EFLAGS
                    "movl       %%cs, %%ecx             \n\t"
                    "movl       %%ecx, 10*4(%[regs])    \n\t"   // CS
                    "movl       %%ss, %%ecx             \n\t"
                    "movl       %%ecx, 11*4(%[regs])    \n\t"   // SS
                    "mov        %%fs, %[fs]             \n\t"
                    "mov        %%gs, %[gs]             \n\t"
                    :
                    : [regs] "a" (regs),
                      [fs] "m" (regs->fs),
                      [gs] "m" (regs->gs)
                    : "ecx", "edx", "esi", "edi"
                    );

    // remove this domain's KCB from the core, but don't halt the core
    sys_suspend(false);

    // this code won't run if the suspend succeeded

    // enter thread scheduler again
    // this doesn't return, and will call disp_yield if there's nothing to do
    thread_run_disabled(handle);

    __asm volatile ("save_rm_kcb_resume:");
}