/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(GD_GPL)
 */

#include <api/syscall.h>
#include <config.h>
#include <machine/io.h>
#include <kernel/boot.h>
#include <kernel/cspace.h>
#include <kernel/thread.h>
#include <model/statedata.h>
#include <object/cnode.h>
#include <arch/api/invocation.h>
#include <arch/kernel/apic.h>
#include <arch/kernel/vspace.h>
#include <linker.h>
#include <util.h>

/* set up the initial boot page directory */

/* The boot pd is referenced by code that runs before paging is enabled, so
 * place it in PHYS_BSS */
pde_t _boot_pd[BIT(PD_INDEX_BITS)] ALIGN(BIT(PAGE_BITS)) VISIBLE PHYS_BSS;

BOOT_CODE
pde_t *get_boot_pd(void)
{
    return _boot_pd;
}

/* This function is duplicated from pde_pde_large_new, generated by the
 * bitfield tool in structures_gen.h. It is required by functions that need to
 * call it before the MMU is turned on. Any changes made to the bitfield
 * generation need to be replicated here.
 */
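/* For reference, the bit layout of the ia32 4 MiB page-directory entry that
 * the function below encodes is:
 *   bits 31..22  page base address (4 MiB aligned)
 *   bit  12      PAT
 *   bits 11..9   AVL
 *   bit  8       global
 *   bit  7       page size (always 1 here, i.e. pde_pde_large)
 *   bit  6       dirty
 *   bit  5       accessed
 *   bit  4       cache disabled
 *   bit  3       write through
 *   bit  2       user/supervisor (super_user)
 *   bit  1       read/write
 *   bit  0       present
 */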
PHYS_CODE
static inline pde_t
pde_pde_large_new_phys(uint32_t page_base_address,
                       uint32_t pat, uint32_t avl, uint32_t global, uint32_t dirty,
                       uint32_t accessed, uint32_t cache_disabled, uint32_t write_through,
                       uint32_t super_user, uint32_t read_write, uint32_t present)
{
    pde_t pde;
    pde.words[0] = 0;

    pde.words[0] |= (page_base_address & 0xffc00000) >> 0;
    pde.words[0] |= (pat & 0x1) << 12;
    pde.words[0] |= (avl & 0x7) << 9;
    pde.words[0] |= (global & 0x1) << 8;
    pde.words[0] |= (pde_pde_large & 0x1) << 7;
    pde.words[0] |= (dirty & 0x1) << 6;
    pde.words[0] |= (accessed & 0x1) << 5;
    pde.words[0] |= (cache_disabled & 0x1) << 4;
    pde.words[0] |= (write_through & 0x1) << 3;
    pde.words[0] |= (super_user & 0x1) << 2;
    pde.words[0] |= (read_write & 0x1) << 1;
    pde.words[0] |= (present & 0x1) << 0;

    return pde;
}

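/* Fill the boot page directory with 4 MiB mappings: an identity mapping of
 * [0, PPTR_BASE) followed by a mapping of the kernel window, [PPTR_BASE, 4 GiB),
 * onto physical memory starting at PADDR_BASE. As an illustration only: with
 * the typical ia32 values PPTR_BASE = 0xe0000000 and PADDR_BASE = 0, this
 * would create 896 identity-mapped entries and a 128-entry (512 MiB) kernel
 * window. */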
PHYS_CODE VISIBLE void
init_boot_pd(void)
{
    word_t i;

    /* identity mapping from 0 up to PPTR_BASE (virtual address) */
    for (i = 0; i < (PPTR_BASE >> seL4_LargePageBits); i++) {
        *(_boot_pd + i) = pde_pde_large_new_phys(
                              i << seL4_LargePageBits, /* physical address */
                              0, /* pat            */
                              0, /* avl            */
                              1, /* global         */
                              0, /* dirty          */
                              0, /* accessed       */
                              0, /* cache_disabled */
                              0, /* write_through  */
                              0, /* super_user     */
                              1, /* read_write     */
                              1  /* present        */
                          );
    }

    /* map the kernel window: virtual addresses from PPTR_BASE up to the end of
     * the virtual address space onto physical memory starting at PADDR_BASE */
    for (i = 0; i < ((-PPTR_BASE) >> seL4_LargePageBits); i++) {
        *(_boot_pd + i + (PPTR_BASE >> seL4_LargePageBits)) = pde_pde_large_new_phys(
                                                                  (i << seL4_LargePageBits) + PADDR_BASE, /* physical address */
                                                                  0, /* pat            */
                                                                  0, /* avl            */
                                                                  1, /* global         */
                                                                  0, /* dirty          */
                                                                  0, /* accessed       */
                                                                  0, /* cache_disabled */
                                                                  0, /* write_through  */
                                                                  0, /* super_user     */
                                                                  1, /* read_write     */
                                                                  1  /* present        */
                                                              );
    }
}

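/* Install the initial thread's page table into its page directory: the PDE
 * covering the page table's recorded mapping address is pointed at the page
 * table's physical address, with user-accessible, writable permissions. */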
BOOT_CODE void
map_it_pt_cap(cap_t vspace_cap, cap_t pt_cap)
{
    pde_t* pd   = PDE_PTR(pptr_of_cap(vspace_cap));
    pte_t* pt   = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(pt_cap));
    vptr_t vptr = cap_page_table_cap_get_capPTMappedAddress(pt_cap);

    assert(cap_page_table_cap_get_capPTIsMapped(pt_cap));
    *(pd + (vptr >> seL4_LargePageBits)) = pde_pde_pt_new(
                                               pptr_to_paddr(pt), /* pt_base_address */
                                               0,                 /* avl             */
                                               0,                 /* accessed        */
                                               0,                 /* cache_disabled  */
                                               0,                 /* write_through   */
                                               1,                 /* super_user      */
                                               1,                 /* read_write      */
                                               1                  /* present         */
                                           );
    invalidateLocalPageStructureCache();
}

BOOT_CODE void
map_it_pd_cap(cap_t vspace_cap, cap_t pd_cap)
{
    /* this should never be called on ia32, as the page directory is the
     * vspace root and is not mapped into anything */
    fail("Should not be called");
}

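/* Map a frame for the initial thread: follow the PDE covering the frame's
 * recorded mapping address to its page table, then install a user-accessible,
 * writable 4 KiB mapping for the frame. */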
BOOT_CODE void
map_it_frame_cap(cap_t pd_cap, cap_t frame_cap)
{
    pte_t* pt;
    pde_t* pd    = PDE_PTR(pptr_of_cap(pd_cap));
    void*  frame = (void*)cap_frame_cap_get_capFBasePtr(frame_cap);
    vptr_t vptr  = cap_frame_cap_get_capFMappedAddress(frame_cap);

    assert(cap_frame_cap_get_capFMappedASID(frame_cap) != 0);
    pd += (vptr >> seL4_LargePageBits);
    pt = paddr_to_pptr(pde_pde_pt_ptr_get_pt_base_address(pd));
    *(pt + ((vptr & MASK(seL4_LargePageBits)) >> seL4_PageBits)) = pte_new(
                                                                       pptr_to_paddr(frame), /* page_base_address */
                                                                       0,                    /* avl               */
                                                                       0,                    /* global            */
                                                                       0,                    /* pat               */
                                                                       0,                    /* dirty             */
                                                                       0,                    /* accessed          */
                                                                       0,                    /* cache_disabled    */
                                                                       0,                    /* write_through     */
                                                                       1,                    /* super_user        */
                                                                       1,                    /* read_write        */
                                                                       1                     /* present           */
                                                                   );
    invalidateLocalPageStructureCache();
}

/* ==================== BOOT CODE FINISHES HERE ==================== */

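/* Return a pointer to the page-directory slot covering vptr. With 4 KiB pages
 * (PAGE_BITS == 12) and 1024-entry page tables (PT_INDEX_BITS == 10), the
 * shift selects the top 10 bits of the address. The lookup cannot fail on
 * ia32, so status is always EXCEPTION_NONE. */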
lookupPDSlot_ret_t lookupPDSlot(vspace_root_t *vspace, vptr_t vptr)
{
    lookupPDSlot_ret_t pdSlot;
    pde_t *pd = PDE_PTR(vspace);
    unsigned int pdIndex;

    pdIndex = vptr >> (PAGE_BITS + PT_INDEX_BITS);
    pdSlot.status = EXCEPTION_NONE;
    pdSlot.pdSlot = pd + pdIndex;
    return pdSlot;
}

bool_t CONST isVTableRoot(cap_t cap)
{
    return cap_get_capType(cap) == cap_page_directory_cap;
}

bool_t CONST isValidNativeRoot(cap_t cap)
{
    return isVTableRoot(cap) &&
           cap_page_directory_cap_get_capPDIsMapped(cap);
}

vspace_root_t *getValidNativeRoot(cap_t vspace_cap)
{
    if (isValidNativeRoot(vspace_cap)) {
        return PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(vspace_cap));
    }
    return NULL;
}

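/* Copy the kernel's global mappings (the PDEs from PPTR_BASE upwards) from
 * ia32KSGlobalPD into the given vspace root. */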
void copyGlobalMappings(vspace_root_t* new_vspace)
{
    word_t i;
    pde_t *newPD = (pde_t*)new_vspace;

    for (i = PPTR_BASE >> seL4_LargePageBits; i < BIT(PD_INDEX_BITS); i++) {
        newPD[i] = ia32KSGlobalPD[i];
    }
}

exception_t performASIDPoolInvocation(asid_t asid, asid_pool_t* poolPtr, cte_t* vspaceCapSlot)
{
    asid_map_t asid_map;
#ifdef CONFIG_VTX
    if (cap_get_capType(vspaceCapSlot->cap) == cap_ept_pml4_cap) {
        cap_ept_pml4_cap_ptr_set_capPML4MappedASID(&vspaceCapSlot->cap, asid);
        cap_ept_pml4_cap_ptr_set_capPML4IsMapped(&vspaceCapSlot->cap, 1);
        asid_map = asid_map_asid_map_ept_new(cap_ept_pml4_cap_get_capPML4BasePtr(vspaceCapSlot->cap));
    } else
#endif
    {
        assert(cap_get_capType(vspaceCapSlot->cap) == cap_page_directory_cap);
        cap_page_directory_cap_ptr_set_capPDMappedASID(&vspaceCapSlot->cap, asid);
        cap_page_directory_cap_ptr_set_capPDIsMapped(&vspaceCapSlot->cap, 1);
        asid_map = asid_map_asid_map_vspace_new(cap_page_directory_cap_get_capPDBasePtr(vspaceCapSlot->cap));
    }
    poolPtr->array[asid & MASK(asidLowBits)] = asid_map;

    return EXCEPTION_NONE;
}

void unmapPageDirectory(asid_t asid, vptr_t vaddr, pde_t *pd)
{
    deleteASID(asid, pd);
}

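/* Report the accessed and dirty bits of the mapping covering the queried
 * address in message registers 0 and 1. A present 4 MiB (large page) PDE
 * takes precedence; otherwise the 4 KiB PTE located by the caller is used. */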
static exception_t
performIA32PageDirectoryGetStatusBits(lookupPTSlot_ret_t ptSlot, lookupPDSlot_ret_t pdSlot, word_t *buffer)
{
    if (pdSlot.status == EXCEPTION_NONE &&
            ((pde_ptr_get_page_size(pdSlot.pdSlot) == pde_pde_large) &&
             pde_pde_large_ptr_get_present(pdSlot.pdSlot))) {

        setMR(NODE_STATE(ksCurThread), buffer, 0, pde_pde_large_ptr_get_accessed(pdSlot.pdSlot));
        setMR(NODE_STATE(ksCurThread), buffer, 1, pde_pde_large_ptr_get_dirty(pdSlot.pdSlot));
        return EXCEPTION_NONE;
    }

    assert(ptSlot.status == EXCEPTION_NONE && pte_ptr_get_present(ptSlot.ptSlot));

    setMR(NODE_STATE(ksCurThread), buffer, 0, pte_ptr_get_accessed(ptSlot.ptSlot));
    setMR(NODE_STATE(ksCurThread), buffer, 1, pte_ptr_get_dirty(ptSlot.ptSlot));

    return EXCEPTION_NONE;
}

exception_t
decodeIA32PageDirectoryInvocation(
    word_t invLabel,
    word_t length,
    cte_t* cte,
    cap_t cap,
    extra_caps_t excaps,
    word_t* buffer
)
{
    switch (invLabel) {
    case X86PageDirectoryGetStatusBits: {
        word_t vaddr;
        vspace_root_t *vspace;
        lookupPTSlot_ret_t ptSlot;
        lookupPDSlot_ret_t pdSlot;

        if (length < 1) {
            userError("X86PageDirectoryGetStatusBits: Truncated message");
            current_syscall_error.type = seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        vaddr = getSyscallArg(0, buffer);

        if (vaddr > PPTR_USER_TOP) {
            userError("X86PageDirectoryGetStatusBits: address inside kernel window");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 0;

            return EXCEPTION_SYSCALL_ERROR;
        }

        vspace = (vspace_root_t*)pptr_of_cap(cap);

        /* perform both lookups */
        pdSlot = lookupPDSlot(vspace, vaddr);
        ptSlot = lookupPTSlot(vspace, vaddr);

        /* need either a valid PD mapping or PT mapping */
        if ((pdSlot.status != EXCEPTION_NONE ||
                ((pde_ptr_get_page_size(pdSlot.pdSlot) != pde_pde_large) ||
                 !pde_pde_large_ptr_get_present(pdSlot.pdSlot))) &&
                (ptSlot.status != EXCEPTION_NONE ||
                 (!pte_ptr_get_present(ptSlot.ptSlot)))) {
            userError("X86PageDirectoryGetStatusBits: No mapping found");

            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performIA32PageDirectoryGetStatusBits(ptSlot, pdSlot, buffer);
    }

    default:
        userError("decodeIA32PageDirectoryInvocation: illegal operation");
        current_syscall_error.type = seL4_IllegalOperation;

        return EXCEPTION_SYSCALL_ERROR;
    }
}

#ifdef CONFIG_PRINTING
typedef struct readWordFromVSpace_ret {
    exception_t status;
    word_t value;
} readWordFromVSpace_ret_t;

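/* Translate vaddr through the given vspace, trying a present 4 MiB mapping
 * first and falling back to a 4 KiB mapping, then read a single word through
 * the kernel's physical memory window. Returns EXCEPTION_LOOKUP_FAULT if no
 * present mapping covers the address. */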
static readWordFromVSpace_ret_t
readWordFromVSpace(vspace_root_t *vspace, word_t vaddr)
{
    readWordFromVSpace_ret_t ret;
    lookupPTSlot_ret_t ptSlot;
    lookupPDSlot_ret_t pdSlot;
    paddr_t paddr;
    word_t offset;
    pptr_t kernel_vaddr;
    word_t *value;

    pdSlot = lookupPDSlot(vspace, vaddr);
    if (pdSlot.status == EXCEPTION_NONE &&
            ((pde_ptr_get_page_size(pdSlot.pdSlot) == pde_pde_large) &&
             pde_pde_large_ptr_get_present(pdSlot.pdSlot))) {

        paddr = pde_pde_large_ptr_get_page_base_address(pdSlot.pdSlot);
        offset = vaddr & MASK(seL4_LargePageBits);
    } else {
        ptSlot = lookupPTSlot(vspace, vaddr);
        if (ptSlot.status == EXCEPTION_NONE && pte_ptr_get_present(ptSlot.ptSlot)) {
            paddr = pte_ptr_get_page_base_address(ptSlot.ptSlot);
            offset = vaddr & MASK(seL4_PageBits);
        } else {
            ret.status = EXCEPTION_LOOKUP_FAULT;
            return ret;
        }
    }

    kernel_vaddr = (word_t)paddr_to_pptr(paddr);
    value = (word_t*)(kernel_vaddr + offset);
    ret.status = EXCEPTION_NONE;
    ret.value = *value;
    return ret;
}

void
Arch_userStackTrace(tcb_t *tptr)
{
    cap_t threadRoot;
    vspace_root_t *vspace_root;
    word_t sp;
    int i;

    threadRoot = TCB_PTR_CTE_PTR(tptr, tcbVTable)->cap;

    /* lookup the PD */
    if (cap_get_capType(threadRoot) != cap_page_directory_cap) {
        printf("Invalid vspace\n");
        return;
    }

    vspace_root = (vspace_root_t*)pptr_of_cap(threadRoot);

    sp = getRegister(tptr, ESP);
    /* check for alignment so we don't have to worry about accessing
     * words that might be on two different pages */
    if (!IS_ALIGNED(sp, seL4_WordSizeBits)) {
        printf("ESP not aligned\n");
        return;
    }

    for (i = 0; i < CONFIG_USER_STACK_TRACE_LENGTH; i++) {
        word_t address = sp + (i * sizeof(word_t));
        readWordFromVSpace_ret_t result;
        result = readWordFromVSpace(vspace_root, address);
        if (result.status == EXCEPTION_NONE) {
            printf("0x%lx: 0x%lx\n", (long)address, (long)result.value);
        } else {
            printf("0x%lx: INVALID\n", (long)address);
        }
    }
}
#endif