1/*
2 * Copyright 2018, Data61
3 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
4 * ABN 41 687 119 230.
5 *
6 * Copyright 2018, DornerWorks
7 *
8 * This software may be distributed and modified according to the terms of
9 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
10 * See "LICENSE_GPLv2.txt" for details.
11 *
12 * @TAG(DATA61_DORNERWORKS_GPL)
13 */
14
15/*
16 *
17 * Copyright 2016, 2017 Hesham Almatary, Data61/CSIRO <hesham.almatary@data61.csiro.au>
18 * Copyright 2015, 2016 Hesham Almatary <heshamelmatary@gmail.com>
19 */
20
21#include <types.h>
22#include <benchmark/benchmark.h>
23#include <api/failures.h>
24#include <api/syscall.h>
25#include <kernel/boot.h>
26#include <kernel/cspace.h>
27#include <kernel/thread.h>
28#include <object/tcb.h>
29#include <machine/io.h>
30#include <model/preemption.h>
31#include <model/statedata.h>
32#include <object/cnode.h>
33#include <object/untyped.h>
34#include <arch/api/invocation.h>
35#include <arch/kernel/vspace.h>
36#include <linker.h>
37#include <plat/machine/devices.h>
38#include <arch/machine.h>
39#include <plat/machine/hardware.h>
40#include <kernel/stack.h>
41
/* Result of resolving a virtual address to the frame mapped at it.
 * NOTE(review): no function in this file uses this type or the
 * performPageGetAddress declaration below - confirm whether it is
 * referenced elsewhere or is dead code. */
struct resolve_ret {
    paddr_t frameBase;        /* physical base address of the frame */
    vm_page_size_t frameSize; /* size class of the frame */
    bool_t valid;             /* true iff the resolution succeeded */
};
typedef struct resolve_ret resolve_ret_t;
48
49static exception_t performPageGetAddress(void *vbase_ptr);
50
/* Returns 1 iff the given VM rights permit writing, i.e. anything
 * other than read-only. */
static word_t CONST
RISCVGetWriteFromVMRights(vm_rights_t vm_rights)
{
    return vm_rights != VMReadOnly;
}
56
57static word_t RISCVGetUserFromVMRights(vm_rights_t vm_rights)
58{
59    return vm_rights != VMKernelOnly;
60}
61
62static inline word_t CONST
63RISCVGetReadFromVMRights(vm_rights_t vm_rights)
64{
65    return vm_rights != VMWriteOnly;
66}
67
68static inline bool_t isPTEPageTable(pte_t *pte)
69{
70    return pte_ptr_get_valid(pte) &&
71           !(pte_ptr_get_read(pte) || pte_ptr_get_write(pte) || pte_ptr_get_execute(pte));
72}
73
74/** Helper function meant only to be used for mapping the kernel
75 * window.
76 *
77 * Maps all pages with full RWX and supervisor perms by default.
78 */
79static pte_t pte_next(word_t phys_addr, bool_t is_leaf)
80{
81    word_t ppn = (word_t)(phys_addr >> 12);
82
83    uint8_t read = is_leaf ? 1 : 0;
84    uint8_t write = read;
85    uint8_t exec = read;
86
87    return pte_new(ppn,
88                   0,     /* sw */
89                   1,     /* dirty */
90                   1,     /* accessed */
91                   1,     /* global */
92                   0,     /* user */
93                   exec,  /* execute */
94                   write, /* write */
95                   read,  /* read */
96                   1      /* valid */
97                  );
98}
99
100/* ==================== BOOT CODE STARTS HERE ==================== */
101
/* Populate the kernel's root page table with the kernel window:
 * a large linear mapping of physical memory starting at PPTR_BASE,
 * followed by the mapping of the kernel image itself at KERNEL_BASE.
 * Uses giga/mega-page leaf entries only; no 4KiB mappings. */
BOOT_CODE VISIBLE void
map_kernel_window(void)
{
    /* mapping of kernelBase (virtual address) to kernel's physBase  */
    assert(CONFIG_PT_LEVELS > 1 && CONFIG_PT_LEVELS <= 4);

    /* kernel window starts at PPTR_BASE */
    word_t pptr = PPTR_BASE;

    /* first we map in memory from PADDR_BASE */
    word_t paddr = PADDR_BASE;
    while (pptr < KERNEL_BASE) {
        /* both addresses must be aligned to the level-1 page size,
         * since we install level-1 leaf (giga-page) entries here */
        assert(IS_ALIGNED(pptr, RISCV_GET_LVL_PGSIZE_BITS(1)));
        assert(IS_ALIGNED(paddr, RISCV_GET_LVL_PGSIZE_BITS(1)));

        kernel_root_pageTable[RISCV_GET_PT_INDEX(pptr, 1)] = pte_next(paddr, true);

        pptr += RISCV_GET_LVL_PGSIZE(1);
        paddr += RISCV_GET_LVL_PGSIZE(1);
    }
    /* now we should be mapping the 1GiB kernel base, starting again from PADDR_LOAD */
    assert(pptr == KERNEL_BASE);
    paddr = PADDR_LOAD;

#ifndef RISCV_KERNEL_WINDOW_LEVEL2_PT
    /* single level-1 leaf entry covers the whole kernel image region */
    kernel_root_pageTable[RISCV_GET_PT_INDEX(pptr, 1)] = pte_next(paddr, true);
    pptr += RISCV_GET_LVL_PGSIZE(1);
    paddr += RISCV_GET_LVL_PGSIZE(1);
#else
    /* kernel image mapped with level-2 (mega-page) granularity through
     * a dedicated second-level table */
    word_t index = 0;
    kernel_root_pageTable[RISCV_GET_PT_INDEX(pptr, 1)] =
        pte_next(kpptr_to_paddr(kernel_image_level2_pt), false);
    while (pptr < KERNEL_BASE + RISCV_GET_LVL_PGSIZE(1)) {
        kernel_image_level2_pt[index] = pte_next(paddr, true);
        index++;
        pptr += RISCV_GET_LVL_PGSIZE(2);
        paddr += RISCV_GET_LVL_PGSIZE(2);
    }
#endif

    /* There should be 1GiB free where we will put device mapping some day */
    assert(pptr == UINTPTR_MAX - RISCV_GET_LVL_PGSIZE(1) + 1);
}
145
/* Install the page table referred to by pt_cap into the initial
 * thread's vspace (vspace_cap) at the cap's recorded mapped address.
 * The new entry has R=W=X=0, marking it as a table pointer rather
 * than a leaf. Assumes the parent table levels already exist so that
 * lookupPTSlot lands on the correct slot. */
BOOT_CODE void
map_it_pt_cap(cap_t vspace_cap, cap_t pt_cap)
{
    lookupPTSlot_ret_t pt_ret;
    pte_t* targetSlot;
    vptr_t vptr = cap_page_table_cap_get_capPTMappedAddress(pt_cap);
    pte_t* lvl1pt = PTE_PTR(pptr_of_cap(vspace_cap));

    /* pt to be mapped */
    pte_t* pt   = PTE_PTR(pptr_of_cap(pt_cap));

    /* Get PT slot to install the address in */
    pt_ret = lookupPTSlot(lvl1pt, vptr);

    targetSlot = pt_ret.ptSlot;

    *targetSlot = pte_new(
                      (addrFromPPtr(pt) >> seL4_PageBits),
                      0, /* sw */
                      1, /* dirty */
                      1, /* accessed */
                      0,  /* global */
                      0,  /* user */
                      0,  /* execute */
                      0,  /* write */
                      0,  /* read */
                      1 /* valid */
                  );
    /* flush the TLB so the new table entry is visible */
    sfence();
}
176
/* Map the 4KiB frame referred to by frame_cap into the initial
 * thread's vspace at the cap's recorded mapped address, with full
 * user RWX permissions. The page-table chain covering the address
 * must already be in place (asserted via ptBitsLeft). */
BOOT_CODE void
map_it_frame_cap(cap_t vspace_cap, cap_t frame_cap)
{
    pte_t* lvl1pt   = PTE_PTR(pptr_of_cap(vspace_cap));
    pte_t* frame_pptr   = PTE_PTR(pptr_of_cap(frame_cap));
    vptr_t frame_vptr = cap_frame_cap_get_capFMappedAddress(frame_cap);

    /* We deal with a frame as 4KiB */
    lookupPTSlot_ret_t lu_ret = lookupPTSlot(lvl1pt, frame_vptr);
    assert(lu_ret.ptBitsLeft == seL4_PageBits);

    pte_t* targetSlot = lu_ret.ptSlot;

    *targetSlot = pte_new(
                      (pptr_to_paddr(frame_pptr) >> seL4_PageBits),
                      0, /* sw */
                      1, /* dirty */
                      1, /* accessed */
                      0,  /* global */
                      1,  /* user */
                      1,  /* execute */
                      1,  /* write */
                      1,  /* read */
                      1   /* valid */
                  );
    /* flush the TLB so the new mapping is visible */
    sfence();
}
204
/* Create a frame cap that is not mapped into any address space
 * (ASID invalid, mapped address 0).
 * NOTE(review): the use_large parameter is ignored - capFSize is
 * hard-wired to 0 (4KiB). Confirm large initial-thread frames are
 * intentionally unsupported on this platform. */
BOOT_CODE cap_t
create_unmapped_it_frame_cap(pptr_t pptr, bool_t use_large)
{
    cap_t cap = cap_frame_cap_new(
                    asidInvalid,                     /* capFMappedASID       */
                    pptr,                            /* capFBasePtr          */
                    0,                               /* capFSize             */
                    0,                               /* capFVMRights         */
                    0,
                    0                                /* capFMappedAddress    */
                );

    return cap;
}
219
/* Create a page table cap for the initial thread and immediately map
 * the table into the initial thread's vspace at vptr. Returns the
 * (mapped) cap. */
static BOOT_CODE cap_t
create_it_pt_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_table_cap_new(
              asid,   /* capPTMappedASID      */
              pptr,   /* capPTBasePtr         */
              1,      /* capPTIsMapped        */
              vptr    /* capPTMappedAddress   */
          );

    map_it_pt_cap(vspace_cap, cap);
    return cap;
}
235
236/* Create an address space for the initial thread.
237 * This includes page directory and page tables */
/* Create an address space for the initial thread.
 * This includes page directory and page tables.
 *
 * Allocates the top-level page table, copies in the kernel window,
 * then allocates and maps every intermediate page table needed to
 * cover it_v_reg with 4KiB pages. The caps for the intermediate
 * tables are provided to the root CNode and recorded in the boot
 * info's userImagePaging slot region. Returns the top-level page
 * table cap, or a null cap if any allocation fails. */
BOOT_CODE cap_t
create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t      lvl1pt_cap;
    vptr_t     pt_vptr;
    pptr_t     pt_pptr;
    pptr_t lvl1pt_pptr;

    /* create 1st level page table obj and cap */
    lvl1pt_pptr = alloc_region(PT_SIZE_BITS);

    if (!lvl1pt_pptr) {
        return cap_null_cap_new();
    }
    memzero(PTE_PTR(lvl1pt_pptr), 1 << PT_SIZE_BITS);

    /* kernel must be mapped into every vspace */
    copyGlobalMappings(PTE_PTR(lvl1pt_pptr));

    lvl1pt_cap =
        cap_page_table_cap_new(
            IT_ASID,               /* capPTMappedASID    */
            (word_t) lvl1pt_pptr,  /* capPTBasePtr       */
            1,                     /* capPTIsMapped      */
            (word_t) lvl1pt_pptr   /* capPTMappedAddress */
        );

    seL4_SlotPos slot_pos_before = ndks_boot.slot_pos_cur;
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapInitThreadVSpace), lvl1pt_cap);

    /* create all n level PT objs and caps necessary to cover userland image in 4KiB pages */

    for (int i = 2; i <= CONFIG_PT_LEVELS; i++) {

        /* one table at level i per (i-1)-level page-size chunk of the
         * user image region */
        for (pt_vptr = ROUND_DOWN(it_v_reg.start, RISCV_GET_LVL_PGSIZE_BITS(i - 1));
                pt_vptr < it_v_reg.end;
                pt_vptr += RISCV_GET_LVL_PGSIZE(i - 1)) {
            pt_pptr = alloc_region(PT_SIZE_BITS);

            if (!pt_pptr) {
                return cap_null_cap_new();
            }

            memzero(PTE_PTR(pt_pptr), 1 << PT_SIZE_BITS);
            if (!provide_cap(root_cnode_cap,
                             create_it_pt_cap(lvl1pt_cap, pt_pptr, pt_vptr, IT_ASID))
               ) {
                return cap_null_cap_new();
            }
        }

    }

    /* record which root-CNode slots received paging caps */
    seL4_SlotPos slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->userImagePaging = (seL4_SlotRegion) {
        slot_pos_before, slot_pos_after
    };

    return lvl1pt_cap;
}
297
/* Switch the MMU to the kernel's root page table (ASID 0). */
BOOT_CODE void
activate_kernel_vspace(void)
{
    setVSpaceRoot(kpptr_to_paddr(&kernel_root_pageTable), 0);
}
303
/* Register the initial thread's vspace in its ASID pool, and install
 * that pool in the global ASID table under the initial ASID's high
 * bits. */
BOOT_CODE void
write_it_asid_pool(cap_t it_ap_cap, cap_t it_lvl1pt_cap)
{
    asid_pool_t* ap = ASID_POOL_PTR(pptr_of_cap(it_ap_cap));
    ap->array[IT_ASID] = PTE_PTR(pptr_of_cap(it_lvl1pt_cap));
    riscvKSASIDTable[IT_ASID >> asidLowBits] = ap;
}
311
312/* ==================== BOOT CODE FINISHES HERE ==================== */
313
314static findVSpaceForASID_ret_t findVSpaceForASID(asid_t asid)
315{
316    findVSpaceForASID_ret_t ret;
317    asid_pool_t*        poolPtr;
318    pte_t*     vspace_root;
319
320    poolPtr = riscvKSASIDTable[asid >> asidLowBits];
321    if (!poolPtr) {
322        current_lookup_fault = lookup_fault_invalid_root_new();
323
324        ret.vspace_root = NULL;
325        ret.status = EXCEPTION_LOOKUP_FAULT;
326        return ret;
327    }
328
329    vspace_root = poolPtr->array[asid & MASK(asidLowBits)];
330    if (!vspace_root) {
331        current_lookup_fault = lookup_fault_invalid_root_new();
332
333        ret.vspace_root = NULL;
334        ret.status = EXCEPTION_LOOKUP_FAULT;
335        return ret;
336    }
337
338    ret.vspace_root = vspace_root;
339    ret.status = EXCEPTION_NONE;
340    return ret;
341}
342
343void
344copyGlobalMappings(pte_t *newLvl1pt)
345{
346    unsigned int i;
347    pte_t *global_kernel_vspace = kernel_root_pageTable;
348
349    for (i = RISCV_GET_PT_INDEX(PPTR_BASE, 1); i < BIT(PT_INDEX_BITS); i++) {
350        newLvl1pt[i] = global_kernel_vspace[i];
351    }
352}
353
/* Resolve a thread's IPC buffer to a kernel-virtual pointer.
 * Returns NULL if the buffer cap is not a frame cap, is a device
 * frame, or does not carry sufficient rights (receivers need write;
 * senders may use a read-only buffer). */
word_t * PURE
lookupIPCBuffer(bool_t isReceiver, tcb_t *thread)
{
    word_t w_bufferPtr;
    cap_t bufferCap;
    vm_rights_t vm_rights;

    w_bufferPtr = thread->tcbIPCBuffer;
    bufferCap = TCB_PTR_CTE_PTR(thread, tcbBuffer)->cap;

    if (unlikely(cap_get_capType(bufferCap) != cap_frame_cap)) {
        return NULL;
    }
    if (unlikely(cap_frame_cap_get_capFIsDevice(bufferCap))) {
        return NULL;
    }

    vm_rights = cap_frame_cap_get_capFVMRights(bufferCap);
    if (likely(vm_rights == VMReadWrite ||
               (!isReceiver && vm_rights == VMReadOnly))) {
        word_t basePtr;
        unsigned int pageBits;

        basePtr = cap_frame_cap_get_capFBasePtr(bufferCap);
        pageBits = pageBitsForSize(cap_frame_cap_get_capFSize(bufferCap));
        /* offset of the buffer within its frame */
        return (word_t *)(basePtr + (w_bufferPtr & MASK(pageBits)));
    } else {
        return NULL;
    }
}
384
/* Convert the PPN stored in a hardware PTE into the kernel-virtual
 * address of the object it references.
 * NOTE(review): shifts by seL4_PageTableBits rather than seL4_PageBits;
 * this relies on the two being equal (4KiB) - confirm. */
static inline pte_t *getPPtrFromHWPTE(pte_t *pte)
{
    return PTE_PTR(ptrFromPAddr(pte_ptr_get_ppn(pte) << seL4_PageTableBits));
}
389
/* Walk the page-table tree rooted at lvl1pt for vptr. Returns the
 * deepest slot reached (a mapped frame or an empty slot) and the
 * number of address bits still untranslated at that depth, which is
 * the size of frame that fits there. Never returns NULL in ptSlot. */
lookupPTSlot_ret_t
lookupPTSlot(pte_t *lvl1pt, vptr_t vptr)
{
    lookupPTSlot_ret_t ret;
    /* this is how many bits we potentially have left to decode. Initially we have the
     * full address space to decode, and every time we walk this will be reduced. The
     * final value of this after the walk is the size of the frame that can be inserted,
     * or already exists, in ret.ptSlot */
    ret.ptBitsLeft = PT_INDEX_BITS * CONFIG_PT_LEVELS + seL4_PageBits;
    ret.ptSlot = NULL;

    pte_t* pt = lvl1pt;
    do {
        ret.ptBitsLeft -= PT_INDEX_BITS;
        /* index of this level's slot is the next PT_INDEX_BITS of vptr */
        word_t index = (vptr >> ret.ptBitsLeft) & MASK(PT_INDEX_BITS);
        ret.ptSlot = pt + index;
        pt = getPPtrFromHWPTE(ret.ptSlot);
        /* stop when we find something that isn't a page table - either a mapped frame or
         * an empty slot */
    } while (isPTEPageTable(ret.ptSlot));

    return ret;
}
413
/* Translate a RISC-V VM fault trap into a seL4 VM fault to be
 * delivered to the faulting thread. Page faults and access faults of
 * the same class are reported under the access-fault code. */
exception_t
handleVMFault(tcb_t *thread, vm_fault_type_t vm_faultType)
{
    uint64_t addr;

    /* faulting address as reported by the hardware */
    addr = read_sbadaddr();

    switch (vm_faultType) {
    case RISCVLoadPageFault:
    case RISCVLoadAccessFault:
        current_fault = seL4_Fault_VMFault_new(addr, RISCVLoadAccessFault, false);
        return EXCEPTION_FAULT;
    case RISCVStorePageFault:
    case RISCVStoreAccessFault:
        current_fault = seL4_Fault_VMFault_new(addr, RISCVStoreAccessFault, false);
        return EXCEPTION_FAULT;
    case RISCVInstructionPageFault:
    case RISCVInstructionAccessFault:
        /* restart at the faulting instruction; the last argument marks
         * this as an instruction (prefetch) fault */
        setRegister(thread, NEXTPC, getRegister(thread, SEPC));
        current_fault = seL4_Fault_VMFault_new(addr, RISCVInstructionAccessFault, true);
        return EXCEPTION_FAULT;

    default:
        fail("Invalid VM fault type");
    }
}
440
441void deleteASIDPool(asid_t asid_base, asid_pool_t* pool)
442{
443    /* Haskell error: "ASID pool's base must be aligned" */
444    assert(IS_ALIGNED(asid_base, asidLowBits));
445
446    if (riscvKSASIDTable[asid_base >> asidLowBits] == pool) {
447        riscvKSASIDTable[asid_base >> asidLowBits] = NULL;
448        setVMRoot(NODE_STATE(ksCurThread));
449    }
450}
451
/* Retype an untyped frame into a new ASID pool: exhaust the parent
 * untyped, zero the frame, derive the pool cap into the destination
 * slot, and register the pool in the global ASID table. */
static exception_t performASIDControlInvocation(void* frame, cte_t* slot, cte_t* parent, asid_t asid_base)
{
    /* mark the parent untyped as fully consumed so nothing else can be
     * retyped from it */
    cap_untyped_cap_ptr_set_capFreeIndex(&(parent->cap),
                                         MAX_FREE_INDEX(cap_untyped_cap_get_capBlockSize(parent->cap)));

    memzero(frame, 1 << pageBitsForSize(RISCV_4K_Page));
    cteInsert(
        cap_asid_pool_cap_new(
            asid_base,          /* capASIDBase  */
            WORD_REF(frame)     /* capASIDPool  */
        ),
        parent,
        slot
    );
    /* Haskell error: "ASID pool's base must be aligned" */
    assert((asid_base & MASK(asidLowBits)) == 0);
    riscvKSASIDTable[asid_base >> asidLowBits] = (asid_pool_t*)frame;

    return EXCEPTION_NONE;
}
472
/* Assign an ASID to the vspace whose cap sits in vspaceCapSlot:
 * mark the cap as mapped with the ASID, copy the kernel window into
 * the vspace, and record it in the pool. */
static exception_t performASIDPoolInvocation(asid_t asid, asid_pool_t* poolPtr, cte_t* vspaceCapSlot)
{
    pte_t *regionBase = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(vspaceCapSlot->cap));
    cap_t cap = vspaceCapSlot->cap;
    cap = cap_page_table_cap_set_capPTMappedASID(cap, asid);
    cap = cap_page_table_cap_set_capPTIsMapped(cap, 1);
    vspaceCapSlot->cap = cap;

    copyGlobalMappings(regionBase);

    poolPtr->array[asid & MASK(asidLowBits)] = regionBase;

    return EXCEPTION_NONE;
}
487
488void deleteASID(asid_t asid, pte_t *vspace)
489{
490    asid_pool_t* poolPtr;
491
492    poolPtr = riscvKSASIDTable[asid >> asidLowBits];
493    if (poolPtr != NULL && poolPtr->array[asid & MASK(asidLowBits)] == vspace) {
494        hwASIDFlush(asid);
495        poolPtr->array[asid & MASK(asidLowBits)] = NULL;
496        setVMRoot(NODE_STATE(ksCurThread));
497    }
498}
499
/* Remove the page-table entry that points at target_pt from the
 * vspace registered for asid. Walks the tree manually (rather than
 * via lookupPTSlot) so it can stop at the level that references
 * target_pt. Silently returns if the ASID, the intermediate tables,
 * or target_pt itself cannot be found. */
void
unmapPageTable(asid_t asid, vptr_t vptr, pte_t* target_pt)
{
    findVSpaceForASID_ret_t find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        /* nothing to do */
        return;
    }

    /* start at the root's slot for vptr */
    pte_t *ptSlot = find_ret.vspace_root + RISCV_GET_PT_INDEX(vptr, 1);
    pte_t *pt = find_ret.vspace_root;

    for (int i = 2; i <= CONFIG_PT_LEVELS; i++) {
        if (unlikely(!isPTEPageTable(ptSlot))) {
            /* couldn't find it */
            return;
        }
        pt = getPPtrFromHWPTE(ptSlot);
        if (pt == target_pt) {
            /* Found the PT Slot */
            ptSlot = pt + RISCV_GET_PT_INDEX(vptr, i - 1);
            break;
        }
        ptSlot = pt + RISCV_GET_PT_INDEX(vptr, i);
    }

    if (pt != target_pt) {
        /* didn't find it */
        return;
    }

    /* NOTE(review): after the break above, ptSlot points INTO target_pt
     * itself; this zeroed entry clears the slot within the table being
     * unmapped - confirm against the Haskell spec that the parent entry
     * is cleared elsewhere. */
    *ptSlot = pte_new(
                  0,  /* phy_address */
                  0,  /* sw */
                  0,  /* dirty */
                  0,  /* accessed */
                  0,  /* global */
                  0,  /* user */
                  0,  /* execute */
                  0,  /* write */
                  0,  /* read */
                  0  /* valid */
              );
    sfence();
}
545
546static pte_t pte_pte_invalid_new(void)
547{
548    return (pte_t) {
549        0
550    };
551}
552
/* Remove the mapping of the frame at pptr from the vspace registered
 * for asid. Silently returns if the ASID lookup fails, the walk does
 * not land at a slot of the expected size, or the slot does not map
 * this particular frame. */
void
unmapPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, pptr_t pptr)
{
    findVSpaceForASID_ret_t find_ret;
    lookupPTSlot_ret_t  lu_ret;

    find_ret = findVSpaceForASID(asid);
    if (find_ret.status != EXCEPTION_NONE) {
        return;
    }

    lu_ret = lookupPTSlot(find_ret.vspace_root, vptr);
    /* the walk must bottom out exactly at this frame's size */
    if (unlikely(lu_ret.ptBitsLeft != pageBitsForSize(page_size))) {
        return;
    }
    /* the slot must hold a leaf mapping of exactly this frame */
    if (!pte_ptr_get_valid(lu_ret.ptSlot) || isPTEPageTable(lu_ret.ptSlot)
            || (pte_ptr_get_ppn(lu_ret.ptSlot) << seL4_PageBits) != pptr_to_paddr((void*)pptr)) {
        return;
    }

    lu_ret.ptSlot[0] = pte_pte_invalid_new();
    sfence();
}
576
/* Point the MMU at the vspace of the given thread. Falls back to the
 * kernel's root page table (ASID 0) if the thread has no valid
 * page-table cap or its ASID no longer resolves to that table. */
void
setVMRoot(tcb_t *tcb)
{
    cap_t threadRoot;
    asid_t asid;
    pte_t *lvl1pt;
    findVSpaceForASID_ret_t  find_ret;

    threadRoot = TCB_PTR_CTE_PTR(tcb, tcbVTable)->cap;

    if (cap_get_capType(threadRoot) != cap_page_table_cap) {
        setVSpaceRoot(kpptr_to_paddr(&kernel_root_pageTable), 0);
        return;
    }

    lvl1pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(threadRoot));

    asid = cap_page_table_cap_get_capPTMappedASID(threadRoot);
    find_ret = findVSpaceForASID(asid);
    /* the ASID must still resolve to this exact top-level table */
    if (unlikely(find_ret.status != EXCEPTION_NONE || find_ret.vspace_root != lvl1pt)) {
        setVSpaceRoot(kpptr_to_paddr(&kernel_root_pageTable), 0);
        return;
    }

    setVSpaceRoot(addrFromPPtr(lvl1pt), asid);
}
603
604bool_t CONST
605isValidVTableRoot(cap_t cap)
606{
607    return (cap_get_capType(cap) == cap_page_table_cap &&
608            cap_page_table_cap_get_capPTMappedASID(cap) != asidInvalid);
609}
610
/* Validate a prospective IPC buffer: must be a non-device frame cap
 * and the buffer address must be aligned to the IPC buffer size.
 * Returns a syscall error on violation. */
exception_t
checkValidIPCBuffer(vptr_t vptr, cap_t cap)
{
    if (unlikely(cap_get_capType(cap) != cap_frame_cap)) {
        userError("Requested IPC Buffer is not a frame cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(cap_frame_cap_get_capFIsDevice(cap))) {
        userError("Specifying a device frame as an IPC buffer is not permitted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(!IS_ALIGNED(vptr, seL4_IPCBufferSizeBits))) {
        /* NOTE(review): the (int) cast truncates vptr on RV64, so the
         * printed address may be wrong; diagnostic-only, no functional
         * impact. */
        userError("Requested IPC Buffer location 0x%x is not aligned.",
                  (int)vptr);
        current_syscall_error.type = seL4_AlignmentError;
        return EXCEPTION_SYSCALL_ERROR;
    }

    return EXCEPTION_NONE;
}
635
636vm_rights_t CONST
637maskVMRights(vm_rights_t vm_rights, seL4_CapRights_t cap_rights_mask)
638{
639    if (vm_rights == VMReadOnly &&
640            seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
641        return VMReadOnly;
642    }
643    if (vm_rights == VMReadWrite &&
644            (seL4_CapRights_get_capAllowRead(cap_rights_mask) || seL4_CapRights_get_capAllowWrite(cap_rights_mask))) {
645        if (!seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
646            return VMReadOnly;
647        } else if (!seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
648            return VMWriteOnly;
649        } else {
650            return VMReadWrite;
651        }
652    }
653    if (vm_rights == VMWriteOnly &&
654            seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
655        return VMWriteOnly;
656    }
657    if (vm_rights == VMKernelOnly) {
658        return VMKernelOnly;
659    }
660    return VMKernelOnly;
661}
662
663/* The rest of the file implements the RISCV object invocations */
664
/* Build a leaf PTE for a user mapping at the given physical address,
 * with permissions derived from the VM rights and the executable
 * attribute. Dirty and accessed are pre-set so the hardware never
 * needs to update them. */
static pte_t CONST
makeUserPTE(paddr_t paddr, bool_t executable, vm_rights_t vm_rights)
{
    return pte_new(
               paddr >> seL4_PageBits,
               0, /* sw */
               1, /* dirty */
               1, /* accessed */
               0, /* global */
               RISCVGetUserFromVMRights(vm_rights),   /* user */
               executable, /* execute */
               RISCVGetWriteFromVMRights(vm_rights),  /* write */
               RISCVGetReadFromVMRights(vm_rights), /* read */
               1 /* valid */
           );
}
681
682static inline bool_t CONST
683checkVPAlignment(vm_page_size_t sz, word_t w)
684{
685    return (w & MASK(pageBitsForSize(sz))) == 0;
686}
687
/* Decode a PageTable invocation (map or unmap). Validates the message,
 * the caps and the target address, then dispatches to the perform
 * functions with the thread set to restart. */
static exception_t
decodeRISCVPageTableInvocation(word_t label, unsigned int length,
                               cte_t *cte, cap_t cap, extra_caps_t extraCaps,
                               word_t *buffer)
{
    if (label == RISCVPageTableUnmap) {
        /* unmapping is only allowed on the last remaining cap to the PT.
         * NOTE(review): message typo - "once" should read "one". */
        if (unlikely(!isFinalCapability(cte))) {
            userError("RISCVPageTableUnmap: cannot unmap if more than once cap exists");
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageTableInvocationUnmap (cap, cte);
    }

    if (unlikely((label != RISCVPageTableMap))) {
        userError("RISCVPageTable: Illegal Operation");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* map needs a vaddr + attributes plus the vspace cap */
    if (unlikely(length < 2 || extraCaps.excaprefs[0] == NULL)) {
        userError("RISCVPageTable: truncated message");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* a PT cap with an ASID assigned is already mapped somewhere */
    if (unlikely(cap_page_table_cap_get_capPTMappedASID(cap) != asidInvalid)) {
        userError("RISCVPageTable: PageTable is already mapped.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    word_t vaddr = getSyscallArg(0, buffer);
    cap_t lvl1ptCap = extraCaps.excaprefs[0]->cap;

    /* NOTE(review): this compares capPTIsMapped (a flag) against
     * asidInvalid - presumably intended as capPTMappedASID; verify
     * against the master kernel sources. */
    if (unlikely(cap_get_capType(lvl1ptCap) != cap_page_table_cap ||
                 cap_page_table_cap_get_capPTIsMapped(lvl1ptCap) == asidInvalid)) {
        userError("RISCVPageTableMap: Invalid top-level PageTable.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    pte_t *lvl1pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(lvl1ptCap));
    asid_t asid = cap_page_table_cap_get_capPTMappedASID(lvl1ptCap);

    if (unlikely(vaddr >= PPTR_USER_TOP)) {
        userError("RISCVPageTableMap: Virtual address cannot be in kernel window.");
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    findVSpaceForASID_ret_t find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        userError("RISCVPageTableMap: ASID lookup failed");
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* the provided vspace cap must still be the registered root */
    if (unlikely(find_ret.vspace_root != lvl1pt)) {
        userError("RISCVPageTableMap: ASID lookup failed");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    lookupPTSlot_ret_t lu_ret = lookupPTSlot(lvl1pt, vaddr);

    /* if there is already something mapped (valid is set) or we have traversed far enough
     * that a page table is not valid to map then tell the user that they have to delete
     * something before they can put a PT here */
    if (lu_ret.ptBitsLeft == seL4_PageBits || pte_ptr_get_valid(lu_ret.ptSlot)) {
        userError("RISCVPageTableMap: All objects mapped at this address");
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Get the slot to install the PT in */
    pte_t *ptSlot = lu_ret.ptSlot;

    paddr_t paddr = addrFromPPtr(
                        PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap)));
    /* table-pointer entry: R=W=X=0 */
    pte_t pte = pte_new((paddr >> seL4_PageBits),
                        0, /* sw */
                        1, /* dirty */
                        1, /* accessed */
                        0,  /* global */
                        0,  /* user */
                        0,  /* execute */
                        0,  /* write */
                        0,  /* read */
                        1 /* valid */
                       );

    /* record the mapping in the cap itself */
    cap = cap_page_table_cap_set_capPTIsMapped(cap, 1);
    cap = cap_page_table_cap_set_capPTMappedASID(cap, asid);
    cap = cap_page_table_cap_set_capPTMappedAddress(cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performPageTableInvocationMap(cap, cte, pte, ptSlot);
}
795
796static exception_t
797decodeRISCVFrameInvocation(word_t label, unsigned int length,
798                           cte_t *cte, cap_t cap, extra_caps_t extraCaps,
799                           word_t *buffer)
800{
801    switch (label) {
802    case RISCVPageMap: {
803        if (unlikely(length < 3 || extraCaps.excaprefs[0] == NULL)) {
804            userError("RISCVPageMap: Truncated message.");
805            current_syscall_error.type = seL4_TruncatedMessage;
806            return EXCEPTION_SYSCALL_ERROR;
807        }
808
809        word_t vaddr = getSyscallArg(0, buffer);
810        word_t w_rightsMask = getSyscallArg(1, buffer);
811        vm_attributes_t attr = vmAttributesFromWord(getSyscallArg(2, buffer));
812        cap_t lvl1ptCap = extraCaps.excaprefs[0]->cap;
813
814        vm_page_size_t frameSize = cap_frame_cap_get_capFSize(cap);
815        vm_rights_t capVMRights = cap_frame_cap_get_capFVMRights(cap);
816
817        /* check the frame isn't already mapped */
818        if (unlikely(cap_frame_cap_get_capFMappedASID(cap)) != asidInvalid) {
819            userError("RISCVPageMap: frame already mapped");
820            current_syscall_error.type = seL4_InvalidCapability;
821            current_syscall_error.invalidCapNumber = 0;
822            return EXCEPTION_SYSCALL_ERROR;
823        }
824
825        if (unlikely(cap_get_capType(lvl1ptCap) != cap_page_table_cap ||
826                     cap_page_table_cap_get_capPTMappedASID(lvl1ptCap) == asidInvalid)) {
827            userError("RISCVPageMap: Bad PageTable cap.");
828            current_syscall_error.type = seL4_InvalidCapability;
829            current_syscall_error.invalidCapNumber = 1;
830            return EXCEPTION_SYSCALL_ERROR;
831        }
832
833        pte_t *lvl1pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(lvl1ptCap));
834        asid_t asid = cap_page_table_cap_get_capPTMappedASID(lvl1ptCap);
835
836        findVSpaceForASID_ret_t find_ret = findVSpaceForASID(asid);
837        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
838            userError("RISCVPageMap: No PageTable for ASID");
839            current_syscall_error.type = seL4_FailedLookup;
840            current_syscall_error.failedLookupWasSource = false;
841            return EXCEPTION_SYSCALL_ERROR;
842        }
843
844        if (unlikely(find_ret.vspace_root != lvl1pt)) {
845            userError("RISCVPageMap: ASID lookup failed");
846            current_syscall_error.type = seL4_InvalidCapability;
847            current_syscall_error.invalidCapNumber = 1;
848            return EXCEPTION_SYSCALL_ERROR;
849        }
850
851        /* check the vaddr is valid */
852        word_t vtop = vaddr + BIT(pageBitsForSize(frameSize)) - 1;
853        if (unlikely(vtop >= PPTR_USER_TOP)) {
854            current_syscall_error.type = seL4_InvalidArgument;
855            current_syscall_error.invalidArgumentNumber = 0;
856            return EXCEPTION_SYSCALL_ERROR;
857        }
858        if (unlikely(!checkVPAlignment(frameSize, vaddr))) {
859            current_syscall_error.type = seL4_AlignmentError;
860            return EXCEPTION_SYSCALL_ERROR;
861        }
862
863        /* Check if this page is already mapped */
864        lookupPTSlot_ret_t lu_ret = lookupPTSlot(lvl1pt, vaddr);
865        if (unlikely(lu_ret.ptBitsLeft != pageBitsForSize(frameSize))) {
866            current_lookup_fault = lookup_fault_missing_capability_new(lu_ret.ptBitsLeft);
867            current_syscall_error.type = seL4_FailedLookup;
868            current_syscall_error.failedLookupWasSource = false;
869            return EXCEPTION_SYSCALL_ERROR;
870        }
871
872        /* check this vaddr isn't already mapped */
873        if (unlikely(pte_ptr_get_valid(lu_ret.ptSlot))) {
874            userError("Virtual address already mapped");
875            current_syscall_error.type = seL4_DeleteFirst;
876            return EXCEPTION_SYSCALL_ERROR;
877        }
878
879        vm_rights_t vmRights = maskVMRights(capVMRights, rightsFromWord(w_rightsMask));
880        paddr_t frame_paddr = addrFromPPtr((void *) cap_frame_cap_get_capFBasePtr(cap));
881        cap = cap_frame_cap_set_capFMappedASID(cap, asid);
882        cap = cap_frame_cap_set_capFMappedAddress(cap,  vaddr);
883
884        bool_t executable = !vm_attributes_get_riscvExecuteNever(attr);
885        pte_t pte = makeUserPTE(frame_paddr, executable, vmRights);
886        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
887        return performPageInvocationMapPTE(cap, cte, pte, lu_ret.ptSlot);
888    }
889
890    case RISCVPageRemap: {
891        if (unlikely(length < 2 || extraCaps.excaprefs[0] == NULL)) {
892            userError("RISCVPageRemap: Truncated message.");
893            current_syscall_error.type = seL4_TruncatedMessage;
894            return EXCEPTION_SYSCALL_ERROR;
895        }
896
897        word_t w_rightsMask = getSyscallArg(0, buffer);
898        vm_attributes_t attr = vmAttributesFromWord(getSyscallArg(1, buffer));
899        cap_t lvl1ptCap = extraCaps.excaprefs[0]->cap;
900        vm_page_size_t frameSize = cap_frame_cap_get_capFSize(cap);
901        vm_rights_t capVMRights = cap_frame_cap_get_capFVMRights(cap);
902
903        if (unlikely(cap_get_capType(lvl1ptCap) != cap_page_table_cap ||
904                     cap_page_table_cap_get_capPTMappedASID(lvl1ptCap) == asidInvalid)) {
905            userError("RISCVPageRemap: Bad PageTable cap.");
906            current_syscall_error.type = seL4_InvalidCapability;
907            current_syscall_error.invalidCapNumber = 1;
908            return EXCEPTION_SYSCALL_ERROR;
909        }
910
911        if (unlikely(cap_frame_cap_get_capFMappedASID(cap)) == asidInvalid) {
912            userError("RISCVPageRemap: frame is not mapped");
913            current_syscall_error.type = seL4_InvalidCapability;
914            current_syscall_error.invalidCapNumber = 0;
915            return EXCEPTION_SYSCALL_ERROR;
916        }
917
918        asid_t mappedASID = cap_frame_cap_get_capFMappedASID(cap);
919        findVSpaceForASID_ret_t find_ret = findVSpaceForASID(mappedASID);
920        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
921            userError("RISCVPageRemap: No PageTable for ASID");
922            current_syscall_error.type = seL4_FailedLookup;
923            current_syscall_error.failedLookupWasSource = false;
924            return EXCEPTION_SYSCALL_ERROR;
925        }
926
927        pte_t *lvl1pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(lvl1ptCap));
928        if (unlikely(find_ret.vspace_root != lvl1pt ||
929                     cap_page_table_cap_get_capPTMappedASID(lvl1ptCap) != mappedASID)) {
930            userError("RISCVPageRemap: ASID lookup failed");
931            current_syscall_error.type = seL4_InvalidCapability;
932            current_syscall_error.invalidCapNumber = 1;
933            return EXCEPTION_SYSCALL_ERROR;
934        }
935
936        word_t vaddr = cap_frame_cap_get_capFMappedAddress(cap);
937        if (unlikely(!checkVPAlignment(frameSize, vaddr))) {
938            current_syscall_error.type = seL4_AlignmentError;
939            return EXCEPTION_SYSCALL_ERROR;
940        }
941
942        /* Check if this page is already mapped */
943        lookupPTSlot_ret_t lu_ret = lookupPTSlot(lvl1pt, vaddr);
944
945        if (unlikely(lu_ret.ptBitsLeft != pageBitsForSize(frameSize))) {
946            userError("RISCVPageRemap: No PageTable for this page %p", (void*)vaddr);
947            current_lookup_fault = lookup_fault_missing_capability_new(lu_ret.ptBitsLeft);
948            current_syscall_error.type = seL4_FailedLookup;
949            current_syscall_error.failedLookupWasSource = false;
950            return EXCEPTION_SYSCALL_ERROR;
951        }
952
953        if (unlikely(isPTEPageTable(lu_ret.ptSlot))) {
954            userError("RISCVPageRemap: no mapping to remap.");
955            current_syscall_error.type = seL4_InvalidCapability;
956            current_syscall_error.invalidCapNumber = 0;
957            return EXCEPTION_SYSCALL_ERROR;
958        }
959
960        vm_rights_t vmRights = maskVMRights(capVMRights, rightsFromWord(w_rightsMask));
961        paddr_t frame_paddr = addrFromPPtr((void *) cap_frame_cap_get_capFBasePtr(cap));
962        bool_t executable = !vm_attributes_get_riscvExecuteNever(attr);
963        pte_t pte = makeUserPTE(frame_paddr, executable, vmRights);
964        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
965        return performPageInvocationRemapPTE(pte, lu_ret.ptSlot);
966    }
967    case RISCVPageUnmap: {
968        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
969        return performPageInvocationUnmap(cap, cte);
970    }
971
972    case RISCVPageGetAddress: {
973
974        /* Check that there are enough message registers */
975        assert(n_msgRegisters >= 1);
976
977        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
978        return performPageGetAddress((void*)cap_frame_cap_get_capFBasePtr(cap));
979    }
980
981    default:
982        userError("RISCVPage: Illegal operation.");
983        current_syscall_error.type = seL4_IllegalOperation;
984
985        return EXCEPTION_SYSCALL_ERROR;
986    }
987
988}
989
990exception_t
991decodeRISCVMMUInvocation(word_t label, unsigned int length, cptr_t cptr,
992                         cte_t *cte, cap_t cap, extra_caps_t extraCaps,
993                         word_t *buffer)
994{
995    switch (cap_get_capType(cap)) {
996
997    case cap_page_table_cap:
998        return decodeRISCVPageTableInvocation(label, length, cte, cap, extraCaps, buffer);
999
1000    case cap_frame_cap:
1001        return decodeRISCVFrameInvocation(label, length, cte, cap, extraCaps, buffer);
1002
1003    case cap_asid_control_cap: {
1004        word_t     i;
1005        asid_t           asid_base;
1006        word_t           index;
1007        word_t           depth;
1008        cap_t            untyped;
1009        cap_t            root;
1010        cte_t*           parentSlot;
1011        cte_t*           destSlot;
1012        lookupSlot_ret_t lu_ret;
1013        void*            frame;
1014        exception_t      status;
1015
1016        if (label != RISCVASIDControlMakePool) {
1017            current_syscall_error.type = seL4_IllegalOperation;
1018
1019            return EXCEPTION_SYSCALL_ERROR;
1020        }
1021
1022        if (length < 2 || extraCaps.excaprefs[0] == NULL
1023                || extraCaps.excaprefs[1] == NULL) {
1024            current_syscall_error.type = seL4_TruncatedMessage;
1025            return EXCEPTION_SYSCALL_ERROR;
1026        }
1027
1028        index = getSyscallArg(0, buffer);
1029        depth = getSyscallArg(1, buffer);
1030        parentSlot = extraCaps.excaprefs[0];
1031        untyped = parentSlot->cap;
1032        root = extraCaps.excaprefs[1]->cap;
1033
1034        /* Find first free pool */
1035        for (i = 0; i < nASIDPools && riscvKSASIDTable[i]; i++);
1036
1037        if (i == nASIDPools) {
1038            /* no unallocated pool is found */
1039            current_syscall_error.type = seL4_DeleteFirst;
1040
1041            return EXCEPTION_SYSCALL_ERROR;
1042        }
1043
1044        asid_base = i << asidLowBits;
1045
1046        if (cap_get_capType(untyped) != cap_untyped_cap ||
1047                cap_untyped_cap_get_capBlockSize(untyped) != seL4_ASIDPoolBits ||
1048                cap_untyped_cap_get_capIsDevice(untyped)) {
1049            current_syscall_error.type = seL4_InvalidCapability;
1050            current_syscall_error.invalidCapNumber = 1;
1051
1052            return EXCEPTION_SYSCALL_ERROR;
1053        }
1054
1055        status = ensureNoChildren(parentSlot);
1056        if (status != EXCEPTION_NONE) {
1057            return status;
1058        }
1059
1060        frame = WORD_PTR(cap_untyped_cap_get_capPtr(untyped));
1061
1062        lu_ret = lookupTargetSlot(root, index, depth);
1063        if (lu_ret.status != EXCEPTION_NONE) {
1064            return lu_ret.status;
1065        }
1066        destSlot = lu_ret.slot;
1067
1068        status = ensureEmptySlot(destSlot);
1069        if (status != EXCEPTION_NONE) {
1070            return status;
1071        }
1072
1073        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1074        return performASIDControlInvocation(frame, destSlot, parentSlot, asid_base);
1075    }
1076
1077    case cap_asid_pool_cap: {
1078        cap_t        vspaceCap;
1079        cte_t*       vspaceCapSlot;
1080        asid_pool_t* pool;
1081        word_t i;
1082        asid_t       asid;
1083
1084        if (label != RISCVASIDPoolAssign) {
1085            current_syscall_error.type = seL4_IllegalOperation;
1086
1087            return EXCEPTION_SYSCALL_ERROR;
1088        }
1089        if (extraCaps.excaprefs[0] == NULL) {
1090            current_syscall_error.type = seL4_TruncatedMessage;
1091
1092            return EXCEPTION_SYSCALL_ERROR;
1093        }
1094
1095        vspaceCapSlot = extraCaps.excaprefs[0];
1096        vspaceCap = vspaceCapSlot->cap;
1097
1098        if (cap_page_table_cap_get_capPTMappedASID(vspaceCap) != asidInvalid) {
1099            userError("RISCVASIDPool: Invalid vspace root.");
1100            current_syscall_error.type = seL4_InvalidCapability;
1101            current_syscall_error.invalidCapNumber = 1;
1102
1103            return EXCEPTION_SYSCALL_ERROR;
1104        }
1105
1106        pool = riscvKSASIDTable[cap_asid_pool_cap_get_capASIDBase(cap) >> asidLowBits];
1107        if (!pool) {
1108            current_syscall_error.type = seL4_FailedLookup;
1109            current_syscall_error.failedLookupWasSource = false;
1110            current_lookup_fault = lookup_fault_invalid_root_new();
1111            return EXCEPTION_SYSCALL_ERROR;
1112        }
1113
1114        if (pool != ASID_POOL_PTR(cap_asid_pool_cap_get_capASIDPool(cap))) {
1115            current_syscall_error.type = seL4_InvalidCapability;
1116            current_syscall_error.invalidCapNumber = 0;
1117            return EXCEPTION_SYSCALL_ERROR;
1118        }
1119
1120        /* Find first free ASID */
1121        asid = cap_asid_pool_cap_get_capASIDBase(cap);
1122        for (i = 0; i < BIT(asidLowBits) && (asid + i == 0 || pool->array[i]); i++);
1123
1124        if (i == BIT(asidLowBits)) {
1125            current_syscall_error.type = seL4_DeleteFirst;
1126
1127            return EXCEPTION_SYSCALL_ERROR;
1128        }
1129
1130        asid += i;
1131
1132        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1133        return performASIDPoolInvocation(asid, pool, vspaceCapSlot);
1134    }
1135    default:
1136        fail("Invalid arch cap type");
1137    }
1138}
1139
1140exception_t
1141performPageTableInvocationMap(cap_t cap, cte_t *ctSlot,
1142                              pte_t pte, pte_t *ptSlot)
1143{
1144    ctSlot->cap = cap;
1145    *ptSlot = pte;
1146    sfence();
1147
1148    return EXCEPTION_NONE;
1149}
1150
1151exception_t
1152performPageTableInvocationUnmap(cap_t cap, cte_t *ctSlot)
1153{
1154    if (cap_page_table_cap_get_capPTIsMapped(cap)) {
1155        pte_t *pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
1156        unmapPageTable(
1157            cap_page_table_cap_get_capPTMappedASID(cap),
1158            cap_page_table_cap_get_capPTMappedAddress(cap),
1159            pt
1160        );
1161        clearMemory((void *)pt, seL4_PageBits);
1162    }
1163    cap_page_table_cap_ptr_set_capPTIsMapped(&(ctSlot->cap), 0);
1164
1165    return EXCEPTION_NONE;
1166}
1167
1168static exception_t
1169performPageGetAddress(void *vbase_ptr)
1170{
1171    paddr_t capFBasePtr;
1172
1173    /* Get the physical address of this frame. */
1174    capFBasePtr = addrFromPPtr(vbase_ptr);
1175
1176    /* return it in the first message register */
1177    setRegister(NODE_STATE(ksCurThread), msgRegisters[0], capFBasePtr);
1178    setRegister(NODE_STATE(ksCurThread), msgInfoRegister,
1179                wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, 1)));
1180
1181    return EXCEPTION_NONE;
1182}
1183
1184static exception_t updatePTE(pte_t pte, pte_t *base)
1185{
1186    *base = pte;
1187    sfence();
1188    return EXCEPTION_NONE;
1189}
1190
1191exception_t performPageInvocationMapPTE(cap_t cap, cte_t *ctSlot,
1192                                        pte_t pte, pte_t *base)
1193{
1194    ctSlot->cap = cap;
1195    return updatePTE(pte, base);
1196}
1197
1198exception_t
1199performPageInvocationRemapPTE(pte_t pte, pte_t *base)
1200{
1201    return updatePTE(pte, base);
1202}
1203
1204exception_t
1205performPageInvocationUnmap(cap_t cap, cte_t *ctSlot)
1206{
1207
1208    if (cap_frame_cap_get_capFMappedASID(cap) != asidInvalid) {
1209        unmapPage(cap_frame_cap_get_capFSize(cap),
1210                  cap_frame_cap_get_capFMappedASID(cap),
1211                  cap_frame_cap_get_capFMappedAddress(cap),
1212                  cap_frame_cap_get_capFBasePtr(cap)
1213                 );
1214    }
1215    ctSlot->cap = cap_frame_cap_set_capFMappedAddress(ctSlot->cap, 0);
1216    ctSlot->cap = cap_frame_cap_set_capFMappedASID(ctSlot->cap, asidInvalid);
1217    return EXCEPTION_NONE;
1218}
1219
1220#ifdef CONFIG_PRINTING
1221void
1222Arch_userStackTrace(tcb_t *tptr)
1223{
1224    cap_t threadRoot = TCB_PTR_CTE_PTR(tptr, tcbVTable)->cap;
1225    if (!isValidVTableRoot(threadRoot)) {
1226        printf("Invalid vspace\n");
1227        return;
1228    }
1229
1230    word_t sp = getRegister(tptr, SP);
1231    if (!IS_ALIGNED(sp, seL4_WordSizeBits)) {
1232        printf("SP %p not aligned", (void *) sp);
1233        return;
1234    }
1235
1236    pte_t *vspace_root = PTE_PTR(pptr_of_cap(threadRoot));
1237    for (int i = 0; i < CONFIG_USER_STACK_TRACE_LENGTH; i++) {
1238        word_t address = sp + (i * sizeof(word_t));
1239        lookupPTSlot_ret_t ret = lookupPTSlot(vspace_root, address);
1240        if (pte_ptr_get_valid(ret.ptSlot) && !isPTEPageTable(ret.ptSlot)) {
1241            pptr_t pptr = (pptr_t) (getPPtrFromHWPTE(ret.ptSlot));
1242            word_t *value = (word_t*) ((word_t)pptr + (address & MASK(ret.ptBitsLeft)));
1243            printf("0x%lx: 0x%lx\n", (long) address, (long) *value);
1244        } else {
1245            printf("0x%lx: INVALID\n", (long) address);
1246        }
1247    }
1248}
1249#endif
1250