1/*
2 * Copyright 2017, Data61
3 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
4 * ABN 41 687 119 230.
5 *
6 * This software may be distributed and modified according to the terms of
7 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
8 * See "LICENSE_GPLv2.txt" for details.
9 *
10 * @TAG(DATA61_GPL)
11 */
12
13#include <config.h>
14#include <types.h>
15#include <benchmark/benchmark.h>
16#include <api/failures.h>
17#include <api/syscall.h>
18#include <kernel/boot.h>
19#include <kernel/cspace.h>
20#include <kernel/thread.h>
21#include <kernel/stack.h>
22#include <machine/io.h>
23#include <machine/debug.h>
24#include <model/statedata.h>
25#include <object/cnode.h>
26#include <object/untyped.h>
27#include <arch/api/invocation.h>
28#include <arch/kernel/vspace.h>
29#include <linker.h>
30#include <plat/machine/devices.h>
31#include <plat/machine/hardware.h>
32#include <armv/context_switch.h>
33#include <arch/object/iospace.h>
34#include <arch/object/vcpu.h>
35#include <arch/machine/tlb.h>
36
/* PGD slot reserved for storing the PGD's allocated hardware VMID.
 * This is only necessary when running EL2 and when we only have
 * 8-bit VMID. Note that this assumes that the IPA size for S2
 * translation does not use full 48-bit.
 */
#define PGD_VMID_SLOT   511     /* last entry of the 512-entry PGD */
#define VMID_MASK       0xff    /* hardware VMIDs are 8 bits wide */
#define PGDE_VMID_SHIFT 12      /* bit position of the stored VMID within the reserved PGDE */
45
46
47/*
48 * Memory types are defined in Memory Attribute Indirection Register.
49 *  - nGnRnE Device non-Gathering, non-Reordering, No Early write acknowledgement
50 *  - nGnRE Unused Device non-Gathering, non-Reordering, Early write acknowledgement
51 *  - GRE Unused Device Gathering, Reordering, Early write acknowledgement
52 *  - NORMAL_NC Normal Memory, Inner/Outer non-cacheable
53 *  - NORMAL Normal Memory, Inner/Outer Write-back non-transient, Write-allocate, Read-allocate
54 * Note: These should match with contents of MAIR_EL1 register!
55 */
/* Indices into MAIR_EL1 selecting the stage-1 memory type of a mapping. */
enum mair_types {
    DEVICE_nGnRnE = 0,  /* strongly-ordered device memory */
    DEVICE_nGnRE = 1,
    DEVICE_GRE = 2,
    NORMAL_NC = 3,      /* normal, non-cacheable */
    NORMAL = 4          /* normal, write-back cacheable */
};
63
64/* Stage-2 translation memory attributes */
/* Stage-2 translation memory attributes (the MemAttr[3:0] descriptor field).
 * For normal memory the high two bits encode the outer and the low two bits
 * the inner cacheability (NC/WTC/WBC). */
enum mair_s2_types {
    S2_DEVICE_nGnRnE = 0b0000,
    S2_DEVICE_nGnRE = 0b0001,
    S2_DEVICE_nGRE  = 0b0010,
    S2_DEVICE_GRE = 0b0011,

    S2_NORMAL_INNER_NC_OUTER_NC = 0b0101,
    S2_NORMAL_INNER_WTC_OUTER_NC = 0b0110,
    S2_NORMAL_INNER_WBC_OUTER_NC = 0b0111,

    S2_NORMAL_INNER_NC_OUTER_WTC = 0b1001,
    S2_NORMAL_INNER_WTC_OUTER_WTC = 0b1010,
    S2_NORMAL_INNER_WBC_OUTER_WTC = 0b1011,

    S2_NORMAL_INNER_NC_OUTER_WBC = 0b1101,
    S2_NORMAL_INNER_WTC_OUTER_WBC = 0b1110,
    S2_NORMAL_INNER_WBC_OUTER_WBC = 0b1111,

    /* default for cacheable user mappings: write-back inner and outer */
    S2_NORMAL = S2_NORMAL_INNER_WBC_OUTER_WBC
};
85
/* Leif from Linaro said the big.LITTLE clusters should be treated as
 * inner shareable, and we believe so, although the Example B2-1 given in
 * ARM ARM DDI 0487B.b (ID092517) says otherwise.
 */

/* shareability attribute used for SMP mappings: 3 = inner shareable */
#define SMP_SHARE   3
92
/* Return types for the page-table walk helpers below: each pairs a lookup
 * status with the slot pointer (or vspace root) found on success. The
 * pointer member is NULL whenever status != EXCEPTION_NONE. */
struct lookupPGDSlot_ret {
    exception_t status;
    pgde_t *pgdSlot;
};
typedef struct lookupPGDSlot_ret lookupPGDSlot_ret_t;

struct lookupPUDSlot_ret {
    exception_t status;
    pude_t *pudSlot;
};
typedef struct lookupPUDSlot_ret lookupPUDSlot_ret_t;

struct lookupPDSlot_ret {
    exception_t status;
    pde_t *pdSlot;
};
typedef struct lookupPDSlot_ret lookupPDSlot_ret_t;

struct lookupPTSlot_ret {
    exception_t status;
    pte_t *ptSlot;
};
typedef struct lookupPTSlot_ret lookupPTSlot_ret_t;

/* Result of resolving a vaddr to a mapped frame; frameBase/frameSize are
 * only meaningful when valid is true. */
struct lookupFrame_ret {
    paddr_t frameBase;
    vm_page_size_t frameSize;
    bool_t valid;
};
typedef struct lookupFrame_ret lookupFrame_ret_t;

struct findVSpaceForASID_ret {
    exception_t status;
    vspace_root_t *vspace_root;
};
typedef struct findVSpaceForASID_ret findVSpaceForASID_ret_t;
129
130/* Stage-1 access permissions:
 * AP[2:1]  higher EL       EL0
132 *   00       rw            None
133 *   01       rw            rw
134 *   10       r             None
135 *   11       r             r
136 *
137 * Stage-2 access permissions:
138 * S2AP    Access from Nonsecure EL1 or Non-secure EL0
139 *  00                      None
140 *  01                      r
141 *  10                      w
142 *  11                      rw
143 *
144 *  For VMs or native seL4 applications, if hypervisor support
145 *  is enabled, we use the S2AP. The kernel itself running in
146 *  EL2 still uses the Stage-1 AP format.
147 */
148
149static word_t CONST
150APFromVMRights(vm_rights_t vm_rights)
151{
152    switch (vm_rights) {
153    case VMKernelOnly:
154        if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
155            return 0;
156        } else {
157            return 0;
158        }
159
160    case VMReadWrite:
161        if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
162            return 3;
163        } else {
164            return 1;
165        }
166
167    case VMKernelReadOnly:
168        if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
169            /* no corresponding AP for S2AP, return None */
170            return 0;
171        } else {
172            return 2;
173        }
174
175    case VMReadOnly:
176        if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
177            return 1;
178        } else {
179            return 3;
180        }
181
182    default:
183        fail("Invalid VM rights");
184    }
185}
186
187vm_rights_t CONST
188maskVMRights(vm_rights_t vm_rights, seL4_CapRights_t cap_rights_mask)
189{
190    if (vm_rights == VMReadOnly &&
191            seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
192        return VMReadOnly;
193    }
194    if (vm_rights == VMReadWrite &&
195            seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
196        if (!seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
197            return VMReadOnly;
198        } else {
199            return VMReadWrite;
200        }
201    }
202    if (vm_rights == VMReadWrite &&
203            !seL4_CapRights_get_capAllowRead(cap_rights_mask) &&
204            seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
205        userError("Attempted to make unsupported write only mapping");
206    }
207    return VMKernelOnly;
208}
209
210/* ==================== BOOT CODE STARTS HERE ==================== */
211
212/* The 54th bit is defined as UXN (unprivileged execute-never) for stage 1
 * of any translation regime for which stage 1 translation can support
214 * two VA ranges. This field applies only to execution at EL0. A value
215 * of 0 indicates that this control permits execution.
216 *
217 * The 54th bit is defined as XN (execute-never) for stage 1 of any translation
 * regime for which the stage 1 translation can support only a single VA range or
 * stage 2 translation when ARMv8.2-TTS2UXN is not implemented.
220 * This field applies to execution at any exception level to which the stage of
221 * translation applies. A value of 0 indicates that this control permits execution.
222 *
223 * When the kernel is running in EL2, the stage-1 translation only supports one
224 * VA range so that the 54th bit is XN. Setting the bit to 0 allows execution.
225 *
226 */
BOOT_CODE void
map_kernel_frame(paddr_t paddr, pptr_t vaddr, vm_rights_t vm_rights, vm_attributes_t attributes)
{
    /* Install one small-page kernel mapping in the global kernel page
     * table; the assertion restricts this to the device-window region
     * above PPTR_TOP. */
    assert(vaddr >= PPTR_TOP);

    if (vm_attributes_get_armPageCacheable(attributes)) {
        /* cacheable mapping: NORMAL memory type */
        armKSGlobalKernelPT[GET_PT_INDEX(vaddr)] = pte_new(
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                                                       0, // XN
#else
                                                       1, /* unprivileged execute never */
#endif
                                                       paddr,
                                                       0,                          /* global */
                                                       1,                          /* access flag */
                                                       SMP_TERNARY(SMP_SHARE, 0),          /* Inner-shareable if SMP enabled, otherwise unshared */
                                                       APFromVMRights(vm_rights),
                                                       NORMAL,
                                                       0b11                        /* reserved */
                                                   );
    } else {
        /* non-cacheable mapping: strongly-ordered device memory type */
        armKSGlobalKernelPT[GET_PT_INDEX(vaddr)] = pte_new(
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                                                       0, // XN
#else
                                                       1, /* unprivileged execute never */
#endif
                                                       paddr,
                                                       0,                          /* global */
                                                       1,                          /* access flag */
                                                       0,                          /* Ignored - Outer shareable */
                                                       APFromVMRights(vm_rights),
                                                       DEVICE_nGnRnE,
                                                       0b11                        /* reserved */
                                                   );
    }
}
264
/* Construct the kernel's static address space: the kernel window mapped
 * with large pages, plus a 4K-granularity page table for the device
 * window in the final slots of the tables. */
BOOT_CODE void
map_kernel_window(void)
{

    paddr_t paddr;
    pptr_t vaddr;
    word_t idx;

    /* verify that the kernel window is at the last entry of the PGD */
    assert(GET_PGD_INDEX(kernelBase) == BIT(PGD_INDEX_BITS) - 1);
    assert(IS_ALIGNED(kernelBase, seL4_LargePageBits));
    /* verify that the kernel device window is 1gb aligned and 1gb in size */
    assert(GET_PUD_INDEX(PPTR_TOP) == BIT(PUD_INDEX_BITS) - 1);
    assert(IS_ALIGNED(PPTR_TOP, seL4_HugePageBits));

    /* place the PUD into the PGD */
    armKSGlobalKernelPGD[GET_PGD_INDEX(kernelBase)] = pgde_new(
                                                          pptr_to_paddr(armKSGlobalKernelPUD),
                                                          0b11  /* reserved */
                                                      );

    /* place all PDs except the last one in PUD */
    for (idx = GET_PUD_INDEX(kernelBase); idx < GET_PUD_INDEX(PPTR_TOP); idx++) {
        armKSGlobalKernelPUD[idx] = pude_pude_pd_new(
                                        pptr_to_paddr(&armKSGlobalKernelPDs[idx][0])
                                    );
    }

    /* map the kernel window using large pages */
    vaddr = kernelBase;
    for (paddr = physBase; paddr < PADDR_TOP; paddr += BIT(seL4_LargePageBits)) {
        armKSGlobalKernelPDs[GET_PUD_INDEX(vaddr)][GET_PD_INDEX(vaddr)] = pde_pde_large_new(
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                                                                              0, // XN
#else
                                                                              1, // UXN
#endif
                                                                              paddr,
                                                                              0,                        /* global */
                                                                              1,                        /* access flag */
                                                                              SMP_TERNARY(SMP_SHARE, 0),        /* Inner-shareable if SMP enabled, otherwise unshared */
                                                                              0,                        /* VMKernelOnly */
                                                                              NORMAL
                                                                          );
        vaddr += BIT(seL4_LargePageBits);
    }

    /* put the PD into the PUD for device window */
    armKSGlobalKernelPUD[GET_PUD_INDEX(PPTR_TOP)] = pude_pude_pd_new(
                                                        pptr_to_paddr(&armKSGlobalKernelPDs[BIT(PUD_INDEX_BITS) - 1][0])
                                                    );

    /* put the PT into the PD for device window */
    armKSGlobalKernelPDs[BIT(PUD_INDEX_BITS) - 1][BIT(PD_INDEX_BITS) - 1] = pde_pde_small_new(
                                                                                pptr_to_paddr(armKSGlobalKernelPT)
                                                                            );

    /* populate the device-window PT (fills armKSGlobalKernelPT via
     * map_kernel_frame) */
    map_kernel_devices();
}
324
325/* When the hypervisor support is enabled, the stage-2 translation table format
326 * is used for applications.
327 * The global bit is always 0.
328 * The memory attributes use the S2 translation values.
329 */
/* Install the mapping recorded in 'frame_cap' into the initial thread's
 * vspace. All intermediate levels (PGD/PUD/PD) must already be present —
 * this is asserted at each step of the walk. */
static BOOT_CODE void
map_it_frame_cap(cap_t vspace_cap, cap_t frame_cap, bool_t executable)
{
    pgde_t *pgd = PGD_PTR(pptr_of_cap(vspace_cap));
    pude_t *pud;
    pde_t *pd;
    pte_t *pt;

    vptr_t vptr = cap_frame_cap_get_capFMappedAddress(frame_cap);
    void *pptr = (void*)cap_frame_cap_get_capFBasePtr(frame_cap);

    assert(cap_frame_cap_get_capFMappedASID(frame_cap) != 0);

    /* walk the pre-built levels down to the leaf PT */
    pgd += GET_PGD_INDEX(vptr);
    assert(pgde_ptr_get_present(pgd));
    pud = paddr_to_pptr(pgde_ptr_get_pud_base_address(pgd));
    pud += GET_PUD_INDEX(vptr);
    assert(pude_pude_pd_ptr_get_present(pud));
    pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pud));
    pd += GET_PD_INDEX(vptr);
    assert(pde_pde_small_ptr_get_present(pd));
    pt = paddr_to_pptr(pde_pde_small_ptr_get_pt_base_address(pd));
    /* write the leaf entry; stage-2 attribute format when the kernel is
     * built with hypervisor support (see comment above this function) */
    *(pt + GET_PT_INDEX(vptr)) = pte_new(
                                     !executable,                    /* unprivileged execute never */
                                     pptr_to_paddr(pptr),            /* page_base_address    */
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                                     0,
#else
                                     1,                              /* not global */
#endif
                                     1,                              /* access flag */
                                     SMP_TERNARY(SMP_SHARE, 0),              /* Inner-shareable if SMP enabled, otherwise unshared */
                                     APFromVMRights(VMReadWrite),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                                     S2_NORMAL,
#else
                                     NORMAL,
#endif
                                     0b11                            /* reserved */
                                 );
}
371
372static BOOT_CODE cap_t
373create_it_frame_cap(pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large)
374{
375    vm_page_size_t frame_size;
376    if (use_large) {
377        frame_size = ARMLargePage;
378    } else {
379        frame_size = ARMSmallPage;
380    }
381    return
382        cap_frame_cap_new(
383            asid,                          /* capFMappedASID */
384            pptr,                          /* capFBasePtr */
385            frame_size,                    /* capFSize */
386            vptr,                          /* capFMappedAddress */
387            wordFromVMRights(VMReadWrite), /* capFVMRights */
388            false                          /* capFIsDevice */
389        );
390}
391
/* Install the page table referenced by 'pt_cap' into the initial thread's
 * vspace at the cap's recorded mapped address. The PGD and PUD levels
 * must already be populated (asserted during the walk). */
static BOOT_CODE void
map_it_pt_cap(cap_t vspace_cap, cap_t pt_cap)
{
    pgde_t *pgd = PGD_PTR(pptr_of_cap(vspace_cap));
    pude_t *pud;
    pde_t *pd;
    pte_t *pt = PT_PTR(cap_page_table_cap_get_capPTBasePtr(pt_cap));
    vptr_t vptr = cap_page_table_cap_get_capPTMappedAddress(pt_cap);

    assert(cap_page_table_cap_get_capPTIsMapped(pt_cap));

    /* walk PGD -> PUD to find the PD slot for this vaddr */
    pgd += GET_PGD_INDEX(vptr);
    assert(pgde_ptr_get_present(pgd));
    pud = paddr_to_pptr(pgde_ptr_get_pud_base_address(pgd));
    pud += GET_PUD_INDEX(vptr);
    assert(pude_pude_pd_ptr_get_present(pud));
    pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pud));
    *(pd + GET_PD_INDEX(vptr)) = pde_pde_small_new(
                                     pptr_to_paddr(pt)
                                 );
}
413
414static BOOT_CODE cap_t
415create_it_pt_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
416{
417    cap_t cap;
418    cap = cap_page_table_cap_new(
419              asid,                   /* capPTMappedASID */
420              pptr,                   /* capPTBasePtr */
421              1,                      /* capPTIsMapped */
422              vptr                    /* capPTMappedAddress */
423          );
424    map_it_pt_cap(vspace_cap, cap);
425    return cap;
426}
427
/* Install the page directory referenced by 'pd_cap' into the initial
 * thread's vspace. The PGD entry covering the address must already be
 * present (asserted). */
static BOOT_CODE void
map_it_pd_cap(cap_t vspace_cap, cap_t pd_cap)
{
    pgde_t *pgd = PGD_PTR(pptr_of_cap(vspace_cap));
    pude_t *pud;
    pde_t *pd = PD_PTR(cap_page_directory_cap_get_capPDBasePtr(pd_cap));
    vptr_t vptr = cap_page_directory_cap_get_capPDMappedAddress(pd_cap);

    assert(cap_page_directory_cap_get_capPDIsMapped(pd_cap));

    /* walk the PGD to find the PUD slot for this vaddr */
    pgd += GET_PGD_INDEX(vptr);
    assert(pgde_ptr_get_present(pgd));
    pud = paddr_to_pptr(pgde_ptr_get_pud_base_address(pgd));
    *(pud + GET_PUD_INDEX(vptr)) = pude_pude_pd_new(
                                       pptr_to_paddr(pd)
                                   );
}
445
446static BOOT_CODE cap_t
447create_it_pd_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
448{
449    cap_t cap;
450    cap = cap_page_directory_cap_new(
451              asid,                   /* capPDMappedASID */
452              pptr,                   /* capPDBasePtr */
453              1,                      /* capPDIsMapped */
454              vptr                    /* capPDMappedAddress */
455          );
456    map_it_pd_cap(vspace_cap, cap);
457    return cap;
458}
459
/* Install the page upper directory referenced by 'pud_cap' into the
 * initial thread's PGD. */
static BOOT_CODE void
map_it_pud_cap(cap_t vspace_cap, cap_t pud_cap)
{
    pgde_t *pgd = PGD_PTR(pptr_of_cap(vspace_cap));
    pude_t *pud = PUD_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(pud_cap));
    vptr_t vptr = cap_page_upper_directory_cap_get_capPUDMappedAddress(pud_cap);

    assert(cap_page_upper_directory_cap_get_capPUDIsMapped(pud_cap));

    *(pgd + GET_PGD_INDEX(vptr)) = pgde_new(
                                       pptr_to_paddr(pud),
                                       0b11                        /* reserved */
                                   );
}
474
475static BOOT_CODE cap_t
476create_it_pud_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
477{
478    cap_t cap;
479    cap = cap_page_upper_directory_cap_new(
480              asid,               /* capPUDMappedASID */
481              pptr,               /* capPUDBasePtr */
482              1,                  /* capPUDIsMapped */
483              vptr                /* capPUDMappedAddress */
484          );
485    map_it_pud_cap(vspace_cap, cap);
486    return cap;
487}
488
/* Build the initial thread's complete address space: allocate and zero the
 * PGD, then allocate one PUD/PD/PT per covered region of the user image,
 * providing a cap for each in the root CNode. Returns the vspace root cap,
 * or a null cap if any allocation or cap slot provision fails. */
BOOT_CODE cap_t
create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t      vspace_cap;
    vptr_t     vptr;
    pptr_t     pptr;
    seL4_SlotPos slot_pos_before;
    seL4_SlotPos slot_pos_after;

    /* create the PGD */
    pptr = alloc_region(seL4_PGDBits);
    if (!pptr) {
        return cap_null_cap_new();
    }
    memzero(PGD_PTR(pptr), BIT(seL4_PGDBits));
    vspace_cap = cap_page_global_directory_cap_new(
                     IT_ASID,        /* capPGDMappedASID */
                     pptr,           /* capPGDBasePtr   */
                     1               /* capPGDIsMapped   */
                 );
    /* record the slot range so boot info can report the paging caps */
    slot_pos_before = ndks_boot.slot_pos_cur;
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapInitThreadVSpace), vspace_cap);

    /* Create any PUDs needed for the user land image */
    for (vptr = ROUND_DOWN(it_v_reg.start, PGD_INDEX_OFFSET);
            vptr < it_v_reg.end;
            vptr += BIT(PGD_INDEX_OFFSET)) {
        pptr = alloc_region(seL4_PUDBits);
        if (!pptr) {
            return cap_null_cap_new();
        }
        memzero(PUD_PTR(pptr), BIT(seL4_PUDBits));
        if (!provide_cap(root_cnode_cap, create_it_pud_cap(vspace_cap, pptr, vptr, IT_ASID))) {
            return cap_null_cap_new();
        }
    }

    /* Create any PDs needed for the user land image */
    for (vptr = ROUND_DOWN(it_v_reg.start, PUD_INDEX_OFFSET);
            vptr < it_v_reg.end;
            vptr += BIT(PUD_INDEX_OFFSET)) {
        pptr = alloc_region(seL4_PageDirBits);
        if (!pptr) {
            return cap_null_cap_new();
        }
        memzero(PD_PTR(pptr), BIT(seL4_PageDirBits));
        if (!provide_cap(root_cnode_cap, create_it_pd_cap(vspace_cap, pptr, vptr, IT_ASID))) {
            return cap_null_cap_new();
        }
    }

    /* Create any PTs needed for the user land image */
    for (vptr = ROUND_DOWN(it_v_reg.start, PD_INDEX_OFFSET);
            vptr < it_v_reg.end;
            vptr += BIT(PD_INDEX_OFFSET)) {
        pptr = alloc_region(seL4_PageTableBits);
        if (!pptr) {
            return cap_null_cap_new();
        }
        memzero(PT_PTR(pptr), BIT(seL4_PageTableBits));
        if (!provide_cap(root_cnode_cap, create_it_pt_cap(vspace_cap, pptr, vptr, IT_ASID))) {
            return cap_null_cap_new();
        }
    }

    /* publish the range of slots holding the paging-structure caps */
    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->userImagePaging = (seL4_SlotRegion) {
        slot_pos_before, slot_pos_after
    };
    return vspace_cap;
}
560
561BOOT_CODE cap_t
562create_unmapped_it_frame_cap(pptr_t pptr, bool_t use_large)
563{
564    return create_it_frame_cap(pptr, 0, asidInvalid, use_large);
565}
566
567BOOT_CODE cap_t
568create_mapped_it_frame_cap(cap_t pd_cap, pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large, bool_t executable)
569{
570    cap_t cap = create_it_frame_cap(pptr, vptr, asid, use_large);
571    map_it_frame_cap(pd_cap, cap, executable);
572    return cap;
573}
574
/* Switch the hardware over to the kernel's own translation tables and
 * flush any stale state left behind by the bootloader. The ordering of
 * the operations below matters. */
BOOT_CODE void
activate_kernel_vspace(void)
{
    cleanInvalidateL1Caches();
    setCurrentKernelVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalKernelPGD)));

    /* Point the user root at the (empty) global user PGD so stale
     * elf-loader address translations do not fill up the TLB. */
    setCurrentUserVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalUserPGD)));

    invalidateLocalTLB();
    /* pin the translation for the kernel window so it is never evicted */
    lockTLBEntry(kernelBase);
}
587
588BOOT_CODE void
589write_it_asid_pool(cap_t it_ap_cap, cap_t it_vspace_cap)
590{
591    asid_pool_t* ap = ASID_POOL_PTR(pptr_of_cap(it_ap_cap));
592    ap->array[IT_ASID] = (void *)(pptr_of_cap(it_vspace_cap));
593    armKSASIDTable[IT_ASID >> asidLowBits] = ap;
594}
595
596/* ==================== BOOT CODE FINISHES HERE ==================== */
597
598static findVSpaceForASID_ret_t
599findVSpaceForASID(asid_t asid)
600{
601    findVSpaceForASID_ret_t ret;
602    asid_pool_t *poolPtr;
603    vspace_root_t *vspace_root;
604
605    poolPtr = armKSASIDTable[asid >> asidLowBits];
606    if (!poolPtr) {
607        current_lookup_fault = lookup_fault_invalid_root_new();
608
609        ret.vspace_root = NULL;
610        ret.status = EXCEPTION_LOOKUP_FAULT;
611        return ret;
612    }
613
614    vspace_root = poolPtr->array[asid & MASK(asidLowBits)];
615    if (!vspace_root) {
616        current_lookup_fault = lookup_fault_invalid_root_new();
617
618        ret.vspace_root = NULL;
619        ret.status = EXCEPTION_LOOKUP_FAULT;
620        return ret;
621    }
622
623    ret.vspace_root = vspace_root;
624    ret.status = EXCEPTION_NONE;
625    return ret;
626}
627
628word_t * PURE
629lookupIPCBuffer(bool_t isReceiver, tcb_t *thread)
630{
631    word_t w_bufferPtr;
632    cap_t bufferCap;
633    vm_rights_t vm_rights;
634
635    w_bufferPtr = thread->tcbIPCBuffer;
636    bufferCap = TCB_PTR_CTE_PTR(thread, tcbBuffer)->cap;
637
638    if (unlikely(cap_get_capType(bufferCap) != cap_frame_cap)) {
639        return NULL;
640    }
641    if (unlikely(cap_frame_cap_get_capFIsDevice(bufferCap))) {
642        return NULL;
643    }
644
645    vm_rights = cap_frame_cap_get_capFVMRights(bufferCap);
646    if (likely(vm_rights == VMReadWrite ||
647               (!isReceiver && vm_rights == VMReadOnly))) {
648        word_t basePtr;
649        unsigned int pageBits;
650
651        basePtr = cap_frame_cap_get_capFBasePtr(bufferCap);
652        pageBits = pageBitsForSize(cap_frame_cap_get_capFSize(bufferCap));
653        return (word_t *)(basePtr + (w_bufferPtr & MASK(pageBits)));
654    } else {
655        return NULL;
656    }
657}
658
659exception_t
660checkValidIPCBuffer(vptr_t vptr, cap_t cap)
661{
662    if (cap_get_capType(cap) != cap_frame_cap) {
663        userError("IPC Buffer is an invalid cap.");
664        current_syscall_error.type = seL4_IllegalOperation;
665        return EXCEPTION_SYSCALL_ERROR;
666    }
667
668    if (unlikely(cap_frame_cap_get_capFIsDevice(cap))) {
669        userError("Specifying a device frame as an IPC buffer is not permitted.");
670        current_syscall_error.type = seL4_IllegalOperation;
671        return EXCEPTION_SYSCALL_ERROR;
672    }
673
674    if (!IS_ALIGNED(vptr, seL4_IPCBufferSizeBits)) {
675        userError("IPC Buffer vaddr 0x%x is not aligned.", (int)vptr);
676        current_syscall_error.type = seL4_AlignmentError;
677        return EXCEPTION_SYSCALL_ERROR;
678    }
679
680    return EXCEPTION_NONE;
681}
682
683static lookupPGDSlot_ret_t
684lookupPGDSlot(vspace_root_t *vspace, vptr_t vptr)
685{
686    lookupPGDSlot_ret_t ret;
687
688    pgde_t *pgd = PGDE_PTR(vspace);
689    word_t pgdIndex = GET_PGD_INDEX(vptr);
690    ret.status = EXCEPTION_NONE;
691    ret.pgdSlot = pgd + pgdIndex;
692    return ret;
693}
694
695static lookupPUDSlot_ret_t lookupPUDSlot(vspace_root_t *vspace, vptr_t vptr)
696{
697    lookupPGDSlot_ret_t pgdSlot;
698    lookupPUDSlot_ret_t ret;
699
700    pgdSlot = lookupPGDSlot(vspace, vptr);
701
702    if (!pgde_ptr_get_present(pgdSlot.pgdSlot)) {
703        current_lookup_fault = lookup_fault_missing_capability_new(PGD_INDEX_OFFSET);
704
705        ret.pudSlot = NULL;
706        ret.status = EXCEPTION_LOOKUP_FAULT;
707        return ret;
708    } else {
709        pude_t *pud;
710        pude_t *pudSlot;
711        word_t pudIndex = GET_PUD_INDEX(vptr);
712        pud = paddr_to_pptr(pgde_ptr_get_pud_base_address(pgdSlot.pgdSlot));
713        pudSlot = pud + pudIndex;
714
715        ret.status = EXCEPTION_NONE;
716        ret.pudSlot = pudSlot;
717        return ret;
718    }
719}
720
721static lookupPDSlot_ret_t
722lookupPDSlot(vspace_root_t *vspace, vptr_t vptr)
723{
724    lookupPUDSlot_ret_t pudSlot;
725    lookupPDSlot_ret_t ret;
726
727    pudSlot = lookupPUDSlot(vspace, vptr);
728    if (pudSlot.status != EXCEPTION_NONE) {
729        ret.pdSlot = NULL;
730        ret.status = pudSlot.status;
731        return ret;
732    }
733    if (!pude_pude_pd_ptr_get_present(pudSlot.pudSlot)) {
734        current_lookup_fault = lookup_fault_missing_capability_new(PUD_INDEX_OFFSET);
735
736        ret.pdSlot = NULL;
737        ret.status = EXCEPTION_LOOKUP_FAULT;
738        return ret;
739    } else {
740        pde_t *pd;
741        pde_t *pdSlot;
742        word_t pdIndex = GET_PD_INDEX(vptr);
743        pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pudSlot.pudSlot));
744        pdSlot = pd + pdIndex;
745
746        ret.status = EXCEPTION_NONE;
747        ret.pdSlot = pdSlot;
748        return ret;
749    }
750}
751
752static lookupPTSlot_ret_t
753lookupPTSlot(vspace_root_t *vspace, vptr_t vptr)
754{
755    lookupPTSlot_ret_t ret;
756    lookupPDSlot_ret_t pdSlot;
757
758    pdSlot = lookupPDSlot(vspace, vptr);
759    if (pdSlot.status != EXCEPTION_NONE) {
760        ret.ptSlot = NULL;
761        ret.status = pdSlot.status;
762        return ret;
763    }
764    if (!pde_pde_small_ptr_get_present(pdSlot.pdSlot)) {
765        current_lookup_fault = lookup_fault_missing_capability_new(PD_INDEX_OFFSET);
766
767        ret.ptSlot = NULL;
768        ret.status = EXCEPTION_LOOKUP_FAULT;
769        return ret;
770    } else {
771        pte_t* pt;
772        pte_t* ptSlot;
773        word_t ptIndex = GET_PT_INDEX(vptr);
774        pt = paddr_to_pptr(pde_pde_small_ptr_get_pt_base_address(pdSlot.pdSlot));
775        ptSlot = pt + ptIndex;
776
777        ret.ptSlot = ptSlot;
778        ret.status = EXCEPTION_NONE;
779        return ret;
780    }
781}
782
/* Resolve 'vptr' to the frame mapped there, if any, reporting its physical
 * base address and page size (1G huge page, 2M large page or 4K small
 * page). frameBase/frameSize are only meaningful when valid is true. */
static lookupFrame_ret_t
lookupFrame(vspace_root_t *vspace, vptr_t vptr)
{
    lookupPUDSlot_ret_t pudSlot;
    lookupFrame_ret_t ret;

    pudSlot = lookupPUDSlot(vspace, vptr);
    if (pudSlot.status != EXCEPTION_NONE) {
        ret.valid = false;
        return ret;
    }

    switch (pude_ptr_get_pude_type(pudSlot.pudSlot)) {
    case pude_pude_1g:
        /* huge page mapped directly at the PUD level */
        ret.frameBase = pude_pude_1g_ptr_get_page_base_address(pudSlot.pudSlot);
        ret.frameSize = ARMHugePage;
        ret.valid = true;
        return ret;

    case pude_pude_pd: {
        /* descend into the PD: either a large page or a PT with a
         * small-page entry */
        pde_t *pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pudSlot.pudSlot));
        pde_t *pdSlot = pd + GET_PD_INDEX(vptr);

        if (pde_ptr_get_pde_type(pdSlot) == pde_pde_large) {
            ret.frameBase = pde_pde_large_ptr_get_page_base_address(pdSlot);
            ret.frameSize = ARMLargePage;
            ret.valid = true;
            return ret;
        }

        if (pde_ptr_get_pde_type(pdSlot) == pde_pde_small) {
            pte_t *pt = paddr_to_pptr(pde_pde_small_ptr_get_pt_base_address(pdSlot));
            pte_t *ptSlot = pt + GET_PT_INDEX(vptr);

            if (pte_ptr_get_present(ptSlot)) {
                ret.frameBase = pte_ptr_get_page_base_address(ptSlot);
                ret.frameSize = ARMSmallPage;
                ret.valid = true;
                return ret;
            }
        }
        /* falls out of the switch: nothing mapped below this PD slot */
    }
    }

    /* no frame mapped at this address */
    ret.valid = false;
    return ret;
}
830
831/* Note that if the hypervisor support is enabled, the user page tables use
832 * stage-2 translation format. Otherwise, they follow the stage-1 translation format.
833 */
/* Construct a leaf (4K page) entry for a user mapping. Uses the stage-2
 * attribute format under hypervisor support, stage-1 otherwise (see the
 * comment above this function). */
static pte_t
makeUser3rdLevel(paddr_t paddr, vm_rights_t vm_rights, vm_attributes_t attributes)
{
    bool_t nonexecutable = vm_attributes_get_armExecuteNever(attributes);

    if (vm_attributes_get_armPageCacheable(attributes)) {
        /* cacheable: normal write-back memory type */
        return pte_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   SMP_TERNARY(SMP_SHARE, 0),          /* Inner-shareable if SMP enabled, otherwise unshared */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_NORMAL,
#else
                   NORMAL,
#endif
                   0b11                        /* reserved */
               );
    } else {
        /* non-cacheable: strongly-ordered device memory type */
        return pte_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   0,                          /* Ignored - Outer shareable */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_DEVICE_nGnRnE,
#else
                   DEVICE_nGnRnE,
#endif

                   0b11                        /* reserved */
               );
    }
}
880
881static pde_t
882makeUser2ndLevel(paddr_t paddr, vm_rights_t vm_rights, vm_attributes_t attributes)
883{
884    bool_t nonexecutable = vm_attributes_get_armExecuteNever(attributes);
885
886    if (vm_attributes_get_armPageCacheable(attributes)) {
887        return pde_pde_large_new(
888                   nonexecutable,              /* unprivileged execute never */
889                   paddr,
890#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
891                   0,
892#else
893                   1,                          /* not global */
894#endif
895                   1,                          /* access flag */
896                   SMP_TERNARY(SMP_SHARE, 0),          /* Inner-shareable if SMP enabled, otherwise unshared */
897                   APFromVMRights(vm_rights),
898#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
899                   S2_NORMAL
900#else
901                   NORMAL
902#endif
903               );
904    } else {
905        return pde_pde_large_new(
906                   nonexecutable,              /* unprivileged execute never */
907                   paddr,
908#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
909                   0,
910#else
911                   1,                          /* not global */
912#endif
913                   1,                          /* access flag */
914                   0,                          /* Ignored - Outter shareable */
915                   APFromVMRights(vm_rights),
916#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
917                   S2_DEVICE_nGnRnE
918#else
919                   DEVICE_nGnRnE
920#endif
921               );
922    }
923}
924
925static pude_t
926makeUser1stLevel(paddr_t paddr, vm_rights_t vm_rights, vm_attributes_t attributes)
927{
928    bool_t nonexecutable = vm_attributes_get_armExecuteNever(attributes);
929
930    if (vm_attributes_get_armPageCacheable(attributes)) {
931        return pude_pude_1g_new(
932                   nonexecutable,              /* unprivileged execute never */
933                   paddr,
934#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
935                   0,
936#else
937                   1,                          /* not global */
938#endif
939                   1,                          /* access flag */
940                   SMP_TERNARY(SMP_SHARE, 0),          /* Inner-shareable if SMP enabled, otherwise unshared */
941                   APFromVMRights(vm_rights),
942#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
943                   S2_NORMAL
944#else
945                   NORMAL
946#endif
947               );
948    } else {
949        return pude_pude_1g_new(
950                   nonexecutable,              /* unprivileged execute never */
951                   paddr,
952#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
953                   0,
954#else
955                   1,                          /* not global */
956#endif
957                   1,                          /* access flag */
958                   0,                          /* Ignored - Outter shareable */
959                   APFromVMRights(vm_rights),
960#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
961                   S2_DEVICE_nGnRnE
962#else
963                   DEVICE_nGnRnE
964#endif
965               );
966    }
967}
968
969#define PAR_EL1_MASK 0x0000fffffffff000ul
970#define GET_PAR_ADDR(x) ((x) & PAR_EL1_MASK)
971exception_t
972handleVMFault(tcb_t *thread, vm_fault_type_t vm_faultType)
973{
974    switch (vm_faultType) {
975    case ARMDataAbort: {
976        word_t addr, fault;
977
978        addr = getFAR();
979        fault = getDFSR();
980
981#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
982        /* use the IPA */
983        if (armHSVCPUActive) {
984            addr = GET_PAR_ADDR(ats1e1r(addr)) | (addr & MASK(PAGE_BITS));
985        }
986#endif
987        current_fault = seL4_Fault_VMFault_new(addr, fault, false);
988        return EXCEPTION_FAULT;
989    }
990
991    case ARMPrefetchAbort: {
992        word_t pc, fault;
993
994        pc = getRestartPC(thread);
995        fault = getIFSR();
996
997#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
998        if (armHSVCPUActive) {
999            pc = GET_PAR_ADDR(ats1e1r(pc)) | (pc & MASK(PAGE_BITS));
1000        }
1001#endif
1002        current_fault = seL4_Fault_VMFault_new(pc, fault, true);
1003        return EXCEPTION_FAULT;
1004    }
1005
1006    default:
1007        fail("Invalid VM fault type");
1008    }
1009}
1010
1011bool_t CONST
1012isVTableRoot(cap_t cap)
1013{
1014    return cap_get_capType(cap) == cap_page_global_directory_cap;
1015}
1016
1017bool_t CONST
1018isValidNativeRoot(cap_t cap)
1019{
1020    return isVTableRoot(cap) &&
1021           cap_page_global_directory_cap_get_capPGDIsMapped(cap);
1022}
1023
1024bool_t CONST
1025isValidVTableRoot(cap_t cap)
1026{
1027    return isValidNativeRoot(cap);
1028}
1029
1030void
1031setVMRoot(tcb_t *tcb)
1032{
1033    cap_t threadRoot;
1034    asid_t asid;
1035    pgde_t *pgd;
1036    findVSpaceForASID_ret_t find_ret;
1037
1038    threadRoot = TCB_PTR_CTE_PTR(tcb, tcbVTable)->cap;
1039
1040    if (!isValidNativeRoot(threadRoot)) {
1041        setCurrentUserVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalUserPGD)));
1042        return;
1043    }
1044
1045    pgd = PGD_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(threadRoot));
1046    asid = cap_page_global_directory_cap_get_capPGDMappedASID(threadRoot);
1047    find_ret = findVSpaceForASID(asid);
1048    if (unlikely(find_ret.status != EXCEPTION_NONE || find_ret.vspace_root != pgd)) {
1049        setCurrentUserVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalUserPGD)));
1050        return;
1051    }
1052
1053    armv_contextSwitch(pgd, asid);
1054    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
1055        vcpu_switch(tcb->tcbArch.tcbVCPU);
1056    }
1057}
1058
1059static bool_t
1060setVMRootForFlush(vspace_root_t *vspace, asid_t asid)
1061{
1062    cap_t threadRoot;
1063
1064    threadRoot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbVTable)->cap;
1065
1066    if (cap_get_capType(threadRoot) == cap_page_global_directory_cap &&
1067            cap_page_global_directory_cap_get_capPGDIsMapped(threadRoot) &&
1068            PGD_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(threadRoot)) == vspace) {
1069        return false;
1070    }
1071
1072    armv_contextSwitch(vspace, asid);
1073    return true;
1074}
1075
1076pgde_t *pageUpperDirectoryMapped(asid_t asid, vptr_t vaddr, pude_t* pud)
1077{
1078    findVSpaceForASID_ret_t find_ret;
1079    lookupPGDSlot_ret_t lu_ret;
1080
1081    find_ret = findVSpaceForASID(asid);
1082    if (find_ret.status != EXCEPTION_NONE) {
1083        return NULL;
1084    }
1085
1086    lu_ret = lookupPGDSlot(find_ret.vspace_root, vaddr);
1087    if (pgde_ptr_get_present(lu_ret.pgdSlot) &&
1088            (pgde_ptr_get_pud_base_address(lu_ret.pgdSlot) == pptr_to_paddr(pud))) {
1089        return lu_ret.pgdSlot;
1090    }
1091
1092    return NULL;
1093}
1094
1095pude_t *pageDirectoryMapped(asid_t asid, vptr_t vaddr, pde_t* pd)
1096{
1097    findVSpaceForASID_ret_t find_ret;
1098    lookupPUDSlot_ret_t lu_ret;
1099
1100    find_ret = findVSpaceForASID(asid);
1101    if (find_ret.status != EXCEPTION_NONE) {
1102        return NULL;
1103    }
1104
1105    lu_ret = lookupPUDSlot(find_ret.vspace_root, vaddr);
1106    if (lu_ret.status != EXCEPTION_NONE) {
1107        return NULL;
1108    }
1109
1110    if (pude_pude_pd_ptr_get_present(lu_ret.pudSlot) &&
1111            (pude_pude_pd_ptr_get_pd_base_address(lu_ret.pudSlot) == pptr_to_paddr(pd))) {
1112        return lu_ret.pudSlot;
1113    }
1114
1115    return NULL;
1116}
1117
1118#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1119
1120static inline pgde_t
1121makePGDEWithHWASID(hw_asid_t hw_asid)
1122{
1123    /* We need to shift the hw_asid by PGDE_VMID_SHIFT bits in
1124     * order to avoid assertion failures in pgde_new which
1125     * expects the least 12 bits are 0s.
1126     */
1127    return pgde_new(((hw_asid & VMID_MASK) << PGDE_VMID_SHIFT), 0);
1128}
1129
1130static inline hw_asid_t
1131getHWASIDFromPGDE(pgde_t pgde)
1132{
1133    return (hw_asid_t)((pgde_get_pud_base_address(pgde) >> PGDE_VMID_SHIFT) & VMID_MASK);
1134}
1135
1136static void
1137invalidateASID(asid_t asid)
1138{
1139    asid_pool_t *asidPool;
1140    pgde_t *pgd;
1141
1142    asidPool = armKSASIDTable[asid >> asidLowBits];
1143    assert(asidPool);
1144
1145    pgd = asidPool->array[asid & MASK(asidLowBits)];
1146    assert(pgd);
1147    /* 0 is reserved as the invalid VMID */
1148    pgd[PGD_VMID_SLOT] = makePGDEWithHWASID(0);
1149}
1150
1151static pgde_t PURE
1152loadHWASID(asid_t asid)
1153{
1154    asid_pool_t *asidPool;
1155    pgde_t *pgd;
1156
1157    asidPool = armKSASIDTable[asid >> asidLowBits];
1158    assert(asidPool);
1159
1160    pgd = asidPool->array[asid & MASK(asidLowBits)];
1161    assert(pgd);
1162
1163    return pgd[PGD_VMID_SLOT];
1164}
1165
1166static void
1167storeHWASID(asid_t asid, hw_asid_t hw_asid)
1168{
1169    asid_pool_t *asidPool;
1170    pgde_t *pgd;
1171
1172    asidPool = armKSASIDTable[asid >> asidLowBits];
1173    assert(asidPool);
1174
1175    pgd = asidPool->array[asid & MASK(asidLowBits)];
1176    assert(pgd);
1177
1178    /* Store HW VMID in the last entry
1179       Masquerade as an invalid PDGE */
1180    pgd[PGD_VMID_SLOT] = makePGDEWithHWASID(hw_asid);
1181
1182    armKSHWASIDTable[hw_asid] = asid;
1183}
1184
/* Choose a hardware ASID (VMID) to hand out, evicting one if none is free.
 *
 * The scan starts at armKSNextASID and covers every representable
 * hw_asid_t value; the addition below truncates to hw_asid_t width, so
 * the search wraps around the table (hw_asid_t appears to be 8-bit here,
 * matching VMID_MASK == 0xff — confirm against its typedef).
 * If every entry is taken, the ASID currently holding armKSNextASID is
 * evicted round-robin style.
 */
static hw_asid_t
findFreeHWASID(void)
{
    word_t hw_asid_offset;
    hw_asid_t hw_asid;

    /* Find a free hardware ASID */
    for (hw_asid_offset = 0;
            hw_asid_offset <= (word_t)((hw_asid_t) - 1);  /* (hw_asid_t)-1 == max hw ASID */
            hw_asid_offset++) {
        /* Truncating addition: wraps around the table. */
        hw_asid = armKSNextASID + ((hw_asid_t)hw_asid_offset);
        if (armKSHWASIDTable[hw_asid] == asidInvalid) {
            return hw_asid;
        }
    }

    hw_asid = armKSNextASID;

    /* If we've scanned the table without finding a free ASID:
     * revoke the VMID from the software ASID that owns it... */
    invalidateASID(armKSHWASIDTable[hw_asid]);

    /* ...flush any TLB entries tagged with it... */
    invalidateTranslationASID(hw_asid);
    armKSHWASIDTable[hw_asid] = asidInvalid;

    /* ...and advance the round-robin pointer (wraps with hw_asid_t). */
    armKSNextASID++;

    return hw_asid;
}
1215
1216static inline hw_asid_t
1217getHWASIDByASID(asid_t asid)
1218{
1219    return getHWASIDFromPGDE(loadHWASID(asid));
1220}
1221
1222hw_asid_t
1223getHWASID(asid_t asid)
1224{
1225    if (getHWASIDByASID(asid)) {
1226        return getHWASIDByASID(asid);
1227    } else {
1228        hw_asid_t new_hw_asid;
1229
1230        new_hw_asid = findFreeHWASID();
1231        storeHWASID(asid, new_hw_asid);
1232        return new_hw_asid;
1233    }
1234}
1235
1236static void
1237invalidateASIDEntry(asid_t asid)
1238{
1239    hw_asid_t hw_asid = getHWASIDByASID(asid);
1240    if (hw_asid) {
1241        armKSHWASIDTable[hw_asid] =
1242            asidInvalid;
1243    }
1244    invalidateASID(asid);
1245}
1246
1247#endif
1248
1249static inline void
1250invalidateTLBByASID(asid_t asid)
1251{
1252#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1253    hw_asid_t hw_asid = getHWASIDByASID(asid);
1254    if (!hw_asid) {
1255        return;
1256    }
1257    invalidateTranslationASID(hw_asid);
1258#else
1259    invalidateTranslationASID(asid);
1260#endif
1261}
1262
1263static inline void
1264invalidateTLBByASIDVA(asid_t asid, vptr_t vaddr)
1265{
1266#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1267    word_t hw_asid = getHWASIDByASID(asid);
1268    if (!hw_asid) {
1269        return;
1270    }
1271    invalidateTranslationSingle((hw_asid << 48) | vaddr >> seL4_PageBits);
1272#else
1273    invalidateTranslationSingle((asid << 48) | vaddr >> seL4_PageBits);
1274#endif
1275}
1276
1277pde_t *pageTableMapped(asid_t asid, vptr_t vaddr, pte_t* pt)
1278{
1279    findVSpaceForASID_ret_t find_ret;
1280    lookupPDSlot_ret_t lu_ret;
1281
1282    find_ret = findVSpaceForASID(asid);
1283    if (find_ret.status != EXCEPTION_NONE) {
1284        return NULL;
1285    }
1286
1287    lu_ret = lookupPDSlot(find_ret.vspace_root, vaddr);
1288    if (lu_ret.status != EXCEPTION_NONE) {
1289        return NULL;
1290    }
1291
1292    if (pde_pde_small_ptr_get_present(lu_ret.pdSlot) &&
1293            (pde_pde_small_ptr_get_pt_base_address(lu_ret.pdSlot) == pptr_to_paddr(pt))) {
1294        return lu_ret.pdSlot;
1295    }
1296
1297    return NULL;
1298}
1299
1300void unmapPageUpperDirectory(asid_t asid, vptr_t vaddr, pude_t* pud)
1301{
1302    pgde_t *pgdSlot;
1303
1304    pgdSlot = pageUpperDirectoryMapped(asid, vaddr, pud);
1305    if (likely(pgdSlot != NULL)) {
1306        *pgdSlot = pgde_invalid_new();
1307
1308        cleanByVA_PoU((vptr_t)pgdSlot, pptr_to_paddr(pgdSlot));
1309        invalidateTLBByASID(asid);
1310    }
1311}
1312
1313void unmapPageDirectory(asid_t asid, vptr_t vaddr, pde_t* pd)
1314{
1315    pude_t *pudSlot;
1316
1317    pudSlot = pageDirectoryMapped(asid, vaddr, pd);
1318    if (likely(pudSlot != NULL)) {
1319        *pudSlot = pude_invalid_new();
1320
1321        cleanByVA_PoU((vptr_t)pudSlot, pptr_to_paddr(pudSlot));
1322        invalidateTLBByASID(asid);
1323    }
1324}
1325
1326void unmapPageTable(asid_t asid, vptr_t vaddr, pte_t *pt)
1327{
1328    pde_t *pdSlot;
1329
1330    pdSlot = pageTableMapped(asid, vaddr, pt);
1331    if (likely(pdSlot != NULL)) {
1332        *pdSlot = pde_invalid_new();
1333
1334        cleanByVA_PoU((vptr_t)pdSlot, pptr_to_paddr(pdSlot));
1335        invalidateTLBByASID(asid);
1336    }
1337}
1338
/* Remove the translation entry for a frame of the given size mapped at
 * vptr (backed by kernel pointer pptr) in the vspace of asid.
 *
 * The entry is only cleared if it is present AND still points at this
 * frame's physical address; in either case the by-VA TLB invalidation at
 * the end is performed unconditionally (as long as the ASID resolves to a
 * vspace and the intermediate lookup succeeds).
 */
void unmapPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, pptr_t pptr)
{
    paddr_t addr;
    findVSpaceForASID_ret_t find_ret;

    addr = pptr_to_paddr((void *)pptr);
    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        return;
    }

    switch (page_size) {
    /* 4KiB frame: entry lives in a level-3 page table. */
    case ARMSmallPage: {
        lookupPTSlot_ret_t lu_ret;

        lu_ret = lookupPTSlot(find_ret.vspace_root, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }

        if (pte_ptr_get_present(lu_ret.ptSlot) &&
                pte_ptr_get_page_base_address(lu_ret.ptSlot) == addr) {
            *(lu_ret.ptSlot) = pte_invalid_new();

            /* Make the cleared entry visible to the table walker. */
            cleanByVA_PoU((vptr_t)lu_ret.ptSlot, pptr_to_paddr(lu_ret.ptSlot));
        }
        break;
    }

    /* 2MiB frame: entry lives in a page directory. */
    case ARMLargePage: {
        lookupPDSlot_ret_t lu_ret;

        lu_ret = lookupPDSlot(find_ret.vspace_root, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }

        if (pde_pde_large_ptr_get_present(lu_ret.pdSlot) &&
                pde_pde_large_ptr_get_page_base_address(lu_ret.pdSlot) == addr) {
            *(lu_ret.pdSlot) = pde_invalid_new();

            cleanByVA_PoU((vptr_t)lu_ret.pdSlot, pptr_to_paddr(lu_ret.pdSlot));
        }
        break;
    }

    /* 1GiB frame: entry lives in a page upper directory. */
    case ARMHugePage: {
        lookupPUDSlot_ret_t lu_ret;

        lu_ret = lookupPUDSlot(find_ret.vspace_root, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }

        if (pude_pude_1g_ptr_get_present(lu_ret.pudSlot) &&
                pude_pude_1g_ptr_get_page_base_address(lu_ret.pudSlot) == addr) {
            *(lu_ret.pudSlot) = pude_invalid_new();

            cleanByVA_PoU((vptr_t)lu_ret.pudSlot, pptr_to_paddr(lu_ret.pudSlot));
        }
        break;
    }

    default:
        fail("Invalid ARM page type");
    }

    /* ASID must fit in bits [63:48] of the TLBI argument. */
    assert(asid < BIT(16));
    invalidateTLBByASIDVA(asid, vptr);
}
1409
1410void
1411deleteASID(asid_t asid, vspace_root_t *vspace)
1412{
1413    asid_pool_t *poolPtr;
1414
1415    poolPtr = armKSASIDTable[asid >> asidLowBits];
1416
1417    if (poolPtr != NULL && poolPtr->array[asid & MASK(asidLowBits)] == vspace) {
1418#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1419        invalidateASIDEntry(asid);
1420#endif
1421        invalidateTLBByASID(asid);
1422        poolPtr->array[asid & MASK(asidLowBits)] = NULL;
1423        setVMRoot(NODE_STATE(ksCurThread));
1424    }
1425}
1426
1427void
1428deleteASIDPool(asid_t asid_base, asid_pool_t* pool)
1429{
1430    word_t offset;
1431
1432    assert((asid_base & MASK(asidLowBits)) == 0);
1433
1434    if (armKSASIDTable[asid_base >> asidLowBits] == pool) {
1435        for (offset = 0; offset < BIT(asidLowBits); offset++) {
1436            if (pool->array[offset]) {
1437#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1438                invalidateASIDEntry(asid_base + offset);
1439#endif
1440                invalidateTLBByASID(asid_base + offset);
1441            }
1442        }
1443        armKSASIDTable[asid_base >> asidLowBits] = NULL;
1444        setVMRoot(NODE_STATE(ksCurThread));
1445    }
1446}
1447
1448static void
1449doFlush(int invLabel, vptr_t start, vptr_t end, paddr_t pstart)
1450{
1451    switch (invLabel) {
1452    case ARMPageGlobalDirectoryClean_Data:
1453    case ARMPageClean_Data:
1454        cleanCacheRange_RAM(start, end, pstart);
1455        break;
1456
1457    case ARMPageGlobalDirectoryInvalidate_Data:
1458    case ARMPageInvalidate_Data:
1459        invalidateCacheRange_RAM(start, end, pstart);
1460        break;
1461
1462    case ARMPageGlobalDirectoryCleanInvalidate_Data:
1463    case ARMPageCleanInvalidate_Data:
1464        cleanInvalidateCacheRange_RAM(start, end, pstart);
1465        break;
1466
1467    case ARMPageGlobalDirectoryUnify_Instruction:
1468    case ARMPageUnify_Instruction:
1469        /* First clean data lines to point of unification... */
1470        cleanCacheRange_PoU(start, end, pstart);
1471        /* Ensure it's been written. */
1472        dsb();
1473        /* ...then invalidate the corresponding instruction lines
1474           to point of unification... */
1475        invalidateCacheRange_I(start, end, pstart);
1476        /* ... and ensure new instructions come from fresh cache lines. */
1477        isb();
1478        break;
1479    default:
1480        fail("Invalid operation, shouldn't get here.\n");
1481    }
1482}
1483
1484/* ================= INVOCATION HANDLING STARTS HERE ================== */
1485
1486static exception_t
1487performPageGlobalDirectoryFlush(int invLabel, pgde_t *pgd, asid_t asid,
1488                                vptr_t start, vptr_t end, paddr_t pstart)
1489{
1490
1491    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
1492        word_t size = end - start;
1493        start = (vptr_t)paddr_to_pptr(pstart);
1494        end = start + size;
1495        if (start < end) {
1496            doFlush(invLabel, start, end, pstart);
1497        }
1498    } else {
1499        bool_t root_switched;
1500
1501        /* Flush if given a non zero range */
1502        if (start < end) {
1503            root_switched = setVMRootForFlush(pgd, asid);
1504            doFlush(invLabel, start, end, pstart);
1505            if (root_switched) {
1506                setVMRoot(NODE_STATE(ksCurThread));
1507            }
1508        }
1509    }
1510    return EXCEPTION_NONE;
1511}
1512
1513static exception_t
1514performUpperPageDirectoryInvocationMap(cap_t cap, cte_t *ctSlot, pgde_t pgde, pgde_t *pgdSlot)
1515{
1516    ctSlot->cap = cap;
1517    *pgdSlot = pgde;
1518    cleanByVA_PoU((vptr_t)pgdSlot, pptr_to_paddr(pgdSlot));
1519
1520    return EXCEPTION_NONE;
1521}
1522
1523static exception_t
1524performUpperPageDirectoryInvocationUnmap(cap_t cap, cte_t *ctSlot)
1525{
1526    if (cap_page_upper_directory_cap_get_capPUDIsMapped(cap)) {
1527        pude_t *pud = PUD_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(cap));
1528        unmapPageUpperDirectory(cap_page_upper_directory_cap_get_capPUDMappedASID(cap),
1529                                cap_page_upper_directory_cap_get_capPUDMappedAddress(cap), pud);
1530        clearMemory((void *)pud, cap_get_capSizeBits(cap));
1531    }
1532
1533    cap_page_upper_directory_cap_ptr_set_capPUDIsMapped(&(ctSlot->cap), 0);
1534    return EXCEPTION_NONE;
1535}
1536
1537static exception_t
1538performPageDirectoryInvocationMap(cap_t cap, cte_t *ctSlot, pude_t pude, pude_t *pudSlot)
1539{
1540    ctSlot->cap = cap;
1541    *pudSlot = pude;
1542    cleanByVA_PoU((vptr_t)pudSlot, pptr_to_paddr(pudSlot));
1543
1544    return EXCEPTION_NONE;
1545}
1546
1547static exception_t
1548performPageDirectoryInvocationUnmap(cap_t cap, cte_t *ctSlot)
1549{
1550    if (cap_page_directory_cap_get_capPDIsMapped(cap)) {
1551        pde_t *pd = PD_PTR(cap_page_directory_cap_get_capPDBasePtr(cap));
1552        unmapPageDirectory(cap_page_directory_cap_get_capPDMappedASID(cap),
1553                           cap_page_directory_cap_get_capPDMappedAddress(cap), pd);
1554        clearMemory((void *)pd, cap_get_capSizeBits(cap));
1555    }
1556
1557    cap_page_directory_cap_ptr_set_capPDIsMapped(&(ctSlot->cap), 0);
1558    return EXCEPTION_NONE;
1559}
1560
1561static exception_t
1562performPageTableInvocationMap(cap_t cap, cte_t *ctSlot, pde_t pde, pde_t *pdSlot)
1563{
1564    ctSlot->cap = cap;
1565    *pdSlot = pde;
1566    cleanByVA_PoU((vptr_t)pdSlot, pptr_to_paddr(pdSlot));
1567
1568    return EXCEPTION_NONE;
1569}
1570
1571static exception_t
1572performPageTableInvocationUnmap(cap_t cap, cte_t *ctSlot)
1573{
1574    if (cap_page_table_cap_get_capPTIsMapped(cap)) {
1575        pte_t *pt = PT_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
1576        unmapPageTable(cap_page_table_cap_get_capPTMappedASID(cap),
1577                       cap_page_table_cap_get_capPTMappedAddress(cap), pt);
1578        clearMemory((void *)pt, cap_get_capSizeBits(cap));
1579    }
1580
1581    cap_page_table_cap_ptr_set_capPTIsMapped(&(ctSlot->cap), 0);
1582    return EXCEPTION_NONE;
1583}
1584
1585static exception_t
1586performHugePageInvocationMap(asid_t asid, cap_t cap, cte_t *ctSlot,
1587                             pude_t pude, pude_t *pudSlot)
1588{
1589    bool_t tlbflush_required = pude_pude_1g_ptr_get_present(pudSlot);
1590
1591    ctSlot->cap = cap;
1592    *pudSlot = pude;
1593
1594    cleanByVA_PoU((vptr_t)pudSlot, pptr_to_paddr(pudSlot));
1595    if (unlikely(tlbflush_required)) {
1596        assert(asid < BIT(16));
1597        invalidateTLBByASIDVA(asid, cap_frame_cap_get_capFMappedAddress(cap));
1598    }
1599
1600    return EXCEPTION_NONE;
1601}
1602
1603static exception_t
1604performLargePageInvocationMap(asid_t asid, cap_t cap, cte_t *ctSlot,
1605                              pde_t pde, pde_t *pdSlot)
1606{
1607    bool_t tlbflush_required = pde_pde_large_ptr_get_present(pdSlot);
1608
1609    ctSlot->cap = cap;
1610    *pdSlot = pde;
1611
1612    cleanByVA_PoU((vptr_t)pdSlot, pptr_to_paddr(pdSlot));
1613    if (unlikely(tlbflush_required)) {
1614        assert(asid < BIT(16));
1615        invalidateTLBByASIDVA(asid, cap_frame_cap_get_capFMappedAddress(cap));
1616    }
1617
1618    return EXCEPTION_NONE;
1619}
1620
1621static exception_t
1622performSmallPageInvocationMap(asid_t asid, cap_t cap, cte_t *ctSlot,
1623                              pte_t pte, pte_t *ptSlot)
1624{
1625    bool_t tlbflush_required = pte_ptr_get_present(ptSlot);
1626
1627    ctSlot->cap = cap;
1628    *ptSlot = pte;
1629
1630    cleanByVA_PoU((vptr_t)ptSlot, pptr_to_paddr(ptSlot));
1631    if (unlikely(tlbflush_required)) {
1632        assert(asid < BIT(16));
1633        invalidateTLBByASIDVA(asid, cap_frame_cap_get_capFMappedAddress(cap));
1634    }
1635
1636    return EXCEPTION_NONE;
1637}
1638
1639static exception_t
1640performPageInvocationUnmap(cap_t cap, cte_t *ctSlot)
1641{
1642    if (cap_frame_cap_get_capFMappedASID(cap) != 0) {
1643        unmapPage(cap_frame_cap_get_capFSize(cap),
1644                  cap_frame_cap_get_capFMappedASID(cap),
1645                  cap_frame_cap_get_capFMappedAddress(cap),
1646                  cap_frame_cap_get_capFBasePtr(cap));
1647    }
1648
1649    cap_frame_cap_ptr_set_capFMappedASID(&ctSlot->cap, asidInvalid);
1650    cap_frame_cap_ptr_set_capFMappedAddress(&ctSlot->cap, 0);
1651    return EXCEPTION_NONE;
1652}
1653
1654static exception_t
1655performPageFlush(int invLabel, pgde_t *pgd, asid_t asid,
1656                 vptr_t start, vptr_t end, paddr_t pstart)
1657{
1658    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
1659        /* We flush the cache with kernel virtual addresses since
1660         * the user virtual addresses are not valid in EL2.
1661         * Switching VMRoot is not required.
1662         */
1663        word_t size = end - start;
1664        start = (vptr_t)paddr_to_pptr(pstart);
1665        end = start + size;
1666
1667        if (start < end) {
1668            doFlush(invLabel, start, end, pstart);
1669        }
1670    } else {
1671        bool_t root_switched;
1672
1673        if (start < end) {
1674            root_switched = setVMRootForFlush(pgd, asid);
1675            doFlush(invLabel, start, end, pstart);
1676            if (root_switched) {
1677                setVMRoot(NODE_STATE(ksCurThread));
1678            }
1679        }
1680    }
1681    return EXCEPTION_NONE;
1682}
1683
1684static exception_t
1685performPageGetAddress(pptr_t base_ptr)
1686{
1687    paddr_t base = pptr_to_paddr((void *)base_ptr);
1688
1689    setRegister(NODE_STATE(ksCurThread), msgRegisters[0], base);
1690    setRegister(NODE_STATE(ksCurThread), msgInfoRegister,
1691                wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, 1)));
1692
1693    return EXCEPTION_NONE;
1694}
1695
1696static exception_t
1697performASIDPoolInvocation(asid_t asid, asid_pool_t *poolPtr, cte_t *vspaceCapSlot)
1698{
1699    cap_page_global_directory_cap_ptr_set_capPGDMappedASID(&vspaceCapSlot->cap, asid);
1700    cap_page_global_directory_cap_ptr_set_capPGDIsMapped(&vspaceCapSlot->cap, 1);
1701    poolPtr->array[asid & MASK(asidLowBits)] =
1702        PGD_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(vspaceCapSlot->cap));
1703
1704    return EXCEPTION_NONE;
1705}
1706
1707static exception_t
1708performASIDControlInvocation(void *frame, cte_t *slot,
1709                             cte_t *parent, asid_t asid_base)
1710{
1711    cap_untyped_cap_ptr_set_capFreeIndex(&(parent->cap),
1712                                         MAX_FREE_INDEX(cap_untyped_cap_get_capBlockSize(parent->cap)));
1713
1714    memzero(frame, BIT(seL4_ASIDPoolBits));
1715
1716    cteInsert(
1717        cap_asid_pool_cap_new(
1718            asid_base,         /* capASIDBase  */
1719            WORD_REF(frame)    /* capASIDPool  */
1720        ), parent, slot);
1721
1722    assert((asid_base & MASK(asidLowBits)) == 0);
1723    armKSASIDTable[asid_base >> asidLowBits] = (asid_pool_t *)frame;
1724
1725    return EXCEPTION_NONE;
1726}
1727
/* Decode a user invocation on a page global directory cap.
 *
 * Only the cache-maintenance (flush) labels are accepted.  Validation is
 * performed in a fixed order (message length, range sanity, user-region
 * bound, cap validity, ASID lookup) so the error reported to the user is
 * deterministic; do not reorder these checks.
 */
static exception_t
decodeARMPageGlobalDirectoryInvocation(word_t invLabel, unsigned int length,
                                       cte_t *cte, cap_t cap, extra_caps_t extraCaps,
                                       word_t *buffer)
{
    vptr_t start, end;
    paddr_t pstart;
    asid_t asid;
    pgde_t *pgd;
    lookupFrame_ret_t resolve_ret;
    findVSpaceForASID_ret_t find_ret;

    switch (invLabel) {
    case ARMPageGlobalDirectoryClean_Data:
    case ARMPageGlobalDirectoryInvalidate_Data:
    case ARMPageGlobalDirectoryCleanInvalidate_Data:
    case ARMPageGlobalDirectoryUnify_Instruction:

        /* Two message words: the start and (exclusive) end of the range. */
        if (length < 2) {
            userError("PGD Flush: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        start = getSyscallArg(0, buffer);
        end =   getSyscallArg(1, buffer);

        /* Check sanity of arguments */
        if (end <= start) {
            userError("PGD Flush: Invalid range.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Don't let applications flush kernel regions. */
        if (end > USER_TOP) {
            userError("PGD Flush: Exceed the user addressable region.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* The invoked cap itself must be a mapped PGD cap. */
        if (unlikely(!isValidNativeRoot(cap))) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Make sure that the supplied pgd is ok: its ASID must still
         * resolve back to this PGD. */
        pgd = PGDE_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(cap));
        asid = cap_page_global_directory_cap_get_capPGDMappedASID(cap);

        find_ret = findVSpaceForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            userError("PGD Flush: No PGD for ASID");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(find_ret.vspace_root != pgd)) {
            userError("PGD Flush: Invalid PGD Cap");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Look up the frame containing 'start'. */
        resolve_ret = lookupFrame(pgd, start);

        if (!resolve_ret.valid) {
            /* Fail silently, as there can't be any stale cached data (for the
             * given address space), and getting a syscall error because the
             * relevant page is non-resident would be 'astonishing'. */
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return EXCEPTION_NONE;
        }

        /* Refuse to cross a page boundary. */
        if (PAGE_BASE(start, resolve_ret.frameSize) != PAGE_BASE(end - 1, resolve_ret.frameSize)) {
            current_syscall_error.type = seL4_RangeError;
            current_syscall_error.rangeErrorMin = start;
            current_syscall_error.rangeErrorMax = PAGE_BASE(start, resolve_ret.frameSize) +
                                                  MASK(pageBitsForSize(resolve_ret.frameSize));
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Calculate the physical start address. */
        pstart = resolve_ret.frameBase + PAGE_OFFSET(start, resolve_ret.frameSize);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        /* Note: 'end' is passed inclusive (end - 1) to the perform step. */
        return performPageGlobalDirectoryFlush(invLabel, pgd, asid, start, end - 1, pstart);

    default:
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}
1826
/* Decode a seL4_ARM_PageUpperDirectory invocation.
 *
 * Supports two labels:
 *  - ARMPageUpperDirectoryUnmap: requires the final capability; delegates to
 *    performUpperPageDirectoryInvocationUnmap.
 *  - ARMPageUpperDirectoryMap: maps this PUD into a PGD slot of the vspace
 *    given by the first extra cap, at the PGD-aligned address from arg 0.
 *
 * On success the current thread is set to Restart and the perform* function
 * completes the operation; on failure current_syscall_error is populated and
 * EXCEPTION_SYSCALL_ERROR is returned. The order of the checks below defines
 * the error-reporting priority and must not be changed.
 */
static exception_t
decodeARMPageUpperDirectoryInvocation(word_t invLabel, unsigned int length,
                                      cte_t *cte, cap_t cap, extra_caps_t extraCaps,
                                      word_t *buffer)
{
    cap_t pgdCap;
    pgde_t *pgd;
    pgde_t pgde;
    asid_t asid;
    vptr_t vaddr;
    lookupPGDSlot_ret_t pgdSlot;
    findVSpaceForASID_ret_t find_ret;

    if (invLabel == ARMPageUpperDirectoryUnmap) {
        /* Only the final capability to the PUD may unmap it, so that
         * revocation remains sound. */
        if (unlikely(!isFinalCapability(cte))) {
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performUpperPageDirectoryInvocationUnmap(cap, cte);
    }

    if (unlikely(invLabel != ARMPageUpperDirectoryMap)) {
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Need the vaddr argument plus the vspace root as an extra cap. */
    if (unlikely(length < 2 || extraCaps.excaprefs[0] == NULL)) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* A PUD may be mapped at most once. */
    if (unlikely(cap_page_upper_directory_cap_get_capPUDIsMapped(cap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Round the requested address down to a PGD-slot boundary. */
    vaddr = getSyscallArg(0, buffer) & (~MASK(PGD_INDEX_OFFSET));
    pgdCap = extraCaps.excaprefs[0]->cap;

    if (unlikely(!isValidNativeRoot(pgdCap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pgd = PGDE_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(pgdCap));
    asid = cap_page_global_directory_cap_get_capPGDMappedASID(pgdCap);

    if (unlikely(vaddr > USER_TOP)) {
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* The ASID must still resolve to the same vspace root the cap names. */
    if (unlikely(find_ret.vspace_root != pgd)) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pgdSlot = lookupPGDSlot(pgd, vaddr);

    /* Refuse to overwrite an existing entry; caller must delete it first. */
    if (unlikely(pgde_ptr_get_present(pgdSlot.pgdSlot))) {
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pgde = pgde_new(
               pptr_to_paddr(PUDE_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(cap))),
               0b11  /* reserved */
           );

    /* Record the mapping in the cap so a later unmap can find it. */
    cap_page_upper_directory_cap_ptr_set_capPUDIsMapped(&cap, 1);
    cap_page_upper_directory_cap_ptr_set_capPUDMappedASID(&cap, asid);
    cap_page_upper_directory_cap_ptr_set_capPUDMappedAddress(&cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performUpperPageDirectoryInvocationMap(cap, cte, pgde, pgdSlot.pgdSlot);
}
1916
/* Decode a seL4_ARM_PageDirectory invocation.
 *
 * Supports two labels:
 *  - ARMPageDirectoryUnmap: requires the final capability; delegates to
 *    performPageDirectoryInvocationUnmap.
 *  - ARMPageDirectoryMap: maps this PD into a PUD slot of the vspace given
 *    by the first extra cap, at the PUD-aligned address from arg 0.
 *
 * Mirrors decodeARMPageUpperDirectoryInvocation one level further down the
 * translation hierarchy. Check order defines error priority; do not reorder.
 */
static exception_t
decodeARMPageDirectoryInvocation(word_t invLabel, unsigned int length,
                                 cte_t *cte, cap_t cap, extra_caps_t extraCaps,
                                 word_t *buffer)
{
    cap_t pgdCap;
    pgde_t *pgd;
    pude_t pude;
    asid_t asid;
    vptr_t vaddr;
    lookupPUDSlot_ret_t pudSlot;
    findVSpaceForASID_ret_t find_ret;

    if (invLabel == ARMPageDirectoryUnmap) {
        /* Only the final capability to the PD may unmap it. */
        if (unlikely(!isFinalCapability(cte))) {
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageDirectoryInvocationUnmap(cap, cte);
    }

    if (unlikely(invLabel != ARMPageDirectoryMap)) {
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Need the vaddr argument plus the vspace root as an extra cap. */
    if (unlikely(length < 2 || extraCaps.excaprefs[0] == NULL)) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* A PD may be mapped at most once. */
    if (unlikely(cap_page_directory_cap_get_capPDIsMapped(cap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Round the requested address down to a PUD-slot boundary. */
    vaddr = getSyscallArg(0, buffer) & (~MASK(PUD_INDEX_OFFSET));
    pgdCap = extraCaps.excaprefs[0]->cap;

    if (unlikely(!isValidNativeRoot(pgdCap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pgd = PGDE_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(pgdCap));
    asid = cap_page_global_directory_cap_get_capPGDMappedASID(pgdCap);

    if (unlikely(vaddr > USER_TOP)) {
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* The ASID must still resolve to the same vspace root the cap names. */
    if (unlikely(find_ret.vspace_root != pgd)) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* The intermediate PUD must already be mapped for this vaddr. */
    pudSlot = lookupPUDSlot(pgd, vaddr);

    if (pudSlot.status != EXCEPTION_NONE) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* The target PUD slot must be empty (neither a PD pointer nor a 1GiB
     * page mapping). */
    if (unlikely(pude_pude_pd_ptr_get_present(pudSlot.pudSlot) ||
                 pude_pude_1g_ptr_get_present(pudSlot.pudSlot))) {
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pude = pude_pude_pd_new(pptr_to_paddr(PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(cap))));

    /* Record the mapping in the cap so a later unmap can find it. */
    cap_page_directory_cap_ptr_set_capPDIsMapped(&cap, 1);
    cap_page_directory_cap_ptr_set_capPDMappedASID(&cap, asid);
    cap_page_directory_cap_ptr_set_capPDMappedAddress(&cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performPageDirectoryInvocationMap(cap, cte, pude, pudSlot.pudSlot);
}
2010
2011static exception_t
2012decodeARMPageTableInvocation(word_t invLabel, unsigned int length,
2013                             cte_t *cte, cap_t cap, extra_caps_t extraCaps,
2014                             word_t *buffer)
2015{
2016    cap_t pgdCap;
2017    pgde_t *pgd;
2018    pde_t pde;
2019    asid_t asid;
2020    vptr_t vaddr;
2021    lookupPDSlot_ret_t pdSlot;
2022    findVSpaceForASID_ret_t find_ret;
2023
2024    if (invLabel == ARMPageTableUnmap) {
2025        if (unlikely(!isFinalCapability(cte))) {
2026            current_syscall_error.type = seL4_RevokeFirst;
2027            return EXCEPTION_SYSCALL_ERROR;
2028        }
2029
2030        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2031        return performPageTableInvocationUnmap(cap, cte);
2032    }
2033
2034    if (unlikely(invLabel != ARMPageTableMap)) {
2035        current_syscall_error.type = seL4_IllegalOperation;
2036        return EXCEPTION_SYSCALL_ERROR;
2037    }
2038
2039    if (unlikely(length < 2 || extraCaps.excaprefs[0] == NULL)) {
2040        current_syscall_error.type = seL4_TruncatedMessage;
2041        return EXCEPTION_SYSCALL_ERROR;
2042    }
2043
2044    if (unlikely(cap_page_table_cap_get_capPTIsMapped(cap))) {
2045        current_syscall_error.type = seL4_InvalidCapability;
2046        current_syscall_error.invalidCapNumber = 0;
2047        return EXCEPTION_SYSCALL_ERROR;
2048    }
2049
2050    vaddr = getSyscallArg(0, buffer) & (~MASK(PD_INDEX_OFFSET));
2051    pgdCap = extraCaps.excaprefs[0]->cap;
2052
2053    if (unlikely(!isValidNativeRoot(pgdCap))) {
2054        current_syscall_error.type = seL4_InvalidCapability;
2055        current_syscall_error.invalidCapNumber = 1;
2056        return EXCEPTION_SYSCALL_ERROR;
2057    }
2058
2059    pgd = PGDE_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(pgdCap));
2060    asid = cap_page_global_directory_cap_get_capPGDMappedASID(pgdCap);
2061
2062    if (unlikely(vaddr > USER_TOP)) {
2063        current_syscall_error.type = seL4_InvalidArgument;
2064        current_syscall_error.invalidArgumentNumber = 0;
2065        return EXCEPTION_SYSCALL_ERROR;
2066    }
2067
2068    find_ret = findVSpaceForASID(asid);
2069    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
2070        current_syscall_error.type = seL4_FailedLookup;
2071        current_syscall_error.failedLookupWasSource = false;
2072        return EXCEPTION_SYSCALL_ERROR;
2073    }
2074
2075    if (unlikely(find_ret.vspace_root != pgd)) {
2076        current_syscall_error.type = seL4_InvalidCapability;
2077        current_syscall_error.invalidCapNumber = 1;
2078        return EXCEPTION_SYSCALL_ERROR;
2079    }
2080
2081    pdSlot = lookupPDSlot(pgd, vaddr);
2082
2083    if (pdSlot.status != EXCEPTION_NONE) {
2084        current_syscall_error.type = seL4_FailedLookup;
2085        current_syscall_error.failedLookupWasSource = false;
2086        return EXCEPTION_SYSCALL_ERROR;
2087    }
2088
2089    if (unlikely(pde_pde_small_ptr_get_present(pdSlot.pdSlot) ||
2090                 pde_pde_large_ptr_get_present(pdSlot.pdSlot))) {
2091        current_syscall_error.type = seL4_DeleteFirst;
2092        return EXCEPTION_SYSCALL_ERROR;
2093    }
2094
2095    pde = pde_pde_small_new(pptr_to_paddr(PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap))));
2096
2097    cap_page_table_cap_ptr_set_capPTIsMapped(&cap, 1);
2098    cap_page_table_cap_ptr_set_capPTMappedASID(&cap, asid);
2099    cap_page_table_cap_ptr_set_capPTMappedAddress(&cap, vaddr);
2100
2101    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2102    return performPageTableInvocationMap(cap, cte, pde, pdSlot.pdSlot);
2103}
2104
2105static exception_t
2106decodeARMFrameInvocation(word_t invLabel, unsigned int length,
2107                         cte_t *cte, cap_t cap, extra_caps_t extraCaps,
2108                         word_t *buffer)
2109{
2110    switch (invLabel) {
2111    case ARMPageMap: {
2112        vptr_t vaddr;
2113        paddr_t base;
2114        cap_t pgdCap;
2115        pgde_t *pgd;
2116        asid_t asid;
2117        vm_rights_t vmRights;
2118        vm_page_size_t frameSize;
2119        vm_attributes_t attributes;
2120        findVSpaceForASID_ret_t find_ret;
2121
2122        if (unlikely(length < 3 || extraCaps.excaprefs[0] == NULL)) {
2123            current_syscall_error.type = seL4_TruncatedMessage;
2124            return EXCEPTION_SYSCALL_ERROR;
2125        }
2126
2127        if (unlikely(cap_frame_cap_get_capFMappedASID(cap) != 0)) {
2128            current_syscall_error.type = seL4_InvalidCapability;
2129            current_syscall_error.invalidCapNumber = 0;
2130            return EXCEPTION_SYSCALL_ERROR;
2131        }
2132
2133        vaddr = getSyscallArg(0, buffer);
2134        attributes = vmAttributesFromWord(getSyscallArg(2, buffer));
2135        pgdCap = extraCaps.excaprefs[0]->cap;
2136
2137        frameSize = cap_frame_cap_get_capFSize(cap);
2138        vmRights = maskVMRights(cap_frame_cap_get_capFVMRights(cap),
2139                                rightsFromWord(getSyscallArg(1, buffer)));
2140
2141        if (unlikely(!isValidNativeRoot(pgdCap))) {
2142            current_syscall_error.type = seL4_InvalidCapability;
2143            current_syscall_error.invalidCapNumber = 1;
2144            return EXCEPTION_SYSCALL_ERROR;
2145        }
2146
2147        pgd = PGDE_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(pgdCap));
2148        asid = cap_page_global_directory_cap_get_capPGDMappedASID(pgdCap);
2149
2150        find_ret = findVSpaceForASID(asid);
2151        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
2152            current_syscall_error.type = seL4_FailedLookup;
2153            current_syscall_error.failedLookupWasSource = false;
2154            return EXCEPTION_SYSCALL_ERROR;
2155        }
2156
2157        if (unlikely(find_ret.vspace_root != pgd)) {
2158            current_syscall_error.type = seL4_InvalidCapability;
2159            current_syscall_error.invalidCapNumber = 1;
2160            return EXCEPTION_SYSCALL_ERROR;
2161        }
2162
2163        if (unlikely(!IS_PAGE_ALIGNED(vaddr, frameSize))) {
2164            current_syscall_error.type = seL4_AlignmentError;
2165            return EXCEPTION_SYSCALL_ERROR;
2166        }
2167
2168        if (unlikely(vaddr + BIT(pageBitsForSize(frameSize)) - 1 > USER_TOP)) {
2169            current_syscall_error.type = seL4_InvalidArgument;
2170            current_syscall_error.invalidArgumentNumber = 0;
2171            return EXCEPTION_SYSCALL_ERROR;
2172        }
2173
2174        cap = cap_frame_cap_set_capFMappedASID(cap, asid);
2175        cap = cap_frame_cap_set_capFMappedAddress(cap, vaddr);
2176
2177        base = pptr_to_paddr((void *)cap_frame_cap_get_capFBasePtr(cap));
2178
2179        if (frameSize == ARMSmallPage) {
2180            lookupPTSlot_ret_t lu_ret = lookupPTSlot(pgd, vaddr);
2181
2182            if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
2183                current_syscall_error.type = seL4_FailedLookup;
2184                current_syscall_error.failedLookupWasSource = false;
2185                return EXCEPTION_SYSCALL_ERROR;
2186            }
2187
2188            if (pte_ptr_get_present(lu_ret.ptSlot)) {
2189                current_syscall_error.type = seL4_DeleteFirst;
2190                return EXCEPTION_SYSCALL_ERROR;
2191            }
2192
2193            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2194            return performSmallPageInvocationMap(asid, cap, cte,
2195                                                 makeUser3rdLevel(base, vmRights, attributes), lu_ret.ptSlot);
2196
2197        } else if (frameSize == ARMLargePage) {
2198            lookupPDSlot_ret_t lu_ret = lookupPDSlot(pgd, vaddr);
2199
2200            if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
2201                current_syscall_error.type = seL4_FailedLookup;
2202                current_syscall_error.failedLookupWasSource = false;
2203                return EXCEPTION_SYSCALL_ERROR;
2204            }
2205
2206            if (pde_pde_small_ptr_get_present(lu_ret.pdSlot) ||
2207                    pde_pde_large_ptr_get_present(lu_ret.pdSlot)) {
2208                current_syscall_error.type = seL4_DeleteFirst;
2209                return EXCEPTION_SYSCALL_ERROR;
2210            }
2211
2212            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2213            return performLargePageInvocationMap(asid, cap, cte,
2214                                                 makeUser2ndLevel(base, vmRights, attributes), lu_ret.pdSlot);
2215
2216        } else {
2217            lookupPUDSlot_ret_t lu_ret = lookupPUDSlot(pgd, vaddr);
2218
2219            if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
2220                current_syscall_error.type = seL4_FailedLookup;
2221                current_syscall_error.failedLookupWasSource = false;
2222                return EXCEPTION_SYSCALL_ERROR;
2223            }
2224
2225            if (pude_pude_pd_ptr_get_present(lu_ret.pudSlot) ||
2226                    pude_pude_1g_ptr_get_present(lu_ret.pudSlot)) {
2227                current_syscall_error.type = seL4_DeleteFirst;
2228                return EXCEPTION_SYSCALL_ERROR;
2229            }
2230
2231            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2232            return performHugePageInvocationMap(asid, cap, cte,
2233                                                makeUser1stLevel(base, vmRights, attributes), lu_ret.pudSlot);
2234        }
2235    }
2236
2237    case ARMPageRemap: {
2238        vptr_t vaddr;
2239        paddr_t base;
2240        cap_t pgdCap;
2241        pgde_t *pgd;
2242        asid_t asid;
2243        vm_rights_t vmRights;
2244        vm_page_size_t frameSize;
2245        vm_attributes_t attributes;
2246        findVSpaceForASID_ret_t find_ret;
2247
2248        if (unlikely(length < 2 || extraCaps.excaprefs[0] == NULL)) {
2249            current_syscall_error.type = seL4_TruncatedMessage;
2250            return EXCEPTION_SYSCALL_ERROR;
2251        }
2252
2253        attributes = vmAttributesFromWord(getSyscallArg(1, buffer));
2254        pgdCap = extraCaps.excaprefs[0]->cap;
2255
2256        frameSize = cap_frame_cap_get_capFSize(cap);
2257        vmRights = maskVMRights(cap_frame_cap_get_capFVMRights(cap),
2258                                rightsFromWord(getSyscallArg(0, buffer)));
2259
2260        if (unlikely(!isValidNativeRoot(pgdCap))) {
2261            current_syscall_error.type = seL4_InvalidCapability;
2262            current_syscall_error.invalidCapNumber = 1;
2263            return EXCEPTION_SYSCALL_ERROR;
2264        }
2265
2266        pgd = PGDE_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(pgdCap));
2267        asid = cap_page_global_directory_cap_get_capPGDMappedASID(pgdCap);
2268
2269        find_ret = findVSpaceForASID(asid);
2270        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
2271            current_syscall_error.type = seL4_FailedLookup;
2272            current_syscall_error.failedLookupWasSource = false;
2273            return EXCEPTION_SYSCALL_ERROR;
2274        }
2275
2276        if (unlikely(find_ret.vspace_root != pgd)) {
2277            current_syscall_error.type = seL4_InvalidCapability;
2278            current_syscall_error.invalidCapNumber = 1;
2279            return EXCEPTION_SYSCALL_ERROR;
2280        }
2281
2282        if (unlikely(cap_frame_cap_get_capFMappedASID(cap) != asid)) {
2283            current_syscall_error.type = seL4_InvalidCapability;
2284            current_syscall_error.invalidCapNumber = 0;
2285            return EXCEPTION_SYSCALL_ERROR;
2286        }
2287
2288        base = pptr_to_paddr((void *)cap_frame_cap_get_capFBasePtr(cap));
2289        vaddr = cap_frame_cap_get_capFMappedAddress(cap);
2290
2291        if (frameSize == ARMSmallPage) {
2292            lookupPTSlot_ret_t lu_ret = lookupPTSlot(pgd, vaddr);
2293
2294            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2295            return performSmallPageInvocationMap(asid, cap, cte,
2296                                                 makeUser3rdLevel(base, vmRights, attributes), lu_ret.ptSlot);
2297
2298        } else if (frameSize == ARMLargePage) {
2299            lookupPDSlot_ret_t lu_ret = lookupPDSlot(pgd, vaddr);
2300
2301            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2302            return performLargePageInvocationMap(asid, cap, cte,
2303                                                 makeUser2ndLevel(base, vmRights, attributes), lu_ret.pdSlot);
2304
2305        } else {
2306            lookupPUDSlot_ret_t lu_ret = lookupPUDSlot(pgd, vaddr);
2307
2308            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2309            return performHugePageInvocationMap(asid, cap, cte,
2310                                                makeUser1stLevel(base, vmRights, attributes), lu_ret.pudSlot);
2311        }
2312    }
2313
2314    case ARMPageUnmap:
2315        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2316        return performPageInvocationUnmap(cap, cte);
2317
2318    case ARMPageClean_Data:
2319    case ARMPageInvalidate_Data:
2320    case ARMPageCleanInvalidate_Data:
2321    case ARMPageUnify_Instruction: {
2322        vptr_t start, end;
2323        vptr_t vaddr;
2324        asid_t asid;
2325        word_t page_size;
2326        findVSpaceForASID_ret_t find_ret;
2327
2328        if (length < 2) {
2329            userError("Page Flush: Truncated message.");
2330            current_syscall_error.type = seL4_TruncatedMessage;
2331            return EXCEPTION_SYSCALL_ERROR;
2332        }
2333
2334        if (unlikely(cap_frame_cap_get_capFMappedASID(cap) == 0)) {
2335            userError("Page Flush: Frame is not mapped.");
2336            current_syscall_error.type = seL4_IllegalOperation;
2337            return EXCEPTION_SYSCALL_ERROR;
2338        }
2339
2340        vaddr = cap_frame_cap_get_capFMappedAddress(cap);
2341        asid = cap_frame_cap_get_capFMappedASID(cap);
2342
2343        find_ret = findVSpaceForASID(asid);
2344        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
2345            userError("Page Flush: No PGD for ASID");
2346            current_syscall_error.type = seL4_FailedLookup;
2347            current_syscall_error.failedLookupWasSource = false;
2348            return EXCEPTION_SYSCALL_ERROR;
2349        }
2350
2351        start = getSyscallArg(0, buffer);
2352        end =   getSyscallArg(1, buffer);
2353
2354        /* check that the range is sane */
2355        if (end <= start) {
2356            userError("PageFlush: Invalid range");
2357            current_syscall_error.type = seL4_InvalidArgument;
2358            current_syscall_error.invalidArgumentNumber = 1;
2359            return EXCEPTION_SYSCALL_ERROR;
2360        }
2361
2362        /* start and end are currently relative inside this page */
2363        page_size = BIT(pageBitsForSize(cap_frame_cap_get_capFSize(cap)));
2364        if (start >= page_size || end > page_size) {
2365            userError("Page Flush: Requested range not inside page");
2366            current_syscall_error.type = seL4_InvalidArgument;
2367            current_syscall_error.invalidArgumentNumber = 0;
2368            return EXCEPTION_SYSCALL_ERROR;
2369        }
2370
2371        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2372        return performPageFlush(invLabel, find_ret.vspace_root, asid, vaddr + start, vaddr + end - 1,
2373                                pptr_to_paddr((void*)cap_frame_cap_get_capFBasePtr(cap)) + start);
2374    }
2375
2376    case ARMPageGetAddress:
2377        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2378        return performPageGetAddress(cap_frame_cap_get_capFBasePtr(cap));
2379
2380    default:
2381        current_syscall_error.type = seL4_IllegalOperation;
2382        return EXCEPTION_SYSCALL_ERROR;
2383    }
2384}
2385
2386exception_t
2387decodeARMMMUInvocation(word_t invLabel, word_t length, cptr_t cptr,
2388                       cte_t *cte, cap_t cap, extra_caps_t extraCaps,
2389                       word_t *buffer)
2390{
2391    switch (cap_get_capType(cap)) {
2392    case cap_page_global_directory_cap:
2393        return decodeARMPageGlobalDirectoryInvocation(invLabel, length, cte,
2394                                                      cap, extraCaps, buffer);
2395
2396    case cap_page_upper_directory_cap:
2397        return decodeARMPageUpperDirectoryInvocation(invLabel, length, cte,
2398                                                     cap, extraCaps, buffer);
2399
2400    case cap_page_directory_cap:
2401        return decodeARMPageDirectoryInvocation(invLabel, length, cte,
2402                                                cap, extraCaps, buffer);
2403
2404    case cap_page_table_cap:
2405        return decodeARMPageTableInvocation (invLabel, length, cte,
2406                                             cap, extraCaps, buffer);
2407
2408    case cap_frame_cap:
2409        return decodeARMFrameInvocation (invLabel, length, cte,
2410                                         cap, extraCaps, buffer);
2411
2412    case cap_asid_control_cap: {
2413        unsigned int i;
2414        asid_t asid_base;
2415        word_t index, depth;
2416        cap_t untyped, root;
2417        cte_t *parentSlot, *destSlot;
2418        lookupSlot_ret_t lu_ret;
2419        void *frame;
2420        exception_t status;
2421
2422        if (unlikely(invLabel != ARMASIDControlMakePool)) {
2423            current_syscall_error.type = seL4_IllegalOperation;
2424
2425            return EXCEPTION_SYSCALL_ERROR;
2426        }
2427
2428        if (unlikely(length < 2 ||
2429                     extraCaps.excaprefs[0] == NULL ||
2430                     extraCaps.excaprefs[1] == NULL)) {
2431            current_syscall_error.type = seL4_TruncatedMessage;
2432
2433            return EXCEPTION_SYSCALL_ERROR;
2434        }
2435
2436        index = getSyscallArg(0, buffer);
2437        depth = getSyscallArg(1, buffer);
2438        parentSlot = extraCaps.excaprefs[0];
2439        untyped = parentSlot->cap;
2440        root = extraCaps.excaprefs[1]->cap;
2441
2442        /* Find first free pool */
2443        for (i = 0; i < nASIDPools && armKSASIDTable[i]; i++);
2444
2445        if (unlikely(i == nASIDPools)) { /* If no unallocated pool is found */
2446            current_syscall_error.type = seL4_DeleteFirst;
2447
2448            return EXCEPTION_SYSCALL_ERROR;
2449        }
2450
2451        asid_base = i << asidLowBits;
2452
2453        if (unlikely(cap_get_capType(untyped) != cap_untyped_cap ||
2454                     cap_untyped_cap_get_capBlockSize(untyped) != seL4_ASIDPoolBits) ||
2455                cap_untyped_cap_get_capIsDevice(untyped)) {
2456            current_syscall_error.type = seL4_InvalidCapability;
2457            current_syscall_error.invalidCapNumber = 1;
2458
2459            return EXCEPTION_SYSCALL_ERROR;
2460        }
2461
2462        status = ensureNoChildren(parentSlot);
2463        if (unlikely(status != EXCEPTION_NONE)) {
2464            return status;
2465        }
2466
2467        frame = WORD_PTR(cap_untyped_cap_get_capPtr(untyped));
2468
2469        lu_ret = lookupTargetSlot(root, index, depth);
2470        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
2471            return lu_ret.status;
2472        }
2473        destSlot = lu_ret.slot;
2474
2475        status = ensureEmptySlot(destSlot);
2476        if (unlikely(status != EXCEPTION_NONE)) {
2477            return status;
2478        }
2479
2480        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2481        return performASIDControlInvocation(frame, destSlot, parentSlot, asid_base);
2482    }
2483
2484    case cap_asid_pool_cap: {
2485        cap_t vspaceCap;
2486        cte_t *vspaceCapSlot;
2487        asid_pool_t *pool;
2488        unsigned int i;
2489        asid_t asid;
2490
2491        if (unlikely(invLabel != ARMASIDPoolAssign)) {
2492            current_syscall_error.type = seL4_IllegalOperation;
2493
2494            return EXCEPTION_SYSCALL_ERROR;
2495        }
2496
2497        if (unlikely(extraCaps.excaprefs[0] == NULL)) {
2498            current_syscall_error.type = seL4_TruncatedMessage;
2499
2500            return EXCEPTION_SYSCALL_ERROR;
2501        }
2502
2503        vspaceCapSlot = extraCaps.excaprefs[0];
2504        vspaceCap = vspaceCapSlot->cap;
2505
2506        if (unlikely(!isVTableRoot(vspaceCap) ||
2507                     cap_page_global_directory_cap_get_capPGDIsMapped(vspaceCap))) {
2508            current_syscall_error.type = seL4_InvalidCapability;
2509            current_syscall_error.invalidCapNumber = 1;
2510
2511            return EXCEPTION_SYSCALL_ERROR;
2512        }
2513
2514        pool = armKSASIDTable[cap_asid_pool_cap_get_capASIDBase(cap) >> asidLowBits];
2515
2516        if (unlikely(!pool)) {
2517            current_syscall_error.type = seL4_FailedLookup;
2518            current_syscall_error.failedLookupWasSource = false;
2519            current_lookup_fault = lookup_fault_invalid_root_new();
2520
2521            return EXCEPTION_SYSCALL_ERROR;
2522        }
2523
2524        if (unlikely(pool != ASID_POOL_PTR(cap_asid_pool_cap_get_capASIDPool(cap)))) {
2525            current_syscall_error.type = seL4_InvalidCapability;
2526            current_syscall_error.invalidCapNumber = 0;
2527
2528            return EXCEPTION_SYSCALL_ERROR;
2529        }
2530
2531        /* Find first free ASID */
2532        asid = cap_asid_pool_cap_get_capASIDBase(cap);
2533        for (i = 0; i < (1 << asidLowBits) && (asid + i == 0 || pool->array[i]); i++);
2534
2535        if (unlikely(i == 1 << asidLowBits)) {
2536            current_syscall_error.type = seL4_DeleteFirst;
2537
2538            return EXCEPTION_SYSCALL_ERROR;
2539        }
2540
2541        asid += i;
2542
2543        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2544        return performASIDPoolInvocation(asid, pool, vspaceCapSlot);
2545    }
2546
2547    default:
2548        fail("Invalid ARM arch cap type");
2549    }
2550}
2551
2552#ifdef CONFIG_DEBUG_BUILD
2553void kernelPrefetchAbort(word_t pc) VISIBLE;
2554void kernelDataAbort(word_t pc) VISIBLE;
2555
2556void
2557kernelPrefetchAbort(word_t pc)
2558{
2559    word_t ifsr = getIFSR();
2560
2561    printf("\n\nKERNEL PREFETCH ABORT!\n");
2562    printf("Faulting instruction: 0x%x\n", (unsigned int)pc);
2563    printf("ESR (IFSR): 0x%x\n", (unsigned int)ifsr);
2564
2565    halt();
2566}
2567
/* Debug handler for a data abort taken while executing in the kernel:
 * report the faulting PC, the fault address (FAR) and the data fault
 * syndrome, then halt. Never returns.
 */
void
kernelDataAbort(word_t pc)
{
    word_t dfsr = getDFSR();
    word_t far = getFAR();

    printf("\n\nKERNEL DATA ABORT!\n");
    printf("Faulting instruction: 0x%lx\n", (unsigned long)pc);
    /* NOTE(review): dfsr is truncated to 32 bits here — presumably the
     * syndrome fits in 32 bits; confirm against the architecture manual. */
    printf("FAR: 0x%lx ESR (DFSR): 0x%x\n", (unsigned long)far, (unsigned int)dfsr);

    halt();
}
2580#endif
2581
2582#ifdef CONFIG_PRINTING
/* Result of readWordFromVSpace: status is EXCEPTION_NONE on success, in
 * which case value holds the word read; on EXCEPTION_LOOKUP_FAULT the
 * value field is unspecified. */
typedef struct readWordFromVSpace_ret {
    exception_t status;
    word_t value;
} readWordFromVSpace_ret_t;
2587
2588static readWordFromVSpace_ret_t
2589readWordFromVSpace(vspace_root_t *pd, word_t vaddr)
2590{
2591    lookupFrame_ret_t lookup_frame_ret;
2592    readWordFromVSpace_ret_t ret;
2593    word_t offset;
2594    pptr_t kernel_vaddr;
2595    word_t *value;
2596
2597    lookup_frame_ret = lookupFrame(pd, vaddr);
2598
2599    if (!lookup_frame_ret.valid) {
2600        ret.status = EXCEPTION_LOOKUP_FAULT;
2601        return ret;
2602    }
2603
2604    offset = vaddr & MASK(pageBitsForSize(lookup_frame_ret.frameSize));
2605    kernel_vaddr = (word_t)paddr_to_pptr(lookup_frame_ret.frameBase);
2606    value = (word_t*)(kernel_vaddr + offset);
2607
2608    ret.status = EXCEPTION_NONE;
2609    ret.value = *value;
2610    return ret;
2611}
2612
2613void Arch_userStackTrace(tcb_t *tptr)
2614{
2615    cap_t threadRoot;
2616    pgde_t *pgd;
2617    word_t sp;
2618    int i;
2619
2620    threadRoot = TCB_PTR_CTE_PTR(tptr, tcbVTable)->cap;
2621
2622    /* lookup the PGD */
2623    if (cap_get_capType(threadRoot) != cap_page_global_directory_cap) {
2624        printf("Invalid vspace\n");
2625        return;
2626    }
2627
2628    pgd = PGDE_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(threadRoot));
2629    sp = getRegister(tptr, SP_EL0);
2630
2631    /* check for alignment so we don't have to worry about accessing
2632     * words that might be on two different pages */
2633    if (!IS_ALIGNED(sp, seL4_WordSizeBits)) {
2634        printf("SP not aligned\n");
2635        return;
2636    }
2637
2638    for (i = 0; i < CONFIG_USER_STACK_TRACE_LENGTH; i++) {
2639        word_t address = sp + (i * sizeof(word_t));
2640        readWordFromVSpace_ret_t result;
2641        result = readWordFromVSpace(pgd, address);
2642        if (result.status == EXCEPTION_NONE) {
2643            printf("0x%lx: 0x%lx\n", (unsigned long)address, (unsigned long)result.value);
2644        } else {
2645            printf("0x%lx: INVALID\n", (unsigned long)address);
2646        }
2647    }
2648}
2649#endif
2650
2651
2652
2653
2654
2655
2656