1/*
2 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 */
6
7#include <config.h>
8#include <types.h>
9#include <benchmark/benchmark.h>
10#include <api/failures.h>
11#include <api/syscall.h>
12#include <kernel/boot.h>
13#include <kernel/cspace.h>
14#include <kernel/thread.h>
15#include <kernel/stack.h>
16#include <machine/io.h>
17#include <machine/debug.h>
18#include <model/statedata.h>
19#include <object/cnode.h>
20#include <object/untyped.h>
21#include <arch/api/invocation.h>
22#include <arch/kernel/vspace.h>
23#include <linker.h>
24#include <plat/machine/hardware.h>
25#include <armv/context_switch.h>
26#include <arch/object/iospace.h>
27#include <arch/object/vcpu.h>
28#include <arch/machine/tlb.h>
29#define RESERVED 3
30
31/*
32 * Memory types are defined in Memory Attribute Indirection Register.
33 *  - nGnRnE Device non-Gathering, non-Reordering, No Early write acknowledgement
34 *  - nGnRE Unused Device non-Gathering, non-Reordering, Early write acknowledgement
35 *  - GRE Unused Device Gathering, Reordering, Early write acknowledgement
36 *  - NORMAL_NC Normal Memory, Inner/Outer non-cacheable
37 *  - NORMAL Normal Memory, Inner/Outer Write-back non-transient, Write-allocate, Read-allocate
38 * Note: These should match with contents of MAIR_EL1 register!
39 */
/* Indices into MAIR_EL1: each enumerator selects the attribute slot of the
 * corresponding memory type (see the note above; must match MAIR_EL1). */
enum mair_types {
    DEVICE_nGnRnE = 0, /* device: non-Gathering, non-Reordering, no Early ack */
    DEVICE_nGnRE = 1,  /* device: Early write acknowledgement (unused) */
    DEVICE_GRE = 2,    /* device: Gathering, Reordering, Early ack (unused) */
    NORMAL_NC = 3,     /* normal memory, inner/outer non-cacheable */
    NORMAL = 4         /* normal memory, inner/outer write-back cacheable */
};
47
48/* Stage-2 translation memory attributes */
/* Stage-2 translation memory attributes (the 4-bit MemAttr descriptor field).
 * NC = non-cacheable, WTC = write-through cacheable, WBC = write-back
 * cacheable. Note the gaps at 4, 8 and 12: those encodings are not normal
 * memory, which is why the normal-memory values are not contiguous. */
enum mair_s2_types {
    S2_DEVICE_nGnRnE = 0,
    S2_DEVICE_nGnRE = 1,
    S2_DEVICE_nGRE  = 2,
    S2_DEVICE_GRE = 3,

    S2_NORMAL_INNER_NC_OUTER_NC = 5,
    S2_NORMAL_INNER_WTC_OUTER_NC = 6,
    S2_NORMAL_INNER_WBC_OUTER_NC = 7,

    S2_NORMAL_INNER_NC_OUTER_WTC = 9,
    S2_NORMAL_INNER_WTC_OUTER_WTC = 10,
    S2_NORMAL_INNER_WBC_OUTER_WTC = 11,

    S2_NORMAL_INNER_NC_OUTER_WBC = 13,
    S2_NORMAL_INNER_WTC_OUTER_WBC = 14,
    S2_NORMAL_INNER_WBC_OUTER_WBC = 15,

    /* attribute used for cacheable user mappings under hypervisor support */
    S2_NORMAL = S2_NORMAL_INNER_WBC_OUTER_WBC
};
69
70/* Leif from Linaro said the big.LITTLE clusters should be treated as
71 * inner shareable, and we believe so, although the Example B2-1 given in
72 * ARM ARM DDI 0487B.b (ID092517) says otherwise.
73 */
74
75#define SMP_SHARE   3
76
/* Return types for the per-level page-table walks below. In each of the
 * lookup*Slot_ret structs the slot pointer is only meaningful when
 * status == EXCEPTION_NONE; on a lookup fault it is set to NULL. */
struct lookupPGDSlot_ret {
    exception_t status;
    pgde_t *pgdSlot;
};
typedef struct lookupPGDSlot_ret lookupPGDSlot_ret_t;

struct lookupPUDSlot_ret {
    exception_t status;
    pude_t *pudSlot;
};
typedef struct lookupPUDSlot_ret lookupPUDSlot_ret_t;

struct lookupPDSlot_ret {
    exception_t status;
    pde_t *pdSlot;
};
typedef struct lookupPDSlot_ret lookupPDSlot_ret_t;

struct lookupPTSlot_ret {
    exception_t status;
    pte_t *ptSlot;
};
typedef struct lookupPTSlot_ret lookupPTSlot_ret_t;

/* Result of lookupFrame: frameBase/frameSize are only valid when
 * valid == true. */
struct lookupFrame_ret {
    paddr_t frameBase;
    vm_page_size_t frameSize;
    bool_t valid;
};
typedef struct lookupFrame_ret lookupFrame_ret_t;

/* Result of findVSpaceForASID: vspace_root is only valid when
 * status == EXCEPTION_NONE. */
struct findVSpaceForASID_ret {
    exception_t status;
    vspace_root_t *vspace_root;
};
typedef struct findVSpaceForASID_ret findVSpaceForASID_ret_t;
113
114/* Stage-1 access permissions:
 * AP[2:1]  higher EL       EL0
116 *   00       rw            None
117 *   01       rw            rw
118 *   10       r             None
119 *   11       r             r
120 *
121 * Stage-2 access permissions:
122 * S2AP    Access from Nonsecure EL1 or Non-secure EL0
123 *  00                      None
124 *  01                      r
125 *  10                      w
126 *  11                      rw
127 *
128 *  For VMs or native seL4 applications, if hypervisor support
129 *  is enabled, we use the S2AP. The kernel itself running in
130 *  EL2 still uses the Stage-1 AP format.
131 */
132
133static word_t CONST APFromVMRights(vm_rights_t vm_rights)
134{
135    switch (vm_rights) {
136    case VMKernelOnly:
137        if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
138            return 0;
139        } else {
140            return 0;
141        }
142
143    case VMReadWrite:
144        if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
145            return 3;
146        } else {
147            return 1;
148        }
149
150    case VMKernelReadOnly:
151        if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
152            /* no corresponding AP for S2AP, return None */
153            return 0;
154        } else {
155            return 2;
156        }
157
158    case VMReadOnly:
159        if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
160            return 1;
161        } else {
162            return 3;
163        }
164
165    default:
166        fail("Invalid VM rights");
167    }
168}
169
170vm_rights_t CONST maskVMRights(vm_rights_t vm_rights, seL4_CapRights_t cap_rights_mask)
171{
172    if (vm_rights == VMReadOnly &&
173        seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
174        return VMReadOnly;
175    }
176    if (vm_rights == VMReadWrite &&
177        seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
178        if (!seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
179            return VMReadOnly;
180        } else {
181            return VMReadWrite;
182        }
183    }
184    if (vm_rights == VMReadWrite &&
185        !seL4_CapRights_get_capAllowRead(cap_rights_mask) &&
186        seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
187        userError("Attempted to make unsupported write only mapping");
188    }
189    return VMKernelOnly;
190}
191
192/* ==================== BOOT CODE STARTS HERE ==================== */
193
194/* The 54th bit is defined as UXN (unprivileged execute-never) for stage 1
 * of any translation regime for which stage 1 translation can support
196 * two VA ranges. This field applies only to execution at EL0. A value
197 * of 0 indicates that this control permits execution.
198 *
199 * The 54th bit is defined as XN (execute-never) for stage 1 of any translation
 * regime for which the stage 1 translation can support only a single VA range or
201 * stage 2 translation when ARMVv8.2-TTS2UXN is not implemented.
202 * This field applies to execution at any exception level to which the stage of
203 * translation applies. A value of 0 indicates that this control permits execution.
204 *
205 * When the kernel is running in EL2, the stage-1 translation only supports one
206 * VA range so that the 54th bit is XN. Setting the bit to 0 allows execution.
207 *
208 */
209BOOT_CODE void map_kernel_frame(paddr_t paddr, pptr_t vaddr, vm_rights_t vm_rights, vm_attributes_t attributes)
210{
211    assert(vaddr >= PPTR_TOP);
212
213#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
214    word_t uxn = vm_attributes_get_armExecuteNever(attributes);
215#else
216    word_t uxn = 1; /* unprivileged execute never */
217#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
218    word_t attr_index;
219    word_t shareable;
220    if (vm_attributes_get_armPageCacheable(attributes)) {
221        attr_index = NORMAL;
222        shareable = SMP_TERNARY(SMP_SHARE, 0);
223    } else {
224        attr_index = DEVICE_nGnRnE;
225        shareable = 0;
226    }
227    armKSGlobalKernelPT[GET_PT_INDEX(vaddr)] = pte_new(uxn, paddr,
228                                                       0, /* global */
229                                                       1, /* access flag */
230                                                       shareable,
231                                                       APFromVMRights(vm_rights),
232                                                       attr_index,
233                                                       RESERVED);
234}
235
/* Populate the statically-allocated kernel paging structures: link
 * PGD -> PUD -> PDs, map the kernel window with large pages, and set up
 * the page table backing the kernel device window. The entry ordering
 * and index arithmetic are load-bearing; do not reorder. */
BOOT_CODE void map_kernel_window(void)
{

    paddr_t paddr;
    pptr_t vaddr;
    word_t idx;

    /* verify that the kernel window is at the last entry of the PGD */
    assert(GET_PGD_INDEX(PPTR_BASE) == BIT(PGD_INDEX_BITS) - 1);
    assert(IS_ALIGNED(PPTR_BASE, seL4_LargePageBits));
    /* verify that the kernel device window is 1gb aligned and 1gb in size */
    assert(GET_PUD_INDEX(PPTR_TOP) == BIT(PUD_INDEX_BITS) - 1);
    assert(IS_ALIGNED(PPTR_TOP, seL4_HugePageBits));

    /* place the PUD into the PGD */
    armKSGlobalKernelPGD[GET_PGD_INDEX(PPTR_BASE)] = pgde_pgde_pud_new(
                                                         pptr_to_paddr(armKSGlobalKernelPUD));

    /* place all PDs except the last one in PUD */
    for (idx = GET_PUD_INDEX(PPTR_BASE); idx < GET_PUD_INDEX(PPTR_TOP); idx++) {
        armKSGlobalKernelPUD[idx] = pude_pude_pd_new(
                                        pptr_to_paddr(&armKSGlobalKernelPDs[idx][0])
                                    );
    }

    /* map the kernel window using large pages */
    vaddr = PPTR_BASE;
    for (paddr = PADDR_BASE; paddr < PADDR_TOP; paddr += BIT(seL4_LargePageBits)) {
        armKSGlobalKernelPDs[GET_PUD_INDEX(vaddr)][GET_PD_INDEX(vaddr)] = pde_pde_large_new(
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                                                                              0, // XN: kernel in EL2 must be executable
#else
                                                                              1, // UXN: never executable from EL0
#endif
                                                                              paddr,
                                                                              0,                        /* global */
                                                                              1,                        /* access flag */
                                                                              SMP_TERNARY(SMP_SHARE, 0),        /* Inner-shareable if SMP enabled, otherwise unshared */
                                                                              0,                        /* VMKernelOnly */
                                                                              NORMAL
                                                                          );
        vaddr += BIT(seL4_LargePageBits);
    }

    /* put the PD into the PUD for device window */
    armKSGlobalKernelPUD[GET_PUD_INDEX(PPTR_TOP)] = pude_pude_pd_new(
                                                        pptr_to_paddr(&armKSGlobalKernelPDs[BIT(PUD_INDEX_BITS) - 1][0])
                                                    );

    /* put the PT into the PD for device window */
    armKSGlobalKernelPDs[BIT(PUD_INDEX_BITS) - 1][BIT(PD_INDEX_BITS) - 1] = pde_pde_small_new(
                                                                                pptr_to_paddr(armKSGlobalKernelPT)
                                                                            );

    /* individual device frames are entered later via map_kernel_frame() */
    map_kernel_devices();
}
292
293/* When the hypervisor support is enabled, the stage-2 translation table format
294 * is used for applications.
295 * The global bit is always 0.
296 * The memory attributes use the S2 translation values.
297 */
/* Enter the mapping described by an initial-task frame cap, walking the
 * intermediate paging structures already created for the user image.
 * During boot all intermediate levels must exist, so presence is asserted
 * rather than handled as a lookup fault. */
static BOOT_CODE void map_it_frame_cap(cap_t vspace_cap, cap_t frame_cap, bool_t executable)
{
    vspace_root_t *vspaceRoot = VSPACE_PTR(pptr_of_cap(vspace_cap));
    pude_t *pud;
    pde_t *pd;
    pte_t *pt;

    vptr_t vptr = cap_frame_cap_get_capFMappedAddress(frame_cap);
    void *pptr = (void *)cap_frame_cap_get_capFBasePtr(frame_cap);

    /* only a mapped frame cap (non-zero ASID) may be entered */
    assert(cap_frame_cap_get_capFMappedASID(frame_cap) != 0);

#ifdef AARCH64_VSPACE_S2_START_L1
    /* the vspace root is itself the first-level (PUD) table */
    pud = vspaceRoot;
#else
    vspaceRoot += GET_PGD_INDEX(vptr);
    assert(pgde_pgde_pud_ptr_get_present(vspaceRoot));
    pud = paddr_to_pptr(pgde_pgde_pud_ptr_get_pud_base_address(vspaceRoot));
#endif
    pud += GET_UPUD_INDEX(vptr);
    assert(pude_pude_pd_ptr_get_present(pud));
    pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pud));
    pd += GET_PD_INDEX(vptr);
    assert(pde_pde_small_ptr_get_present(pd));
    pt = paddr_to_pptr(pde_pde_small_ptr_get_pt_base_address(pd));
    *(pt + GET_PT_INDEX(vptr)) = pte_new(
                                     !executable,                    /* unprivileged execute never */
                                     pptr_to_paddr(pptr),            /* page_base_address    */
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                                     0,                              /* stage-2 format: global bit is always 0 (see note above) */
#else
                                     1,                              /* not global */
#endif
                                     1,                              /* access flag */
                                     SMP_TERNARY(SMP_SHARE, 0),              /* Inner-shareable if SMP enabled, otherwise unshared */
                                     APFromVMRights(VMReadWrite),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                                     S2_NORMAL,
#else
                                     NORMAL,
#endif
                                     RESERVED
                                 );
}
342
343static BOOT_CODE cap_t create_it_frame_cap(pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large)
344{
345    vm_page_size_t frame_size;
346    if (use_large) {
347        frame_size = ARMLargePage;
348    } else {
349        frame_size = ARMSmallPage;
350    }
351    return
352        cap_frame_cap_new(
353            asid,                          /* capFMappedASID */
354            pptr,                          /* capFBasePtr */
355            frame_size,                    /* capFSize */
356            vptr,                          /* capFMappedAddress */
357            wordFromVMRights(VMReadWrite), /* capFVMRights */
358            false                          /* capFIsDevice */
359        );
360}
361
/* Enter an initial-task page table into its parent page directory.
 * The intermediate levels (PGD/PUD entries) must already be present;
 * boot code asserts this instead of handling lookup faults. */
static BOOT_CODE void map_it_pt_cap(cap_t vspace_cap, cap_t pt_cap)
{
    vspace_root_t *vspaceRoot = VSPACE_PTR(pptr_of_cap(vspace_cap));
    pude_t *pud;
    pde_t *pd;
    pte_t *pt = PT_PTR(cap_page_table_cap_get_capPTBasePtr(pt_cap));
    vptr_t vptr = cap_page_table_cap_get_capPTMappedAddress(pt_cap);

    assert(cap_page_table_cap_get_capPTIsMapped(pt_cap));

#ifdef AARCH64_VSPACE_S2_START_L1
    /* the vspace root is itself the first-level (PUD) table */
    pud = vspaceRoot;
#else
    vspaceRoot += GET_PGD_INDEX(vptr);
    assert(pgde_pgde_pud_ptr_get_present(vspaceRoot));
    pud = paddr_to_pptr(pgde_pgde_pud_ptr_get_pud_base_address(vspaceRoot));
#endif
    pud += GET_UPUD_INDEX(vptr);
    assert(pude_pude_pd_ptr_get_present(pud));
    pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pud));
    *(pd + GET_PD_INDEX(vptr)) = pde_pde_small_new(
                                     pptr_to_paddr(pt)
                                 );
}
386
387static BOOT_CODE cap_t create_it_pt_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
388{
389    cap_t cap;
390    cap = cap_page_table_cap_new(
391              asid,                   /* capPTMappedASID */
392              pptr,                   /* capPTBasePtr */
393              1,                      /* capPTIsMapped */
394              vptr                    /* capPTMappedAddress */
395          );
396    map_it_pt_cap(vspace_cap, cap);
397    return cap;
398}
399
/* Enter an initial-task page directory into its parent page upper
 * directory. The PGD entry (when a PGD level exists) must already be
 * present; boot code asserts this instead of handling lookup faults. */
static BOOT_CODE void map_it_pd_cap(cap_t vspace_cap, cap_t pd_cap)
{
    vspace_root_t *vspaceRoot = VSPACE_PTR(pptr_of_cap(vspace_cap));
    pude_t *pud;
    pde_t *pd = PD_PTR(cap_page_directory_cap_get_capPDBasePtr(pd_cap));
    vptr_t vptr = cap_page_directory_cap_get_capPDMappedAddress(pd_cap);

    assert(cap_page_directory_cap_get_capPDIsMapped(pd_cap));

#ifdef AARCH64_VSPACE_S2_START_L1
    /* the vspace root is itself the first-level (PUD) table */
    pud = vspaceRoot;
#else
    vspaceRoot += GET_PGD_INDEX(vptr);
    assert(pgde_pgde_pud_ptr_get_present(vspaceRoot));
    pud = paddr_to_pptr(pgde_pgde_pud_ptr_get_pud_base_address(vspaceRoot));
#endif
    *(pud + GET_UPUD_INDEX(vptr)) = pude_pude_pd_new(
                                        pptr_to_paddr(pd)
                                    );
}
420
421static BOOT_CODE cap_t create_it_pd_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
422{
423    cap_t cap;
424    cap = cap_page_directory_cap_new(
425              asid,                   /* capPDMappedASID */
426              pptr,                   /* capPDBasePtr */
427              1,                      /* capPDIsMapped */
428              vptr                    /* capPDMappedAddress */
429          );
430    map_it_pd_cap(vspace_cap, cap);
431    return cap;
432}
433
434#ifndef AARCH64_VSPACE_S2_START_L1
/* Enter an initial-task page upper directory into the top-level PGD.
 * Only compiled when the vspace has a PGD level. */
static BOOT_CODE void map_it_pud_cap(cap_t vspace_cap, cap_t pud_cap)
{
    pgde_t *pgd = PGD_PTR(pptr_of_cap(vspace_cap));
    pude_t *pud = PUD_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(pud_cap));
    vptr_t vptr = cap_page_upper_directory_cap_get_capPUDMappedAddress(pud_cap);

    assert(cap_page_upper_directory_cap_get_capPUDIsMapped(pud_cap));

    *(pgd + GET_PGD_INDEX(vptr)) = pgde_pgde_pud_new(
                                       pptr_to_paddr(pud));
}
446
447static BOOT_CODE cap_t create_it_pud_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
448{
449    cap_t cap;
450    cap = cap_page_upper_directory_cap_new(
451              asid,               /* capPUDMappedASID */
452              pptr,               /* capPUDBasePtr */
453              1,                  /* capPUDIsMapped */
454              vptr                /* capPUDMappedAddress */
455          );
456    map_it_pud_cap(vspace_cap, cap);
457    return cap;
458}
459#endif /* AARCH64_VSPACE_S2_START_L1 */
/* Number of paging structures (one per intermediate level and region
 * stride) needed to back the initial task's virtual region. The PGD
 * term is omitted when the vspace starts at the PUD level. */
BOOT_CODE word_t arch_get_n_paging(v_region_t it_v_reg)
{
    return
#ifndef AARCH64_VSPACE_S2_START_L1
        get_n_paging(it_v_reg, PGD_INDEX_OFFSET) +
#endif
        get_n_paging(it_v_reg, PUD_INDEX_OFFSET) +
        get_n_paging(it_v_reg, PD_INDEX_OFFSET);
}
469
/* Create the initial task's address space: the vspace root cap plus all
 * intermediate paging structures (PUDs/PDs/PTs) covering it_v_reg. Each
 * created cap is also provided to the root CNode; the slot range used is
 * recorded in the boot info frame. Returns the vspace cap, or a null cap
 * if providing any cap fails. */
BOOT_CODE cap_t create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t      vspace_cap;
    vptr_t     vptr;
    seL4_SlotPos slot_pos_before;
    seL4_SlotPos slot_pos_after;

    /* create the PGD */
    vspace_cap = cap_vtable_cap_new(
                     IT_ASID,        /* capPGDMappedASID */
                     rootserver.vspace, /* capPGDBasePtr   */
                     1               /* capPGDIsMapped   */
                 );
    slot_pos_before = ndks_boot.slot_pos_cur;
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapInitThreadVSpace), vspace_cap);

#ifndef AARCH64_VSPACE_S2_START_L1
    /* Create any PUDs needed for the user land image */
    for (vptr = ROUND_DOWN(it_v_reg.start, PGD_INDEX_OFFSET);
         vptr < it_v_reg.end;
         vptr += BIT(PGD_INDEX_OFFSET)) {
        if (!provide_cap(root_cnode_cap, create_it_pud_cap(vspace_cap, it_alloc_paging(), vptr, IT_ASID))) {
            return cap_null_cap_new();
        }
    }
#endif
    /* Create any PDs needed for the user land image */
    for (vptr = ROUND_DOWN(it_v_reg.start, PUD_INDEX_OFFSET);
         vptr < it_v_reg.end;
         vptr += BIT(PUD_INDEX_OFFSET)) {
        if (!provide_cap(root_cnode_cap, create_it_pd_cap(vspace_cap, it_alloc_paging(), vptr, IT_ASID))) {
            return cap_null_cap_new();
        }
    }

    /* Create any PTs needed for the user land image */
    for (vptr = ROUND_DOWN(it_v_reg.start, PD_INDEX_OFFSET);
         vptr < it_v_reg.end;
         vptr += BIT(PD_INDEX_OFFSET)) {
        if (!provide_cap(root_cnode_cap, create_it_pt_cap(vspace_cap, it_alloc_paging(), vptr, IT_ASID))) {
            return cap_null_cap_new();
        }
    }

    /* record which boot-info slots now hold the user image paging caps */
    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->userImagePaging = (seL4_SlotRegion) {
        slot_pos_before, slot_pos_after
    };
    return vspace_cap;
}
520
521BOOT_CODE cap_t create_unmapped_it_frame_cap(pptr_t pptr, bool_t use_large)
522{
523    return create_it_frame_cap(pptr, 0, asidInvalid, use_large);
524}
525
526BOOT_CODE cap_t create_mapped_it_frame_cap(cap_t pd_cap, pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large,
527                                           bool_t executable)
528{
529    cap_t cap = create_it_frame_cap(pptr, vptr, asid, use_large);
530    map_it_frame_cap(pd_cap, cap, executable);
531    return cap;
532}
533
/* Switch the hardware onto the kernel's own translation tables. The
 * sequence (clean caches, set roots, invalidate TLB, lock the kernel
 * entry) is order-sensitive; do not reorder these calls. */
BOOT_CODE void activate_kernel_vspace(void)
{
    cleanInvalidateL1Caches();
    setCurrentKernelVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalKernelPGD)));

    /* Prevent elf-loader address translation to fill up TLB */
    setCurrentUserVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalUserVSpace)));

    invalidateLocalTLB();
    lockTLBEntry(KERNEL_ELF_BASE);
}
545
/* Register the initial task's vspace under IT_ASID: enter it into the
 * initial ASID pool and install that pool in the global ASID table. */
BOOT_CODE void write_it_asid_pool(cap_t it_ap_cap, cap_t it_vspace_cap)
{
    asid_pool_t *ap = ASID_POOL_PTR(pptr_of_cap(it_ap_cap));
    ap->array[IT_ASID] = (void *)(pptr_of_cap(it_vspace_cap));
    armKSASIDTable[IT_ASID >> asidLowBits] = ap;
#ifdef CONFIG_ARM_SMMU
    /* mark the SMMU slot of the new vtable as invalid (no context bank) */
    vspace_root_t *vtable = ap->array[IT_ASID];
    vtable[VTABLE_SMMU_SLOT] = vtable_invalid_smmu_new(0);
#endif
}
556
557/* ==================== BOOT CODE FINISHES HERE ==================== */
558
559static findVSpaceForASID_ret_t findVSpaceForASID(asid_t asid)
560{
561    findVSpaceForASID_ret_t ret;
562    asid_pool_t *poolPtr;
563    vspace_root_t *vspace_root;
564
565    poolPtr = armKSASIDTable[asid >> asidLowBits];
566    if (!poolPtr) {
567        current_lookup_fault = lookup_fault_invalid_root_new();
568
569        ret.vspace_root = NULL;
570        ret.status = EXCEPTION_LOOKUP_FAULT;
571        return ret;
572    }
573
574    vspace_root = poolPtr->array[asid & MASK(asidLowBits)];
575    if (!vspace_root) {
576        current_lookup_fault = lookup_fault_invalid_root_new();
577
578        ret.vspace_root = NULL;
579        ret.status = EXCEPTION_LOOKUP_FAULT;
580        return ret;
581    }
582
583    ret.vspace_root = vspace_root;
584    ret.status = EXCEPTION_NONE;
585    return ret;
586}
587
588word_t *PURE lookupIPCBuffer(bool_t isReceiver, tcb_t *thread)
589{
590    word_t w_bufferPtr;
591    cap_t bufferCap;
592    vm_rights_t vm_rights;
593
594    w_bufferPtr = thread->tcbIPCBuffer;
595    bufferCap = TCB_PTR_CTE_PTR(thread, tcbBuffer)->cap;
596
597    if (unlikely(cap_get_capType(bufferCap) != cap_frame_cap)) {
598        return NULL;
599    }
600    if (unlikely(cap_frame_cap_get_capFIsDevice(bufferCap))) {
601        return NULL;
602    }
603
604    vm_rights = cap_frame_cap_get_capFVMRights(bufferCap);
605    if (likely(vm_rights == VMReadWrite ||
606               (!isReceiver && vm_rights == VMReadOnly))) {
607        word_t basePtr;
608        unsigned int pageBits;
609
610        basePtr = cap_frame_cap_get_capFBasePtr(bufferCap);
611        pageBits = pageBitsForSize(cap_frame_cap_get_capFSize(bufferCap));
612        return (word_t *)(basePtr + (w_bufferPtr & MASK(pageBits)));
613    } else {
614        return NULL;
615    }
616}
617
618exception_t checkValidIPCBuffer(vptr_t vptr, cap_t cap)
619{
620    if (cap_get_capType(cap) != cap_frame_cap) {
621        userError("IPC Buffer is an invalid cap.");
622        current_syscall_error.type = seL4_IllegalOperation;
623        return EXCEPTION_SYSCALL_ERROR;
624    }
625
626    if (unlikely(cap_frame_cap_get_capFIsDevice(cap))) {
627        userError("Specifying a device frame as an IPC buffer is not permitted.");
628        current_syscall_error.type = seL4_IllegalOperation;
629        return EXCEPTION_SYSCALL_ERROR;
630    }
631
632    if (!IS_ALIGNED(vptr, seL4_IPCBufferSizeBits)) {
633        userError("IPC Buffer vaddr 0x%x is not aligned.", (int)vptr);
634        current_syscall_error.type = seL4_AlignmentError;
635        return EXCEPTION_SYSCALL_ERROR;
636    }
637
638    return EXCEPTION_NONE;
639}
640
641static lookupPGDSlot_ret_t lookupPGDSlot(vspace_root_t *vspace, vptr_t vptr)
642{
643    lookupPGDSlot_ret_t ret;
644
645    pgde_t *pgd = PGDE_PTR(vspace);
646    word_t pgdIndex = GET_PGD_INDEX(vptr);
647    ret.status = EXCEPTION_NONE;
648    ret.pgdSlot = pgd + pgdIndex;
649    return ret;
650}
651
/* Locate the PUD slot covering vptr. When the vspace starts at the PUD
 * level this cannot fail; otherwise a missing PGD entry yields a lookup
 * fault identifying the PGD level. */
static lookupPUDSlot_ret_t lookupPUDSlot(vspace_root_t *vspace, vptr_t vptr)
{
    lookupPUDSlot_ret_t ret;

#ifdef AARCH64_VSPACE_S2_START_L1
    /* the vspace root is itself the PUD, so the slot always exists */
    pude_t *pud = PUDE_PTR(vspace);
    word_t pudIndex = GET_UPUD_INDEX(vptr);
    ret.status = EXCEPTION_NONE;
    ret.pudSlot = pud + pudIndex;
    return ret;
#else
    lookupPGDSlot_ret_t pgdSlot = lookupPGDSlot(vspace, vptr);

    if (!pgde_pgde_pud_ptr_get_present(pgdSlot.pgdSlot)) {
        /* no PUD mapped at this PGD entry */
        current_lookup_fault = lookup_fault_missing_capability_new(PGD_INDEX_OFFSET);

        ret.pudSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    } else {
        pude_t *pud;
        pude_t *pudSlot;
        word_t pudIndex = GET_UPUD_INDEX(vptr);
        pud = paddr_to_pptr(pgde_pgde_pud_ptr_get_pud_base_address(pgdSlot.pgdSlot));
        pudSlot = pud + pudIndex;

        ret.status = EXCEPTION_NONE;
        ret.pudSlot = pudSlot;
        return ret;
    }
#endif
}
684
685static lookupPDSlot_ret_t lookupPDSlot(vspace_root_t *vspace, vptr_t vptr)
686{
687    lookupPUDSlot_ret_t pudSlot;
688    lookupPDSlot_ret_t ret;
689
690    pudSlot = lookupPUDSlot(vspace, vptr);
691    if (pudSlot.status != EXCEPTION_NONE) {
692        ret.pdSlot = NULL;
693        ret.status = pudSlot.status;
694        return ret;
695    }
696    if (!pude_pude_pd_ptr_get_present(pudSlot.pudSlot)) {
697        current_lookup_fault = lookup_fault_missing_capability_new(PUD_INDEX_OFFSET);
698
699        ret.pdSlot = NULL;
700        ret.status = EXCEPTION_LOOKUP_FAULT;
701        return ret;
702    } else {
703        pde_t *pd;
704        pde_t *pdSlot;
705        word_t pdIndex = GET_PD_INDEX(vptr);
706        pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pudSlot.pudSlot));
707        pdSlot = pd + pdIndex;
708
709        ret.status = EXCEPTION_NONE;
710        ret.pdSlot = pdSlot;
711        return ret;
712    }
713}
714
715static lookupPTSlot_ret_t lookupPTSlot(vspace_root_t *vspace, vptr_t vptr)
716{
717    lookupPTSlot_ret_t ret;
718    lookupPDSlot_ret_t pdSlot;
719
720    pdSlot = lookupPDSlot(vspace, vptr);
721    if (pdSlot.status != EXCEPTION_NONE) {
722        ret.ptSlot = NULL;
723        ret.status = pdSlot.status;
724        return ret;
725    }
726    if (!pde_pde_small_ptr_get_present(pdSlot.pdSlot)) {
727        current_lookup_fault = lookup_fault_missing_capability_new(PD_INDEX_OFFSET);
728
729        ret.ptSlot = NULL;
730        ret.status = EXCEPTION_LOOKUP_FAULT;
731        return ret;
732    } else {
733        pte_t *pt;
734        pte_t *ptSlot;
735        word_t ptIndex = GET_PT_INDEX(vptr);
736        pt = paddr_to_pptr(pde_pde_small_ptr_get_pt_base_address(pdSlot.pdSlot));
737        ptSlot = pt + ptIndex;
738
739        ret.ptSlot = ptSlot;
740        ret.status = EXCEPTION_NONE;
741        return ret;
742    }
743}
744
/* Find the frame (if any) mapped at vptr, checking each level for a leaf
 * mapping: 1G page at the PUD, large page at the PD, small page at the PT.
 * On failure ret.valid is false and frameBase/frameSize are left unset
 * and must not be read. */
static lookupFrame_ret_t lookupFrame(vspace_root_t *vspace, vptr_t vptr)
{
    lookupPUDSlot_ret_t pudSlot;
    lookupFrame_ret_t ret;

    pudSlot = lookupPUDSlot(vspace, vptr);
    if (pudSlot.status != EXCEPTION_NONE) {
        ret.valid = false;
        return ret;
    }

    switch (pude_ptr_get_pude_type(pudSlot.pudSlot)) {
    case pude_pude_1g:
        /* huge page mapped directly at the PUD level */
        ret.frameBase = pude_pude_1g_ptr_get_page_base_address(pudSlot.pudSlot);
        ret.frameSize = ARMHugePage;
        ret.valid = true;
        return ret;

    case pude_pude_pd: {
        pde_t *pd = paddr_to_pptr(pude_pude_pd_ptr_get_pd_base_address(pudSlot.pudSlot));
        pde_t *pdSlot = pd + GET_PD_INDEX(vptr);

        if (pde_ptr_get_pde_type(pdSlot) == pde_pde_large) {
            /* large page mapped at the PD level */
            ret.frameBase = pde_pde_large_ptr_get_page_base_address(pdSlot);
            ret.frameSize = ARMLargePage;
            ret.valid = true;
            return ret;
        }

        if (pde_ptr_get_pde_type(pdSlot) == pde_pde_small) {
            pte_t *pt = paddr_to_pptr(pde_pde_small_ptr_get_pt_base_address(pdSlot));
            pte_t *ptSlot = pt + GET_PT_INDEX(vptr);

            if (pte_ptr_get_present(ptSlot)) {
                /* small page mapped at the leaf PT level */
                ret.frameBase = pte_ptr_get_page_base_address(ptSlot);
                ret.frameSize = ARMSmallPage;
                ret.valid = true;
                return ret;
            }
        }
    }
    }

    /* any other entry type, or an absent leaf, means no frame is mapped;
     * the switch deliberately falls out to here instead of using a default */
    ret.valid = false;
    return ret;
}
791
792/* Note that if the hypervisor support is enabled, the user page tables use
793 * stage-2 translation format. Otherwise, they follow the stage-1 translation format.
794 */
/* Construct a leaf (third-level) PTE for a user mapping. Cacheable pages
 * use normal memory and are inner-shareable under SMP; non-cacheable pages
 * use the strictest device memory type. Under hypervisor support the
 * stage-2 attribute encodings and a zero global bit are used instead. */
static pte_t makeUser3rdLevel(paddr_t paddr, vm_rights_t vm_rights, vm_attributes_t attributes)
{
    bool_t nonexecutable = vm_attributes_get_armExecuteNever(attributes);

    if (vm_attributes_get_armPageCacheable(attributes)) {
        return pte_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,                          /* global bit is always 0 for stage-2 */
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   SMP_TERNARY(SMP_SHARE, 0),          /* Inner-shareable if SMP enabled, otherwise unshared */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_NORMAL,
#else
                   NORMAL,
#endif
                   RESERVED
               );
    } else {
        return pte_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,                          /* global bit is always 0 for stage-2 */
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   0,                          /* Ignored - Outer shareable */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_DEVICE_nGnRnE,
#else
                   DEVICE_nGnRnE,
#endif

                   RESERVED
               );
    }
}
840
/* Construct a second-level (large page) PDE for a user mapping, using the
 * same attribute policy as makeUser3rdLevel. */
static pde_t makeUser2ndLevel(paddr_t paddr, vm_rights_t vm_rights, vm_attributes_t attributes)
{
    bool_t nonexecutable = vm_attributes_get_armExecuteNever(attributes);

    if (vm_attributes_get_armPageCacheable(attributes)) {
        return pde_pde_large_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,                          /* global bit is always 0 for stage-2 */
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   SMP_TERNARY(SMP_SHARE, 0),          /* Inner-shareable if SMP enabled, otherwise unshared */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_NORMAL
#else
                   NORMAL
#endif
               );
    } else {
        return pde_pde_large_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,                          /* global bit is always 0 for stage-2 */
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   0,                          /* Ignored - Outer shareable */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_DEVICE_nGnRnE
#else
                   DEVICE_nGnRnE
#endif
               );
    }
}
883
/* Construct a first-level (1GiB huge-page) PUDE for a user mapping.
 *
 * Mirrors makeUser2ndLevel()/the 4K case: cacheable requests become Normal
 * memory (inner-shareable under SMP), non-cacheable requests become
 * Device-nGnRnE. Stage-2 attribute encodings are used under hypervisor
 * support, stage-1 MAIR indices otherwise. */
static pude_t makeUser1stLevel(paddr_t paddr, vm_rights_t vm_rights, vm_attributes_t attributes)
{
    bool_t nonexecutable = vm_attributes_get_armExecuteNever(attributes);

    if (vm_attributes_get_armPageCacheable(attributes)) {
        return pude_pude_1g_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,                          /* nG field unused in this config — see generator spec */
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   SMP_TERNARY(SMP_SHARE, 0),          /* Inner-shareable if SMP enabled, otherwise unshared */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_NORMAL                   /* stage-2: Normal, inner/outer write-back cacheable */
#else
                   NORMAL                      /* MAIR index: Normal write-back cacheable */
#endif
               );
    } else {
        /* Non-cacheable request: map as strongly-ordered Device memory. */
        return pude_pude_1g_new(
                   nonexecutable,              /* unprivileged execute never */
                   paddr,
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   0,                          /* nG field unused in this config — see generator spec */
#else
                   1,                          /* not global */
#endif
                   1,                          /* access flag */
                   0,                          /* shareability: ignored for Device memory */
                   APFromVMRights(vm_rights),
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                   S2_DEVICE_nGnRnE
#else
                   DEVICE_nGnRnE
#endif
               );
    }
}
926
927#define PAR_EL1_MASK 0x0000fffffffff000ul
928#define GET_PAR_ADDR(x) ((x) & PAR_EL1_MASK)
/* Turn a hardware VM fault (data or prefetch abort) into a seL4 VM fault
 * recorded in current_fault, to be delivered to the thread's fault handler.
 * Returns EXCEPTION_FAULT in both supported cases; any other fault type is
 * a kernel bug and fail()s. */
exception_t handleVMFault(tcb_t *thread, vm_fault_type_t vm_faultType)
{
    switch (vm_faultType) {
    case ARMDataAbort: {
        word_t addr, fault;

        addr = getFAR();    /* faulting virtual address */
        fault = getDFSR();  /* data fault status */

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        /* use the IPA */
        if (ARCH_NODE_STATE(armHSVCPUActive)) {
            /* Translate the guest VA via AT S1E1R and splice the page offset
             * back in, so the fault is reported in the guest's IPA space. */
            addr = GET_PAR_ADDR(ats1e1r(addr)) | (addr & MASK(PAGE_BITS));
        }
#endif
        current_fault = seL4_Fault_VMFault_new(addr, fault, false);
        return EXCEPTION_FAULT;
    }

    case ARMPrefetchAbort: {
        word_t pc, fault;

        pc = getRestartPC(thread);  /* faulting instruction address */
        fault = getIFSR();          /* instruction fault status */

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        /* Same IPA translation as the data-abort path, applied to the PC. */
        if (ARCH_NODE_STATE(armHSVCPUActive)) {
            pc = GET_PAR_ADDR(ats1e1r(pc)) | (pc & MASK(PAGE_BITS));
        }
#endif
        current_fault = seL4_Fault_VMFault_new(pc, fault, true);
        return EXCEPTION_FAULT;
    }

    default:
        fail("Invalid VM fault type");
    }
}
967
968bool_t CONST isVTableRoot(cap_t cap)
969{
970    return cap_get_capType(cap) == cap_vtable_root_cap;
971}
972
973bool_t CONST isValidNativeRoot(cap_t cap)
974{
975    return isVTableRoot(cap) &&
976           cap_vtable_root_isMapped(cap);
977}
978
/* On this architecture a valid VSpace root is simply a valid native root;
 * no additional root kinds are accepted here. */
bool_t CONST isValidVTableRoot(cap_t cap)
{
    return isValidNativeRoot(cap);
}
983
984void setVMRoot(tcb_t *tcb)
985{
986    cap_t threadRoot;
987    asid_t asid;
988    vspace_root_t *vspaceRoot;
989    findVSpaceForASID_ret_t find_ret;
990
991    threadRoot = TCB_PTR_CTE_PTR(tcb, tcbVTable)->cap;
992
993    if (!isValidNativeRoot(threadRoot)) {
994        setCurrentUserVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalUserVSpace)));
995        return;
996    }
997
998    vspaceRoot = VSPACE_PTR(cap_vtable_root_get_basePtr(threadRoot));
999    asid = cap_vtable_root_get_mappedASID(threadRoot);
1000    find_ret = findVSpaceForASID(asid);
1001    if (unlikely(find_ret.status != EXCEPTION_NONE || find_ret.vspace_root != vspaceRoot)) {
1002        setCurrentUserVSpaceRoot(ttbr_new(0, pptr_to_paddr(armKSGlobalUserVSpace)));
1003        return;
1004    }
1005
1006    armv_contextSwitch(vspaceRoot, asid);
1007}
1008
1009static bool_t setVMRootForFlush(vspace_root_t *vspace, asid_t asid)
1010{
1011    cap_t threadRoot;
1012
1013    threadRoot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbVTable)->cap;
1014
1015    if (cap_get_capType(threadRoot) == cap_vtable_root_cap &&
1016        cap_vtable_root_isMapped(threadRoot) &&
1017        cap_vtable_root_get_basePtr(threadRoot) == vspace) {
1018        return false;
1019    }
1020
1021    armv_contextSwitch(vspace, asid);
1022    return true;
1023}
1024
1025pgde_t *pageUpperDirectoryMapped(asid_t asid, vptr_t vaddr, pude_t *pud)
1026{
1027    findVSpaceForASID_ret_t find_ret;
1028    lookupPGDSlot_ret_t lu_ret;
1029
1030    find_ret = findVSpaceForASID(asid);
1031    if (find_ret.status != EXCEPTION_NONE) {
1032        return NULL;
1033    }
1034
1035    lu_ret = lookupPGDSlot(find_ret.vspace_root, vaddr);
1036    if (pgde_pgde_pud_ptr_get_present(lu_ret.pgdSlot) &&
1037        (pgde_pgde_pud_ptr_get_pud_base_address(lu_ret.pgdSlot) == pptr_to_paddr(pud))) {
1038        return lu_ret.pgdSlot;
1039    }
1040
1041    return NULL;
1042}
1043
1044pude_t *pageDirectoryMapped(asid_t asid, vptr_t vaddr, pde_t *pd)
1045{
1046    findVSpaceForASID_ret_t find_ret;
1047    lookupPUDSlot_ret_t lu_ret;
1048
1049    find_ret = findVSpaceForASID(asid);
1050    if (find_ret.status != EXCEPTION_NONE) {
1051        return NULL;
1052    }
1053
1054    lu_ret = lookupPUDSlot(find_ret.vspace_root, vaddr);
1055    if (lu_ret.status != EXCEPTION_NONE) {
1056        return NULL;
1057    }
1058
1059    if (pude_pude_pd_ptr_get_present(lu_ret.pudSlot) &&
1060        (pude_pude_pd_ptr_get_pd_base_address(lu_ret.pudSlot) == pptr_to_paddr(pd))) {
1061        return lu_ret.pudSlot;
1062    }
1063
1064    return NULL;
1065}
1066
1067#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1068
1069static void invalidateASID(asid_t asid)
1070{
1071    asid_pool_t *asidPool;
1072
1073    asidPool = armKSASIDTable[asid >> asidLowBits];
1074    assert(asidPool);
1075
1076    vspace_root_t *vtable = asidPool->array[asid & MASK(asidLowBits)];
1077    assert(vtable);
1078
1079    vtable[VTABLE_VMID_SLOT] = vtable_invalid_new(0, false);
1080}
1081
1082static vspace_root_t PURE loadHWASID(asid_t asid)
1083{
1084    asid_pool_t *asidPool;
1085
1086    asidPool = armKSASIDTable[asid >> asidLowBits];
1087    assert(asidPool);
1088
1089    vspace_root_t *vtable = asidPool->array[asid & MASK(asidLowBits)];
1090    assert(vtable);
1091
1092    return vtable[VTABLE_VMID_SLOT];
1093}
1094
1095static void storeHWASID(asid_t asid, hw_asid_t hw_asid)
1096{
1097    asid_pool_t *asidPool;
1098
1099    asidPool = armKSASIDTable[asid >> asidLowBits];
1100    assert(asidPool);
1101
1102    vspace_root_t *vtable = asidPool->array[asid & MASK(asidLowBits)];
1103    assert(vtable);
1104
1105    /* Store HW VMID in the last entry
1106       Masquerade as an invalid PDGE */
1107    vtable[VTABLE_VMID_SLOT] = vtable_invalid_new(hw_asid, true);
1108
1109    armKSHWASIDTable[hw_asid] = asid;
1110}
1111
/* Allocate a hardware ASID. First scan the whole hw_asid space, starting at
 * armKSNextASID, for an unused entry; if none is free, evict the entry at
 * armKSNextASID (round-robin) and return it. */
static hw_asid_t findFreeHWASID(void)
{
    word_t hw_asid_offset;
    hw_asid_t hw_asid;

    /* Find a free hardware ASID.
     * (hw_asid_t)-1 is the maximum hw_asid value, so the loop visits every
     * possible hw_asid exactly once; the addition below wraps modulo the
     * width of hw_asid_t by the cast/assignment truncation. */
    for (hw_asid_offset = 0;
         hw_asid_offset <= (word_t)((hw_asid_t) - 1);
         hw_asid_offset++) {
        hw_asid = armKSNextASID + ((hw_asid_t)hw_asid_offset);
        if (armKSHWASIDTable[hw_asid] == asidInvalid) {
            return hw_asid;
        }
    }

    hw_asid = armKSNextASID;

    /* If we've scanned the table without finding a free ASID */
    invalidateASID(armKSHWASIDTable[hw_asid]);

    /* Flush TLB */
    invalidateTranslationASID(hw_asid);
    armKSHWASIDTable[hw_asid] = asidInvalid;

    /* Increment the NextASID index (wraps with hw_asid_t overflow) */
    armKSNextASID++;

    return hw_asid;
}
1141
1142hw_asid_t getHWASID(asid_t asid)
1143{
1144    vspace_root_t stored_hw_asid;
1145
1146    stored_hw_asid = loadHWASID(asid);
1147    if (vtable_invalid_get_stored_asid_valid(stored_hw_asid)) {
1148        return vtable_invalid_get_stored_hw_asid(stored_hw_asid);
1149    } else {
1150        hw_asid_t new_hw_asid;
1151
1152        new_hw_asid = findFreeHWASID();
1153        storeHWASID(asid, new_hw_asid);
1154        return new_hw_asid;
1155    }
1156}
1157
1158static void invalidateASIDEntry(asid_t asid)
1159{
1160    vspace_root_t stored_hw_asid;
1161
1162    stored_hw_asid = loadHWASID(asid);
1163    if (vtable_invalid_get_stored_asid_valid(stored_hw_asid)) {
1164        armKSHWASIDTable[vtable_invalid_get_stored_hw_asid(stored_hw_asid)] =
1165            asidInvalid;
1166    }
1167    invalidateASID(asid);
1168}
1169
1170#endif
1171
1172#ifdef CONFIG_ARM_SMMU
1173static vspace_root_t getASIDBindCB(asid_t asid)
1174{
1175    asid_pool_t *asidPool;
1176
1177    asidPool = armKSASIDTable[asid >> asidLowBits];
1178    assert(asidPool);
1179
1180    vspace_root_t *vtable = asidPool->array[asid & MASK(asidLowBits)];
1181    assert(vtable);
1182
1183    return vtable[VTABLE_SMMU_SLOT];
1184}
1185
1186void increaseASIDBindCB(asid_t asid)
1187{
1188    asid_pool_t *asidPool;
1189    vspace_root_t stored_info;
1190
1191    asidPool = armKSASIDTable[asid >> asidLowBits];
1192    assert(asidPool);
1193
1194    vspace_root_t *vtable = asidPool->array[asid & MASK(asidLowBits)];
1195    assert(vtable);
1196
1197    stored_info = vtable[VTABLE_SMMU_SLOT];
1198    vtable[VTABLE_SMMU_SLOT] = vtable_invalid_smmu_new(vtable_invalid_get_bind_cb(stored_info) + 1);
1199}
1200
1201void decreaseASIDBindCB(asid_t asid)
1202{
1203    asid_pool_t *asidPool;
1204    vspace_root_t stored_info;
1205
1206    asidPool = armKSASIDTable[asid >> asidLowBits];
1207    assert(asidPool);
1208
1209    vspace_root_t *vtable = asidPool->array[asid & MASK(asidLowBits)];
1210    assert(vtable);
1211
1212    stored_info = vtable[VTABLE_SMMU_SLOT];
1213    vtable[VTABLE_SMMU_SLOT] = vtable_invalid_smmu_new(vtable_invalid_get_bind_cb(stored_info) - 1);
1214}
1215#endif
1216
/* Invalidate all TLB entries tagged with `asid`, and (when SMMU support is
 * enabled and context banks are bound) the corresponding SMMU TLB entries. */
static inline void invalidateTLBByASID(asid_t asid)
{
#ifdef CONFIG_ARM_SMMU
    /* Only touch the SMMU if at least one context bank is bound. */
    vspace_root_t bind_cb = getASIDBindCB(asid);
    if (unlikely(vtable_invalid_get_bind_cb(bind_cb))) {
        invalidateSMMUTLBByASID(asid, vtable_invalid_get_bind_cb(bind_cb));
    }
#endif
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    vspace_root_t stored_hw_asid;

    /* Under hyp the TLB is tagged with the bound hardware ASID (VMID);
     * if none is bound there is nothing in the TLB for this ASID. */
    stored_hw_asid = loadHWASID(asid);
    if (!vtable_invalid_get_stored_asid_valid(stored_hw_asid)) {
        return;
    }
    invalidateTranslationASID(vtable_invalid_get_stored_hw_asid(stored_hw_asid));
#else
    invalidateTranslationASID(asid);
#endif
}
1237
/* Invalidate the TLB entry for a single virtual address tagged with `asid`
 * (plus SMMU TLB entries when context banks are bound). */
static inline void invalidateTLBByASIDVA(asid_t asid, vptr_t vaddr)
{
#ifdef CONFIG_ARM_SMMU
    /* Only touch the SMMU if at least one context bank is bound. */
    vspace_root_t bind_cb = getASIDBindCB(asid);
    if (unlikely(vtable_invalid_get_bind_cb(bind_cb))) {
        invalidateSMMUTLBByASIDVA(asid, vaddr, vtable_invalid_get_bind_cb(bind_cb));
    }
#endif
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    vspace_root_t stored_hw_asid;

    /* No hardware ASID bound means no TLB entries to invalidate. */
    stored_hw_asid = loadHWASID(asid);
    if (!vtable_invalid_get_stored_asid_valid(stored_hw_asid)) {
        return;
    }
    /* Operand packing: ASID in the top 16 bits, page number below
     * (>> binds tighter than |, so vaddr is shifted before the OR). */
    uint64_t hw_asid = vtable_invalid_get_stored_hw_asid(stored_hw_asid);
    invalidateTranslationSingle((hw_asid << 48) | vaddr >> seL4_PageBits);
#else
    invalidateTranslationSingle((asid << 48) | vaddr >> seL4_PageBits);
#endif
}
1259
1260pde_t *pageTableMapped(asid_t asid, vptr_t vaddr, pte_t *pt)
1261{
1262    findVSpaceForASID_ret_t find_ret;
1263    lookupPDSlot_ret_t lu_ret;
1264
1265    find_ret = findVSpaceForASID(asid);
1266    if (find_ret.status != EXCEPTION_NONE) {
1267        return NULL;
1268    }
1269
1270    lu_ret = lookupPDSlot(find_ret.vspace_root, vaddr);
1271    if (lu_ret.status != EXCEPTION_NONE) {
1272        return NULL;
1273    }
1274
1275    if (pde_pde_small_ptr_get_present(lu_ret.pdSlot) &&
1276        (pde_pde_small_ptr_get_pt_base_address(lu_ret.pdSlot) == pptr_to_paddr(pt))) {
1277        return lu_ret.pdSlot;
1278    }
1279
1280    return NULL;
1281}
1282
1283void unmapPageUpperDirectory(asid_t asid, vptr_t vaddr, pude_t *pud)
1284{
1285    pgde_t *pgdSlot;
1286
1287    pgdSlot = pageUpperDirectoryMapped(asid, vaddr, pud);
1288    if (likely(pgdSlot != NULL)) {
1289#ifdef CONFIG_ARM_SMMU
1290        *pgdSlot = pgde_pgde_invalid_new(0, false, 0);
1291#else
1292        *pgdSlot = pgde_pgde_invalid_new(0, false);
1293#endif
1294        cleanByVA_PoU((vptr_t)pgdSlot, pptr_to_paddr(pgdSlot));
1295        invalidateTLBByASID(asid);
1296    }
1297}
1298
1299void unmapPageDirectory(asid_t asid, vptr_t vaddr, pde_t *pd)
1300{
1301    pude_t *pudSlot;
1302
1303    pudSlot = pageDirectoryMapped(asid, vaddr, pd);
1304    if (likely(pudSlot != NULL)) {
1305        *pudSlot = pude_invalid_new();
1306
1307        cleanByVA_PoU((vptr_t)pudSlot, pptr_to_paddr(pudSlot));
1308        invalidateTLBByASID(asid);
1309    }
1310}
1311
1312void unmapPageTable(asid_t asid, vptr_t vaddr, pte_t *pt)
1313{
1314    pde_t *pdSlot;
1315
1316    pdSlot = pageTableMapped(asid, vaddr, pt);
1317    if (likely(pdSlot != NULL)) {
1318        *pdSlot = pde_invalid_new();
1319
1320        cleanByVA_PoU((vptr_t)pdSlot, pptr_to_paddr(pdSlot));
1321        invalidateTLBByASID(asid);
1322    }
1323}
1324
/* Remove the mapping of the frame at `pptr` from (asid, vptr), for the
 * given page size. Silently does nothing if the ASID does not resolve, the
 * lookup fails, or the slot maps a different frame; always finishes with a
 * by-VA TLB invalidation for the ASID. */
void unmapPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, pptr_t pptr)
{
    paddr_t addr;
    findVSpaceForASID_ret_t find_ret;

    addr = pptr_to_paddr((void *)pptr);
    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        return;
    }

    switch (page_size) {
    case ARMSmallPage: {
        lookupPTSlot_ret_t lu_ret;

        lu_ret = lookupPTSlot(find_ret.vspace_root, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }

        /* Only clear the slot if it actually maps this frame. */
        if (pte_ptr_get_present(lu_ret.ptSlot) &&
            pte_ptr_get_page_base_address(lu_ret.ptSlot) == addr) {
            *(lu_ret.ptSlot) = pte_invalid_new();

            /* Clean so the table walker observes the cleared entry. */
            cleanByVA_PoU((vptr_t)lu_ret.ptSlot, pptr_to_paddr(lu_ret.ptSlot));
        }
        break;
    }

    case ARMLargePage: {
        lookupPDSlot_ret_t lu_ret;

        lu_ret = lookupPDSlot(find_ret.vspace_root, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }

        if (pde_pde_large_ptr_get_present(lu_ret.pdSlot) &&
            pde_pde_large_ptr_get_page_base_address(lu_ret.pdSlot) == addr) {
            *(lu_ret.pdSlot) = pde_invalid_new();

            cleanByVA_PoU((vptr_t)lu_ret.pdSlot, pptr_to_paddr(lu_ret.pdSlot));
        }
        break;
    }

    case ARMHugePage: {
        lookupPUDSlot_ret_t lu_ret;

        lu_ret = lookupPUDSlot(find_ret.vspace_root, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }

        if (pude_pude_1g_ptr_get_present(lu_ret.pudSlot) &&
            pude_pude_1g_ptr_get_page_base_address(lu_ret.pudSlot) == addr) {
            *(lu_ret.pudSlot) = pude_invalid_new();

            cleanByVA_PoU((vptr_t)lu_ret.pudSlot, pptr_to_paddr(lu_ret.pudSlot));
        }
        break;
    }

    default:
        fail("Invalid ARM page type");
    }

    /* ASID must fit the 16-bit field used by the TLBI operand packing. */
    assert(asid < BIT(16));
    invalidateTLBByASIDVA(asid, vptr);
}
1395
1396void deleteASID(asid_t asid, vspace_root_t *vspace)
1397{
1398    asid_pool_t *poolPtr;
1399
1400    poolPtr = armKSASIDTable[asid >> asidLowBits];
1401
1402    if (poolPtr != NULL && poolPtr->array[asid & MASK(asidLowBits)] == vspace) {
1403        invalidateTLBByASID(asid);
1404#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1405        invalidateASIDEntry(asid);
1406#endif
1407        poolPtr->array[asid & MASK(asidLowBits)] = NULL;
1408        setVMRoot(NODE_STATE(ksCurThread));
1409    }
1410}
1411
1412void deleteASIDPool(asid_t asid_base, asid_pool_t *pool)
1413{
1414    word_t offset;
1415
1416    assert((asid_base & MASK(asidLowBits)) == 0);
1417
1418    if (armKSASIDTable[asid_base >> asidLowBits] == pool) {
1419        for (offset = 0; offset < BIT(asidLowBits); offset++) {
1420            if (pool->array[offset]) {
1421                invalidateTLBByASID(asid_base + offset);
1422#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
1423                invalidateASIDEntry(asid_base + offset);
1424#endif
1425            }
1426        }
1427        armKSASIDTable[asid_base >> asidLowBits] = NULL;
1428        setVMRoot(NODE_STATE(ksCurThread));
1429    }
1430}
1431
/* Perform the cache-maintenance operation selected by `invLabel` over the
 * virtual range [start, end) whose physical start is `pstart`. The caller
 * must ensure the range is mapped in the current address space. */
static void doFlush(int invLabel, vptr_t start, vptr_t end, paddr_t pstart)
{
    switch (invLabel) {
    case ARMVSpaceClean_Data:
    case ARMPageClean_Data:
        cleanCacheRange_RAM(start, end, pstart);
        break;

    case ARMVSpaceInvalidate_Data:
    case ARMPageInvalidate_Data:
        invalidateCacheRange_RAM(start, end, pstart);
        break;

    case ARMVSpaceCleanInvalidate_Data:
    case ARMPageCleanInvalidate_Data:
        cleanInvalidateCacheRange_RAM(start, end, pstart);
        break;

    case ARMVSpaceUnify_Instruction:
    case ARMPageUnify_Instruction:
        /* First clean data lines to point of unification... */
        cleanCacheRange_PoU(start, end, pstart);
        /* Ensure it's been written. */
        dsb();
        /* ...then invalidate the corresponding instruction lines
           to point of unification... */
        invalidateCacheRange_I(start, end, pstart);
        /* ... and ensure new instructions come from fresh cache lines. */
        isb();
        break;
    default:
        fail("Invalid operation, shouldn't get here.\n");
    }
}
1466
1467/* ================= INVOCATION HANDLING STARTS HERE ================== */
1468
1469static exception_t performVSpaceFlush(int invLabel, vspace_root_t *vspaceRoot, asid_t asid,
1470                                      vptr_t start, vptr_t end, paddr_t pstart)
1471{
1472
1473    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
1474        word_t size = end - start;
1475        start = (vptr_t)paddr_to_pptr(pstart);
1476        end = start + size;
1477        if (start < end) {
1478            doFlush(invLabel, start, end, pstart);
1479        }
1480    } else {
1481        bool_t root_switched;
1482
1483        /* Flush if given a non zero range */
1484        if (start < end) {
1485            root_switched = setVMRootForFlush(vspaceRoot, asid);
1486            doFlush(invLabel, start, end, pstart);
1487            if (root_switched) {
1488                setVMRoot(NODE_STATE(ksCurThread));
1489            }
1490        }
1491    }
1492    return EXCEPTION_NONE;
1493}
1494
1495#ifndef AARCH64_VSPACE_S2_START_L1
1496static exception_t performUpperPageDirectoryInvocationMap(cap_t cap, cte_t *ctSlot, pgde_t pgde, pgde_t *pgdSlot)
1497{
1498    ctSlot->cap = cap;
1499    *pgdSlot = pgde;
1500    cleanByVA_PoU((vptr_t)pgdSlot, pptr_to_paddr(pgdSlot));
1501
1502    return EXCEPTION_NONE;
1503}
1504
1505static exception_t performUpperPageDirectoryInvocationUnmap(cap_t cap, cte_t *ctSlot)
1506{
1507    if (cap_page_upper_directory_cap_get_capPUDIsMapped(cap)) {
1508        pude_t *pud = PUD_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(cap));
1509        unmapPageUpperDirectory(cap_page_upper_directory_cap_get_capPUDMappedASID(cap),
1510                                cap_page_upper_directory_cap_get_capPUDMappedAddress(cap), pud);
1511        clearMemory((void *)pud, cap_get_capSizeBits(cap));
1512    }
1513
1514    cap_page_upper_directory_cap_ptr_set_capPUDIsMapped(&(ctSlot->cap), 0);
1515    return EXCEPTION_NONE;
1516}
1517#endif
1518
1519static exception_t performPageDirectoryInvocationMap(cap_t cap, cte_t *ctSlot, pude_t pude, pude_t *pudSlot)
1520{
1521    ctSlot->cap = cap;
1522    *pudSlot = pude;
1523    cleanByVA_PoU((vptr_t)pudSlot, pptr_to_paddr(pudSlot));
1524
1525    return EXCEPTION_NONE;
1526}
1527
1528static exception_t performPageDirectoryInvocationUnmap(cap_t cap, cte_t *ctSlot)
1529{
1530    if (cap_page_directory_cap_get_capPDIsMapped(cap)) {
1531        pde_t *pd = PD_PTR(cap_page_directory_cap_get_capPDBasePtr(cap));
1532        unmapPageDirectory(cap_page_directory_cap_get_capPDMappedASID(cap),
1533                           cap_page_directory_cap_get_capPDMappedAddress(cap), pd);
1534        clearMemory((void *)pd, cap_get_capSizeBits(cap));
1535    }
1536
1537    cap_page_directory_cap_ptr_set_capPDIsMapped(&(ctSlot->cap), 0);
1538    return EXCEPTION_NONE;
1539}
1540
1541static exception_t performPageTableInvocationMap(cap_t cap, cte_t *ctSlot, pde_t pde, pde_t *pdSlot)
1542{
1543    ctSlot->cap = cap;
1544    *pdSlot = pde;
1545    cleanByVA_PoU((vptr_t)pdSlot, pptr_to_paddr(pdSlot));
1546
1547    return EXCEPTION_NONE;
1548}
1549
1550static exception_t performPageTableInvocationUnmap(cap_t cap, cte_t *ctSlot)
1551{
1552    if (cap_page_table_cap_get_capPTIsMapped(cap)) {
1553        pte_t *pt = PT_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
1554        unmapPageTable(cap_page_table_cap_get_capPTMappedASID(cap),
1555                       cap_page_table_cap_get_capPTMappedAddress(cap), pt);
1556        clearMemory((void *)pt, cap_get_capSizeBits(cap));
1557    }
1558
1559    cap_page_table_cap_ptr_set_capPTIsMapped(&(ctSlot->cap), 0);
1560    return EXCEPTION_NONE;
1561}
1562
1563static exception_t performHugePageInvocationMap(asid_t asid, cap_t cap, cte_t *ctSlot,
1564                                                pude_t pude, pude_t *pudSlot)
1565{
1566    bool_t tlbflush_required = pude_pude_1g_ptr_get_present(pudSlot);
1567
1568    ctSlot->cap = cap;
1569    *pudSlot = pude;
1570
1571    cleanByVA_PoU((vptr_t)pudSlot, pptr_to_paddr(pudSlot));
1572    if (unlikely(tlbflush_required)) {
1573        assert(asid < BIT(16));
1574        invalidateTLBByASIDVA(asid, cap_frame_cap_get_capFMappedAddress(cap));
1575    }
1576
1577    return EXCEPTION_NONE;
1578}
1579
1580static exception_t performLargePageInvocationMap(asid_t asid, cap_t cap, cte_t *ctSlot,
1581                                                 pde_t pde, pde_t *pdSlot)
1582{
1583    bool_t tlbflush_required = pde_pde_large_ptr_get_present(pdSlot);
1584
1585    ctSlot->cap = cap;
1586    *pdSlot = pde;
1587
1588    cleanByVA_PoU((vptr_t)pdSlot, pptr_to_paddr(pdSlot));
1589    if (unlikely(tlbflush_required)) {
1590        assert(asid < BIT(16));
1591        invalidateTLBByASIDVA(asid, cap_frame_cap_get_capFMappedAddress(cap));
1592    }
1593
1594    return EXCEPTION_NONE;
1595}
1596
1597static exception_t performSmallPageInvocationMap(asid_t asid, cap_t cap, cte_t *ctSlot,
1598                                                 pte_t pte, pte_t *ptSlot)
1599{
1600    bool_t tlbflush_required = pte_ptr_get_present(ptSlot);
1601
1602    ctSlot->cap = cap;
1603    *ptSlot = pte;
1604
1605    cleanByVA_PoU((vptr_t)ptSlot, pptr_to_paddr(ptSlot));
1606    if (unlikely(tlbflush_required)) {
1607        assert(asid < BIT(16));
1608        invalidateTLBByASIDVA(asid, cap_frame_cap_get_capFMappedAddress(cap));
1609    }
1610
1611    return EXCEPTION_NONE;
1612}
1613
1614static exception_t performPageInvocationUnmap(cap_t cap, cte_t *ctSlot)
1615{
1616    if (cap_frame_cap_get_capFMappedASID(cap) != 0) {
1617
1618        unmapPage(cap_frame_cap_get_capFSize(cap),
1619                  cap_frame_cap_get_capFMappedASID(cap),
1620                  cap_frame_cap_get_capFMappedAddress(cap),
1621                  cap_frame_cap_get_capFBasePtr(cap));
1622    }
1623
1624    cap_frame_cap_ptr_set_capFMappedASID(&ctSlot->cap, asidInvalid);
1625    cap_frame_cap_ptr_set_capFMappedAddress(&ctSlot->cap, 0);
1626    return EXCEPTION_NONE;
1627}
1628
1629static exception_t performPageFlush(int invLabel, vspace_root_t *vspaceRoot, asid_t asid,
1630                                    vptr_t start, vptr_t end, paddr_t pstart)
1631{
1632    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
1633        /* We flush the cache with kernel virtual addresses since
1634         * the user virtual addresses are not valid in EL2.
1635         * Switching VMRoot is not required.
1636         */
1637        word_t size = end - start;
1638        start = (vptr_t)paddr_to_pptr(pstart);
1639        end = start + size;
1640
1641        if (start < end) {
1642            doFlush(invLabel, start, end, pstart);
1643        }
1644    } else {
1645        bool_t root_switched;
1646
1647        if (start < end) {
1648            root_switched = setVMRootForFlush(vspaceRoot, asid);
1649            doFlush(invLabel, start, end, pstart);
1650            if (root_switched) {
1651                setVMRoot(NODE_STATE(ksCurThread));
1652            }
1653        }
1654    }
1655    return EXCEPTION_NONE;
1656}
1657
1658static exception_t performPageGetAddress(pptr_t base_ptr)
1659{
1660    paddr_t base = pptr_to_paddr((void *)base_ptr);
1661
1662    setRegister(NODE_STATE(ksCurThread), msgRegisters[0], base);
1663    setRegister(NODE_STATE(ksCurThread), msgInfoRegister,
1664                wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, 1)));
1665
1666    return EXCEPTION_NONE;
1667}
1668
1669static exception_t performASIDControlInvocation(void *frame, cte_t *slot,
1670                                                cte_t *parent, asid_t asid_base)
1671{
1672    cap_untyped_cap_ptr_set_capFreeIndex(&(parent->cap),
1673                                         MAX_FREE_INDEX(cap_untyped_cap_get_capBlockSize(parent->cap)));
1674
1675    memzero(frame, BIT(seL4_ASIDPoolBits));
1676
1677    cteInsert(
1678        cap_asid_pool_cap_new(
1679            asid_base,         /* capASIDBase  */
1680            WORD_REF(frame)    /* capASIDPool  */
1681        ), parent, slot);
1682
1683    assert((asid_base & MASK(asidLowBits)) == 0);
1684    armKSASIDTable[asid_base >> asidLowBits] = (asid_pool_t *)frame;
1685
1686    return EXCEPTION_NONE;
1687}
1688
/* Decode a VSpace-root invocation (the cache maintenance operations).
 * Validates the message, the cap, and the flush range before dispatching to
 * performVSpaceFlush(). The order of the error checks is part of the
 * observable ABI and must not be changed. */
static exception_t decodeARMVSpaceRootInvocation(word_t invLabel, unsigned int length,
                                                 cte_t *cte, cap_t cap, extra_caps_t extraCaps,
                                                 word_t *buffer)
{
    vptr_t start, end;
    paddr_t pstart;
    asid_t asid;
    vspace_root_t *vspaceRoot;
    lookupFrame_ret_t resolve_ret;
    findVSpaceForASID_ret_t find_ret;

    switch (invLabel) {
    case ARMVSpaceClean_Data:
    case ARMVSpaceInvalidate_Data:
    case ARMVSpaceCleanInvalidate_Data:
    case ARMVSpaceUnify_Instruction:

        /* Two message words: start and end of the range to flush. */
        if (length < 2) {
            userError("VSpaceRoot Flush: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        start = getSyscallArg(0, buffer);
        end =   getSyscallArg(1, buffer);

        /* Check sanity of arguments */
        if (end <= start) {
            userError("VSpaceRoot Flush: Invalid range.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Don't let applications flush kernel regions. */
        if (end > USER_TOP) {
            userError("VSpaceRoot Flush: Exceed the user addressable region.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(!isValidNativeRoot(cap))) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Make sure that the supplied pgd is ok */
        vspaceRoot = cap_vtable_root_get_basePtr(cap);
        asid = cap_vtable_root_get_mappedASID(cap);

        find_ret = findVSpaceForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            userError("VSpaceRoot Flush: No VSpace for ASID");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* The ASID must still resolve to this exact vspace root. */
        if (unlikely(find_ret.vspace_root != vspaceRoot)) {
            userError("VSpaceRoot Flush: Invalid VSpace Cap");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Look up the frame containing 'start'. */
        resolve_ret = lookupFrame(vspaceRoot, start);

        if (!resolve_ret.valid) {
            /* Fail silently, as there can't be any stale cached data (for the
             * given address space), and getting a syscall error because the
             * relevant page is non-resident would be 'astonishing'. */
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return EXCEPTION_NONE;
        }

        /* Refuse to cross a page boundary. */
        if (PAGE_BASE(start, resolve_ret.frameSize) != PAGE_BASE(end - 1, resolve_ret.frameSize)) {
            current_syscall_error.type = seL4_RangeError;
            current_syscall_error.rangeErrorMin = start;
            current_syscall_error.rangeErrorMax = PAGE_BASE(start, resolve_ret.frameSize) +
                                                  MASK(pageBitsForSize(resolve_ret.frameSize));
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Calculate the physical start address. */
        pstart = resolve_ret.frameBase + PAGE_OFFSET(start, resolve_ret.frameSize);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performVSpaceFlush(invLabel, vspaceRoot, asid, start, end - 1, pstart);

    default:
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}
1786
1787#ifndef AARCH64_VSPACE_S2_START_L1
/* Decode an invocation on a PageUpperDirectory (PUD) capability.
 *
 * Supported labels:
 *  - ARMPageUpperDirectoryUnmap: no arguments; the cap must be final.
 *  - ARMPageUpperDirectoryMap:   arg0 = vaddr, extraCap0 = vspace root (PGD).
 *
 * On a successful Map decode, the capability is updated with its mapping
 * state and a PGD entry pointing at this PUD is installed by
 * performUpperPageDirectoryInvocationMap().
 */
static exception_t decodeARMPageUpperDirectoryInvocation(word_t invLabel, unsigned int length,
                                                         cte_t *cte, cap_t cap, extra_caps_t extraCaps,
                                                         word_t *buffer)
{
    cap_t pgdCap;
    vspace_root_t *pgd;
    pgde_t pgde;
    asid_t asid;
    vptr_t vaddr;
    lookupPGDSlot_ret_t pgdSlot;
    findVSpaceForASID_ret_t find_ret;

    if (invLabel == ARMPageUpperDirectoryUnmap) {
        /* Only the final capability to the PUD may unmap it; otherwise a
         * copy could still refer to the mapping. */
        if (unlikely(!isFinalCapability(cte))) {
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performUpperPageDirectoryInvocationUnmap(cap, cte);
    }

    if (unlikely(invLabel != ARMPageUpperDirectoryMap)) {
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Map requires two message registers and the target vspace root as an
     * extra cap; only arg0 (vaddr) is consumed here. */
    if (unlikely(length < 2 || extraCaps.excaprefs[0] == NULL)) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* A PUD capability may only be mapped once. */
    if (unlikely(cap_page_upper_directory_cap_get_capPUDIsMapped(cap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Align the requested address down to the region one PGD entry covers. */
    vaddr = getSyscallArg(0, buffer) & (~MASK(PGD_INDEX_OFFSET));
    pgdCap = extraCaps.excaprefs[0]->cap;

    if (unlikely(!isValidNativeRoot(pgdCap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pgd = cap_vtable_root_get_basePtr(pgdCap);
    asid = cap_vtable_root_get_mappedASID(pgdCap);

    if (unlikely(vaddr > USER_TOP)) {
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* The ASID recorded in the vspace cap must still resolve to that same
     * vspace, i.e. the assignment has not been changed underneath us. */
    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(find_ret.vspace_root != pgd)) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pgdSlot = lookupPGDSlot(pgd, vaddr);

    /* The destination PGD slot must currently be empty. */
    if (unlikely(pgde_pgde_pud_ptr_get_present(pgdSlot.pgdSlot))) {
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pgde = pgde_pgde_pud_new(
               pptr_to_paddr(PUDE_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(cap))));

    /* Record the mapping in the capability itself. */
    cap_page_upper_directory_cap_ptr_set_capPUDIsMapped(&cap, 1);
    cap_page_upper_directory_cap_ptr_set_capPUDMappedASID(&cap, asid);
    cap_page_upper_directory_cap_ptr_set_capPUDMappedAddress(&cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performUpperPageDirectoryInvocationMap(cap, cte, pgde, pgdSlot.pgdSlot);
}
1874#endif
1875
/* Decode an invocation on a PageDirectory (PD) capability.
 *
 * Supported labels:
 *  - ARMPageDirectoryUnmap: no arguments; the cap must be final.
 *  - ARMPageDirectoryMap:   arg0 = vaddr, extraCap0 = vspace root.
 *
 * On a successful Map decode, the capability is updated with its mapping
 * state and a PUD entry pointing at this PD is installed by
 * performPageDirectoryInvocationMap().
 */
static exception_t decodeARMPageDirectoryInvocation(word_t invLabel, unsigned int length,
                                                    cte_t *cte, cap_t cap, extra_caps_t extraCaps,
                                                    word_t *buffer)
{
    cap_t vspaceRootCap;
    vspace_root_t *vspaceRoot;
    pude_t pude;
    asid_t asid;
    vptr_t vaddr;
    lookupPUDSlot_ret_t pudSlot;
    findVSpaceForASID_ret_t find_ret;

    if (invLabel == ARMPageDirectoryUnmap) {
        /* Only the final capability to the PD may unmap it. */
        if (unlikely(!isFinalCapability(cte))) {
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageDirectoryInvocationUnmap(cap, cte);
    }

    if (unlikely(invLabel != ARMPageDirectoryMap)) {
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Map requires two message registers and the target vspace root as an
     * extra cap; only arg0 (vaddr) is consumed here. */
    if (unlikely(length < 2 || extraCaps.excaprefs[0] == NULL)) {
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* A PD capability may only be mapped once. */
    if (unlikely(cap_page_directory_cap_get_capPDIsMapped(cap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Align the requested address down to the region one PUD entry covers. */
    vaddr = getSyscallArg(0, buffer) & (~MASK(PUD_INDEX_OFFSET));
    vspaceRootCap = extraCaps.excaprefs[0]->cap;

    if (unlikely(!isValidNativeRoot(vspaceRootCap))) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    vspaceRoot = cap_vtable_root_get_basePtr(vspaceRootCap);
    asid = cap_vtable_root_get_mappedASID(vspaceRootCap);

    if (unlikely(vaddr > USER_TOP)) {
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* The ASID recorded in the vspace cap must still resolve to that same
     * vspace root. */
    find_ret = findVSpaceForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(find_ret.vspace_root != vspaceRoot)) {
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* A PUD level must already be mapped at this address. */
    pudSlot = lookupPUDSlot(vspaceRoot, vaddr);

    if (pudSlot.status != EXCEPTION_NONE) {
        current_syscall_error.type = seL4_FailedLookup;
        current_syscall_error.failedLookupWasSource = false;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* The destination slot must not hold a PD or a 1GiB page mapping. */
    if (unlikely(pude_pude_pd_ptr_get_present(pudSlot.pudSlot) ||
                 pude_pude_1g_ptr_get_present(pudSlot.pudSlot))) {
        current_syscall_error.type = seL4_DeleteFirst;
        return EXCEPTION_SYSCALL_ERROR;
    }

    pude = pude_pude_pd_new(pptr_to_paddr(PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(cap))));

    /* Record the mapping in the capability itself. */
    cap_page_directory_cap_ptr_set_capPDIsMapped(&cap, 1);
    cap_page_directory_cap_ptr_set_capPDMappedASID(&cap, asid);
    cap_page_directory_cap_ptr_set_capPDMappedAddress(&cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performPageDirectoryInvocationMap(cap, cte, pude, pudSlot.pudSlot);
}
1968
1969static exception_t decodeARMPageTableInvocation(word_t invLabel, unsigned int length,
1970                                                cte_t *cte, cap_t cap, extra_caps_t extraCaps,
1971                                                word_t *buffer)
1972{
1973    cap_t vspaceRootCap;
1974    vspace_root_t *vspaceRoot;
1975    pde_t pde;
1976    asid_t asid;
1977    vptr_t vaddr;
1978    lookupPDSlot_ret_t pdSlot;
1979    findVSpaceForASID_ret_t find_ret;
1980
1981    if (invLabel == ARMPageTableUnmap) {
1982        if (unlikely(!isFinalCapability(cte))) {
1983            current_syscall_error.type = seL4_RevokeFirst;
1984            return EXCEPTION_SYSCALL_ERROR;
1985        }
1986
1987        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1988        return performPageTableInvocationUnmap(cap, cte);
1989    }
1990
1991    if (unlikely(invLabel != ARMPageTableMap)) {
1992        current_syscall_error.type = seL4_IllegalOperation;
1993        return EXCEPTION_SYSCALL_ERROR;
1994    }
1995
1996    if (unlikely(length < 2 || extraCaps.excaprefs[0] == NULL)) {
1997        current_syscall_error.type = seL4_TruncatedMessage;
1998        return EXCEPTION_SYSCALL_ERROR;
1999    }
2000
2001    if (unlikely(cap_page_table_cap_get_capPTIsMapped(cap))) {
2002        current_syscall_error.type = seL4_InvalidCapability;
2003        current_syscall_error.invalidCapNumber = 0;
2004        return EXCEPTION_SYSCALL_ERROR;
2005    }
2006
2007    vaddr = getSyscallArg(0, buffer) & (~MASK(PD_INDEX_OFFSET));
2008    vspaceRootCap = extraCaps.excaprefs[0]->cap;
2009
2010    if (unlikely(!isValidNativeRoot(vspaceRootCap))) {
2011        current_syscall_error.type = seL4_InvalidCapability;
2012        current_syscall_error.invalidCapNumber = 1;
2013        return EXCEPTION_SYSCALL_ERROR;
2014    }
2015
2016    vspaceRoot = cap_vtable_root_get_basePtr(vspaceRootCap);
2017    asid = cap_vtable_root_get_mappedASID(vspaceRootCap);
2018
2019    if (unlikely(vaddr > USER_TOP)) {
2020        current_syscall_error.type = seL4_InvalidArgument;
2021        current_syscall_error.invalidArgumentNumber = 0;
2022        return EXCEPTION_SYSCALL_ERROR;
2023    }
2024
2025    find_ret = findVSpaceForASID(asid);
2026    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
2027        current_syscall_error.type = seL4_FailedLookup;
2028        current_syscall_error.failedLookupWasSource = false;
2029        return EXCEPTION_SYSCALL_ERROR;
2030    }
2031
2032    if (unlikely(find_ret.vspace_root != vspaceRoot)) {
2033        current_syscall_error.type = seL4_InvalidCapability;
2034        current_syscall_error.invalidCapNumber = 1;
2035        return EXCEPTION_SYSCALL_ERROR;
2036    }
2037
2038    pdSlot = lookupPDSlot(vspaceRoot, vaddr);
2039
2040    if (pdSlot.status != EXCEPTION_NONE) {
2041        current_syscall_error.type = seL4_FailedLookup;
2042        current_syscall_error.failedLookupWasSource = false;
2043        return EXCEPTION_SYSCALL_ERROR;
2044    }
2045
2046    if (unlikely(pde_pde_small_ptr_get_present(pdSlot.pdSlot) ||
2047                 pde_pde_large_ptr_get_present(pdSlot.pdSlot))) {
2048        current_syscall_error.type = seL4_DeleteFirst;
2049        return EXCEPTION_SYSCALL_ERROR;
2050    }
2051
2052    pde = pde_pde_small_new(pptr_to_paddr(PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap))));
2053
2054    cap_page_table_cap_ptr_set_capPTIsMapped(&cap, 1);
2055    cap_page_table_cap_ptr_set_capPTMappedASID(&cap, asid);
2056    cap_page_table_cap_ptr_set_capPTMappedAddress(&cap, vaddr);
2057
2058    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2059    return performPageTableInvocationMap(cap, cte, pde, pdSlot.pdSlot);
2060}
2061
2062static exception_t decodeARMFrameInvocation(word_t invLabel, unsigned int length,
2063                                            cte_t *cte, cap_t cap, extra_caps_t extraCaps,
2064                                            word_t *buffer)
2065{
2066    switch (invLabel) {
2067    case ARMPageMap: {
2068        vptr_t vaddr;
2069        paddr_t base;
2070        cap_t vspaceRootCap;
2071        vspace_root_t *vspaceRoot;
2072        asid_t asid, frame_asid;
2073        vm_rights_t vmRights;
2074        vm_page_size_t frameSize;
2075        vm_attributes_t attributes;
2076        findVSpaceForASID_ret_t find_ret;
2077
2078        if (unlikely(length < 3 || extraCaps.excaprefs[0] == NULL)) {
2079            current_syscall_error.type = seL4_TruncatedMessage;
2080            return EXCEPTION_SYSCALL_ERROR;
2081        }
2082
2083        vaddr = getSyscallArg(0, buffer);
2084        attributes = vmAttributesFromWord(getSyscallArg(2, buffer));
2085        vspaceRootCap = extraCaps.excaprefs[0]->cap;
2086
2087        frameSize = cap_frame_cap_get_capFSize(cap);
2088        vmRights = maskVMRights(cap_frame_cap_get_capFVMRights(cap),
2089                                rightsFromWord(getSyscallArg(1, buffer)));
2090
2091        if (unlikely(!isValidNativeRoot(vspaceRootCap))) {
2092            current_syscall_error.type = seL4_InvalidCapability;
2093            current_syscall_error.invalidCapNumber = 1;
2094            return EXCEPTION_SYSCALL_ERROR;
2095        }
2096
2097        vspaceRoot = cap_vtable_root_get_basePtr(vspaceRootCap);
2098        asid = cap_vtable_root_get_mappedASID(vspaceRootCap);
2099
2100        find_ret = findVSpaceForASID(asid);
2101        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
2102            current_syscall_error.type = seL4_FailedLookup;
2103            current_syscall_error.failedLookupWasSource = false;
2104            return EXCEPTION_SYSCALL_ERROR;
2105        }
2106
2107        if (unlikely(find_ret.vspace_root != vspaceRoot)) {
2108            current_syscall_error.type = seL4_InvalidCapability;
2109            current_syscall_error.invalidCapNumber = 1;
2110            return EXCEPTION_SYSCALL_ERROR;
2111        }
2112
2113        if (unlikely(!IS_PAGE_ALIGNED(vaddr, frameSize))) {
2114            current_syscall_error.type = seL4_AlignmentError;
2115            return EXCEPTION_SYSCALL_ERROR;
2116        }
2117
2118        /* In the case of remap, the cap should have a valid asid */
2119        frame_asid = cap_frame_cap_ptr_get_capFMappedASID(&cap);
2120
2121        if (frame_asid != asidInvalid) {
2122            if (frame_asid != asid) {
2123                userError("ARMPageMap: Attempting to remap a frame that does not belong to the passed address space");
2124                current_syscall_error.type = seL4_InvalidCapability;
2125                current_syscall_error.invalidArgumentNumber = 0;
2126                return EXCEPTION_SYSCALL_ERROR;
2127
2128            } else if (cap_frame_cap_get_capFMappedAddress(cap) != vaddr) {
2129                userError("ARMPageMap: Attempting to map frame into multiple addresses");
2130                current_syscall_error.type = seL4_InvalidArgument;
2131                current_syscall_error.invalidArgumentNumber = 2;
2132                return EXCEPTION_SYSCALL_ERROR;
2133            }
2134        } else {
2135            if (unlikely(vaddr + BIT(pageBitsForSize(frameSize)) - 1 > USER_TOP)) {
2136                current_syscall_error.type = seL4_InvalidArgument;
2137                current_syscall_error.invalidArgumentNumber = 0;
2138                return EXCEPTION_SYSCALL_ERROR;
2139            }
2140        }
2141
2142        cap = cap_frame_cap_set_capFMappedASID(cap, asid);
2143        cap = cap_frame_cap_set_capFMappedAddress(cap, vaddr);
2144
2145        base = pptr_to_paddr((void *)cap_frame_cap_get_capFBasePtr(cap));
2146
2147        if (frameSize == ARMSmallPage) {
2148            lookupPTSlot_ret_t lu_ret = lookupPTSlot(vspaceRoot, vaddr);
2149
2150            if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
2151                current_syscall_error.type = seL4_FailedLookup;
2152                current_syscall_error.failedLookupWasSource = false;
2153                return EXCEPTION_SYSCALL_ERROR;
2154            }
2155
2156            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2157            return performSmallPageInvocationMap(asid, cap, cte,
2158                                                 makeUser3rdLevel(base, vmRights, attributes), lu_ret.ptSlot);
2159
2160        } else if (frameSize == ARMLargePage) {
2161            lookupPDSlot_ret_t lu_ret = lookupPDSlot(vspaceRoot, vaddr);
2162
2163            if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
2164                current_syscall_error.type = seL4_FailedLookup;
2165                current_syscall_error.failedLookupWasSource = false;
2166                return EXCEPTION_SYSCALL_ERROR;
2167            }
2168
2169            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2170            return performLargePageInvocationMap(asid, cap, cte,
2171                                                 makeUser2ndLevel(base, vmRights, attributes), lu_ret.pdSlot);
2172
2173        } else {
2174            lookupPUDSlot_ret_t lu_ret = lookupPUDSlot(vspaceRoot, vaddr);
2175
2176            if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
2177                current_syscall_error.type = seL4_FailedLookup;
2178                current_syscall_error.failedLookupWasSource = false;
2179                return EXCEPTION_SYSCALL_ERROR;
2180            }
2181
2182            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2183            return performHugePageInvocationMap(asid, cap, cte,
2184                                                makeUser1stLevel(base, vmRights, attributes), lu_ret.pudSlot);
2185        }
2186    }
2187
2188    case ARMPageUnmap:
2189        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2190        return performPageInvocationUnmap(cap, cte);
2191
2192    case ARMPageClean_Data:
2193    case ARMPageInvalidate_Data:
2194    case ARMPageCleanInvalidate_Data:
2195    case ARMPageUnify_Instruction: {
2196        vptr_t start, end;
2197        vptr_t vaddr;
2198        asid_t asid;
2199        word_t page_size;
2200        findVSpaceForASID_ret_t find_ret;
2201
2202        if (length < 2) {
2203            userError("Page Flush: Truncated message.");
2204            current_syscall_error.type = seL4_TruncatedMessage;
2205            return EXCEPTION_SYSCALL_ERROR;
2206        }
2207
2208        if (unlikely(cap_frame_cap_get_capFMappedASID(cap) == 0)) {
2209            userError("Page Flush: Frame is not mapped.");
2210            current_syscall_error.type = seL4_IllegalOperation;
2211            return EXCEPTION_SYSCALL_ERROR;
2212        }
2213
2214        vaddr = cap_frame_cap_get_capFMappedAddress(cap);
2215        asid = cap_frame_cap_get_capFMappedASID(cap);
2216
2217        find_ret = findVSpaceForASID(asid);
2218        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
2219            userError("Page Flush: No PGD for ASID");
2220            current_syscall_error.type = seL4_FailedLookup;
2221            current_syscall_error.failedLookupWasSource = false;
2222            return EXCEPTION_SYSCALL_ERROR;
2223        }
2224
2225        start = getSyscallArg(0, buffer);
2226        end =   getSyscallArg(1, buffer);
2227
2228        /* check that the range is sane */
2229        if (end <= start) {
2230            userError("PageFlush: Invalid range");
2231            current_syscall_error.type = seL4_InvalidArgument;
2232            current_syscall_error.invalidArgumentNumber = 1;
2233            return EXCEPTION_SYSCALL_ERROR;
2234        }
2235
2236        /* start and end are currently relative inside this page */
2237        page_size = BIT(pageBitsForSize(cap_frame_cap_get_capFSize(cap)));
2238        if (start >= page_size || end > page_size) {
2239            userError("Page Flush: Requested range not inside page");
2240            current_syscall_error.type = seL4_InvalidArgument;
2241            current_syscall_error.invalidArgumentNumber = 0;
2242            return EXCEPTION_SYSCALL_ERROR;
2243        }
2244
2245        word_t pstart = pptr_to_paddr((void *)cap_frame_cap_get_capFBasePtr(cap)) + start;
2246#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
2247        /* Don't let applications flush outside of the kernel window */
2248        if (pstart < PADDR_BASE || ((end - start) + pstart) > PADDR_TOP) {
2249            userError("Page Flush: Overlaps kernel region.");
2250            current_syscall_error.type = seL4_IllegalOperation;
2251            return EXCEPTION_SYSCALL_ERROR;
2252        }
2253#endif
2254
2255        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2256        return performPageFlush(invLabel, find_ret.vspace_root, asid, vaddr + start, vaddr + end - 1,
2257                                pstart);
2258    }
2259
2260    case ARMPageGetAddress:
2261        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
2262        return performPageGetAddress(cap_frame_cap_get_capFBasePtr(cap));
2263
2264    default:
2265        current_syscall_error.type = seL4_IllegalOperation;
2266        return EXCEPTION_SYSCALL_ERROR;
2267    }
2268}
2269
/* Top-level dispatcher for ARM MMU-related invocations.
 *
 * Routes the invocation to the decoder matching the invoked capability's
 * type, and handles ASID-control (pool creation) and ASID-pool (vspace
 * assignment) invocations inline.
 */
exception_t decodeARMMMUInvocation(word_t invLabel, word_t length, cptr_t cptr,
                                   cte_t *cte, cap_t cap, extra_caps_t extraCaps,
                                   word_t *buffer)
{
    switch (cap_get_capType(cap)) {
    case cap_vtable_root_cap:
        return decodeARMVSpaceRootInvocation(invLabel, length, cte, cap, extraCaps, buffer);
#ifndef AARCH64_VSPACE_S2_START_L1
    case cap_page_upper_directory_cap:
        return decodeARMPageUpperDirectoryInvocation(invLabel, length, cte,
                                                     cap, extraCaps, buffer);
#endif
    case cap_page_directory_cap:
        return decodeARMPageDirectoryInvocation(invLabel, length, cte,
                                                cap, extraCaps, buffer);

    case cap_page_table_cap:
        return decodeARMPageTableInvocation(invLabel, length, cte,
                                            cap, extraCaps, buffer);

    case cap_frame_cap:
        return decodeARMFrameInvocation(invLabel, length, cte,
                                        cap, extraCaps, buffer);

    case cap_asid_control_cap: {
        /* ARMASIDControlMakePool: retype an untyped of size
         * seL4_ASIDPoolBits into a new ASID pool and place the pool cap in
         * the CNode slot named by (root, index, depth).
         * args: arg0 = index, arg1 = depth;
         * extraCap0 = untyped, extraCap1 = destination CNode root. */
        unsigned int i;
        asid_t asid_base;
        word_t index, depth;
        cap_t untyped, root;
        cte_t *parentSlot, *destSlot;
        lookupSlot_ret_t lu_ret;
        void *frame;
        exception_t status;

        if (unlikely(invLabel != ARMASIDControlMakePool)) {
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(length < 2 ||
                     extraCaps.excaprefs[0] == NULL ||
                     extraCaps.excaprefs[1] == NULL)) {
            current_syscall_error.type = seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        index = getSyscallArg(0, buffer);
        depth = getSyscallArg(1, buffer);
        parentSlot = extraCaps.excaprefs[0];
        untyped = parentSlot->cap;
        root = extraCaps.excaprefs[1]->cap;

        /* Find first free pool */
        for (i = 0; i < nASIDPools && armKSASIDTable[i]; i++);

        if (unlikely(i == nASIDPools)) { /* If no unallocated pool is found */
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        asid_base = i << asidLowBits;

        /* The untyped must be exactly ASID-pool sized and not a device
         * untyped.
         * NOTE(review): the unlikely() hint closes before the capIsDevice
         * check; the condition itself is correct, the device check is
         * merely outside the branch-prediction hint. */
        if (unlikely(cap_get_capType(untyped) != cap_untyped_cap ||
                     cap_untyped_cap_get_capBlockSize(untyped) != seL4_ASIDPoolBits) ||
            cap_untyped_cap_get_capIsDevice(untyped)) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        /* The untyped must be unused (no children in the CDT). */
        status = ensureNoChildren(parentSlot);
        if (unlikely(status != EXCEPTION_NONE)) {
            return status;
        }

        frame = WORD_PTR(cap_untyped_cap_get_capPtr(untyped));

        lu_ret = lookupTargetSlot(root, index, depth);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return lu_ret.status;
        }
        destSlot = lu_ret.slot;

        status = ensureEmptySlot(destSlot);
        if (unlikely(status != EXCEPTION_NONE)) {
            return status;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDControlInvocation(frame, destSlot, parentSlot, asid_base);
    }

    case cap_asid_pool_cap: {
        /* ARMASIDPoolAssign: assign the next free ASID in this pool to the
         * (unmapped) vspace root supplied as extraCap0. */
        cap_t vspaceCap;
        cte_t *vspaceCapSlot;
        asid_pool_t *pool;
        unsigned int i;
        asid_t asid;

        if (unlikely(invLabel != ARMASIDPoolAssign)) {
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(extraCaps.excaprefs[0] == NULL)) {
            current_syscall_error.type = seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        vspaceCapSlot = extraCaps.excaprefs[0];
        vspaceCap = vspaceCapSlot->cap;

        /* The vspace must be a root that has not been assigned an ASID yet. */
        if (unlikely(!isVTableRoot(vspaceCap) || cap_vtable_root_isMapped(vspaceCap))) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        pool = armKSASIDTable[cap_asid_pool_cap_get_capASIDBase(cap) >> asidLowBits];

        if (unlikely(!pool)) {
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            current_lookup_fault = lookup_fault_invalid_root_new();

            return EXCEPTION_SYSCALL_ERROR;
        }

        /* The pool cap must refer to the pool registered for its base. */
        if (unlikely(pool != ASID_POOL_PTR(cap_asid_pool_cap_get_capASIDPool(cap)))) {
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;

            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Find first free ASID */
        asid = cap_asid_pool_cap_get_capASIDBase(cap);
        for (i = 0; i < (1 << asidLowBits) && (asid + i == 0 || pool->array[i]); i++);

        if (unlikely(i == 1 << asidLowBits)) {
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        asid += i;

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDPoolInvocation(asid, pool, vspaceCapSlot);
    }

    default:
        fail("Invalid ARM arch cap type");
    }
}
2432
2433#ifdef CONFIG_DEBUG_BUILD
2434void kernelPrefetchAbort(word_t pc) VISIBLE;
2435void kernelDataAbort(word_t pc) VISIBLE;
2436
2437void kernelPrefetchAbort(word_t pc)
2438{
2439    word_t ifsr = getIFSR();
2440
2441    printf("\n\nKERNEL PREFETCH ABORT!\n");
2442    printf("Faulting instruction: 0x%x\n", (unsigned int)pc);
2443    printf("ESR (IFSR): 0x%x\n", (unsigned int)ifsr);
2444
2445    halt();
2446}
2447
2448void kernelDataAbort(word_t pc)
2449{
2450    word_t dfsr = getDFSR();
2451    word_t far = getFAR();
2452
2453    printf("\n\nKERNEL DATA ABORT!\n");
2454    printf("Faulting instruction: 0x%lx\n", (unsigned long)pc);
2455    printf("FAR: 0x%lx ESR (DFSR): 0x%x\n", (unsigned long)far, (unsigned int)dfsr);
2456
2457    halt();
2458}
2459#endif
2460
2461#ifdef CONFIG_PRINTING
/* Result of readWordFromVSpace(): status is EXCEPTION_NONE on success, in
 * which case value holds the word read from the user's address space. */
typedef struct readWordFromVSpace_ret {
    exception_t status;
    word_t value;
} readWordFromVSpace_ret_t;
2466
2467static readWordFromVSpace_ret_t readWordFromVSpace(vspace_root_t *pd, word_t vaddr)
2468{
2469    lookupFrame_ret_t lookup_frame_ret;
2470    readWordFromVSpace_ret_t ret;
2471    word_t offset;
2472    pptr_t kernel_vaddr;
2473    word_t *value;
2474
2475    lookup_frame_ret = lookupFrame(pd, vaddr);
2476
2477    if (!lookup_frame_ret.valid) {
2478        ret.status = EXCEPTION_LOOKUP_FAULT;
2479        return ret;
2480    }
2481
2482    offset = vaddr & MASK(pageBitsForSize(lookup_frame_ret.frameSize));
2483    kernel_vaddr = (word_t)paddr_to_pptr(lookup_frame_ret.frameBase);
2484    value = (word_t *)(kernel_vaddr + offset);
2485
2486    ret.status = EXCEPTION_NONE;
2487    ret.value = *value;
2488    return ret;
2489}
2490
2491void Arch_userStackTrace(tcb_t *tptr)
2492{
2493    cap_t threadRoot;
2494    vspace_root_t *vspaceRoot;
2495    word_t sp;
2496    int i;
2497
2498    threadRoot = TCB_PTR_CTE_PTR(tptr, tcbVTable)->cap;
2499
2500    /* lookup the vspace root */
2501    if (cap_get_capType(threadRoot) != cap_vtable_root_cap) {
2502        printf("Invalid vspace\n");
2503        return;
2504    }
2505
2506    vspaceRoot = cap_vtable_root_get_basePtr(threadRoot);
2507    sp = getRegister(tptr, SP_EL0);
2508
2509    /* check for alignment so we don't have to worry about accessing
2510     * words that might be on two different pages */
2511    if (!IS_ALIGNED(sp, seL4_WordSizeBits)) {
2512        printf("SP not aligned\n");
2513        return;
2514    }
2515
2516    for (i = 0; i < CONFIG_USER_STACK_TRACE_LENGTH; i++) {
2517        word_t address = sp + (i * sizeof(word_t));
2518        readWordFromVSpace_ret_t result;
2519        result = readWordFromVSpace(vspaceRoot, address);
2520        if (result.status == EXCEPTION_NONE) {
2521            printf("0x%lx: 0x%lx\n", (unsigned long)address, (unsigned long)result.value);
2522        } else {
2523            printf("0x%lx: INVALID\n", (unsigned long)address);
2524        }
2525    }
2526}
2527#endif
2528
2529#if defined(CONFIG_KERNEL_LOG_BUFFER)
/* Map a user-supplied frame as the kernel's benchmark log buffer.
 *
 * frame_cptr must name a large-page (2M) frame capability in the current
 * thread's cspace. Its physical address is recorded in ksUserLogBuffer and
 * installed into the dedicated global log PDE, after which the page table
 * entry is cleaned to the point of unification and the stale translation
 * for KS_LOG_PPTR is invalidated.
 *
 * Returns EXCEPTION_NONE on success, EXCEPTION_SYSCALL_ERROR (with
 * current_fault set to a cap fault) on any lookup/type/size failure.
 */
exception_t benchmark_arch_map_logBuffer(word_t frame_cptr)
{
    lookupCapAndSlot_ret_t lu_ret;
    vm_page_size_t frameSize;
    pptr_t  frame_pptr;

    /* faulting section */
    lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), frame_cptr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invalid cap #%lu.", frame_cptr);
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(lu_ret.cap) != cap_frame_cap) {
        userError("Invalid cap. Log buffer should be of a frame cap");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frameSize = cap_frame_cap_get_capFSize(lu_ret.cap);

    if (frameSize != ARMLargePage) {
        userError("Invalid frame size. The kernel expects 2M log buffer");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frame_pptr = cap_frame_cap_get_capFBasePtr(lu_ret.cap);

    ksUserLogBuffer = pptr_to_paddr((void *) frame_pptr);

    /* Install the buffer into the reserved global PDE. Under the
     * hypervisor configuration a single XN bit exists; otherwise the
     * UXN bit is set so user mode can never execute from the buffer. */
    *armKSGlobalLogPDE = pde_pde_large_new(
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                             0, // XN
#else
                             1, // UXN
#endif
                             ksUserLogBuffer,
                             0,                         /* global */
                             1,                         /* access flag */
                             SMP_TERNARY(SMP_SHARE, 0), /* Inner-shareable if SMP enabled, otherwise unshared */
                             0,                         /* VMKernelOnly */
                             NORMAL);

    /* Make the updated PDE visible to the table walker, then drop any
     * stale translation for the log-buffer virtual address. */
    cleanByVA_PoU((vptr_t)armKSGlobalLogPDE, pptr_to_paddr(armKSGlobalLogPDE));
    invalidateTranslationSingle(KS_LOG_PPTR);
    return EXCEPTION_NONE;
}
2583#endif /* CONFIG_KERNEL_LOG_BUFFER */
2584
2585