/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <types.h>
#include <benchmark/benchmark.h>
#include <api/failures.h>
#include <api/syscall.h>
#include <kernel/boot.h>
#include <kernel/cspace.h>
#include <kernel/thread.h>
#include <kernel/stack.h>
#include <machine/io.h>
#include <machine/debug.h>
#include <model/statedata.h>
#include <object/cnode.h>
#include <object/untyped.h>
#include <arch/api/invocation.h>
#include <arch/kernel/vspace.h>
#include <linker.h>
#include <plat/machine/hardware.h>
#include <armv/context_switch.h>
#include <arch/object/iospace.h>
#include <arch/object/vcpu.h>
#include <arch/machine/tlb.h>

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
#include <benchmark/benchmark_track.h>
#endif

/* ARM uses multiple identical mappings in a page table / page directory to construct
 * large mappings. In both cases it happens to be 16 entries, which can be calculated by
 * looking at the size difference of the mappings, and is done this way to remove magic
 * numbers littering this code and make it clear what is going on */
#define SECTIONS_PER_SUPER_SECTION BIT(ARMSuperSectionBits - ARMSectionBits)
#define PAGES_PER_LARGE_PAGE BIT(ARMLargePageBits - ARMSmallPageBits)
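/* Illustrative arithmetic (assuming the usual short-descriptor sizes:
 * ARMSuperSectionBits == 24, ARMSectionBits == 20, ARMLargePageBits == 16,
 * ARMSmallPageBits == 12):
 *   SECTIONS_PER_SUPER_SECTION == BIT(24 - 20) == 16
 *   PAGES_PER_LARGE_PAGE       == BIT(16 - 12) == 16
 */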

/* Helper macros to avoid fencepost errors when
 * getting the last byte of a PTE or PDE */
#define LAST_BYTE_PTE(PTE,LENGTH) ((word_t)&(PTE)[(LENGTH)-1] + (BIT(PTE_SIZE_BITS)-1))
#define LAST_BYTE_PDE(PDE,LENGTH) ((word_t)&(PDE)[(LENGTH)-1] + (BIT(PDE_SIZE_BITS)-1))
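/* For example, with a run of 16 PTEs starting at ptSlot,
 * cleanCacheRange_PoU((word_t)&ptSlot[0], LAST_BYTE_PTE(ptSlot, 16), ...)
 * cleans up to and including the final byte of the 16th entry, rather than
 * stopping at the first byte of a (non-existent) 17th. */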

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
/* Stage 2 */
#define MEMATTR_CACHEABLE    0xf /* Inner and Outer write-back */
#define MEMATTR_NONCACHEABLE 0x0 /* Strongly ordered or device memory */

/* Stage 1 hyp */
#define ATTRINDX_CACHEABLE    0xff /* Inner and Outer RW write-back non-transient */
#define ATTRINDX_NONCACHEABLE 0x0  /* strongly ordered or device memory */
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

struct resolve_ret {
    paddr_t frameBase;
    vm_page_size_t frameSize;
    bool_t valid;
};
typedef struct resolve_ret resolve_ret_t;

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
static bool_t PURE pteCheckIfMapped(pte_t *pte);
static bool_t PURE pdeCheckIfMapped(pde_t *pde);

static word_t CONST APFromVMRights(vm_rights_t vm_rights)
{
    switch (vm_rights) {
    case VMNoAccess:
        return 0;

    case VMKernelOnly:
        return 1;

    case VMReadOnly:
        return 2;

    case VMReadWrite:
        return 3;

    default:
        fail("Invalid VM rights");
    }
}

#else
/* The AP encoding is slightly different here. AP is only used for kernel
 * mappings, which are fixed after boot time. */
BOOT_CODE
static word_t CONST APFromVMRights(vm_rights_t vm_rights)
{
    switch (vm_rights) {
    case VMKernelOnly:
        return 0;
    case VMReadWrite:
        return 1;
    case VMNoAccess:
        /* RO at PL1 only */
        return 2;
    case VMReadOnly:
        return 3;
    default:
        fail("Invalid VM rights");
    }
}

static word_t CONST HAPFromVMRights(vm_rights_t vm_rights)
{
    switch (vm_rights) {
    case VMKernelOnly:
    case VMNoAccess:
        return 0;
    case VMReadOnly:
        return 1;
    /*
    case VMWriteOnly:
        return 2;
    */
    case VMReadWrite:
        return 3;
    default:
        fail("Invalid VM rights");
    }
}

#endif

vm_rights_t CONST maskVMRights(vm_rights_t vm_rights, seL4_CapRights_t cap_rights_mask)
{
    if (vm_rights == VMNoAccess) {
        return VMNoAccess;
    }
    if (vm_rights == VMReadOnly &&
        seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
        return VMReadOnly;
    }
    if (vm_rights == VMReadWrite &&
        seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
        if (!seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
            return VMReadOnly;
        } else {
            return VMReadWrite;
        }
    }
    if (vm_rights == VMReadWrite &&
        !seL4_CapRights_get_capAllowRead(cap_rights_mask) &&
        seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
        userError("Attempted to make unsupported write only mapping");
    }
    return VMKernelOnly;
}
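/* Summary of the masking rules above:
 *   VMReadWrite masked with {read,write} -> VMReadWrite
 *   VMReadWrite masked with {read}       -> VMReadOnly
 *   VMReadOnly  masked with {read}       -> VMReadOnly
 *   VMNoAccess is always preserved; write-only requests and everything
 *   else degrade to VMKernelOnly. */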

/* ==================== BOOT CODE STARTS HERE ==================== */

BOOT_CODE void map_kernel_frame(paddr_t paddr, pptr_t vaddr, vm_rights_t vm_rights, vm_attributes_t attributes)
{
    word_t idx = (vaddr & MASK(pageBitsForSize(ARMSection))) >> pageBitsForSize(ARMSmallPage);

    assert(vaddr >= PPTR_TOP); /* vaddr lies in the region the global PT covers */
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t tex;
    if (vm_attributes_get_armPageCacheable(attributes)) {
        tex = 5; /* TEX = 0b1(Cached)01(Outer Write Allocate) */
    } else {
        tex = 0;
    }
    armKSGlobalPT[idx] =
        pte_pte_small_new(
            paddr,
            0, /* global */
            SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
            0, /* APX = 0, privileged full access */
            tex,
            APFromVMRights(vm_rights),
            0, /* C (Inner write allocate) */
            1, /* B (Inner write allocate) */
            0  /* executable */
        );
#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */
    armHSGlobalPT[idx] =
        pteS1_pteS1_small_new(
            0, /* Executable */
            0, /* Executable at PL1 */
            0, /* Not contiguous */
            paddr,
            0, /* global */
            1, /* AF -- always set */
            0, /* Not shared */
            APFromVMRights(vm_rights),
            0, /* non secure */
            vm_attributes_get_armPageCacheable(attributes)
        );
#endif /* !CONFIG_ARM_HYPERVISOR_SUPPORT */
}

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
BOOT_CODE void map_kernel_window(void)
{
    paddr_t  phys;
    word_t idx;
    pde_t    pde;

    /* mapping of KERNEL_ELF_BASE (virtual address) to kernel's
     * KERNEL_ELF_PHYS_BASE  */
    /* up to end of virtual address space minus 16M using 16M frames */
    phys = physBase;
    idx = PPTR_BASE >> pageBitsForSize(ARMSection);

    while (idx < BIT(PD_INDEX_BITS) - SECTIONS_PER_SUPER_SECTION) {
        word_t idx2;

        pde = pde_pde_section_new(
                  phys,
                  1, /* SuperSection */
                  0, /* global */
                  SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                  0, /* APX = 0, privileged full access */
                  5, /* TEX = 0b1(Cached)01(Outer Write Allocate) */
                  1, /* VMKernelOnly */
                  1, /* Parity enabled */
                  0, /* Domain 0 */
                  0, /* XN not set */
                  0, /* C (Inner write allocate) */
                  1  /* B (Inner write allocate) */
              );
        for (idx2 = idx; idx2 < idx + SECTIONS_PER_SUPER_SECTION; idx2++) {
            armKSGlobalPD[idx2] = pde;
        }
        phys += BIT(pageBitsForSize(ARMSuperSection));
        idx += SECTIONS_PER_SUPER_SECTION;
    }

    while (idx < (PPTR_TOP >> pageBitsForSize(ARMSection))) {
        pde = pde_pde_section_new(
                  phys,
                  0, /* Section */
                  0, /* global */
                  SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                  0, /* APX = 0, privileged full access */
                  5, /* TEX = 0b1(Cached)01(Outer Write Allocate) */
                  1, /* VMKernelOnly */
                  1, /* Parity enabled */
                  0, /* Domain 0 */
                  0, /* XN not set */
                  0, /* C (Inner write allocate) */
                  1  /* B (Inner write allocate) */
              );
        armKSGlobalPD[idx] = pde;
        phys += BIT(pageBitsForSize(ARMSection));
        idx++;
    }

    /* crosscheck whether we have mapped correctly so far */
    assert(phys == PADDR_TOP);

#ifdef CONFIG_KERNEL_LOG_BUFFER
    /* map log buffer page table. PTEs to be filled by user later by calling seL4_BenchmarkSetLogBuffer() */
    armKSGlobalPD[idx] =
        pde_pde_coarse_new(
            addrFromPPtr(armKSGlobalLogPT), /* address */
            true,                           /* P       */
            0                               /* Domain  */
        );

    memzero(armKSGlobalLogPT, BIT(seL4_PageTableBits));

    phys += BIT(pageBitsForSize(ARMSection));
    idx++;
#endif /* CONFIG_KERNEL_LOG_BUFFER */

    /* map page table covering last 1M of virtual address space to page directory */
    armKSGlobalPD[idx] =
        pde_pde_coarse_new(
            addrFromPPtr(armKSGlobalPT), /* address */
            true,                        /* P       */
            0                            /* Domain  */
        );

    /* now start initialising the page table */
    memzero(armKSGlobalPT, 1 << seL4_PageTableBits);

    /* map vector table */
    map_kernel_frame(
        addrFromPPtr(arm_vector_table),
        PPTR_VECTOR_TABLE,
        VMKernelOnly,
        vm_attributes_new(
            false, /* armExecuteNever */
            true,  /* armParityEnabled */
            true   /* armPageCacheable */
        )
    );

#ifdef CONFIG_KERNEL_GLOBALS_FRAME
    /* map globals frame */
    map_kernel_frame(
        addrFromPPtr(armKSGlobalsFrame),
        seL4_GlobalsFrame,
        VMReadOnly,
        vm_attributes_new(
            true,  /* armExecuteNever */
            true,  /* armParityEnabled */
            true   /* armPageCacheable */
        )
    );
#endif /* CONFIG_KERNEL_GLOBALS_FRAME */

    map_kernel_devices();
}

#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */

BOOT_CODE void map_kernel_window(void)
{
    paddr_t    phys;
    uint32_t   idx;
    pdeS1_t pde;
    pte_t UNUSED pteS2;

    /* Initialise PGD */
    for (idx = 0; idx < 3; idx++) {
        pde = pdeS1_pdeS1_invalid_new();
        armHSGlobalPGD[idx] = pde;
    }
    pde = pdeS1_pdeS1_coarse_new(0, 0, 0, 0, addrFromPPtr(armHSGlobalPD));
    armHSGlobalPGD[3] = pde;

    /* Initialise PMD */
    /* Invalidate up until USER_TOP */
    for (idx = 0; idx < (USER_TOP - 0xC0000000) >> (PT_INDEX_BITS + PAGE_BITS); idx++) {
        pde = pdeS1_pdeS1_invalid_new();
        armHSGlobalPD[idx] = pde;
    }
    /* mapping of PPTR_BASE (virtual address) to kernel's physBase  */
    /* up to end of virtual address space minus 2M using 2M frames */
    phys = physBase;
    for (; idx < BIT(PT_INDEX_BITS) - 1; idx++) {
        pde = pdeS1_pdeS1_section_new(
                  0, /* Executable */
                  0, /* Executable in PL1 */
                  0, /* Not contiguous */
                  phys, /* Address */
                  0, /* global */
                  1, /* AF -- always set to 1 */
                  0, /* Not Shareable */
                  0, /* AP: WR at PL1 only */
                  0, /* Not secure */
                  1  /* outer write-back Cacheable */
              );
        armHSGlobalPD[idx] = pde;
        phys += BIT(PT_INDEX_BITS + PAGE_BITS);
    }
    /* map page table covering last 2M of virtual address space */
    pde = pdeS1_pdeS1_coarse_new(0, 0, 0, 0, addrFromPPtr(armHSGlobalPT));
    armHSGlobalPD[idx] = pde;

    /* now start initialising the page table */
    memzero(armHSGlobalPT, 1 << seL4_PageTableBits);
    for (idx = 0; idx < 256; idx++) {
        pteS1_t pte;
        pte = pteS1_pteS1_small_new(
                  0, /* Executable */
                  0, /* Executable in PL1 */
                  0, /* Not contiguous */
                  phys, /* Address */
                  0, /* global */
                  1, /* AF -- always set to 1 */
                  0, /* Not Shareable */
                  0, /* AP: WR at PL1 only */
                  0, /* Not secure */
                  1  /* outer write-back Cacheable */
              );
        armHSGlobalPT[idx] = pte;
        phys += BIT(PAGE_BITS);
    }
    /* map vector table */
    map_kernel_frame(
        addrFromPPtr(arm_vector_table),
        PPTR_VECTOR_TABLE,
        VMKernelOnly,
        vm_attributes_new(
            false, /* armExecuteNever */
            true,  /* armParityEnabled */
            true   /* armPageCacheable */
        )
    );

#ifdef CONFIG_KERNEL_GLOBALS_FRAME
    /* map globals frame */
    map_kernel_frame(
        addrFromPPtr(armKSGlobalsFrame),
        seL4_GlobalsFrame,
        VMReadOnly,
        vm_attributes_new(
            false, /* armExecuteNever */
            true,  /* armParityEnabled */
            true   /* armPageCacheable */
        )
    );
    /* map globals into user global PT */
    pteS2 = pte_pte_small_new(
                1, /* Not executable */
                0, /* Not contiguous */
                addrFromPPtr(armKSGlobalsFrame),
                1, /* AF -- always set */
                0, /* Not shared */
                HAPFromVMRights(VMReadOnly),
                MEMATTR_CACHEABLE  /* Cacheable */
            );
    memzero(armUSGlobalPT, 1 << seL4_PageTableBits);
    idx = (seL4_GlobalsFrame >> PAGE_BITS) & (MASK(PT_INDEX_BITS));
    armUSGlobalPT[idx] = pteS2;
#endif /* CONFIG_KERNEL_GLOBALS_FRAME */

    map_kernel_devices();
}

#endif /* !CONFIG_ARM_HYPERVISOR_SUPPORT */

static BOOT_CODE void map_it_frame_cap(cap_t pd_cap, cap_t frame_cap, bool_t executable)
{
    pte_t *pt;
    pte_t *targetSlot;
    pde_t *pd    = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pd_cap));
    void  *frame = (void *)generic_frame_cap_get_capFBasePtr(frame_cap);
    vptr_t vptr  = generic_frame_cap_get_capFMappedAddress(frame_cap);

    assert(generic_frame_cap_get_capFMappedASID(frame_cap) != 0);

    pd += (vptr >> pageBitsForSize(ARMSection));
    pt = ptrFromPAddr(pde_pde_coarse_ptr_get_address(pd));
    targetSlot = pt + ((vptr & MASK(pageBitsForSize(ARMSection)))
                       >> pageBitsForSize(ARMSmallPage));
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    *targetSlot = pte_pte_small_new(
                      addrFromPPtr(frame),
                      1, /* not global */
                      SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                      0, /* APX = 0, privileged full access */
                      5, /* TEX = 0b1(Cached)01(Outer Write Allocate) */
                      APFromVMRights(VMReadWrite),
                      0, /* C (Inner write allocate) */
                      1, /* B (Inner write allocate) */
                      !executable
                  );
#else
    *targetSlot = pte_pte_small_new(
                      0, /* Executable */
                      0, /* Not contiguous */
                      addrFromPPtr(frame),
                      1, /* AF -- always set */
                      0, /* Not shared */
                      HAPFromVMRights(VMReadWrite),
                      MEMATTR_CACHEABLE  /* Cacheable */
                  );
#endif
}

/* Create a frame cap for the initial thread. */

static BOOT_CODE cap_t create_it_frame_cap(pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large)
{
    if (use_large)
        return
            cap_frame_cap_new(
                ARMSection,                    /* capFSize           */
                ASID_LOW(asid),                /* capFMappedASIDLow  */
                wordFromVMRights(VMReadWrite), /* capFVMRights       */
                vptr,                          /* capFMappedAddress  */
                false,                         /* capFIsDevice       */
                ASID_HIGH(asid),               /* capFMappedASIDHigh */
                pptr                           /* capFBasePtr        */
            );
    else
        return
            cap_small_frame_cap_new(
                ASID_LOW(asid),                /* capFMappedASIDLow  */
                wordFromVMRights(VMReadWrite), /* capFVMRights       */
                vptr,                          /* capFMappedAddress  */
                false,                         /* capFIsDevice       */
#ifdef CONFIG_TK1_SMMU
                0,                             /* IOSpace            */
#endif
                ASID_HIGH(asid),               /* capFMappedASIDHigh */
                pptr                           /* capFBasePtr        */
            );
}

static BOOT_CODE void map_it_pt_cap(cap_t pd_cap, cap_t pt_cap)
{
    pde_t *pd   = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pd_cap));
    pte_t *pt   = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(pt_cap));
    vptr_t vptr = cap_page_table_cap_get_capPTMappedAddress(pt_cap);
    pde_t *targetSlot = pd + (vptr >> pageBitsForSize(ARMSection));

    assert(cap_page_table_cap_get_capPTIsMapped(pt_cap));

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    *targetSlot = pde_pde_coarse_new(
                      addrFromPPtr(pt), /* address */
                      true,             /* P       */
                      0                 /* Domain  */
                  );
#else
    *targetSlot = pde_pde_coarse_new(addrFromPPtr(pt));
#endif
}

/* Create a page table for the initial thread */

static BOOT_CODE cap_t create_it_page_table_cap(cap_t pd, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_table_cap_new(
              1,    /* capPTIsMapped      */
              asid, /* capPTMappedASID    */
              vptr, /* capPTMappedAddress */
              pptr  /* capPTBasePtr       */
          );
    if (asid != asidInvalid) {
        map_it_pt_cap(pd, cap);
    }
    return cap;
}

BOOT_CODE word_t arch_get_n_paging(v_region_t it_v_reg)
{
    return get_n_paging(it_v_reg, PT_INDEX_BITS + PAGE_BITS);
}
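/* Worked example (assuming the non-hyp short-descriptor values
 * PT_INDEX_BITS == 8 and PAGE_BITS == 12, so each page table spans
 * BIT(20) == 1MiB of virtual address space): an initial-task image from
 * 0x8000 to 0x250000 touches the 1MiB regions at 0x0, 0x100000 and
 * 0x200000, so three page tables are needed. */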

/* Create an address space for the initial thread.
 * This includes page directory and page tables */
BOOT_CODE cap_t create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    vptr_t     pt_vptr;
    seL4_SlotPos slot_pos_before;
    seL4_SlotPos slot_pos_after;

    /* create PD cap */
    copyGlobalMappings(PDE_PTR(rootserver.vspace));
    cleanCacheRange_PoU(rootserver.vspace, rootserver.vspace + (1 << seL4_PageDirBits) - 1,
                        addrFromPPtr((void *)rootserver.vspace));
    cap_t pd_cap =
        cap_page_directory_cap_new(
            true,    /* capPDIsMapped   */
            IT_ASID, /* capPDMappedASID */
            rootserver.vspace  /* capPDBasePtr    */
        );
    slot_pos_before = ndks_boot.slot_pos_cur;
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapInitThreadVSpace), pd_cap);

    /* create all PT caps necessary to cover userland image */
    for (pt_vptr = ROUND_DOWN(it_v_reg.start, PT_INDEX_BITS + PAGE_BITS);
         pt_vptr < it_v_reg.end;
         pt_vptr += BIT(PT_INDEX_BITS + PAGE_BITS)) {
        if (!provide_cap(root_cnode_cap,
                         create_it_page_table_cap(pd_cap, it_alloc_paging(), pt_vptr, IT_ASID))
           ) {
            return cap_null_cap_new();
        }
    }

    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->userImagePaging = (seL4_SlotRegion) {
        slot_pos_before, slot_pos_after
    };

    return pd_cap;
}

BOOT_CODE cap_t create_unmapped_it_frame_cap(pptr_t pptr, bool_t use_large)
{
    return create_it_frame_cap(pptr, 0, asidInvalid, use_large);
}

BOOT_CODE cap_t create_mapped_it_frame_cap(cap_t pd_cap, pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large,
                                           bool_t executable)
{
    cap_t cap = create_it_frame_cap(pptr, vptr, asid, use_large);
    map_it_frame_cap(pd_cap, cap, executable);
    return cap;
}

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT

BOOT_CODE void activate_global_pd(void)
{
    /* Ensure that there's nothing stale in newly-mapped regions, and
       that everything we've written (particularly the kernel page tables)
       is committed. */
    cleanInvalidateL1Caches();
    setCurrentPD(addrFromPPtr(armKSGlobalPD));
    invalidateLocalTLB();
    lockTLBEntry(PPTR_BASE);
    lockTLBEntry(PPTR_VECTOR_TABLE);
}

#else

BOOT_CODE void activate_global_pd(void)
{
    uint32_t r;
    /* Ensure that there's nothing stale in newly-mapped regions, and
       that everything we've written (particularly the kernel page tables)
       is committed. */
    cleanInvalidateL1Caches();
    /* Set up the memory attributes: we use two indices (cacheable/non-cacheable) */
    setHMAIR((ATTRINDX_NONCACHEABLE << 0) | (ATTRINDX_CACHEABLE << 8), 0);
    setCurrentHypPD(addrFromPPtr(armHSGlobalPGD));
    invalidateHypTLB();
#if 0 /* Can't lock entries on A15 */
    lockTLBEntry(PPTR_BASE);
    lockTLBEntry(PPTR_VECTOR_TABLE);
#endif
    /* TODO find a better place to init the VMMU */
    r = 0;
    /* Translation range */
    r |= (0x0 << 0);     /* 2^(32 -(0)) input range. */
    r |= (r & 0x8) << 1; /* Sign bit */
    /* starting level */
    r |= (0x0 << 6);     /* Start at second level */
    /* Sharability of tables */
    r |= BIT(8);       /* Inner write-back, write-allocate */
    r |= BIT(10);      /* Outer write-back, write-allocate */
    /* Long descriptor format (not that we have a choice) */
    r |= BIT(31);
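    /* With the field values chosen above (full 32-bit input range, start at
     * the second level, inner/outer write-back write-allocate table walks,
     * long-descriptor format), the computed VTCR value works out to
     * 0x80000500. */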
    setVTCR(r);
}

#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

BOOT_CODE void write_it_asid_pool(cap_t it_ap_cap, cap_t it_pd_cap)
{
    asid_pool_t *ap = ASID_POOL_PTR(pptr_of_cap(it_ap_cap));
    ap->array[IT_ASID] = PDE_PTR(pptr_of_cap(it_pd_cap));
    armKSASIDTable[IT_ASID >> asidLowBits] = ap;
}

/* ==================== BOOT CODE FINISHES HERE ==================== */

findPDForASID_ret_t findPDForASID(asid_t asid)
{
    findPDForASID_ret_t ret;
    asid_pool_t *poolPtr;
    pde_t *pd;

    poolPtr = armKSASIDTable[asid >> asidLowBits];
    if (unlikely(!poolPtr)) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.pd = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    pd = poolPtr->array[asid & MASK(asidLowBits)];
    if (unlikely(!pd)) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.pd = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    ret.pd = pd;
    ret.status = EXCEPTION_NONE;
    return ret;
}

word_t *PURE lookupIPCBuffer(bool_t isReceiver, tcb_t *thread)
{
    word_t w_bufferPtr;
    cap_t bufferCap;
    vm_rights_t vm_rights;

    w_bufferPtr = thread->tcbIPCBuffer;
    bufferCap = TCB_PTR_CTE_PTR(thread, tcbBuffer)->cap;

    if (unlikely(cap_get_capType(bufferCap) != cap_small_frame_cap &&
                 cap_get_capType(bufferCap) != cap_frame_cap)) {
        return NULL;
    }
    if (unlikely(generic_frame_cap_get_capFIsDevice(bufferCap))) {
        return NULL;
    }

    vm_rights = generic_frame_cap_get_capFVMRights(bufferCap);
    if (likely(vm_rights == VMReadWrite ||
               (!isReceiver && vm_rights == VMReadOnly))) {
        word_t basePtr;
        unsigned int pageBits;

        basePtr = generic_frame_cap_get_capFBasePtr(bufferCap);
        pageBits = pageBitsForSize(generic_frame_cap_get_capFSize(bufferCap));
        return (word_t *)(basePtr + (w_bufferPtr & MASK(pageBits)));
    } else {
        return NULL;
    }
}

exception_t checkValidIPCBuffer(vptr_t vptr, cap_t cap)
{
    if (unlikely(cap_get_capType(cap) != cap_small_frame_cap &&
                 cap_get_capType(cap) != cap_frame_cap)) {
        userError("Requested IPC Buffer is not a frame cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (unlikely(generic_frame_cap_get_capFIsDevice(cap))) {
        userError("Specifying a device frame as an IPC buffer is not permitted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(vptr & MASK(seL4_IPCBufferSizeBits))) {
        userError("Requested IPC Buffer location 0x%x is not aligned.",
                  (int)vptr);
        current_syscall_error.type = seL4_AlignmentError;
        return EXCEPTION_SYSCALL_ERROR;
    }

    return EXCEPTION_NONE;
}

pde_t *CONST lookupPDSlot(pde_t *pd, vptr_t vptr)
{
    unsigned int pdIndex;

    pdIndex = vptr >> (PAGE_BITS + PT_INDEX_BITS);
    return pd + pdIndex;
}

lookupPTSlot_ret_t lookupPTSlot(pde_t *pd, vptr_t vptr)
{
    lookupPTSlot_ret_t ret;
    pde_t *pdSlot;

    pdSlot = lookupPDSlot(pd, vptr);

    if (unlikely(pde_ptr_get_pdeType(pdSlot) != pde_pde_coarse)) {
        current_lookup_fault = lookup_fault_missing_capability_new(PT_INDEX_BITS + PAGE_BITS);

        ret.ptSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    } else {
        pte_t *pt, *ptSlot;
        unsigned int ptIndex;

        pt = ptrFromPAddr(pde_pde_coarse_ptr_get_address(pdSlot));
        ptIndex = (vptr >> PAGE_BITS) & MASK(PT_INDEX_BITS);
        ptSlot = pt + ptIndex;

        ret.ptSlot = ptSlot;
        ret.status = EXCEPTION_NONE;
        return ret;
    }
}

static pte_t *lookupPTSlot_nofail(pte_t *pt, vptr_t vptr)
{
    unsigned int ptIndex;

    ptIndex = (vptr >> PAGE_BITS) & MASK(PT_INDEX_BITS);
    return pt + ptIndex;
}
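/* Illustrative index arithmetic (assuming PAGE_BITS == 12 and
 * PT_INDEX_BITS == 8): for vptr == 0x12345678, lookupPDSlot selects PD
 * entry 0x123 (bits [31:20]) and lookupPTSlot selects PT entry 0x45
 * (bits [19:12]) within the coarse table that PD entry points to. */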

static const resolve_ret_t default_resolve_ret_t;

static resolve_ret_t resolveVAddr(pde_t *pd, vptr_t vaddr)
{
    pde_t *pde = lookupPDSlot(pd, vaddr);
    resolve_ret_t ret = default_resolve_ret_t;

    ret.valid = true;

    switch (pde_ptr_get_pdeType(pde)) {
    case pde_pde_section:
        ret.frameBase = pde_pde_section_ptr_get_address(pde);
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (pde_pde_section_ptr_get_size(pde)) {
            ret.frameSize = ARMSuperSection;
        } else {
            ret.frameSize = ARMSection;
        }
#else
        if (pde_pde_section_ptr_get_contiguous_hint(pde)) {
            /* Entries are represented as 16 contiguous sections. We need to mask
               to get the super section frame base */
            ret.frameBase &= ~MASK(pageBitsForSize(ARMSuperSection));
            ret.frameSize = ARMSuperSection;
        } else {
            ret.frameSize = ARMSection;
        }
#endif
        return ret;

    case pde_pde_coarse: {
        pte_t *pt = ptrFromPAddr(pde_pde_coarse_ptr_get_address(pde));
        pte_t *pte = lookupPTSlot_nofail(pt, vaddr);
        switch (pte_ptr_get_pteType(pte)) {
        case pte_pte_small:
            ret.frameBase = pte_pte_small_ptr_get_address(pte);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
            if (pte_pte_small_ptr_get_contiguous_hint(pte)) {
                /* Entries are represented as 16 contiguous small frames. We need to mask
                   to get the large frame base */
                ret.frameBase &= ~MASK(pageBitsForSize(ARMLargePage));
                ret.frameSize = ARMLargePage;
            } else {
                ret.frameSize = ARMSmallPage;
            }
#else
            ret.frameSize = ARMSmallPage;
#endif
            return ret;

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        case pte_pte_large:
            ret.frameBase = pte_pte_large_ptr_get_address(pte);
            ret.frameSize = ARMLargePage;
            return ret;
#endif
        default:
            break;
        }
        break;
    }
    }

    ret.valid = false;
    return ret;
}
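/* Example of the contiguous-hint masking above (hypervisor build): a 64KiB
 * large page is stored as 16 small PTEs whose address fields point at
 * successive 4KiB frames. If the resolved PTE holds 0xABCD7000, clearing
 * the low pageBitsForSize(ARMLargePage) == 16 bits recovers the large-frame
 * base 0xABCD0000. */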

static pte_t CONST makeUserPTE(vm_page_size_t page_size, paddr_t paddr,
                               bool_t cacheable, bool_t nonexecutable, vm_rights_t vm_rights)
{
    pte_t pte;
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t ap;

    ap = APFromVMRights(vm_rights);

    switch (page_size) {
    case ARMSmallPage: {
        if (cacheable) {
            pte = pte_pte_small_new(paddr,
                                    1, /* not global */
                                    SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                                    0, /* APX = 0, privileged full access */
                                    5, /* TEX = 0b101, outer write-back, write-allocate */
                                    ap,
                                    0, 1, /* Inner write-back, write-allocate (except on ARM11) */
                                    nonexecutable);
        } else {
            pte = pte_pte_small_new(paddr,
                                    1, /* not global */
                                    1, /* shared */
                                    0, /* APX = 0, privileged full access */
                                    0, /* TEX = 0b000, strongly-ordered. */
                                    ap,
                                    0, 0,
                                    nonexecutable);
        }
        break;
    }

    case ARMLargePage: {
        if (cacheable) {
            pte = pte_pte_large_new(paddr,
                                    nonexecutable,
                                    5, /* TEX = 0b101, outer write-back, write-allocate */
                                    1, /* not global */
                                    SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                                    0, /* APX = 0, privileged full access */
                                    ap,
                                    0, 1, /* Inner write-back, write-allocate (except on ARM11) */
                                    1 /* reserved */);
        } else {
            pte = pte_pte_large_new(paddr,
                                    nonexecutable,
                                    0, /* TEX = 0b000, strongly-ordered */
                                    1, /* not global */
                                    1, /* shared */
                                    0, /* APX = 0, privileged full access */
                                    ap,
                                    0, 0,
                                    1 /* reserved */);
        }
        break;
    }

    default:
        fail("Invalid PTE frame type");
    }

#else

    word_t hap;

    hap = HAPFromVMRights(vm_rights);

    switch (page_size) {
    case ARMSmallPage: {
        if (cacheable) {
            pte = pte_pte_small_new(
                      nonexecutable, /* Executable */
                      0,      /* Not contiguous */
                      paddr,
                      1,      /* AF - Always set */
                      0,      /* not shared */
                      hap,    /* HAP - access */
                      MEMATTR_CACHEABLE /* Cacheable */);
        } else {
            pte = pte_pte_small_new(
                      nonexecutable, /* Executable */
                      0,      /* Not contiguous */
                      paddr,
                      1,      /* AF - Always set */
                      0,      /* not shared */
                      hap,    /* HAP - access */
                      MEMATTR_NONCACHEABLE /* Not cacheable */);
        }
        break;
    }

    case ARMLargePage: {
        if (cacheable) {
            pte = pte_pte_small_new(
                      nonexecutable,   /* Executable */
                      1,   /* 16 contiguous */
                      paddr,
                      1,   /* AF - Always set */
                      0,   /* not shared */
                      hap, /* HAP - access */
                      MEMATTR_CACHEABLE  /* Cacheable */);
        } else {
            pte = pte_pte_small_new(
                      nonexecutable,   /* Executable */
                      1,   /* 16 contiguous */
                      paddr,
                      1,   /* AF - Always set */
                      0,   /* not shared */
                      hap, /* HAP - access */
                      MEMATTR_NONCACHEABLE /* Not cacheable */);
        }
        break;
    }
    default:
        fail("Invalid PTE frame type");
    }
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

    return pte;
}

static pde_t CONST makeUserPDE(vm_page_size_t page_size, paddr_t paddr, bool_t parity,
                               bool_t cacheable, bool_t nonexecutable, word_t domain,
                               vm_rights_t vm_rights)
{
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t ap, size2;

    ap = APFromVMRights(vm_rights);
#else
    word_t hap, size2;

    (void)domain;
    hap = HAPFromVMRights(vm_rights);
#endif

    switch (page_size) {
    case ARMSection:
        size2 = 0;
        break;

    case ARMSuperSection:
        size2 = 1;
        break;

    default:
        fail("Invalid PDE frame type");
    }

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    if (cacheable) {
        return pde_pde_section_new(paddr, size2,
                                   1, /* not global */
                                   SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                                   0, /* APX = 0, privileged full access */
                                   5, /* TEX = 0b101, outer write-back, write-allocate */
                                   ap, parity, domain, nonexecutable,
                                   0, 1 /* Inner write-back, write-allocate (except on ARM11) */);
    } else {
        return pde_pde_section_new(paddr, size2,
                                   1, /* not global */
                                   1, /* shared */
                                   0, /* APX = 0, privileged full access */
                                   0, /* TEX = 0b000, strongly-ordered */
                                   ap, parity, domain, nonexecutable,
                                   0, 0);
    }
#else
    if (cacheable) {
        return pde_pde_section_new(
                   nonexecutable, /* Executable */
                   size2, /* contiguous */
                   paddr,
                   1, /* AF - Always set */
                   0, /* not shared */
                   hap,
                   MEMATTR_CACHEABLE /* Cacheable */);
    } else {
        return pde_pde_section_new(
                   nonexecutable, /* Executable */
                   size2, /* contiguous */
                   paddr,
                   1, /* AF - Always set */
                   0, /* not shared */
                   hap,
                   MEMATTR_NONCACHEABLE /* Not cacheable */);
    }
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
}

bool_t CONST isValidVTableRoot(cap_t cap)
{
    return cap_get_capType(cap) == cap_page_directory_cap &&
           cap_page_directory_cap_get_capPDIsMapped(cap);
}

bool_t CONST isIOSpaceFrameCap(cap_t cap)
{
#ifdef CONFIG_TK1_SMMU
    return cap_get_capType(cap) == cap_small_frame_cap && cap_small_frame_cap_get_capFIsIOSpace(cap);
#else
    return false;
#endif
}

void setVMRoot(tcb_t *tcb)
{
    cap_t threadRoot;
    asid_t asid;
    pde_t *pd;
    findPDForASID_ret_t find_ret;

    threadRoot = TCB_PTR_CTE_PTR(tcb, tcbVTable)->cap;

    if (cap_get_capType(threadRoot) != cap_page_directory_cap ||
        !cap_page_directory_cap_get_capPDIsMapped(threadRoot)) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        setCurrentPD(addrFromPPtr(armUSGlobalPD));
#else
        setCurrentPD(addrFromPPtr(armKSGlobalPD));
#endif
        return;
    }

    pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(threadRoot));
    asid = cap_page_directory_cap_get_capPDMappedASID(threadRoot);
    find_ret = findPDForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE || find_ret.pd != pd)) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        setCurrentPD(addrFromPPtr(armUSGlobalPD));
#else
        setCurrentPD(addrFromPPtr(armKSGlobalPD));
#endif
        return;
    }

    armv_contextSwitch(pd, asid);
}

static bool_t setVMRootForFlush(pde_t *pd, asid_t asid)
{
    cap_t threadRoot;

    threadRoot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbVTable)->cap;

    if (cap_get_capType(threadRoot) == cap_page_directory_cap &&
        cap_page_directory_cap_get_capPDIsMapped(threadRoot) &&
        PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(threadRoot)) == pd) {
        return false;
    }

    armv_contextSwitch(pd, asid);

    return true;
}

pde_t *pageTableMapped(asid_t asid, vptr_t vaddr, pte_t *pt)
{
    findPDForASID_ret_t find_ret;
    pde_t pde;
    unsigned int pdIndex;

    find_ret = findPDForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        return NULL;
    }

    pdIndex = vaddr >> (PAGE_BITS + PT_INDEX_BITS);
    pde = find_ret.pd[pdIndex];

    if (likely(pde_get_pdeType(pde) == pde_pde_coarse
               && ptrFromPAddr(pde_pde_coarse_get_address(pde)) == pt)) {
        return find_ret.pd;
    } else {
        return NULL;
    }
}

static void invalidateASID(asid_t asid)
{
    asid_pool_t *asidPool;
    pde_t *pd;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    pd = asidPool->array[asid & MASK(asidLowBits)];
    assert(pd);

    pd[PD_ASID_SLOT] = pde_pde_invalid_new(0, false);
}

static pde_t PURE loadHWASID(asid_t asid)
{
    asid_pool_t *asidPool;
    pde_t *pd;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    pd = asidPool->array[asid & MASK(asidLowBits)];
    assert(pd);

    return pd[PD_ASID_SLOT];
}

static void storeHWASID(asid_t asid, hw_asid_t hw_asid)
{
    asid_pool_t *asidPool;
    pde_t *pd;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    pd = asidPool->array[asid & MASK(asidLowBits)];
    assert(pd);

    /* Store the HW ASID in the last entry,
       masquerading as an invalid PDE */
    pd[PD_ASID_SLOT] = pde_pde_invalid_new(hw_asid, true);

    armKSHWASIDTable[hw_asid] = asid;
}
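/* Together, loadHWASID/storeHWASID treat pd[PD_ASID_SLOT] as a one-word
 * cache: e.g. storeHWASID(asid, 42) records hardware ASID 42 (with the
 * valid bit set) in the page directory itself and the reverse mapping in
 * armKSHWASIDTable[42], so no separate per-ASID lookup structure is
 * needed. */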

hw_asid_t findFreeHWASID(void)
{
    word_t hw_asid_offset;
    hw_asid_t hw_asid;

    /* Find a free hardware ASID */
    for (hw_asid_offset = 0;
         hw_asid_offset <= (word_t)((hw_asid_t) - 1);
         hw_asid_offset ++) {
        hw_asid = armKSNextASID + ((hw_asid_t)hw_asid_offset);
        if (armKSHWASIDTable[hw_asid] == asidInvalid) {
            return hw_asid;
        }
    }

    hw_asid = armKSNextASID;

    /* If we've scanned the table without finding a free ASID */
    invalidateASID(armKSHWASIDTable[hw_asid]);

    /* Flush TLB */
    invalidateTranslationASID(hw_asid);
    armKSHWASIDTable[hw_asid] = asidInvalid;

    /* Increment the NextASID index */
    armKSNextASID++;

    return hw_asid;
}
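/* Illustrative replacement behaviour (assuming hw_asid_t is an 8-bit type,
 * giving 256 hardware ASIDs): if every entry of armKSHWASIDTable is in use,
 * the entry at armKSNextASID is evicted -- its software ASID loses its
 * binding and its TLB entries are flushed -- and armKSNextASID advances by
 * one, yielding round-robin replacement. */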

hw_asid_t getHWASID(asid_t asid)
{
    pde_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);
    if (pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        return pde_pde_invalid_get_stored_hw_asid(stored_hw_asid);
    } else {
        hw_asid_t new_hw_asid;

        new_hw_asid = findFreeHWASID();
        storeHWASID(asid, new_hw_asid);
        return new_hw_asid;
    }
}

static void invalidateASIDEntry(asid_t asid)
{
    pde_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);
    if (pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        armKSHWASIDTable[pde_pde_invalid_get_stored_hw_asid(stored_hw_asid)] =
            asidInvalid;
    }
    invalidateASID(asid);
}

void unmapPageTable(asid_t asid, vptr_t vaddr, pte_t *pt)
{
    pde_t *pd, *pdSlot;
    unsigned int pdIndex;

    pd = pageTableMapped(asid, vaddr, pt);

    if (likely(pd != NULL)) {
        pdIndex = vaddr >> (PT_INDEX_BITS + PAGE_BITS);
        pdSlot = pd + pdIndex;

        *pdSlot = pde_pde_invalid_new(0, 0);
        cleanByVA_PoU((word_t)pdSlot, addrFromPPtr(pdSlot));
        flushTable(pd, asid, vaddr, pt);
    }
}

void copyGlobalMappings(pde_t *newPD)
{
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t i;
    pde_t *global_pd = armKSGlobalPD;

    for (i = PPTR_BASE >> ARMSectionBits; i < BIT(PD_INDEX_BITS); i++) {
        if (i != PD_ASID_SLOT) {
            newPD[i] = global_pd[i];
        }
    }
#else
#ifdef CONFIG_KERNEL_GLOBALS_FRAME
    /* Kernel and user MMUs are completely independent, however,
     * we still need to share the globals page. */
    pde_t pde;
    pde = pde_pde_coarse_new(addrFromPPtr(armUSGlobalPT));
    newPD[BIT(PD_INDEX_BITS) - 1] = pde;
#endif /* CONFIG_KERNEL_GLOBALS_FRAME */
#endif
}

exception_t handleVMFault(tcb_t *thread, vm_fault_type_t vm_faultType)
{
    switch (vm_faultType) {
    case ARMDataAbort: {
        word_t addr, fault;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        addr = getHDFAR();
        addr = (addressTranslateS1CPR(addr) & ~MASK(PAGE_BITS)) | (addr & MASK(PAGE_BITS));
        /* MSBs tell us that this was a DataAbort */
        fault = getHSR() & 0x3ffffff;
#else
        addr = getFAR();
        fault = getDFSR();
#endif

#ifdef CONFIG_HARDWARE_DEBUG_API
        /* Debug exceptions come in on the Prefetch and Data abort vectors.
         * We have to test the fault-status bits in the IFSR/DFSR to determine
         * if it's a debug exception when one occurs.
         *
         * If it is a debug exception, return early and don't fall through to
         * the normal VM fault handling path.
         */
        if (isDebugFault(fault)) {
            current_fault = handleUserLevelDebugException(0);
            return EXCEPTION_FAULT;
        }
#endif
        current_fault = seL4_Fault_VMFault_new(addr, fault, false);
        return EXCEPTION_FAULT;
    }

    case ARMPrefetchAbort: {
        word_t pc, fault;

        pc = getRestartPC(thread);

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        pc = (addressTranslateS1CPR(pc) & ~MASK(PAGE_BITS)) | (pc & MASK(PAGE_BITS));
        /* MSBs tell us that this was a PrefetchAbort */
        fault = getHSR() & 0x3ffffff;
#else
        fault = getIFSR();
#endif

#ifdef CONFIG_HARDWARE_DEBUG_API
        if (isDebugFault(fault)) {
            current_fault = handleUserLevelDebugException(pc);

            if (seL4_Fault_DebugException_get_exceptionReason(current_fault) == seL4_SingleStep
                && !singleStepFaultCounterReady(thread)) {
                /* Don't send a fault message to the thread yet if we were asked
                 * to step through N instructions and the counter isn't depleted
                 * yet.
                 */
                return EXCEPTION_NONE;
            }
            return EXCEPTION_FAULT;
        }
#endif
        current_fault = seL4_Fault_VMFault_new(pc, fault, true);
        return EXCEPTION_FAULT;
    }

    default:
        fail("Invalid VM fault type");
    }
}

void deleteASIDPool(asid_t asid_base, asid_pool_t *pool)
{
    unsigned int offset;

    /* Haskell error: "ASID pool's base must be aligned" */
    assert((asid_base & MASK(asidLowBits)) == 0);

    if (armKSASIDTable[asid_base >> asidLowBits] == pool) {
        for (offset = 0; offset < BIT(asidLowBits); offset++) {
            if (pool->array[offset]) {
                flushSpace(asid_base + offset);
                invalidateASIDEntry(asid_base + offset);
            }
        }
        armKSASIDTable[asid_base >> asidLowBits] = NULL;
        setVMRoot(NODE_STATE(ksCurThread));
    }
}

void deleteASID(asid_t asid, pde_t *pd)
{
    asid_pool_t *poolPtr;

    poolPtr = armKSASIDTable[asid >> asidLowBits];

    if (poolPtr != NULL && poolPtr->array[asid & MASK(asidLowBits)] == pd) {
        flushSpace(asid);
        invalidateASIDEntry(asid);
        poolPtr->array[asid & MASK(asidLowBits)] = NULL;
        setVMRoot(NODE_STATE(ksCurThread));
    }
}

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
static pte_t pte_pte_invalid_new(void)
{
    /* Invalid as every PTE must have bit 0 set (large PTE) or bit 1 set (small
     * PTE). 0 == 'translation fault' in ARM parlance.
     */
    return (pte_t) {
        {
            0
        }
    };
}
#endif

void unmapPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, void *pptr)
{
    findPDForASID_ret_t find_ret;
    paddr_t addr = addrFromPPtr(pptr);

    find_ret = findPDForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        return;
    }

    switch (page_size) {
    case ARMSmallPage: {
        lookupPTSlot_ret_t lu_ret;

        lu_ret = lookupPTSlot(find_ret.pd, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }

        if (unlikely(pte_ptr_get_pteType(lu_ret.ptSlot) != pte_pte_small)) {
            return;
        }
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pte_pte_small_ptr_get_contiguous_hint(lu_ret.ptSlot) != 0)) {
            return;
        }
#endif
        if (unlikely(pte_pte_small_ptr_get_address(lu_ret.ptSlot) != addr)) {
            return;
        }

        *(lu_ret.ptSlot) = pte_pte_invalid_new();
        cleanByVA_PoU((word_t)lu_ret.ptSlot, addrFromPPtr(lu_ret.ptSlot));

        break;
    }

    case ARMLargePage: {
        lookupPTSlot_ret_t lu_ret;
        word_t i;

        lu_ret = lookupPTSlot(find_ret.pd, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pte_ptr_get_pteType(lu_ret.ptSlot) != pte_pte_large)) {
            return;
        }
        if (unlikely(pte_pte_large_ptr_get_address(lu_ret.ptSlot) != addr)) {
            return;
        }
#else
        if (unlikely(pte_ptr_get_pteType(lu_ret.ptSlot) != pte_pte_small)) {
            return;
        }
        if (unlikely(pte_pte_small_ptr_get_contiguous_hint(lu_ret.ptSlot) != 1)) {
            return;
        }
        if (unlikely(pte_pte_small_ptr_get_address(lu_ret.ptSlot) != addr)) {
            return;
        }
#endif

        for (i = 0; i < PAGES_PER_LARGE_PAGE; i++) {
            lu_ret.ptSlot[i] = pte_pte_invalid_new();
        }
        cleanCacheRange_PoU((word_t)&lu_ret.ptSlot[0],
                            LAST_BYTE_PTE(lu_ret.ptSlot, PAGES_PER_LARGE_PAGE),
                            addrFromPPtr(&lu_ret.ptSlot[0]));

        break;
    }

    case ARMSection: {
        pde_t *pd;

        pd = lookupPDSlot(find_ret.pd, vptr);

        if (unlikely(pde_ptr_get_pdeType(pd) != pde_pde_section)) {
            return;
        }
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pde_pde_section_ptr_get_size(pd) != 0)) {
#else
        if (unlikely(pde_pde_section_ptr_get_contiguous_hint(pd) != 0)) {
#endif
            return;
        }
        if (unlikely(pde_pde_section_ptr_get_address(pd) != addr)) {
            return;
        }

        *pd = pde_pde_invalid_new(0, 0);
        cleanByVA_PoU((word_t)pd, addrFromPPtr(pd));

        break;
    }

    case ARMSuperSection: {
        pde_t *pd;
        word_t i;

        pd = lookupPDSlot(find_ret.pd, vptr);

        if (unlikely(pde_ptr_get_pdeType(pd) != pde_pde_section)) {
            return;
        }
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pde_pde_section_ptr_get_size(pd) != 1)) {
#else
        if (unlikely(pde_pde_section_ptr_get_contiguous_hint(pd) != 1)) {
#endif
            return;
        }
        if (unlikely(pde_pde_section_ptr_get_address(pd) != addr)) {
            return;
        }

        for (i = 0; i < SECTIONS_PER_SUPER_SECTION; i++) {
            pd[i] = pde_pde_invalid_new(0, 0);
        }
        cleanCacheRange_PoU((word_t)&pd[0], LAST_BYTE_PDE(pd, SECTIONS_PER_SUPER_SECTION),
                            addrFromPPtr(&pd[0]));

        break;
    }

    default:
        fail("Invalid ARM page type");
        break;
    }

    /* Flush the page now that the mapping has been updated */
    flushPage(page_size, find_ret.pd, asid, vptr);
}

void flushPage(vm_page_size_t page_size, pde_t *pd, asid_t asid, word_t vptr)
{
    pde_t stored_hw_asid;
    word_t base_addr;
    bool_t root_switched;

    assert((vptr & MASK(pageBitsForSize(page_size))) == 0);

    /* Switch to the address space to allow a cache clean by VA */
    root_switched = setVMRootForFlush(pd, asid);
    stored_hw_asid = loadHWASID(asid);

    if (pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        base_addr = vptr & ~MASK(12);

        /* Do the TLB flush */
        invalidateTranslationSingle(base_addr | pde_pde_invalid_get_stored_hw_asid(stored_hw_asid));

        if (root_switched) {
            setVMRoot(NODE_STATE(ksCurThread));
        }
    }
}

void flushTable(pde_t *pd, asid_t asid, word_t vptr, pte_t *pt)
{
    pde_t stored_hw_asid;
    bool_t root_switched;

    assert((vptr & MASK(PT_INDEX_BITS + ARMSmallPageBits)) == 0);

    /* Switch to the address space to allow a cache clean by VA */
    root_switched = setVMRootForFlush(pd, asid);
    stored_hw_asid = loadHWASID(asid);

    if (pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        invalidateTranslationASID(pde_pde_invalid_get_stored_hw_asid(stored_hw_asid));
        if (root_switched) {
            setVMRoot(NODE_STATE(ksCurThread));
        }
    }
}

void flushSpace(asid_t asid)
{
    pde_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);

    /* Clean the entire data cache, to guarantee that any VAs mapped
     * in the deleted space are clean (because we can't clean by VA after
     * deleting the space) */
    cleanCaches_PoU();

    /* If the given ASID doesn't have a hardware ASID
     * assigned, then it can't have any mappings in the TLB */
    if (!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        return;
    }

    /* Do the TLB flush */
    invalidateTranslationASID(pde_pde_invalid_get_stored_hw_asid(stored_hw_asid));
}

void invalidateTLBByASID(asid_t asid)
{
    pde_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);

    /* If the given ASID doesn't have a hardware ASID
     * assigned, then it can't have any mappings in the TLB */
    if (!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        return;
    }

    /* Do the TLB flush */
    invalidateTranslationASID(pde_pde_invalid_get_stored_hw_asid(stored_hw_asid));
}

static inline bool_t CONST checkVPAlignment(vm_page_size_t sz, word_t w)
{
    return (w & MASK(pageBitsForSize(sz))) == 0;
}
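/* e.g. (assuming a 1MiB ARMSection) checkVPAlignment(ARMSection, 0x00140000)
 * is false, while checkVPAlignment(ARMSection, 0x00100000) is true. */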
1595
1596struct create_mappings_pte_return {
1597    exception_t status;
1598    pte_t pte;
1599    pte_range_t pte_entries;
1600};
1601typedef struct create_mappings_pte_return create_mappings_pte_return_t;
1602
1603struct create_mappings_pde_return {
1604    exception_t status;
1605    pde_t pde;
1606    pde_range_t pde_entries;
1607};
1608typedef struct create_mappings_pde_return create_mappings_pde_return_t;

static create_mappings_pte_return_t
createSafeMappingEntries_PTE
(paddr_t base, word_t vaddr, vm_page_size_t frameSize,
 vm_rights_t vmRights, vm_attributes_t attr, pde_t *pd)
{
    create_mappings_pte_return_t ret;
    lookupPTSlot_ret_t lu_ret;
    word_t i;

    switch (frameSize) {

    case ARMSmallPage:

        ret.pte_entries.base = NULL; /* to avoid uninitialised warning */
        ret.pte_entries.length = 1;

        ret.pte = makeUserPTE(ARMSmallPage, base,
                              vm_attributes_get_armPageCacheable(attr),
                              vm_attributes_get_armExecuteNever(attr),
                              vmRights);

        lu_ret = lookupPTSlot(pd, vaddr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            current_syscall_error.type =
                seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource =
                false;
            ret.status = EXCEPTION_SYSCALL_ERROR;
            /* current_lookup_fault will have been set by
             * lookupPTSlot */
            return ret;
        }

        ret.pte_entries.base = lu_ret.ptSlot;
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pte_ptr_get_pteType(ret.pte_entries.base) ==
                     pte_pte_large)) {
#else
        if (unlikely(pte_ptr_get_pteType(ret.pte_entries.base) == pte_pte_small
                     && pte_pte_small_ptr_get_contiguous_hint(ret.pte_entries.base))) {
#endif
            current_syscall_error.type =
                seL4_DeleteFirst;

            ret.status = EXCEPTION_SYSCALL_ERROR;
            return ret;
        }

        ret.status = EXCEPTION_NONE;
        return ret;

    case ARMLargePage:

        ret.pte_entries.base = NULL; /* to avoid uninitialised warning */
        ret.pte_entries.length = PAGES_PER_LARGE_PAGE;

        ret.pte = makeUserPTE(ARMLargePage, base,
                              vm_attributes_get_armPageCacheable(attr),
                              vm_attributes_get_armExecuteNever(attr),
                              vmRights);

        lu_ret = lookupPTSlot(pd, vaddr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            current_syscall_error.type =
                seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource =
                false;
            ret.status = EXCEPTION_SYSCALL_ERROR;
            /* current_lookup_fault will have been set by
             * lookupPTSlot */
            return ret;
        }

        ret.pte_entries.base = lu_ret.ptSlot;

        for (i = 0; i < PAGES_PER_LARGE_PAGE; i++) {
            if (unlikely(pte_get_pteType(ret.pte_entries.base[i]) ==
                         pte_pte_small)
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                && !pte_pte_small_get_contiguous_hint(ret.pte_entries.base[i])
#endif
               ) {
                current_syscall_error.type =
                    seL4_DeleteFirst;

                ret.status = EXCEPTION_SYSCALL_ERROR;
                return ret;
            }
        }

        ret.status = EXCEPTION_NONE;
        return ret;

    default:
        fail("Invalid or unexpected ARM page type.");

    }
}

static create_mappings_pde_return_t
createSafeMappingEntries_PDE
(paddr_t base, word_t vaddr, vm_page_size_t frameSize,
 vm_rights_t vmRights, vm_attributes_t attr, pde_t *pd)
{
    create_mappings_pde_return_t ret;
    pde_tag_t currentPDEType;
    word_t i;

    switch (frameSize) {

    /* PDE mappings */
    case ARMSection:
        ret.pde_entries.base = lookupPDSlot(pd, vaddr);
        ret.pde_entries.length = 1;

        ret.pde = makeUserPDE(ARMSection, base,
                              vm_attributes_get_armParityEnabled(attr),
                              vm_attributes_get_armPageCacheable(attr),
                              vm_attributes_get_armExecuteNever(attr),
                              0,
                              vmRights);

        currentPDEType =
            pde_ptr_get_pdeType(ret.pde_entries.base);
        if (unlikely(currentPDEType != pde_pde_invalid &&
                     (currentPDEType != pde_pde_section ||
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
                      pde_pde_section_ptr_get_size(ret.pde_entries.base) != 0))) {
#else
                      pde_pde_section_ptr_get_contiguous_hint(ret.pde_entries.base) != 0))) {
#endif
            current_syscall_error.type =
                seL4_DeleteFirst;
            ret.status = EXCEPTION_SYSCALL_ERROR;

            return ret;
        }

        ret.status = EXCEPTION_NONE;
        return ret;

    case ARMSuperSection:
        ret.pde_entries.base = lookupPDSlot(pd, vaddr);
        ret.pde_entries.length = SECTIONS_PER_SUPER_SECTION;

        ret.pde = makeUserPDE(ARMSuperSection, base,
                              vm_attributes_get_armParityEnabled(attr),
                              vm_attributes_get_armPageCacheable(attr),
                              vm_attributes_get_armExecuteNever(attr),
                              0,
                              vmRights);

        for (i = 0; i < SECTIONS_PER_SUPER_SECTION; i++) {
            currentPDEType =
                pde_get_pdeType(ret.pde_entries.base[i]);
            if (unlikely(currentPDEType != pde_pde_invalid &&
                         (currentPDEType != pde_pde_section ||
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
                          pde_pde_section_get_size(ret.pde_entries.base[i]) != 1))) {
#else
                          pde_pde_section_get_contiguous_hint(ret.pde_entries.base[i]) != 1))) {
#endif
                current_syscall_error.type =
                    seL4_DeleteFirst;
                ret.status = EXCEPTION_SYSCALL_ERROR;

                return ret;
            }
        }

        ret.status = EXCEPTION_NONE;
        return ret;

    default:
        fail("Invalid or unexpected ARM page type.");

    }
}

static inline vptr_t pageBase(vptr_t vaddr, vm_page_size_t size)
{
    return vaddr & ~MASK(pageBitsForSize(size));
}
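
/* Illustrative example (non-hyp page sizes): pageBase(0x12345678,
 * ARMSmallPage) == 0x12345000 and pageBase(0x12345678, ARMSection) ==
 * 0x12300000. */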

static bool_t PURE pteCheckIfMapped(pte_t *pte)
{
    return pte_ptr_get_pteType(pte) != pte_pte_invalid;
}

static bool_t PURE pdeCheckIfMapped(pde_t *pde)
{
    return pde_ptr_get_pdeType(pde) != pde_pde_invalid;
}

static void doFlush(int invLabel, vptr_t start, vptr_t end, paddr_t pstart)
{
    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end, id)" */
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        /* The hypervisor does not share an AS with userspace, so we must
         * flush by kernel MVA instead. ARMv7 caches are PIPT, so it makes
         * no difference which mapping we flush through. */
        end = (vptr_t)paddr_to_pptr(pstart) + (end - start);
        start = (vptr_t)paddr_to_pptr(pstart);
    }
    switch (invLabel) {
    case ARMPDClean_Data:
    case ARMPageClean_Data:
        cleanCacheRange_RAM(start, end, pstart);
        break;
    case ARMPDInvalidate_Data:
    case ARMPageInvalidate_Data:
        invalidateCacheRange_RAM(start, end, pstart);
        break;
    case ARMPDCleanInvalidate_Data:
    case ARMPageCleanInvalidate_Data:
        cleanInvalidateCacheRange_RAM(start, end, pstart);
        break;
    case ARMPDUnify_Instruction:
    case ARMPageUnify_Instruction:
        /* First clean data lines to point of unification
           (L2 cache)... */
        cleanCacheRange_PoU(start, end, pstart);
        /* Ensure it's been written. */
        dsb();
        /* ...then invalidate the corresponding instruction lines
           to point of unification... */
        invalidateCacheRange_I(start, end, pstart);
        /* ...then invalidate branch predictors. */
        branchFlushRange(start, end, pstart);
        /* Ensure new instructions come from fresh cache lines. */
        isb();
        break;
    default:
        fail("Invalid operation, shouldn't get here.\n");
    }
}

/* ================= INVOCATION HANDLING STARTS HERE ================== */

static exception_t performPDFlush(int invLabel, pde_t *pd, asid_t asid, vptr_t start,
                                  vptr_t end, paddr_t pstart)
{
    bool_t root_switched;

    /* Flush only if given a non-zero range */
    if (start < end) {
        root_switched = setVMRootForFlush(pd, asid);

        doFlush(invLabel, start, end, pstart);

        if (root_switched) {
            setVMRoot(NODE_STATE(ksCurThread));
        }
    }

    return EXCEPTION_NONE;
}

static exception_t performPageTableInvocationMap(cap_t cap, cte_t *ctSlot,
                                                 pde_t pde, pde_t *pdSlot)
{
    ctSlot->cap = cap;
    *pdSlot = pde;
    cleanByVA_PoU((word_t)pdSlot, addrFromPPtr(pdSlot));

    return EXCEPTION_NONE;
}

static exception_t performPageTableInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (cap_page_table_cap_get_capPTIsMapped(cap)) {
        pte_t *pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
        unmapPageTable(
            cap_page_table_cap_get_capPTMappedASID(cap),
            cap_page_table_cap_get_capPTMappedAddress(cap),
            pt);
        clearMemory((void *)pt, cap_get_capSizeBits(cap));
    }
    cap_page_table_cap_ptr_set_capPTIsMapped(&(ctSlot->cap), 0);

    return EXCEPTION_NONE;
}

static exception_t performPageInvocationMapPTE(asid_t asid, cap_t cap, cte_t *ctSlot, pte_t pte,
                                               pte_range_t pte_entries)
{
    word_t i, j UNUSED;
    bool_t tlbflush_required;

    ctSlot->cap = cap;

    /* We only need to check the first entry: createSafeMappingEntries_PTE
     * guarantees the whole range is consistently mapped or unmapped. */
    tlbflush_required = pteCheckIfMapped(pte_entries.base);

    j = pte_entries.length;
    /** GHOSTUPD: "(\<acute>j <= 16, id)" */

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t base_address = pte_pte_small_get_address(pte);
#endif
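    /* Write the PTE across the whole range: one entry for a small page,
     * PAGES_PER_LARGE_PAGE (16) entries for a large page. Without hyp the
     * entries are identical; under hyp (LPAE) each entry carries its own
     * output address plus the contiguous hint, so the address is stepped
     * per slot. */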
    for (i = 0; i < pte_entries.length; i++) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        pte = pte_pte_small_set_address(pte, base_address + i * BIT(pageBitsForSize(ARMSmallPage)));
#endif
        pte_entries.base[i] = pte;
    }
    cleanCacheRange_PoU((word_t)pte_entries.base,
                        LAST_BYTE_PTE(pte_entries.base, pte_entries.length),
                        addrFromPPtr(pte_entries.base));
    if (unlikely(tlbflush_required)) {
        invalidateTLBByASID(asid);
    }

    return EXCEPTION_NONE;
}

static exception_t performPageInvocationMapPDE(asid_t asid, cap_t cap, cte_t *ctSlot, pde_t pde,
                                               pde_range_t pde_entries)
{
    word_t i, j UNUSED;
    bool_t tlbflush_required;

    ctSlot->cap = cap;

    /* We only need to check the first entry: createSafeMappingEntries_PDE
     * guarantees the whole range is consistently mapped or unmapped. */
    tlbflush_required = pdeCheckIfMapped(pde_entries.base);

    j = pde_entries.length;
    /** GHOSTUPD: "(\<acute>j <= 16, id)" */

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t base_address = pde_pde_section_get_address(pde);
#endif
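    /* Same replication scheme as performPageInvocationMapPTE, at section
     * granularity: one entry for a section, SECTIONS_PER_SUPER_SECTION (16)
     * for a super section. */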
    for (i = 0; i < pde_entries.length; i++) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        pde = pde_pde_section_set_address(pde, base_address + i * BIT(pageBitsForSize(ARMSection)));
#endif
        pde_entries.base[i] = pde;
    }
    cleanCacheRange_PoU((word_t)pde_entries.base,
                        LAST_BYTE_PDE(pde_entries.base, pde_entries.length),
                        addrFromPPtr(pde_entries.base));
    if (unlikely(tlbflush_required)) {
        invalidateTLBByASID(asid);
    }

    return EXCEPTION_NONE;
}

static exception_t performPageInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (generic_frame_cap_get_capFIsMapped(cap)) {
        unmapPage(generic_frame_cap_get_capFSize(cap),
                  generic_frame_cap_get_capFMappedASID(cap),
                  generic_frame_cap_get_capFMappedAddress(cap),
                  (void *)generic_frame_cap_get_capFBasePtr(cap));
    }

    generic_frame_cap_ptr_set_capFMappedAddress(&ctSlot->cap, asidInvalid, 0);

    return EXCEPTION_NONE;
}

static exception_t performPageFlush(int invLabel, pde_t *pd, asid_t asid, vptr_t start,
                                    vptr_t end, paddr_t pstart)
{
    bool_t root_switched;

    /* Flush only if given a non-zero range */
    if (start < end) {
        root_switched = setVMRootForFlush(pd, asid);

        doFlush(invLabel, start, end, pstart);

        if (root_switched) {
            setVMRoot(NODE_STATE(ksCurThread));
        }
    }

    return EXCEPTION_NONE;
}

static exception_t performPageGetAddress(void *vbase_ptr)
{
    paddr_t capFBasePtr;

    /* Get the physical address of this frame. */
    capFBasePtr = addrFromPPtr(vbase_ptr);

    /* Return it in the first message register. */
    setRegister(NODE_STATE(ksCurThread), msgRegisters[0], capFBasePtr);
    setRegister(NODE_STATE(ksCurThread), msgInfoRegister,
                wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, 1)));

    return EXCEPTION_NONE;
}

static exception_t performASIDPoolInvocation(asid_t asid, asid_pool_t *poolPtr,
                                             cte_t *pdCapSlot)
{
    cap_page_directory_cap_ptr_set_capPDMappedASID(&pdCapSlot->cap, asid);
    cap_page_directory_cap_ptr_set_capPDIsMapped(&pdCapSlot->cap, 1);
    poolPtr->array[asid & MASK(asidLowBits)] =
        PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pdCapSlot->cap));

    return EXCEPTION_NONE;
}

static exception_t performASIDControlInvocation(void *frame, cte_t *slot,
                                                cte_t *parent, asid_t asid_base)
{
    /** AUXUPD: "(True, typ_region_bytes (ptr_val \<acute>frame) 12)" */
    /** GHOSTUPD: "(True, gs_clear_region (ptr_val \<acute>frame) 12)" */
    cap_untyped_cap_ptr_set_capFreeIndex(&(parent->cap),
                                         MAX_FREE_INDEX(cap_untyped_cap_get_capBlockSize(parent->cap)));

    memzero(frame, 1 << ARMSmallPageBits);
    /** AUXUPD: "(True, ptr_retyps 1 (Ptr (ptr_val \<acute>frame) :: asid_pool_C ptr))" */

    cteInsert(cap_asid_pool_cap_new(asid_base, WORD_REF(frame)),
              parent, slot);
    /* Haskell error: "ASID pool's base must be aligned" */
    assert((asid_base & MASK(asidLowBits)) == 0);
    armKSASIDTable[asid_base >> asidLowBits] = (asid_pool_t *)frame;

    return EXCEPTION_NONE;
}

static exception_t decodeARMPageDirectoryInvocation(word_t invLabel, word_t length,
                                                    cptr_t cptr, cte_t *cte, cap_t cap,
                                                    extra_caps_t excaps, word_t *buffer)
{
    switch (invLabel) {
    case ARMPDClean_Data:
    case ARMPDInvalidate_Data:
    case ARMPDCleanInvalidate_Data:
    case ARMPDUnify_Instruction: {
        vptr_t start, end;
        paddr_t pstart;
        findPDForASID_ret_t find_ret;
        asid_t asid;
        pde_t *pd;
        resolve_ret_t resolve_ret;

        if (length < 2) {
            userError("PD Flush: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        start = getSyscallArg(0, buffer);
        end =   getSyscallArg(1, buffer);

        /* Check sanity of arguments */
        if (end <= start) {
            userError("PD Flush: Invalid range");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Don't let applications flush kernel regions. */
        if (start >= USER_TOP || end > USER_TOP) {
            userError("PD Flush: Overlaps kernel region.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(cap_get_capType(cap) != cap_page_directory_cap ||
                     !cap_page_directory_cap_get_capPDIsMapped(cap))) {
            userError("PD Flush: Invalid cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Make sure that the supplied PD is ok */
        pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(cap));
        asid = cap_page_directory_cap_get_capPDMappedASID(cap);

        find_ret = findPDForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            userError("PD Flush: No PD for ASID");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(find_ret.pd != pd)) {
            userError("PD Flush: Invalid PD Cap");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Look up the frame containing 'start'. */
        resolve_ret = resolveVAddr(pd, start);

        /* Check that there's actually something there. */
        if (!resolve_ret.valid) {
            /* Fail silently, as there can't be any stale cached data (for the
             * given address space), and getting a syscall error because the
             * relevant page is non-resident would be 'astonishing'. */
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return EXCEPTION_NONE;
        }

        /* Refuse to cross a page boundary. */
        if (pageBase(start, resolve_ret.frameSize) !=
            pageBase(end - 1, resolve_ret.frameSize)) {
            userError("PD Flush: Range crosses a page boundary.");
            current_syscall_error.type = seL4_RangeError;
            current_syscall_error.rangeErrorMin = start;
            current_syscall_error.rangeErrorMax =
                pageBase(start, resolve_ret.frameSize) +
                MASK(pageBitsForSize(resolve_ret.frameSize));
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Calculate the physical start address. */
        pstart = resolve_ret.frameBase
                 + (start & MASK(pageBitsForSize(resolve_ret.frameSize)));

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPDFlush(invLabel, pd, asid, start, end - 1, pstart);
    }

    default:
        userError("PD: Invalid invocation number");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}

static exception_t decodeARMPageTableInvocation(word_t invLabel, word_t length,
                                                cte_t *cte, cap_t cap, extra_caps_t excaps,
                                                word_t *buffer)
{
    word_t vaddr, pdIndex;

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    vm_attributes_t attr;
#endif
    cap_t pdCap;
    pde_t *pd, *pdSlot;
    pde_t pde;
    asid_t asid;
    paddr_t paddr;

    if (invLabel == ARMPageTableUnmap) {
        if (unlikely(!isFinalCapability(cte))) {
            userError("ARMPageTableUnmap: Cannot unmap while other caps to the page table exist.");
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageTableInvocationUnmap(cap, cte);
    }

    if (unlikely(invLabel != ARMPageTableMap)) {
        userError("ARMPageTable: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(length < 2 || excaps.excaprefs[0] == NULL)) {
        userError("ARMPageTableMap: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(cap_page_table_cap_get_capPTIsMapped(cap))) {
        userError("ARMPageTableMap: Page table is already mapped into a page directory.");
        current_syscall_error.type =
            seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    vaddr = getSyscallArg(0, buffer);
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    attr = vmAttributesFromWord(getSyscallArg(1, buffer));
#endif
    pdCap = excaps.excaprefs[0]->cap;

    if (unlikely(cap_get_capType(pdCap) != cap_page_directory_cap ||
                 !cap_page_directory_cap_get_capPDIsMapped(pdCap))) {
        userError("ARMPageTableMap: Invalid PD cap.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pdCap));
    asid = cap_page_directory_cap_get_capPDMappedASID(pdCap);

    if (unlikely(vaddr >= USER_TOP)) {
        userError("ARMPageTableMap: Virtual address cannot be in kernel window. vaddr: 0x%08lx, USER_TOP: 0x%08x", vaddr,
                  USER_TOP);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    {
        findPDForASID_ret_t find_ret;

        find_ret = findPDForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            userError("ARMPageTableMap: ASID lookup failed.");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(find_ret.pd != pd)) {
            userError("ARMPageTableMap: ASID lookup failed.");
            current_syscall_error.type =
                seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }
    }

    pdIndex = vaddr >> (PAGE_BITS + PT_INDEX_BITS);
    pdSlot = &pd[pdIndex];
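    /* For example, with PAGE_BITS == 12 and PT_INDEX_BITS == 8 (non-hyp
     * short descriptors) this is vaddr >> 20: each PD slot covers the 1MiB
     * region served by one page table. */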
    if (unlikely(pde_ptr_get_pdeType(pdSlot) != pde_pde_invalid)) {
        userError("ARMPageTableMap: Page directory already has an entry for the supplied address.");
        current_syscall_error.type = seL4_DeleteFirst;

        return EXCEPTION_SYSCALL_ERROR;
    }

    paddr = addrFromPPtr(
                PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap)));
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    pde = pde_pde_coarse_new(
              paddr,
              vm_attributes_get_armParityEnabled(attr),
              0 /* Domain */
          );
#else
    pde = pde_pde_coarse_new(paddr);
#endif
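    /* The coarse PDE points the hardware walker at the new page table; the
     * table only becomes live once the PDE is written (and cleaned to the
     * PoU) in performPageTableInvocationMap. */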

    cap = cap_page_table_cap_set_capPTIsMapped(cap, 1);
    cap = cap_page_table_cap_set_capPTMappedASID(cap, asid);
    cap = cap_page_table_cap_set_capPTMappedAddress(cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performPageTableInvocationMap(cap, cte, pde, pdSlot);
}

static exception_t decodeARMFrameInvocation(word_t invLabel, word_t length,
                                            cte_t *cte, cap_t cap, extra_caps_t excaps,
                                            word_t *buffer)
{
    switch (invLabel) {
    case ARMPageMap: {
        word_t vaddr, vtop, w_rightsMask;
        paddr_t capFBasePtr;
        cap_t pdCap;
        pde_t *pd;
        asid_t asid;
        vm_rights_t capVMRights, vmRights;
        vm_page_size_t frameSize;
        vm_attributes_t attr;

        if (unlikely(length < 3 || excaps.excaprefs[0] == NULL)) {
            userError("ARMPageMap: Truncated message.");
            current_syscall_error.type =
                seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        vaddr = getSyscallArg(0, buffer);
        w_rightsMask = getSyscallArg(1, buffer);
        attr = vmAttributesFromWord(getSyscallArg(2, buffer));
        pdCap = excaps.excaprefs[0]->cap;

        frameSize = generic_frame_cap_get_capFSize(cap);
        capVMRights = generic_frame_cap_get_capFVMRights(cap);

        if (unlikely(cap_get_capType(pdCap) != cap_page_directory_cap ||
                     !cap_page_directory_cap_get_capPDIsMapped(pdCap))) {
            userError("ARMPageMap: Bad PageDirectory cap.");
            current_syscall_error.type =
                seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }
        pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(
                         pdCap));
        asid = cap_page_directory_cap_get_capPDMappedASID(pdCap);

        if (generic_frame_cap_get_capFIsMapped(cap)) {
            if (generic_frame_cap_get_capFMappedASID(cap) != asid) {
                current_syscall_error.type = seL4_InvalidCapability;
                current_syscall_error.invalidCapNumber = 1;

                return EXCEPTION_SYSCALL_ERROR;
            }

            if (generic_frame_cap_get_capFMappedAddress(cap) != vaddr) {
                userError("ARMPageMap: Attempting to map frame at multiple addresses");
                current_syscall_error.type = seL4_InvalidArgument;
                current_syscall_error.invalidArgumentNumber = 0;

                return EXCEPTION_SYSCALL_ERROR;
            }
        } else {
            vtop = vaddr + BIT(pageBitsForSize(frameSize)) - 1;

            if (unlikely(vtop >= USER_TOP)) {
                userError("ARMPageMap: Cannot map frame over kernel window. vaddr: 0x%08lx, USER_TOP: 0x%08x", vaddr, USER_TOP);
                current_syscall_error.type =
                    seL4_InvalidArgument;
                current_syscall_error.invalidArgumentNumber = 0;

                return EXCEPTION_SYSCALL_ERROR;
            }
        }

        {
            findPDForASID_ret_t find_ret;

            find_ret = findPDForASID(asid);
            if (unlikely(find_ret.status != EXCEPTION_NONE)) {
                userError("ARMPageMap: No PD for ASID");
                current_syscall_error.type =
                    seL4_FailedLookup;
                current_syscall_error.failedLookupWasSource =
                    false;

                return EXCEPTION_SYSCALL_ERROR;
            }

            if (unlikely(find_ret.pd != pd)) {
                userError("ARMPageMap: ASID lookup failed.");
                current_syscall_error.type =
                    seL4_InvalidCapability;
                current_syscall_error.invalidCapNumber = 1;

                return EXCEPTION_SYSCALL_ERROR;
            }
        }

        vmRights =
            maskVMRights(capVMRights, rightsFromWord(w_rightsMask));

        if (unlikely(!checkVPAlignment(frameSize, vaddr))) {
            userError("ARMPageMap: Virtual address has incorrect alignment.");
            current_syscall_error.type =
                seL4_AlignmentError;

            return EXCEPTION_SYSCALL_ERROR;
        }

        capFBasePtr = addrFromPPtr((void *)
                                   generic_frame_cap_get_capFBasePtr(cap));

        cap = generic_frame_cap_set_capFMappedAddress(cap, asid,
                                                      vaddr);
        if (frameSize == ARMSmallPage || frameSize == ARMLargePage) {
            create_mappings_pte_return_t map_ret;
            map_ret = createSafeMappingEntries_PTE(capFBasePtr, vaddr,
                                                   frameSize, vmRights,
                                                   attr, pd);
            if (unlikely(map_ret.status != EXCEPTION_NONE)) {
#ifdef CONFIG_PRINTING
                if (current_syscall_error.type == seL4_DeleteFirst) {
                    userError("ARMPageMap: Page table entry was not free.");
                }
#endif
                return map_ret.status;
            }

            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationMapPTE(asid, cap, cte,
                                               map_ret.pte,
                                               map_ret.pte_entries);
        } else {
            create_mappings_pde_return_t map_ret;
            map_ret = createSafeMappingEntries_PDE(capFBasePtr, vaddr,
                                                   frameSize, vmRights,
                                                   attr, pd);
            if (unlikely(map_ret.status != EXCEPTION_NONE)) {
#ifdef CONFIG_PRINTING
                if (current_syscall_error.type == seL4_DeleteFirst) {
                    userError("ARMPageMap: Page directory entry was not free.");
                }
#endif
                return map_ret.status;
            }

            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationMapPDE(asid, cap, cte,
                                               map_ret.pde,
                                               map_ret.pde_entries);
        }
    }

    case ARMPageUnmap: {
#ifdef CONFIG_TK1_SMMU
        if (isIOSpaceFrameCap(cap)) {
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationUnmapIO(cap, cte);
        } else
#endif
        {
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationUnmap(cap, cte);
        }
    }

#ifdef CONFIG_TK1_SMMU
    case ARMPageMapIO: {
        return decodeARMIOMapInvocation(invLabel, length, cte, cap, excaps, buffer);
    }
#endif

    case ARMPageClean_Data:
    case ARMPageInvalidate_Data:
    case ARMPageCleanInvalidate_Data:
    case ARMPageUnify_Instruction: {
        asid_t asid;
        vptr_t vaddr;
        findPDForASID_ret_t pd;
        vptr_t start, end;
        paddr_t pstart;
        word_t page_size;
        word_t page_base;

        if (length < 2) {
            userError("Page Flush: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        asid = generic_frame_cap_get_capFMappedASID(cap);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        /* Must use kernel vaddr in hyp mode. */
        vaddr = generic_frame_cap_get_capFBasePtr(cap);
#else
        vaddr = generic_frame_cap_get_capFMappedAddress(cap);
#endif

        if (unlikely(!generic_frame_cap_get_capFIsMapped(cap))) {
            userError("Page Flush: Frame is not mapped.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        pd = findPDForASID(asid);
        if (unlikely(pd.status != EXCEPTION_NONE)) {
            userError("Page Flush: No PD for ASID");
            current_syscall_error.type =
                seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        start = getSyscallArg(0, buffer);
        end =   getSyscallArg(1, buffer);

        /* check that the range is sane */
        if (end <= start) {
            userError("Page Flush: Invalid range");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* start and end are currently offsets relative to this page */
        page_size = 1 << pageBitsForSize(generic_frame_cap_get_capFSize(cap));
        page_base = addrFromPPtr((void *)generic_frame_cap_get_capFBasePtr(cap));

        if (start >= page_size || end > page_size) {
            userError("Page Flush: Requested range not inside page");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* turn start and end into absolute addresses */
        pstart = page_base + start;
        start += vaddr;
        end += vaddr;
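
        /* Worked example (values illustrative, non-hyp): flushing offsets
         * [0x100, 0x200) of a small page mapped at vaddr 0x20000 whose frame
         * sits at paddr 0x80054000 yields start = 0x20100, end = 0x20200 and
         * pstart = 0x80054100. */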

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        /* Don't let applications flush outside of the kernel window */
        if (pstart < physBase || ((end - start) + pstart) > PADDR_TOP) {
            userError("Page Flush: Overlaps kernel region.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }
#endif

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageFlush(invLabel, pd.pd, asid, start, end - 1, pstart);
    }

    case ARMPageGetAddress: {
        /* Check that there are enough message registers */
        assert(n_msgRegisters >= 1);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageGetAddress((void *)generic_frame_cap_get_capFBasePtr(cap));
    }

    default:
        userError("ARMPage: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;

        return EXCEPTION_SYSCALL_ERROR;
    }
}

exception_t decodeARMMMUInvocation(word_t invLabel, word_t length, cptr_t cptr,
                                   cte_t *cte, cap_t cap, extra_caps_t excaps,
                                   word_t *buffer)
{
    switch (cap_get_capType(cap)) {
    case cap_page_directory_cap:
        return decodeARMPageDirectoryInvocation(invLabel, length, cptr, cte,
                                                cap, excaps, buffer);

    case cap_page_table_cap:
        return decodeARMPageTableInvocation(invLabel, length, cte,
                                            cap, excaps, buffer);

    case cap_small_frame_cap:
    case cap_frame_cap:
        return decodeARMFrameInvocation(invLabel, length, cte,
                                        cap, excaps, buffer);

    case cap_asid_control_cap: {
        word_t i;
        asid_t asid_base;
        word_t index, depth;
        cap_t untyped, root;
        cte_t *parentSlot, *destSlot;
        lookupSlot_ret_t lu_ret;
        void *frame;
        exception_t status;

        if (unlikely(invLabel != ARMASIDControlMakePool)) {
            userError("ASIDControl: Illegal operation.");
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(length < 2 || excaps.excaprefs[0] == NULL
                     || excaps.excaprefs[1] == NULL)) {
            userError("ASIDControlMakePool: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        index = getSyscallArg(0, buffer);
        depth = getSyscallArg(1, buffer);
        parentSlot = excaps.excaprefs[0];
        untyped = parentSlot->cap;
        root = excaps.excaprefs[1]->cap;

        /* Find first free pool */
        for (i = 0; i < nASIDPools && armKSASIDTable[i]; i++);

        if (unlikely(i == nASIDPools)) { /* If no unallocated pool is found */
            userError("ASIDControlMakePool: No free pools found.");
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        asid_base = i << asidLowBits;
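        /* Pool i covers ASIDs [i << asidLowBits, (i + 1) << asidLowBits);
         * e.g. with asidLowBits == 10, each pool manages 1024 ASIDs. */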

        if (unlikely(cap_get_capType(untyped) != cap_untyped_cap ||
                     cap_untyped_cap_get_capBlockSize(untyped) !=
                     seL4_ASIDPoolBits) || cap_untyped_cap_get_capIsDevice(untyped)) {
            userError("ASIDControlMakePool: Invalid untyped cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        status = ensureNoChildren(parentSlot);
        if (unlikely(status != EXCEPTION_NONE)) {
            userError("ASIDControlMakePool: Untyped has children. Revoke first.");
            return status;
        }

        frame = WORD_PTR(cap_untyped_cap_get_capPtr(untyped));

        lu_ret = lookupTargetSlot(root, index, depth);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            userError("ASIDControlMakePool: Failed to lookup destination slot.");
            return lu_ret.status;
        }
        destSlot = lu_ret.slot;

        status = ensureEmptySlot(destSlot);
        if (unlikely(status != EXCEPTION_NONE)) {
            userError("ASIDControlMakePool: Destination slot not empty.");
            return status;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDControlInvocation(frame, destSlot,
                                            parentSlot, asid_base);
    }

    case cap_asid_pool_cap: {
        cap_t pdCap;
        cte_t *pdCapSlot;
        asid_pool_t *pool;
        word_t i;
        asid_t asid;

        if (unlikely(invLabel != ARMASIDPoolAssign)) {
            userError("ASIDPool: Illegal operation.");
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(excaps.excaprefs[0] == NULL)) {
            userError("ASIDPoolAssign: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        pdCapSlot = excaps.excaprefs[0];
        pdCap = pdCapSlot->cap;

        if (unlikely(
                cap_get_capType(pdCap) != cap_page_directory_cap ||
                cap_page_directory_cap_get_capPDIsMapped(pdCap))) {
            userError("ASIDPoolAssign: Invalid page directory cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        pool = armKSASIDTable[cap_asid_pool_cap_get_capASIDBase(cap) >> asidLowBits];
        if (unlikely(!pool)) {
            userError("ASIDPoolAssign: Failed to lookup pool.");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            current_lookup_fault = lookup_fault_invalid_root_new();

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(pool != ASID_POOL_PTR(cap_asid_pool_cap_get_capASIDPool(cap)))) {
            userError("ASIDPoolAssign: Failed to lookup pool.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;

            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Find first free ASID */
        asid = cap_asid_pool_cap_get_capASIDBase(cap);
        for (i = 0; i < (1 << asidLowBits) && (asid + i == 0 || pool->array[i]); i++);
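
        /* Note the "asid + i == 0" guard: ASID 0 is reserved as asidInvalid
         * and is never handed out, so slot 0 of the first pool is skipped. */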

        if (unlikely(i == 1 << asidLowBits)) {
            userError("ASIDPoolAssign: No free ASID.");
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        asid += i;

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDPoolInvocation(asid, pool, pdCapSlot);
    }

    default:
        fail("Invalid ARM arch cap type");
    }
}

#ifdef CONFIG_KERNEL_LOG_BUFFER
exception_t benchmark_arch_map_logBuffer(word_t frame_cptr)
{
    lookupCapAndSlot_ret_t lu_ret;
    vm_page_size_t frameSize;
    pptr_t frame_pptr;

    /* faulting section */
    lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), frame_cptr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invalid cap #%lu.", frame_cptr);
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(lu_ret.cap) != cap_frame_cap) {
        userError("Invalid cap. The log buffer must be backed by a frame cap.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frameSize = generic_frame_cap_get_capFSize(lu_ret.cap);

    if (frameSize != ARMSection) {
        userError("Invalid frame size. The kernel expects a 1M log buffer.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frame_pptr = generic_frame_cap_get_capFBasePtr(lu_ret.cap);

    ksUserLogBuffer = pptr_to_paddr((void *) frame_pptr);

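    /* Map the log buffer frame into the kernel log window one small page at
     * a time: BIT(PT_INDEX_BITS) entries, each covering BIT(seL4_PageBits)
     * (4KiB) of the buffer. */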
    for (int idx = 0; idx < BIT(PT_INDEX_BITS); idx++) {
        paddr_t physical_address = ksUserLogBuffer + (idx << seL4_PageBits);

        armKSGlobalLogPT[idx] =
            pte_pte_small_new(
                physical_address,
                0, /* global */
                0, /* Not shared */
                0, /* APX = 0, privileged full access */
                0, /* TEX = 0 */
                1, /* VMKernelOnly */
                1, /* Cacheable */
                0, /* Write-through to minimise perf hit */
                0  /* executable */
            );

        cleanByVA_PoU((vptr_t)&armKSGlobalLogPT[idx], pptr_to_paddr(&armKSGlobalLogPT[idx]));
        invalidateTranslationSingle(KS_LOG_PPTR + (idx * BIT(seL4_PageBits)));
    }

    return EXCEPTION_NONE;
}
#endif /* CONFIG_KERNEL_LOG_BUFFER */

#ifdef CONFIG_DEBUG_BUILD
void kernelPrefetchAbort(word_t pc) VISIBLE;
void kernelDataAbort(word_t pc) VISIBLE;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

void kernelUndefinedInstruction(word_t pc) VISIBLE;

void kernelPrefetchAbort(word_t pc)
{
    word_t UNUSED sr = getHSR();

    printf("\n\nKERNEL PREFETCH ABORT!\n");
    printf("Faulting instruction: 0x%x\n", (unsigned int)pc);
    printf("HSR: 0x%x\n", (unsigned int)sr);

    halt();
}

void kernelDataAbort(word_t pc)
{
    word_t UNUSED far = getHDFAR();
    word_t UNUSED sr = getHSR();

    printf("\n\nKERNEL DATA ABORT!\n");
    printf("Faulting instruction: 0x%x\n", (unsigned int)pc);
    printf("HDFAR: 0x%x HSR: 0x%x\n", (unsigned int)far, (unsigned int)sr);

    halt();
}

void kernelUndefinedInstruction(word_t pc)
{
    word_t UNUSED sr = getHSR();

    printf("\n\nKERNEL UNDEFINED INSTRUCTION!\n");
    printf("Faulting instruction: 0x%x\n", (unsigned int)pc);
    printf("HSR: 0x%x\n", (unsigned int)sr);

    halt();
}

#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */

void kernelPrefetchAbort(word_t pc)
{
    word_t UNUSED ifsr = getIFSR();

    printf("\n\nKERNEL PREFETCH ABORT!\n");
    printf("Faulting instruction: 0x%x\n", (unsigned int)pc);
    printf("IFSR: 0x%x\n", (unsigned int)ifsr);

    halt();
}

void kernelDataAbort(word_t pc)
{
    word_t UNUSED dfsr = getDFSR();
    word_t UNUSED far = getFAR();

    printf("\n\nKERNEL DATA ABORT!\n");
    printf("Faulting instruction: 0x%x\n", (unsigned int)pc);
    printf("FAR: 0x%x DFSR: 0x%x\n", (unsigned int)far, (unsigned int)dfsr);

    halt();
}
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

#endif /* CONFIG_DEBUG_BUILD */

#ifdef CONFIG_PRINTING
typedef struct readWordFromVSpace_ret {
    exception_t status;
    word_t value;
} readWordFromVSpace_ret_t;

static readWordFromVSpace_ret_t readWordFromVSpace(pde_t *pd, word_t vaddr)
{
    readWordFromVSpace_ret_t ret;
    lookupPTSlot_ret_t ptSlot;
    pde_t *pdSlot;
    paddr_t paddr;
    word_t offset;
    pptr_t kernel_vaddr;
    word_t *value;

    pdSlot = lookupPDSlot(pd, vaddr);
    if (pde_ptr_get_pdeType(pdSlot) == pde_pde_section) {
        paddr = pde_pde_section_ptr_get_address(pdSlot);
        offset = vaddr & MASK(ARMSectionBits);
    } else {
        ptSlot = lookupPTSlot(pd, vaddr);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (ptSlot.status == EXCEPTION_NONE && pte_ptr_get_pteType(ptSlot.ptSlot) == pte_pte_small) {
            paddr = pte_pte_small_ptr_get_address(ptSlot.ptSlot);
            if (pte_pte_small_ptr_get_contiguous_hint(ptSlot.ptSlot)) {
                offset = vaddr & MASK(ARMLargePageBits);
            } else {
                offset = vaddr & MASK(ARMSmallPageBits);
            }
#else
        if (ptSlot.status == EXCEPTION_NONE && pte_ptr_get_pteType(ptSlot.ptSlot) == pte_pte_small) {
            paddr = pte_pte_small_ptr_get_address(ptSlot.ptSlot);
            offset = vaddr & MASK(ARMSmallPageBits);
        } else if (ptSlot.status == EXCEPTION_NONE && pte_ptr_get_pteType(ptSlot.ptSlot) == pte_pte_large) {
            paddr = pte_pte_large_ptr_get_address(ptSlot.ptSlot);
            offset = vaddr & MASK(ARMLargePageBits);
#endif
        } else {
            ret.status = EXCEPTION_LOOKUP_FAULT;
            return ret;
        }
    }

    kernel_vaddr = (word_t)paddr_to_pptr(paddr);
    value = (word_t *)(kernel_vaddr + offset);
    ret.status = EXCEPTION_NONE;
    ret.value = *value;
    return ret;
}

void Arch_userStackTrace(tcb_t *tptr)
{
    cap_t threadRoot;
    pde_t *pd;
    word_t sp;
    int i;

    threadRoot = TCB_PTR_CTE_PTR(tptr, tcbVTable)->cap;

    /* lookup the PD */
    if (cap_get_capType(threadRoot) != cap_page_directory_cap) {
        printf("Invalid vspace\n");
        return;
    }

    pd = (pde_t *)pptr_of_cap(threadRoot);

    sp = getRegister(tptr, SP);
    /* check for alignment so we don't have to worry about accessing
     * words that might be on two different pages */
    if (!IS_ALIGNED(sp, seL4_WordSizeBits)) {
        printf("SP not aligned\n");
        return;
    }

    for (i = 0; i < CONFIG_USER_STACK_TRACE_LENGTH; i++) {
        word_t address = sp + (i * sizeof(word_t));
        readWordFromVSpace_ret_t result;
        result = readWordFromVSpace(pd, address);
        if (result.status == EXCEPTION_NONE) {
            printf("0x%lx: 0x%lx\n", (long)address, (long)result.value);
        } else {
            printf("0x%lx: INVALID\n", (long)address);
        }
    }
}
#endif /* CONFIG_PRINTING */