/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(GD_GPL)
 */

#include <config.h>
#include <types.h>
#include <benchmark/benchmark.h>
#include <api/failures.h>
#include <api/syscall.h>
#include <kernel/boot.h>
#include <kernel/cspace.h>
#include <kernel/thread.h>
#include <kernel/stack.h>
#include <machine/io.h>
#include <machine/debug.h>
#include <model/statedata.h>
#include <object/cnode.h>
#include <object/untyped.h>
#include <arch/api/invocation.h>
#include <arch/kernel/vspace.h>
#include <linker.h>
#include <plat/machine/devices.h>
#include <plat/machine/hardware.h>
#include <armv/context_switch.h>
#include <arch/object/iospace.h>
#include <arch/object/vcpu.h>
#include <arch/machine/tlb.h>

#ifdef CONFIG_BENCHMARK_TRACK_KERNEL_ENTRIES
#include <benchmark/benchmark_track.h>
#endif
/* ARM uses multiple identical mappings in a page table / page directory to construct
 * large mappings. In both cases it happens to be 16 entries, which can be calculated
 * from the size difference of the mappings; deriving the count this way removes magic
 * numbers littering this code and makes it clear what is going on. */
#define SECTIONS_PER_SUPER_SECTION BIT(ARMSuperSectionBits - ARMSectionBits)
#define PAGES_PER_LARGE_PAGE BIT(ARMLargePageBits - ARMSmallPageBits)
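/* For example, with the usual ARMv7 mapping sizes (16M supersections, 1M
 * sections, 64K large pages and 4K small pages) both of these work out to
 *   SECTIONS_PER_SUPER_SECTION = BIT(24 - 20) = 16
 *   PAGES_PER_LARGE_PAGE       = BIT(16 - 12) = 16
 */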

/* Helper macros to avoid fencepost errors when
 * getting the last byte of a PTE or PDE */
#define LAST_BYTE_PTE(PTE,LENGTH) ((word_t)&(PTE)[(LENGTH)-1] + (BIT(PTE_SIZE_BITS)-1))
#define LAST_BYTE_PDE(PDE,LENGTH) ((word_t)&(PDE)[(LENGTH)-1] + (BIT(PDE_SIZE_BITS)-1))
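/* For instance, when unmapPage() below tears down a large page, the cache
 * clean for the 16 small PTEs is bounded as
 *   cleanCacheRange_PoU((word_t)&ptSlot[0],
 *                       LAST_BYTE_PTE(ptSlot, PAGES_PER_LARGE_PAGE), ...)
 * so the range ends on the last byte of the final entry rather than on the
 * first byte of the entry after it. */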

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
/* Stage 2 */
#define MEMATTR_CACHEABLE    0xf /* Inner and Outer write-back */
#define MEMATTR_NONCACHEABLE 0x0 /* Strongly ordered or device memory */

/* Stage 1 hyp */
#define ATTRINDX_CACHEABLE    0xff /* Inner and Outer RW write-back non-transient */
#define ATTRINDX_NONCACHEABLE 0x0  /* strongly ordered or device memory */
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

struct resolve_ret {
    paddr_t frameBase;
    vm_page_size_t frameSize;
    bool_t valid;
};
typedef struct resolve_ret resolve_ret_t;

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
static bool_t PURE pteCheckIfMapped(pte_t *pte);
static bool_t PURE pdeCheckIfMapped(pde_t *pde);

static word_t CONST
APFromVMRights(vm_rights_t vm_rights)
{
    switch (vm_rights) {
    case VMNoAccess:
        return 0;

    case VMKernelOnly:
        return 1;

    case VMReadOnly:
        return 2;

    case VMReadWrite:
        return 3;

    default:
        fail("Invalid VM rights");
    }
}

#else
/* The AP encoding is slightly different here: AP is only used for kernel mappings, which are fixed after boot time */
BOOT_CODE
static word_t CONST
APFromVMRights(vm_rights_t vm_rights)
{
    switch (vm_rights) {
    case VMKernelOnly:
        return 0;
    case VMReadWrite:
        return 1;
    case VMNoAccess:
        /* RO at PL1 only */
        return 2;
    case VMReadOnly:
        return 3;
    default:
        fail("Invalid VM rights");
    }
}

static word_t CONST
HAPFromVMRights(vm_rights_t vm_rights)
{
    switch (vm_rights) {
    case VMKernelOnly:
    case VMNoAccess:
        return 0;
    case VMReadOnly:
        return 1;
    /*
    case VMWriteOnly:
        return 2;
    */
    case VMReadWrite:
        return 3;
    default:
        fail("Invalid VM rights");
    }
}

#endif

vm_rights_t CONST
maskVMRights(vm_rights_t vm_rights, seL4_CapRights_t cap_rights_mask)
{
    if (vm_rights == VMNoAccess) {
        return VMNoAccess;
    }
    if (vm_rights == VMReadOnly &&
            seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
        return VMReadOnly;
    }
    if (vm_rights == VMReadWrite &&
            seL4_CapRights_get_capAllowRead(cap_rights_mask)) {
        if (!seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
            return VMReadOnly;
        } else {
            return VMReadWrite;
        }
    }
    if (vm_rights == VMReadWrite &&
            !seL4_CapRights_get_capAllowRead(cap_rights_mask) &&
            seL4_CapRights_get_capAllowWrite(cap_rights_mask)) {
        userError("Attempted to make an unsupported write-only mapping");
    }
    return VMKernelOnly;
}
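/* Example outcomes of maskVMRights: masking VMReadWrite with a read-only mask
 * yields VMReadOnly, while masking VMReadOnly with a mask that denies read
 * falls through to VMKernelOnly, as does the unsupported write-only
 * combination reported above. */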

/* ==================== BOOT CODE STARTS HERE ==================== */

BOOT_CODE void
map_kernel_frame(paddr_t paddr, pptr_t vaddr, vm_rights_t vm_rights, vm_attributes_t attributes)
{
    word_t idx = (vaddr & MASK(pageBitsForSize(ARMSection))) >> pageBitsForSize(ARMSmallPage);
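    /* idx is the PTE index within the 1M region covered by the global page
     * table, e.g. a vaddr of 0xFFFF0000 gives (0xF0000 >> 12) == 0xF0 */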

    assert(vaddr >= PPTR_TOP); /* vaddr lies in the region the global PT covers */
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    if (vm_attributes_get_armPageCacheable(attributes)) {
        armKSGlobalPT[idx] =
            pte_pte_small_new(
                paddr,
                0, /* global */
                SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                0, /* APX = 0, privileged full access */
                5, /* TEX = 0b1(Cached)01(Outer Write Allocate) */
                APFromVMRights(vm_rights),
                0, /* C (Inner write allocate) */
                1, /* B (Inner write allocate) */
                0  /* executable */
            );
    } else {
        armKSGlobalPT[idx] =
            pte_pte_small_new(
                paddr,
                0, /* global */
                SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                0, /* APX = 0, privileged full access */
                0, /* TEX = 0 */
                APFromVMRights(vm_rights),
                0, /* Shared device */
                1, /* Shared device */
                0  /* executable */
            );
    }
#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */
    armHSGlobalPT[idx] =
        pteS1_pteS1_small_new(
            0, /* Executable */
            0, /* Executable at PL1 */
            0, /* Not contiguous */
            paddr,
            0, /* global */
            1, /* AF -- always set */
            0, /* Not shared */
            APFromVMRights(vm_rights),
            0, /* non secure */
            vm_attributes_get_armPageCacheable(attributes)
        );
#endif /* !CONFIG_ARM_HYPERVISOR_SUPPORT */
}

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
BOOT_CODE void
map_kernel_window(void)
{
    paddr_t  phys;
    word_t idx;
    pde_t    pde;

    /* mapping of kernelBase (virtual address) to kernel's physBase  */
    /* up to end of virtual address space minus 16M using 16M frames */
    phys = physBase;
    idx = kernelBase >> pageBitsForSize(ARMSection);

    while (idx < BIT(PD_INDEX_BITS) - SECTIONS_PER_SUPER_SECTION) {
        word_t idx2;

        pde = pde_pde_section_new(
                  phys,
                  1, /* SuperSection */
                  0, /* global */
                  SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                  0, /* APX = 0, privileged full access */
                  5, /* TEX = 0b1(Cached)01(Outer Write Allocate) */
                  1, /* VMKernelOnly */
                  1, /* Parity enabled */
                  0, /* Domain 0 */
                  0, /* XN not set */
                  0, /* C (Inner write allocate) */
                  1  /* B (Inner write allocate) */
              );
        for (idx2 = idx; idx2 < idx + SECTIONS_PER_SUPER_SECTION; idx2++) {
            armKSGlobalPD[idx2] = pde;
        }
        phys += BIT(pageBitsForSize(ARMSuperSection));
        idx += SECTIONS_PER_SUPER_SECTION;
    }

    while (idx < (PPTR_TOP >> pageBitsForSize(ARMSection))) {
        pde = pde_pde_section_new(
                  phys,
                  0, /* Section */
                  0, /* global */
                  SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                  0, /* APX = 0, privileged full access */
                  5, /* TEX = 0b1(Cached)01(Outer Write Allocate) */
                  1, /* VMKernelOnly */
                  1, /* Parity enabled */
                  0, /* Domain 0 */
                  0, /* XN not set */
                  0, /* C (Inner write allocate) */
                  1  /* B (Inner write allocate) */
              );
        armKSGlobalPD[idx] = pde;
        phys += BIT(pageBitsForSize(ARMSection));
        idx++;
    }

    /* crosscheck whether we have mapped correctly so far */
    assert(phys == PADDR_TOP);

#ifdef CONFIG_BENCHMARK_USE_KERNEL_LOG_BUFFER
    /* map the log buffer page table; its PTEs will be filled in later by the
     * user via seL4_BenchmarkSetLogBuffer() */
    armKSGlobalPD[idx] =
        pde_pde_coarse_new(
            addrFromPPtr(armKSGlobalLogPT), /* address */
            true,                           /* P       */
            0                               /* Domain  */
        );

    memzero(armKSGlobalLogPT, BIT(seL4_PageTableBits));

    phys += BIT(pageBitsForSize(ARMSection));
    idx++;
#endif /* CONFIG_BENCHMARK_USE_KERNEL_LOG_BUFFER */

    /* map page table covering last 1M of virtual address space to page directory */
    armKSGlobalPD[idx] =
        pde_pde_coarse_new(
            addrFromPPtr(armKSGlobalPT), /* address */
            true,                        /* P       */
            0                            /* Domain  */
        );

    /* now start initialising the page table */
    memzero(armKSGlobalPT, 1 << seL4_PageTableBits);

    /* map vector table */
    map_kernel_frame(
        addrFromPPtr(arm_vector_table),
        PPTR_VECTOR_TABLE,
        VMKernelOnly,
        vm_attributes_new(
            false, /* armExecuteNever */
            true,  /* armParityEnabled */
            true   /* armPageCacheable */
        )
    );

#ifdef CONFIG_IPC_BUF_GLOBALS_FRAME
    /* map globals frame */
    map_kernel_frame(
        addrFromPPtr(armKSGlobalsFrame),
        seL4_GlobalsFrame,
        VMReadOnly,
        vm_attributes_new(
            true,  /* armExecuteNever */
            true,  /* armParityEnabled */
            true   /* armPageCacheable */
        )
    );
#endif /* CONFIG_IPC_BUF_GLOBALS_FRAME */

    map_kernel_devices();
}

#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */

BOOT_CODE void
map_kernel_window(void)
{
    paddr_t    phys;
    uint32_t   idx;
    pdeS1_t pde;
    pte_t UNUSED pteS2;

    /* Initialise PGD */
    for (idx = 0; idx < 3; idx++) {
        pde = pdeS1_pdeS1_invalid_new();
        armHSGlobalPGD[idx] = pde;
    }
    pde = pdeS1_pdeS1_coarse_new(0, 0, 0, 0, addrFromPPtr(armHSGlobalPD));
    armHSGlobalPGD[3] = pde;

    /* Initialise PMD */
    /* Invalidate up until kernelBase */
    for (idx = 0; idx < (kernelBase - 0xC0000000) >> (PT_INDEX_BITS + PAGE_BITS); idx++) {
        pde = pdeS1_pdeS1_invalid_new();
        armHSGlobalPD[idx] = pde;
    }
    /* mapping of kernelBase (virtual address) to kernel's physBase  */
    /* up to end of virtual address space minus 2M using 2M frames */
    phys = physBase;
    for (; idx < BIT(PT_INDEX_BITS) - 1; idx++) {
        pde = pdeS1_pdeS1_section_new(
                  0, /* Executable */
                  0, /* Executable in PL1 */
                  0, /* Not contiguous */
                  phys, /* Address */
                  0, /* global */
                  1, /* AF -- always set to 1 */
                  0, /* Not Shareable */
                  0, /* AP: WR at PL1 only */
                  0, /* Not secure */
                  1  /* outer write-back Cacheable */
              );
        armHSGlobalPD[idx] = pde;
        phys += BIT(PT_INDEX_BITS + PAGE_BITS);
    }
    /* map page table covering last 2M of virtual address space */
    pde = pdeS1_pdeS1_coarse_new(0, 0, 0, 0, addrFromPPtr(armHSGlobalPT));
    armHSGlobalPD[idx] = pde;

    /* now start initialising the page table */
    memzero(armHSGlobalPT, 1 << seL4_PageTableBits);
    for (idx = 0; idx < 256; idx++) {
        pteS1_t pte;
        pte = pteS1_pteS1_small_new(
                  0, /* Executable */
                  0, /* Executable in PL1 */
                  0, /* Not contiguous */
                  phys, /* Address */
                  0, /* global */
                  1, /* AF -- always set to 1 */
                  0, /* Not Shareable */
                  0, /* AP: WR at PL1 only */
                  0, /* Not secure */
                  1  /* outer write-back Cacheable */
              );
        armHSGlobalPT[idx] = pte;
        phys += BIT(PAGE_BITS);
    }
    /* map vector table */
    map_kernel_frame(
        addrFromPPtr(arm_vector_table),
        PPTR_VECTOR_TABLE,
        VMKernelOnly,
        vm_attributes_new(
            false, /* armExecuteNever */
            true,  /* armParityEnabled */
            true   /* armPageCacheable */
        )
    );

#ifdef CONFIG_IPC_BUF_GLOBALS_FRAME
    /* map globals frame */
    map_kernel_frame(
        addrFromPPtr(armKSGlobalsFrame),
        seL4_GlobalsFrame,
        VMReadOnly,
        vm_attributes_new(
            false, /* armExecuteNever */
            true,  /* armParityEnabled */
            true   /* armPageCacheable */
        )
    );
    /* map globals into user global PT */
    pteS2 = pte_pte_small_new(
                1, /* Not executable */
                0, /* Not contiguous */
                addrFromPPtr(armKSGlobalsFrame),
                1, /* AF -- always set */
                0, /* Not shared */
                HAPFromVMRights(VMReadOnly),
                MEMATTR_CACHEABLE  /* Cacheable */
            );
    memzero(armUSGlobalPT, 1 << seL4_PageTableBits);
    idx = (seL4_GlobalsFrame >> PAGE_BITS) & (MASK(PT_INDEX_BITS));
    armUSGlobalPT[idx] = pteS2;
#endif /* CONFIG_IPC_BUF_GLOBALS_FRAME */

    map_kernel_devices();
}

#endif /* !CONFIG_ARM_HYPERVISOR_SUPPORT */

static BOOT_CODE void
map_it_frame_cap(cap_t pd_cap, cap_t frame_cap, bool_t executable)
{
    pte_t* pt;
    pte_t* targetSlot;
    pde_t* pd    = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pd_cap));
    void*  frame = (void*)generic_frame_cap_get_capFBasePtr(frame_cap);
    vptr_t vptr  = generic_frame_cap_get_capFMappedAddress(frame_cap);

    assert(generic_frame_cap_get_capFMappedASID(frame_cap) != 0);

    pd += (vptr >> pageBitsForSize(ARMSection));
    pt = ptrFromPAddr(pde_pde_coarse_ptr_get_address(pd));
    targetSlot = pt + ((vptr & MASK(pageBitsForSize(ARMSection)))
                       >> pageBitsForSize(ARMSmallPage));
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    *targetSlot = pte_pte_small_new(
                      addrFromPPtr(frame),
                      1, /* not global */
                      SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                      0, /* APX = 0, privileged full access */
                      5, /* TEX = 0b1(Cached)01(Outer Write Allocate) */
                      APFromVMRights(VMReadWrite),
                      0, /* C (Inner write allocate) */
                      1, /* B (Inner write allocate) */
                      !executable
                  );
#else
    *targetSlot = pte_pte_small_new(
                      0, /* Executable */
                      0, /* Not contiguous */
                      addrFromPPtr(frame),
                      1, /* AF -- always set */
                      0, /* Not shared */
                      HAPFromVMRights(VMReadWrite),
                      MEMATTR_CACHEABLE  /* Cacheable */
                  );
#endif
}

/* Create a frame cap for the initial thread. */

static BOOT_CODE cap_t
create_it_frame_cap(pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large)
{
    if (use_large)
        return
            cap_frame_cap_new(
                ARMSection,                    /* capFSize           */
                ASID_LOW(asid),                /* capFMappedASIDLow  */
                wordFromVMRights(VMReadWrite), /* capFVMRights       */
                vptr,                          /* capFMappedAddress  */
                false,                         /* capFIsDevice       */
                ASID_HIGH(asid),               /* capFMappedASIDHigh */
                pptr                           /* capFBasePtr        */
            );
    else
        return
            cap_small_frame_cap_new(
                ASID_LOW(asid),                /* capFMappedASIDLow  */
                wordFromVMRights(VMReadWrite), /* capFVMRights       */
                vptr,                          /* capFMappedAddress  */
                false,                         /* capFIsDevice       */
#ifdef CONFIG_ARM_SMMU
                0,                             /* IOSpace            */
#endif
                ASID_HIGH(asid),               /* capFMappedASIDHigh */
                pptr                           /* capFBasePtr        */
            );
}

static BOOT_CODE void
map_it_pt_cap(cap_t pd_cap, cap_t pt_cap)
{
    pde_t* pd   = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pd_cap));
    pte_t* pt   = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(pt_cap));
    vptr_t vptr = cap_page_table_cap_get_capPTMappedAddress(pt_cap);
    pde_t* targetSlot = pd + (vptr >> pageBitsForSize(ARMSection));

    assert(cap_page_table_cap_get_capPTIsMapped(pt_cap));

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    *targetSlot = pde_pde_coarse_new(
                      addrFromPPtr(pt), /* address */
                      true,             /* P       */
                      0                 /* Domain  */
                  );
#else
    *targetSlot = pde_pde_coarse_new(addrFromPPtr(pt));
#endif
}

/* Create a page table for the initial thread */

static BOOT_CODE cap_t
create_it_page_table_cap(cap_t pd, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_table_cap_new(
              1,    /* capPTIsMapped      */
              asid, /* capPTMappedASID    */
              vptr, /* capPTMappedAddress */
              pptr  /* capPTBasePtr       */
          );
    if (asid != asidInvalid) {
        map_it_pt_cap(pd, cap);
    }
    return cap;
}

/* Create an address space for the initial thread.
 * This includes page directory and page tables */
BOOT_CODE cap_t
create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t      pd_cap;
    vptr_t     pt_vptr;
    pptr_t     pt_pptr;
    seL4_SlotPos slot_pos_before;
    seL4_SlotPos slot_pos_after;
    pptr_t pd_pptr;

    /* create PD obj and cap */
    pd_pptr = alloc_region(seL4_PageDirBits);
    if (!pd_pptr) {
        return cap_null_cap_new();
    }
    memzero(PDE_PTR(pd_pptr), 1 << seL4_PageDirBits);
    copyGlobalMappings(PDE_PTR(pd_pptr));
    cleanCacheRange_PoU(pd_pptr, pd_pptr + (1 << seL4_PageDirBits) - 1,
                        addrFromPPtr((void *)pd_pptr));
    pd_cap =
        cap_page_directory_cap_new(
            true,    /* capPDIsMapped   */
            IT_ASID, /* capPDMappedASID */
            pd_pptr  /* capPDBasePtr    */
        );
    slot_pos_before = ndks_boot.slot_pos_cur;
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapInitThreadVSpace), pd_cap);

    /* create all PT objs and caps necessary to cover userland image */

    for (pt_vptr = ROUND_DOWN(it_v_reg.start, PT_INDEX_BITS + PAGE_BITS);
            pt_vptr < it_v_reg.end;
            pt_vptr += BIT(PT_INDEX_BITS + PAGE_BITS)) {
        pt_pptr = alloc_region(seL4_PageTableBits);
        if (!pt_pptr) {
            return cap_null_cap_new();
        }
        memzero(PTE_PTR(pt_pptr), 1 << seL4_PageTableBits);
        if (!provide_cap(root_cnode_cap,
                         create_it_page_table_cap(pd_cap, pt_pptr, pt_vptr, IT_ASID))
           ) {
            return cap_null_cap_new();
        }
    }

    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->userImagePaging = (seL4_SlotRegion) {
        slot_pos_before, slot_pos_after
    };

    return pd_cap;
}

BOOT_CODE cap_t
create_unmapped_it_frame_cap(pptr_t pptr, bool_t use_large)
{
    return create_it_frame_cap(pptr, 0, asidInvalid, use_large);
}

BOOT_CODE cap_t
create_mapped_it_frame_cap(cap_t pd_cap, pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large, bool_t executable)
{
    cap_t cap = create_it_frame_cap(pptr, vptr, asid, use_large);
    map_it_frame_cap(pd_cap, cap, executable);
    return cap;
}

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT

BOOT_CODE void
activate_global_pd(void)
{
    /* Ensure that there's nothing stale in newly-mapped regions, and
       that everything we've written (particularly the kernel page tables)
       is committed. */
    cleanInvalidateL1Caches();
    setCurrentPD(addrFromPPtr(armKSGlobalPD));
    invalidateLocalTLB();
    lockTLBEntry(kernelBase);
    lockTLBEntry(PPTR_VECTOR_TABLE);
}

#else

BOOT_CODE void
activate_global_pd(void)
{
    uint32_t r;
    /* Ensure that there's nothing stale in newly-mapped regions, and
       that everything we've written (particularly the kernel page tables)
       is committed. */
    cleanInvalidateL1Caches();
    /* Set up the memory attributes: we use 2 indices (cacheable/non-cacheable) */
    setHMAIR((ATTRINDX_NONCACHEABLE << 0) | (ATTRINDX_CACHEABLE << 8), 0);
    setCurrentHypPD(addrFromPPtr(armHSGlobalPGD));
    invalidateHypTLB();
#if 0 /* Can't lock entries on A15 */
    lockTLBEntry(kernelBase);
    lockTLBEntry(PPTR_VECTOR_TABLE);
#endif
    /* TODO find a better place to init the VMMU */
    r = 0;
    /* Translation range */
    r |= (0x0 << 0);     /* 2^(32 -(0)) input range. */
    r |= (r & 0x8) << 1; /* Sign bit */
    /* starting level */
    r |= (0x0 << 6);     /* Start at second level */
    /* Shareability of tables */
    r |= BIT(8);       /* Inner write-back, write-allocate */
    r |= BIT(10);      /* Outer write-back, write-allocate */
    /* Long descriptor format (not that we have a choice) */
    r |= BIT(31);
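    /* With the bits chosen above (and every other field left zero) r works
     * out to 0x80000500: long-descriptor format, write-back write-allocate
     * table walks, and the full 2^32-byte input range. */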
    setVTCR(r);
}

#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

BOOT_CODE void
write_it_asid_pool(cap_t it_ap_cap, cap_t it_pd_cap)
{
    asid_pool_t* ap = ASID_POOL_PTR(pptr_of_cap(it_ap_cap));
    ap->array[IT_ASID] = PDE_PTR(pptr_of_cap(it_pd_cap));
    armKSASIDTable[IT_ASID >> asidLowBits] = ap;
}

/* ==================== BOOT CODE FINISHES HERE ==================== */

findPDForASID_ret_t
findPDForASID(asid_t asid)
{
    findPDForASID_ret_t ret;
    asid_pool_t *poolPtr;
    pde_t *pd;

    poolPtr = armKSASIDTable[asid >> asidLowBits];
    if (unlikely(!poolPtr)) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.pd = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    pd = poolPtr->array[asid & MASK(asidLowBits)];
    if (unlikely(!pd)) {
        current_lookup_fault = lookup_fault_invalid_root_new();

        ret.pd = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    }

    ret.pd = pd;
    ret.status = EXCEPTION_NONE;
    return ret;
}

word_t * PURE
lookupIPCBuffer(bool_t isReceiver, tcb_t *thread)
{
    word_t w_bufferPtr;
    cap_t bufferCap;
    vm_rights_t vm_rights;

    w_bufferPtr = thread->tcbIPCBuffer;
    bufferCap = TCB_PTR_CTE_PTR(thread, tcbBuffer)->cap;

    if (unlikely(cap_get_capType(bufferCap) != cap_small_frame_cap &&
                 cap_get_capType(bufferCap) != cap_frame_cap)) {
        return NULL;
    }
    if (unlikely(generic_frame_cap_get_capFIsDevice(bufferCap))) {
        return NULL;
    }

    vm_rights = generic_frame_cap_get_capFVMRights(bufferCap);
    if (likely(vm_rights == VMReadWrite ||
               (!isReceiver && vm_rights == VMReadOnly))) {
        word_t basePtr;
        unsigned int pageBits;

        basePtr = generic_frame_cap_get_capFBasePtr(bufferCap);
        pageBits = pageBitsForSize(generic_frame_cap_get_capFSize(bufferCap));
        return (word_t *)(basePtr + (w_bufferPtr & MASK(pageBits)));
    } else {
        return NULL;
    }
}

exception_t
checkValidIPCBuffer(vptr_t vptr, cap_t cap)
{
    if (unlikely(cap_get_capType(cap) != cap_small_frame_cap &&
                 cap_get_capType(cap) != cap_frame_cap)) {
        userError("Requested IPC Buffer is not a frame cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (unlikely(generic_frame_cap_get_capFIsDevice(cap))) {
        userError("Specifying a device frame as an IPC buffer is not permitted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(vptr & MASK(seL4_IPCBufferSizeBits))) {
        userError("Requested IPC Buffer location 0x%x is not aligned.",
                  (int)vptr);
        current_syscall_error.type = seL4_AlignmentError;
        return EXCEPTION_SYSCALL_ERROR;
    }

    return EXCEPTION_NONE;
}

pde_t * CONST
lookupPDSlot(pde_t *pd, vptr_t vptr)
{
    unsigned int pdIndex;

    pdIndex = vptr >> (PAGE_BITS + PT_INDEX_BITS);
    return pd + pdIndex;
}
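/* With 4K pages and 256-entry page tables (PAGE_BITS == 12 and
 * PT_INDEX_BITS == 8 on this configuration), bits [31:20] of a vptr select
 * the PD slot and bits [19:12] select the PT slot; e.g. vptr 0x00123456
 * resolves to PD index 0x001 and PT index 0x23. */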

lookupPTSlot_ret_t
lookupPTSlot(pde_t *pd, vptr_t vptr)
{
    lookupPTSlot_ret_t ret;
    pde_t *pdSlot;

    pdSlot = lookupPDSlot(pd, vptr);

    if (unlikely(pde_ptr_get_pdeType(pdSlot) != pde_pde_coarse)) {
        current_lookup_fault = lookup_fault_missing_capability_new(PT_INDEX_BITS + PAGE_BITS);

        ret.ptSlot = NULL;
        ret.status = EXCEPTION_LOOKUP_FAULT;
        return ret;
    } else {
        pte_t *pt, *ptSlot;
        unsigned int ptIndex;

        pt = ptrFromPAddr(pde_pde_coarse_ptr_get_address(pdSlot));
        ptIndex = (vptr >> PAGE_BITS) & MASK(PT_INDEX_BITS);
        ptSlot = pt + ptIndex;

        ret.ptSlot = ptSlot;
        ret.status = EXCEPTION_NONE;
        return ret;
    }
}

static pte_t *
lookupPTSlot_nofail(pte_t *pt, vptr_t vptr)
{
    unsigned int ptIndex;

    ptIndex = (vptr >> PAGE_BITS) & MASK(PT_INDEX_BITS);
    return pt + ptIndex;
}

static const resolve_ret_t default_resolve_ret_t;

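/* Walk the page directory for vaddr and, if a frame is mapped there, report
 * its physical base and size; ret.valid is false if the walk hits anything
 * other than a section/supersection or a small/large page. */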
static resolve_ret_t
resolveVAddr(pde_t *pd, vptr_t vaddr)
{
    pde_t *pde = lookupPDSlot(pd, vaddr);
    resolve_ret_t ret = default_resolve_ret_t;

    ret.valid = true;

    switch (pde_ptr_get_pdeType(pde)) {
    case pde_pde_section:
        ret.frameBase = pde_pde_section_ptr_get_address(pde);
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (pde_pde_section_ptr_get_size(pde)) {
            ret.frameSize = ARMSuperSection;
        } else {
            ret.frameSize = ARMSection;
        }
#else
        if (pde_pde_section_ptr_get_contiguous_hint(pde)) {
            /* Entries are represented as 16 contiguous sections. We need to mask
               to get the super section frame base */
            ret.frameBase &= ~MASK(pageBitsForSize(ARMSuperSection));
            ret.frameSize = ARMSuperSection;
        } else {
            ret.frameSize = ARMSection;
        }
#endif
        return ret;

    case pde_pde_coarse: {
        pte_t *pt = ptrFromPAddr(pde_pde_coarse_ptr_get_address(pde));
        pte_t *pte = lookupPTSlot_nofail(pt, vaddr);
        switch (pte_ptr_get_pteType(pte)) {
        case pte_pte_small:
            ret.frameBase = pte_pte_small_ptr_get_address(pte);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
            if (pte_pte_small_ptr_get_contiguous_hint(pte)) {
                /* Entries are represented as 16 contiguous small frames. We need to mask
                   to get the large frame base */
                ret.frameBase &= ~MASK(pageBitsForSize(ARMLargePage));
                ret.frameSize = ARMLargePage;
            } else {
                ret.frameSize = ARMSmallPage;
            }
#else
            ret.frameSize = ARMSmallPage;
#endif
            return ret;

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        case pte_pte_large:
            ret.frameBase = pte_pte_large_ptr_get_address(pte);
            ret.frameSize = ARMLargePage;
            return ret;
#endif
        default:
            break;
        }
        break;
    }
    }

    ret.valid = false;
    return ret;
}

static pte_t CONST
makeUserPTE(vm_page_size_t page_size, paddr_t paddr,
            bool_t cacheable, bool_t nonexecutable, vm_rights_t vm_rights)
{
    pte_t pte;
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t ap;

    ap = APFromVMRights(vm_rights);

    switch (page_size) {
    case ARMSmallPage: {
        if (cacheable) {
            pte = pte_pte_small_new(paddr,
                                    1, /* not global */
                                    SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                                    0, /* APX = 0, privileged full access */
                                    5, /* TEX = 0b101, outer write-back, write-allocate */
                                    ap,
                                    0, 1, /* Inner write-back, write-allocate (except on ARM11) */
                                    nonexecutable);
        } else {
            pte = pte_pte_small_new(paddr,
                                    1, /* not global */
                                    1, /* shared */
                                    0, /* APX = 0, privileged full access */
                                    0, /* TEX = 0b000, strongly-ordered. */
                                    ap,
                                    0, 0,
                                    nonexecutable);
        }
        break;
    }

    case ARMLargePage: {
        if (cacheable) {
            pte = pte_pte_large_new(paddr,
                                    nonexecutable,
                                    5, /* TEX = 0b101, outer write-back, write-allocate */
                                    1, /* not global */
                                    SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                                    0, /* APX = 0, privileged full access */
                                    ap,
                                    0, 1, /* Inner write-back, write-allocate (except on ARM11) */
                                    1 /* reserved */);
        } else {
            pte = pte_pte_large_new(paddr,
                                    nonexecutable,
                                    0, /* TEX = 0b000, strongly-ordered */
                                    1, /* not global */
                                    1, /* shared */
                                    0, /* APX = 0, privileged full access */
                                    ap,
                                    0, 0,
                                    1 /* reserved */);
        }
        break;
    }

    default:
        fail("Invalid PTE frame type");
    }

#else

    word_t hap;

    hap = HAPFromVMRights(vm_rights);

    switch (page_size) {
    case ARMSmallPage: {
        if (cacheable) {
            pte = pte_pte_small_new(
                      nonexecutable, /* XN */
                      0,      /* Not contiguous */
                      paddr,
                      1,      /* AF - Always set */
                      0,      /* not shared */
                      hap,    /* HAP - access */
                      MEMATTR_CACHEABLE /* Cacheable */);
        } else {
            pte = pte_pte_small_new(
                      nonexecutable, /* XN */
                      0,      /* Not contiguous */
                      paddr,
                      1,      /* AF - Always set */
                      0,      /* not shared */
                      hap,    /* HAP - access */
                      MEMATTR_NONCACHEABLE /* Not cacheable */);
        }
        break;
    }

    case ARMLargePage: {
        if (cacheable) {
            pte = pte_pte_small_new(
                      nonexecutable,   /* XN */
                      1,   /* 16 contiguous */
                      paddr,
                      1,   /* AF - Always set */
                      0,   /* not shared */
                      hap, /* HAP - access */
                      MEMATTR_CACHEABLE  /* Cacheable */);
        } else {
            pte = pte_pte_small_new(
                      nonexecutable,   /* XN */
                      1,   /* 16 contiguous */
                      paddr,
                      1,   /* AF - Always set */
                      0,   /* not shared */
                      hap, /* HAP - access */
                      MEMATTR_NONCACHEABLE /* Not cacheable */);
        }
        break;
    }
    default:
        fail("Invalid PTE frame type");
    }
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

    return pte;
}

static pde_t CONST
makeUserPDE(vm_page_size_t page_size, paddr_t paddr, bool_t parity,
            bool_t cacheable, bool_t nonexecutable, word_t domain,
            vm_rights_t vm_rights)
{
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t ap, size2;

    ap = APFromVMRights(vm_rights);
#else
    word_t hap, size2;

    (void)domain;
    hap = HAPFromVMRights(vm_rights);
#endif

    switch (page_size) {
    case ARMSection:
        size2 = 0;
        break;

    case ARMSuperSection:
        size2 = 1;
        break;

    default:
        fail("Invalid PDE frame type");
    }

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    if (cacheable) {
        return pde_pde_section_new(paddr, size2,
                                   1, /* not global */
                                   SMP_TERNARY(1, 0), /* shareable if SMP enabled, otherwise unshared */
                                   0, /* APX = 0, privileged full access */
                                   5, /* TEX = 0b101, outer write-back, write-allocate */
                                   ap, parity, domain, nonexecutable,
                                   0, 1 /* Inner write-back, write-allocate (except on ARM11) */);
    } else {
        return pde_pde_section_new(paddr, size2,
                                   1, /* not global */
                                   1, /* shared */
                                   0, /* APX = 0, privileged full access */
                                   0, /* TEX = 0b000, strongly-ordered */
                                   ap, parity, domain, nonexecutable,
                                   0, 0);
    }
#else
    if (cacheable) {
        return pde_pde_section_new(
                   nonexecutable, /* XN */
                   size2, /* contiguous */
                   paddr,
                   1, /* AF - Always set */
                   0, /* not shared */
                   hap,
                   MEMATTR_CACHEABLE /* Cacheable */);
    } else {
        return pde_pde_section_new(
                   nonexecutable, /* XN */
                   size2, /* contiguous */
                   paddr,
                   1, /* AF - Always set */
                   0, /* not shared */
                   hap,
                   MEMATTR_NONCACHEABLE /* Not cacheable */);
    }
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
}

bool_t CONST
isValidVTableRoot(cap_t cap)
{
    return cap_get_capType(cap) == cap_page_directory_cap &&
           cap_page_directory_cap_get_capPDIsMapped(cap);
}

bool_t CONST
isIOSpaceFrameCap(cap_t cap)
{
#ifdef CONFIG_ARM_SMMU
    return cap_get_capType(cap) == cap_small_frame_cap && cap_small_frame_cap_get_capFIsIOSpace(cap);
#else
    return false;
#endif
}

void
setVMRoot(tcb_t *tcb)
{
    cap_t threadRoot;
    asid_t asid;
    pde_t *pd;
    findPDForASID_ret_t find_ret;

    threadRoot = TCB_PTR_CTE_PTR(tcb, tcbVTable)->cap;

    if (cap_get_capType(threadRoot) != cap_page_directory_cap ||
            !cap_page_directory_cap_get_capPDIsMapped(threadRoot)) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        setCurrentPD(addrFromPPtr(armUSGlobalPD));
#else
        setCurrentPD(addrFromPPtr(armKSGlobalPD));
#endif
        return;
    }

    pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(threadRoot));
    asid = cap_page_directory_cap_get_capPDMappedASID(threadRoot);
    find_ret = findPDForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE || find_ret.pd != pd)) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        setCurrentPD(addrFromPPtr(armUSGlobalPD));
#else
        setCurrentPD(addrFromPPtr(armKSGlobalPD));
#endif
        return;
    }

    armv_contextSwitch(pd, asid);
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        vcpu_switch(tcb->tcbArch.tcbVCPU);
    }
}

static bool_t
setVMRootForFlush(pde_t* pd, asid_t asid)
{
    cap_t threadRoot;

    threadRoot = TCB_PTR_CTE_PTR(NODE_STATE(ksCurThread), tcbVTable)->cap;

    if (cap_get_capType(threadRoot) == cap_page_directory_cap &&
            cap_page_directory_cap_get_capPDIsMapped(threadRoot) &&
            PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(threadRoot)) == pd) {
        return false;
    }

    armv_contextSwitch(pd, asid);

    return true;
}

pde_t *
pageTableMapped(asid_t asid, vptr_t vaddr, pte_t* pt)
{
    findPDForASID_ret_t find_ret;
    pde_t pde;
    unsigned int pdIndex;

    find_ret = findPDForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        return NULL;
    }

    pdIndex = vaddr >> (PAGE_BITS + PT_INDEX_BITS);
    pde = find_ret.pd[pdIndex];

    if (likely(pde_get_pdeType(pde) == pde_pde_coarse
               && ptrFromPAddr(pde_pde_coarse_get_address(pde)) == pt)) {
        return find_ret.pd;
    } else {
        return NULL;
    }
}

static void
invalidateASID(asid_t asid)
{
    asid_pool_t *asidPool;
    pde_t *pd;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    pd = asidPool->array[asid & MASK(asidLowBits)];
    assert(pd);

    pd[PD_ASID_SLOT] = pde_pde_invalid_new(0, false);
}

static pde_t PURE
loadHWASID(asid_t asid)
{
    asid_pool_t *asidPool;
    pde_t *pd;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    pd = asidPool->array[asid & MASK(asidLowBits)];
    assert(pd);

    return pd[PD_ASID_SLOT];
}

static void
storeHWASID(asid_t asid, hw_asid_t hw_asid)
{
    asid_pool_t *asidPool;
    pde_t *pd;

    asidPool = armKSASIDTable[asid >> asidLowBits];
    assert(asidPool);

    pd = asidPool->array[asid & MASK(asidLowBits)];
    assert(pd);

    /* Store the HW ASID in the last entry,
       masquerading as an invalid PDE */
    pd[PD_ASID_SLOT] = pde_pde_invalid_new(hw_asid, true);

    armKSHWASIDTable[hw_asid] = asid;
}

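/* Pick a hardware ASID to back the given software ASID. First scan forward
 * from armKSNextASID for a free entry; if every hardware ASID is already
 * assigned, evict the owner of armKSNextASID instead: invalidate its stored
 * HW ASID and its TLB entries, then hand the slot out (round-robin
 * replacement). */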
hw_asid_t
findFreeHWASID(void)
{
    word_t hw_asid_offset;
    hw_asid_t hw_asid;

    /* Find a free hardware ASID */
    for (hw_asid_offset = 0;
            hw_asid_offset <= (word_t)((hw_asid_t) - 1);
            hw_asid_offset++) {
        hw_asid = armKSNextASID + ((hw_asid_t)hw_asid_offset);
        if (armKSHWASIDTable[hw_asid] == asidInvalid) {
            return hw_asid;
        }
    }

    hw_asid = armKSNextASID;

    /* If we've scanned the table without finding a free ASID, steal the one
     * currently assigned at armKSNextASID */
    invalidateASID(armKSHWASIDTable[hw_asid]);

    /* Flush TLB */
    invalidateTranslationASID(hw_asid);
    armKSHWASIDTable[hw_asid] = asidInvalid;

    /* Increment the NextASID index */
    armKSNextASID++;

    return hw_asid;
}

hw_asid_t
getHWASID(asid_t asid)
{
    pde_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);
    if (pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        return pde_pde_invalid_get_stored_hw_asid(stored_hw_asid);
    } else {
        hw_asid_t new_hw_asid;

        new_hw_asid = findFreeHWASID();
        storeHWASID(asid, new_hw_asid);
        return new_hw_asid;
    }
}

static void
invalidateASIDEntry(asid_t asid)
{
    pde_t stored_hw_asid;

    stored_hw_asid = loadHWASID(asid);
    if (pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
        armKSHWASIDTable[pde_pde_invalid_get_stored_hw_asid(stored_hw_asid)] =
            asidInvalid;
    }
    invalidateASID(asid);
}

void
unmapPageTable(asid_t asid, vptr_t vaddr, pte_t* pt)
{
    pde_t *pd, *pdSlot;
    unsigned int pdIndex;

    pd = pageTableMapped(asid, vaddr, pt);

    if (likely(pd != NULL)) {
        pdIndex = vaddr >> (PT_INDEX_BITS + PAGE_BITS);
        pdSlot = pd + pdIndex;

        *pdSlot = pde_pde_invalid_new(0, 0);
        cleanByVA_PoU((word_t)pdSlot, addrFromPPtr(pdSlot));
        flushTable(pd, asid, vaddr, pt);
    }
}

void
copyGlobalMappings(pde_t *newPD)
{
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t i;
    pde_t *global_pd = armKSGlobalPD;

    for (i = kernelBase >> ARMSectionBits; i < BIT(PD_INDEX_BITS); i++) {
        if (i != PD_ASID_SLOT) {
            newPD[i] = global_pd[i];
        }
    }
#else
#ifdef CONFIG_IPC_BUF_GLOBALS_FRAME
    /* The kernel and user MMUs are completely independent; however, we still
     * need to share the globals page. */
    pde_t pde;
    pde = pde_pde_coarse_new(addrFromPPtr(armUSGlobalPT));
    newPD[BIT(PD_INDEX_BITS) - 1] = pde;
#endif /* CONFIG_IPC_BUF_GLOBALS_FRAME */
#endif
}

exception_t
handleVMFault(tcb_t *thread, vm_fault_type_t vm_faultType)
{
    switch (vm_faultType) {
    case ARMDataAbort: {
        word_t addr, fault;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        addr = getHDFAR();
        addr = (addressTranslateS1CPR(addr) & ~MASK(PAGE_BITS)) | (addr & MASK(PAGE_BITS));
        /* MSBs tell us that this was a DataAbort */
        fault = getHSR() & 0x3ffffff;
#else
        addr = getFAR();
        fault = getDFSR();
#endif

#ifdef CONFIG_HARDWARE_DEBUG_API
        /* Debug exceptions come in on the Prefetch and Data abort vectors.
         * We have to test the fault-status bits in the IFSR/DFSR to determine
         * if it's a debug exception when one occurs.
         *
         * If it is a debug exception, return early and don't fall through to the
         * normal VM Fault handling path.
         */
        if (isDebugFault(fault)) {
            current_fault = handleUserLevelDebugException(0);
            return EXCEPTION_FAULT;
        }
#endif
        current_fault = seL4_Fault_VMFault_new(addr, fault, false);
        return EXCEPTION_FAULT;
    }

    case ARMPrefetchAbort: {
        word_t pc, fault;

        pc = getRestartPC(thread);

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        pc = (addressTranslateS1CPR(pc) & ~MASK(PAGE_BITS)) | (pc & MASK(PAGE_BITS));
        /* MSBs tell us that this was a PrefetchAbort */
        fault = getHSR() & 0x3ffffff;
#else
        fault = getIFSR();
#endif

#ifdef CONFIG_HARDWARE_DEBUG_API
        if (isDebugFault(fault)) {
            current_fault = handleUserLevelDebugException(pc);

            if (seL4_Fault_DebugException_get_exceptionReason(current_fault) == seL4_SingleStep
                    && !singleStepFaultCounterReady(thread)) {
                /* Don't send a fault message to the thread yet if we were asked
                 * to step through N instructions and the counter isn't depleted
                 * yet.
                 */
                return EXCEPTION_NONE;
            }
            return EXCEPTION_FAULT;
        }
#endif
        current_fault = seL4_Fault_VMFault_new(pc, fault, true);
        return EXCEPTION_FAULT;
    }

    default:
        fail("Invalid VM fault type");
    }
}

void
deleteASIDPool(asid_t asid_base, asid_pool_t* pool)
{
    unsigned int offset;

    /* Haskell error: "ASID pool's base must be aligned" */
    assert((asid_base & MASK(asidLowBits)) == 0);

    if (armKSASIDTable[asid_base >> asidLowBits] == pool) {
        for (offset = 0; offset < BIT(asidLowBits); offset++) {
            if (pool->array[offset]) {
                flushSpace(asid_base + offset);
                invalidateASIDEntry(asid_base + offset);
            }
        }
        armKSASIDTable[asid_base >> asidLowBits] = NULL;
        setVMRoot(NODE_STATE(ksCurThread));
    }
}

void
deleteASID(asid_t asid, pde_t* pd)
{
    asid_pool_t *poolPtr;

    poolPtr = armKSASIDTable[asid >> asidLowBits];

    if (poolPtr != NULL && poolPtr->array[asid & MASK(asidLowBits)] == pd) {
        flushSpace(asid);
        invalidateASIDEntry(asid);
        poolPtr->array[asid & MASK(asidLowBits)] = NULL;
        setVMRoot(NODE_STATE(ksCurThread));
    }
}

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
static pte_t pte_pte_invalid_new(void)
{
    /* Invalid as every PTE must have bit 0 set (large PTE) or bit 1 set (small
     * PTE). 0 == 'translation fault' in ARM parlance.
     */
    return (pte_t) {
        {
            0
        }
    };
}
#endif

void
unmapPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, void *pptr)
{
    findPDForASID_ret_t find_ret;
    paddr_t addr = addrFromPPtr(pptr);

    find_ret = findPDForASID(asid);
    if (unlikely(find_ret.status != EXCEPTION_NONE)) {
        return;
    }

    switch (page_size) {
    case ARMSmallPage: {
        lookupPTSlot_ret_t lu_ret;

        lu_ret = lookupPTSlot(find_ret.pd, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }

        if (unlikely(pte_ptr_get_pteType(lu_ret.ptSlot) != pte_pte_small)) {
            return;
        }
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pte_pte_small_ptr_get_contiguous_hint(lu_ret.ptSlot) != 0)) {
            return;
        }
#endif
        if (unlikely(pte_pte_small_ptr_get_address(lu_ret.ptSlot) != addr)) {
            return;
        }

        *(lu_ret.ptSlot) = pte_pte_invalid_new();
        cleanByVA_PoU((word_t)lu_ret.ptSlot, addrFromPPtr(lu_ret.ptSlot));

        break;
    }

    case ARMLargePage: {
        lookupPTSlot_ret_t lu_ret;
        word_t i;

        lu_ret = lookupPTSlot(find_ret.pd, vptr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            return;
        }
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pte_ptr_get_pteType(lu_ret.ptSlot) != pte_pte_large)) {
            return;
        }
        if (unlikely(pte_pte_large_ptr_get_address(lu_ret.ptSlot) != addr)) {
            return;
        }
#else
        if (unlikely(pte_ptr_get_pteType(lu_ret.ptSlot) != pte_pte_small)) {
            return;
        }
        if (unlikely(pte_pte_small_ptr_get_contiguous_hint(lu_ret.ptSlot) != 1)) {
            return;
        }
        if (unlikely(pte_pte_small_ptr_get_address(lu_ret.ptSlot) != addr)) {
            return;
        }
#endif

        for (i = 0; i < PAGES_PER_LARGE_PAGE; i++) {
            lu_ret.ptSlot[i] = pte_pte_invalid_new();
        }
        cleanCacheRange_PoU((word_t)&lu_ret.ptSlot[0],
                            LAST_BYTE_PTE(lu_ret.ptSlot, PAGES_PER_LARGE_PAGE),
                            addrFromPPtr(&lu_ret.ptSlot[0]));

        break;
    }

    case ARMSection: {
        pde_t *pd;

        pd = lookupPDSlot(find_ret.pd, vptr);

        if (unlikely(pde_ptr_get_pdeType(pd) != pde_pde_section)) {
            return;
        }
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pde_pde_section_ptr_get_size(pd) != 0)) {
#else
        if (unlikely(pde_pde_section_ptr_get_contiguous_hint(pd) != 0)) {
#endif
            return;
        }
        if (unlikely(pde_pde_section_ptr_get_address(pd) != addr)) {
            return;
        }

        *pd = pde_pde_invalid_new(0, 0);
        cleanByVA_PoU((word_t)pd, addrFromPPtr(pd));

        break;
    }

    case ARMSuperSection: {
        pde_t *pd;
        word_t i;

        pd = lookupPDSlot(find_ret.pd, vptr);

        if (unlikely(pde_ptr_get_pdeType(pd) != pde_pde_section)) {
            return;
        }
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pde_pde_section_ptr_get_size(pd) != 1)) {
#else
        if (unlikely(pde_pde_section_ptr_get_contiguous_hint(pd) != 1)) {
#endif
            return;
        }
        if (unlikely(pde_pde_section_ptr_get_address(pd) != addr)) {
            return;
        }

        for (i = 0; i < SECTIONS_PER_SUPER_SECTION; i++) {
            pd[i] = pde_pde_invalid_new(0, 0);
        }
        cleanCacheRange_PoU((word_t)&pd[0], LAST_BYTE_PDE(pd, SECTIONS_PER_SUPER_SECTION),
                            addrFromPPtr(&pd[0]));

        break;
    }

    default:
        fail("Invalid ARM page type");
        break;
    }

    /* Flush the page now that the mapping has been updated */
    flushPage(page_size, find_ret.pd, asid, vptr);
}
1577
1578void
1579flushPage(vm_page_size_t page_size, pde_t* pd, asid_t asid, word_t vptr)
1580{
1581    pde_t stored_hw_asid;
1582    word_t base_addr;
1583    bool_t root_switched;
1584
1585    assert((vptr & MASK(pageBitsForSize(page_size))) == 0);
1586
1587    /* Switch to the address space to allow a cache clean by VA */
1588    root_switched = setVMRootForFlush(pd, asid);
1589    stored_hw_asid = loadHWASID(asid);
1590
1591    if (pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
1592        base_addr = vptr & ~MASK(12);
1593
1594        /* Do the TLB flush */
1595        invalidateTranslationSingle(base_addr | pde_pde_invalid_get_stored_hw_asid(stored_hw_asid));
1596
1597        if (root_switched) {
1598            setVMRoot(NODE_STATE(ksCurThread));
1599        }
1600    }
1601}
1602
1603void
1604flushTable(pde_t* pd, asid_t asid, word_t vptr, pte_t* pt)
1605{
1606    pde_t stored_hw_asid;
1607    bool_t root_switched;
1608
1609    assert((vptr & MASK(PT_INDEX_BITS + ARMSmallPageBits)) == 0);
1610
1611    /* Switch to the address space to allow a cache clean by VA */
1612    root_switched = setVMRootForFlush(pd, asid);
1613    stored_hw_asid = loadHWASID(asid);
1614
1615    if (pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
1616        invalidateTranslationASID(pde_pde_invalid_get_stored_hw_asid(stored_hw_asid));
1617        if (root_switched) {
1618            setVMRoot(NODE_STATE(ksCurThread));
1619        }
1620    }
1621}
1622
1623void
1624flushSpace(asid_t asid)
1625{
1626    pde_t stored_hw_asid;
1627
1628    stored_hw_asid = loadHWASID(asid);
1629
1630    /* Clean the entire data cache, to guarantee that any VAs mapped
1631     * in the deleted space are clean (because we can't clean by VA after
1632     * deleting the space) */
1633    cleanCaches_PoU();
1634
1635    /* If the given ASID doesn't have a hardware ASID
1636     * assigned, then it can't have any mappings in the TLB */
1637    if (!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
1638        return;
1639    }
1640
1641    /* Do the TLB flush */
1642    invalidateTranslationASID(pde_pde_invalid_get_stored_hw_asid(stored_hw_asid));
1643}
1644
1645void
1646invalidateTLBByASID(asid_t asid)
1647{
1648    pde_t stored_hw_asid;
1649
1650    stored_hw_asid = loadHWASID(asid);
1651
1652    /* If the given ASID doesn't have a hardware ASID
1653     * assigned, then it can't have any mappings in the TLB */
1654    if (!pde_pde_invalid_get_stored_asid_valid(stored_hw_asid)) {
1655        return;
1656    }
1657
1658    /* Do the TLB flush */
1659    invalidateTranslationASID(pde_pde_invalid_get_stored_hw_asid(stored_hw_asid));
1660}
1661
1662static inline bool_t CONST
1663checkVPAlignment(vm_page_size_t sz, word_t w)
1664{
1665    return (w & MASK(pageBitsForSize(sz))) == 0;
1666}
1667
1668struct create_mappings_pte_return {
1669    exception_t status;
1670    pte_t pte;
1671    pte_range_t pte_entries;
1672};
1673typedef struct create_mappings_pte_return create_mappings_pte_return_t;
1674
1675struct create_mappings_pde_return {
1676    exception_t status;
1677    pde_t pde;
1678    pde_range_t pde_entries;
1679};
1680typedef struct create_mappings_pde_return create_mappings_pde_return_t;

static create_mappings_pte_return_t
createSafeMappingEntries_PTE
(paddr_t base, word_t vaddr, vm_page_size_t frameSize,
 vm_rights_t vmRights, vm_attributes_t attr, pde_t *pd)
{
    create_mappings_pte_return_t ret;
    lookupPTSlot_ret_t lu_ret;
    word_t i;

    switch (frameSize) {

    case ARMSmallPage:

        ret.pte_entries.base = NULL; /* to avoid uninitialised warning */
        ret.pte_entries.length = 1;

        ret.pte = makeUserPTE(ARMSmallPage, base,
                              vm_attributes_get_armPageCacheable(attr),
                              vm_attributes_get_armExecuteNever(attr),
                              vmRights);

        lu_ret = lookupPTSlot(pd, vaddr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            current_syscall_error.type =
                seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource =
                false;
            ret.status = EXCEPTION_SYSCALL_ERROR;
            /* current_lookup_fault will have been set by
             * lookupPTSlot */
            return ret;
        }

        ret.pte_entries.base = lu_ret.ptSlot;
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (unlikely(pte_ptr_get_pteType(ret.pte_entries.base) ==
                     pte_pte_large)) {
#else
        if (unlikely(pte_ptr_get_pteType(ret.pte_entries.base) == pte_pte_small
                     && pte_pte_small_ptr_get_contiguous_hint(ret.pte_entries.base))) {
#endif
            current_syscall_error.type =
                seL4_DeleteFirst;

            ret.status = EXCEPTION_SYSCALL_ERROR;
            return ret;
        }

        ret.status = EXCEPTION_NONE;
        return ret;

    case ARMLargePage:

        ret.pte_entries.base = NULL; /* to avoid uninitialised warning */
        ret.pte_entries.length = PAGES_PER_LARGE_PAGE;

        ret.pte = makeUserPTE(ARMLargePage, base,
                              vm_attributes_get_armPageCacheable(attr),
                              vm_attributes_get_armExecuteNever(attr),
                              vmRights);

        lu_ret = lookupPTSlot(pd, vaddr);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            current_syscall_error.type =
                seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource =
                false;
            ret.status = EXCEPTION_SYSCALL_ERROR;
            /* current_lookup_fault will have been set by
             * lookupPTSlot */
            return ret;
        }

        ret.pte_entries.base = lu_ret.ptSlot;

        for (i = 0; i < PAGES_PER_LARGE_PAGE; i++) {
            if (unlikely(pte_get_pteType(ret.pte_entries.base[i]) ==
                         pte_pte_small)
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
                    && !pte_pte_small_get_contiguous_hint(ret.pte_entries.base[i])
#endif
               ) {
                current_syscall_error.type =
                    seL4_DeleteFirst;

                ret.status = EXCEPTION_SYSCALL_ERROR;
                return ret;
            }
        }

        ret.status = EXCEPTION_NONE;
        return ret;

    default:
        fail("Invalid or unexpected ARM page type.");

    }
}

static create_mappings_pde_return_t
createSafeMappingEntries_PDE
(paddr_t base, word_t vaddr, vm_page_size_t frameSize,
 vm_rights_t vmRights, vm_attributes_t attr, pde_t *pd)
{
    create_mappings_pde_return_t ret;
    pde_tag_t currentPDEType;
    word_t i;

    switch (frameSize) {

    /* PDE mappings */
    case ARMSection:
        ret.pde_entries.base = lookupPDSlot(pd, vaddr);
        ret.pde_entries.length = 1;

        ret.pde = makeUserPDE(ARMSection, base,
                              vm_attributes_get_armParityEnabled(attr),
                              vm_attributes_get_armPageCacheable(attr),
                              vm_attributes_get_armExecuteNever(attr),
                              0,
                              vmRights);

        currentPDEType =
            pde_ptr_get_pdeType(ret.pde_entries.base);
        if (unlikely(currentPDEType != pde_pde_invalid &&
                     (currentPDEType != pde_pde_section ||
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
                      pde_pde_section_ptr_get_size(ret.pde_entries.base) != 0))) {
#else
                      pde_pde_section_ptr_get_contiguous_hint(ret.pde_entries.base) != 0))) {
#endif
            current_syscall_error.type =
                seL4_DeleteFirst;
            ret.status = EXCEPTION_SYSCALL_ERROR;

            return ret;
        }

        ret.status = EXCEPTION_NONE;
        return ret;

    case ARMSuperSection:
        ret.pde_entries.base = lookupPDSlot(pd, vaddr);
        ret.pde_entries.length = SECTIONS_PER_SUPER_SECTION;

        ret.pde = makeUserPDE(ARMSuperSection, base,
                              vm_attributes_get_armParityEnabled(attr),
                              vm_attributes_get_armPageCacheable(attr),
                              vm_attributes_get_armExecuteNever(attr),
                              0,
                              vmRights);

        for (i = 0; i < SECTIONS_PER_SUPER_SECTION; i++) {
            currentPDEType =
                pde_get_pdeType(ret.pde_entries.base[i]);
            if (unlikely(currentPDEType != pde_pde_invalid &&
                         (currentPDEType != pde_pde_section ||
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
                          pde_pde_section_get_size(ret.pde_entries.base[i]) != 1))) {
#else
                          pde_pde_section_get_contiguous_hint(ret.pde_entries.base[i]) != 1))) {
#endif
                current_syscall_error.type =
                    seL4_DeleteFirst;
                ret.status = EXCEPTION_SYSCALL_ERROR;

                return ret;
            }
        }

        ret.status = EXCEPTION_NONE;
        return ret;

    default:
        fail("Invalid or unexpected ARM page type.");

    }
}

static inline vptr_t
pageBase(vptr_t vaddr, vm_page_size_t size)
{
    return vaddr & ~MASK(pageBitsForSize(size));
}
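
/* Worked example: pageBase(0x12345678, ARMSection) masks off the low 20 bits
 * and yields 0x12300000, the base of the 1MB section containing the address. */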

static bool_t PURE
pteCheckIfMapped(pte_t *pte)
{
    return pte_ptr_get_pteType(pte) != pte_pte_invalid;
}

static bool_t PURE
pdeCheckIfMapped(pde_t *pde)
{
    return pde_ptr_get_pdeType(pde) != pde_pde_invalid;
}

static void
doFlush(int invLabel, vptr_t start, vptr_t end, paddr_t pstart)
{
    /** GHOSTUPD: "((gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state = 0
            \<or> \<acute>end - \<acute>start <= gs_get_assn cap_get_capSizeBits_'proc \<acute>ghost'state)
        \<and> \<acute>start <= \<acute>end, id)" */
    if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) {
        /* The hypervisor does not share an AS with userspace so we must flush
         * by kernel MVA instead. ARMv7 caches are PIPT so it makes no difference */
        end = (vptr_t)paddr_to_pptr(pstart) + (end - start);
        start = (vptr_t)paddr_to_pptr(pstart);
    }
    switch (invLabel) {
    case ARMPDClean_Data:
    case ARMPageClean_Data:
        cleanCacheRange_RAM(start, end, pstart);
        break;
    case ARMPDInvalidate_Data:
    case ARMPageInvalidate_Data:
        invalidateCacheRange_RAM(start, end, pstart);
        break;
    case ARMPDCleanInvalidate_Data:
    case ARMPageCleanInvalidate_Data:
        cleanInvalidateCacheRange_RAM(start, end, pstart);
        break;
    case ARMPDUnify_Instruction:
    case ARMPageUnify_Instruction:
        /* First clean data lines to point of unification
           (L2 cache)... */
        cleanCacheRange_PoU(start, end, pstart);
        /* Ensure it's been written. */
        dsb();
        /* ...then invalidate the corresponding instruction lines
           to point of unification... */
        invalidateCacheRange_I(start, end, pstart);
        /* ...then invalidate branch predictors. */
        branchFlushRange(start, end, pstart);
        /* Ensure new instructions come from fresh cache lines. */
        isb();
        break;
    default:
        fail("Invalid operation, shouldn't get here.\n");
    }
}
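
/* A note on the Unify_Instruction sequence above: it is the standard
 * self-modifying-code pattern. Instructions written through the data side
 * are first cleaned to the point of unification; only then are the possibly
 * stale instruction-cache lines and branch-predictor entries for the range
 * discarded. Without the intervening dsb() the instruction-side invalidate
 * could be ordered before the cleaned data becomes visible to fetches. */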

/* ================= INVOCATION HANDLING STARTS HERE ================== */

static exception_t
performPDFlush(int invLabel, pde_t *pd, asid_t asid, vptr_t start,
               vptr_t end, paddr_t pstart)
{
    bool_t root_switched;

    /* Flush only if given a non-zero range */
    if (start < end) {
        root_switched = setVMRootForFlush(pd, asid);

        doFlush(invLabel, start, end, pstart);

        if (root_switched) {
            setVMRoot(NODE_STATE(ksCurThread));
        }
    }

    return EXCEPTION_NONE;
}

static exception_t
performPageTableInvocationMap(cap_t cap, cte_t *ctSlot,
                              pde_t pde, pde_t *pdSlot)
{
    ctSlot->cap = cap;
    *pdSlot = pde;
    cleanByVA_PoU((word_t)pdSlot, addrFromPPtr(pdSlot));

    return EXCEPTION_NONE;
}
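
/* The cleanByVA_PoU above is required because the MMU's table walker reads
 * descriptors from memory: on cores whose walkers do not snoop the data
 * cache, the newly written PDE could otherwise sit invisible in a dirty
 * cache line. */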

static exception_t
performPageTableInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (cap_page_table_cap_get_capPTIsMapped(cap)) {
        pte_t *pt = PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap));
        unmapPageTable(
            cap_page_table_cap_get_capPTMappedASID(cap),
            cap_page_table_cap_get_capPTMappedAddress(cap),
            pt);
        clearMemory((void *)pt, cap_get_capSizeBits(cap));
    }
    cap_page_table_cap_ptr_set_capPTIsMapped(&(ctSlot->cap), 0);

    return EXCEPTION_NONE;
}

static exception_t
performPageInvocationMapPTE(asid_t asid, cap_t cap, cte_t *ctSlot, pte_t pte,
                            pte_range_t pte_entries)
{
    word_t i, j UNUSED;
    bool_t tlbflush_required;

    ctSlot->cap = cap;

    /* we only need to check the first entry, as createSafeMappingEntries
     * guarantees that the whole range is in a consistent state */
    tlbflush_required = pteCheckIfMapped(pte_entries.base);

    j = pte_entries.length;
    /** GHOSTUPD: "(\<acute>j <= 16, id)" */

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t base_address = pte_pte_small_get_address(pte);
#endif
    for (i = 0; i < pte_entries.length; i++) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        pte = pte_pte_small_set_address(pte, base_address + i * BIT(pageBitsForSize(ARMSmallPage)));
#endif
        pte_entries.base[i] = pte;
    }
    cleanCacheRange_PoU((word_t)pte_entries.base,
                        LAST_BYTE_PTE(pte_entries.base, pte_entries.length),
                        addrFromPPtr(pte_entries.base));
    if (unlikely(tlbflush_required)) {
        invalidateTLBByASID(asid);
    }

    return EXCEPTION_NONE;
}
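
/* In hyp mode a large page is represented as 16 contiguous small-page
 * descriptors, so the loop above patches the output address slot by slot:
 * for example, a 64KB large page based at 0x80010000 gets
 * 0x80010000 + 3 * 0x1000 = 0x80013000 written into slot i == 3. */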

static exception_t
performPageInvocationMapPDE(asid_t asid, cap_t cap, cte_t *ctSlot, pde_t pde,
                            pde_range_t pde_entries)
{
    word_t i, j UNUSED;
    bool_t tlbflush_required;

    ctSlot->cap = cap;

    /* we only need to check the first entry, as createSafeMappingEntries
     * guarantees that the whole range is in a consistent state */
    tlbflush_required = pdeCheckIfMapped(pde_entries.base);

    j = pde_entries.length;
    /** GHOSTUPD: "(\<acute>j <= 16, id)" */

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t base_address = pde_pde_section_get_address(pde);
#endif
    for (i = 0; i < pde_entries.length; i++) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        pde = pde_pde_section_set_address(pde, base_address + i * BIT(pageBitsForSize(ARMSection)));
#endif
        pde_entries.base[i] = pde;
    }
    cleanCacheRange_PoU((word_t)pde_entries.base,
                        LAST_BYTE_PDE(pde_entries.base, pde_entries.length),
                        addrFromPPtr(pde_entries.base));
    if (unlikely(tlbflush_required)) {
        invalidateTLBByASID(asid);
    }

    return EXCEPTION_NONE;
}

static exception_t
performPageInvocationRemapPTE(asid_t asid, pte_t pte, pte_range_t pte_entries)
{
    word_t i, j UNUSED;
    bool_t tlbflush_required;

    /* we only need to check the first entry, as createSafeMappingEntries
     * guarantees that the whole range is in a consistent state */
    tlbflush_required = pteCheckIfMapped(pte_entries.base);

    j = pte_entries.length;
    /** GHOSTUPD: "(\<acute>j <= 16, id)" */

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t base_address = pte_pte_small_get_address(pte);
#endif
    for (i = 0; i < pte_entries.length; i++) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        pte = pte_pte_small_set_address(pte, base_address + i * BIT(pageBitsForSize(ARMSmallPage)));
#endif
        pte_entries.base[i] = pte;
    }
    cleanCacheRange_PoU((word_t)pte_entries.base,
                        LAST_BYTE_PTE(pte_entries.base, pte_entries.length),
                        addrFromPPtr(pte_entries.base));
    if (unlikely(tlbflush_required)) {
        invalidateTLBByASID(asid);
    }

    return EXCEPTION_NONE;
}

static exception_t
performPageInvocationRemapPDE(asid_t asid, pde_t pde, pde_range_t pde_entries)
{
    word_t i, j UNUSED;
    bool_t tlbflush_required;

    /* we only need to check the first entry, as createSafeMappingEntries
     * guarantees that the whole range is in a consistent state */
    tlbflush_required = pdeCheckIfMapped(pde_entries.base);

    j = pde_entries.length;
    /** GHOSTUPD: "(\<acute>j <= 16, id)" */

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    word_t base_address = pde_pde_section_get_address(pde);
#endif
    for (i = 0; i < pde_entries.length; i++) {
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        pde = pde_pde_section_set_address(pde, base_address + i * BIT(pageBitsForSize(ARMSection)));
#endif
        pde_entries.base[i] = pde;
    }
    cleanCacheRange_PoU((word_t)pde_entries.base,
                        LAST_BYTE_PDE(pde_entries.base, pde_entries.length),
                        addrFromPPtr(pde_entries.base));
    if (unlikely(tlbflush_required)) {
        invalidateTLBByASID(asid);
    }

    return EXCEPTION_NONE;
}

static exception_t
performPageInvocationUnmap(cap_t cap, cte_t *ctSlot)
{
    if (generic_frame_cap_get_capFIsMapped(cap)) {
        unmapPage(generic_frame_cap_get_capFSize(cap),
                  generic_frame_cap_get_capFMappedASID(cap),
                  generic_frame_cap_get_capFMappedAddress(cap),
                  (void *)generic_frame_cap_get_capFBasePtr(cap));
    }

    generic_frame_cap_ptr_set_capFMappedAddress(&ctSlot->cap, asidInvalid, 0);

    return EXCEPTION_NONE;
}

static exception_t
performPageFlush(int invLabel, pde_t *pd, asid_t asid, vptr_t start,
                 vptr_t end, paddr_t pstart)
{
    bool_t root_switched;

    /* Now we can flush, but only if we were given a non-zero range */
    if (start < end) {
        root_switched = setVMRootForFlush(pd, asid);

        doFlush(invLabel, start, end, pstart);

        if (root_switched) {
            setVMRoot(NODE_STATE(ksCurThread));
        }
    }

    return EXCEPTION_NONE;
}

static exception_t
performPageGetAddress(void *vbase_ptr)
{
    paddr_t capFBasePtr;

    /* Get the physical address of this frame. */
    capFBasePtr = addrFromPPtr(vbase_ptr);

    /* return it in the first message register */
    setRegister(NODE_STATE(ksCurThread), msgRegisters[0], capFBasePtr);
    setRegister(NODE_STATE(ksCurThread), msgInfoRegister,
                wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, 1)));

    return EXCEPTION_NONE;
}
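
/* Illustrative user-level counterpart of the invocation above, assuming the
 * standard libsel4 stubs:
 *
 *     seL4_ARM_Page_GetAddress_t res = seL4_ARM_Page_GetAddress(frame_cap);
 *     // on success, res.paddr carries the value placed in mr0 above
 */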

static exception_t
performASIDPoolInvocation(asid_t asid, asid_pool_t *poolPtr,
                          cte_t *pdCapSlot)
{
    cap_page_directory_cap_ptr_set_capPDMappedASID(&pdCapSlot->cap, asid);
    cap_page_directory_cap_ptr_set_capPDIsMapped(&pdCapSlot->cap, 1);
    poolPtr->array[asid & MASK(asidLowBits)] =
        PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pdCapSlot->cap));

    return EXCEPTION_NONE;
}

static exception_t
performASIDControlInvocation(void *frame, cte_t *slot,
                             cte_t *parent, asid_t asid_base)
{
    /** AUXUPD: "(True, typ_region_bytes (ptr_val \<acute>frame) 12)" */
    /** GHOSTUPD: "(True, gs_clear_region (ptr_val \<acute>frame) 12)" */
    cap_untyped_cap_ptr_set_capFreeIndex(&(parent->cap),
                                         MAX_FREE_INDEX(cap_untyped_cap_get_capBlockSize(parent->cap)));

    memzero(frame, BIT(ARMSmallPageBits));
    /** AUXUPD: "(True, ptr_retyps 1 (Ptr (ptr_val \<acute>frame) :: asid_pool_C ptr))" */

    cteInsert(cap_asid_pool_cap_new(asid_base, WORD_REF(frame)),
              parent, slot);
    /* Haskell error: "ASID pool's base must be aligned" */
    assert((asid_base & MASK(asidLowBits)) == 0);
    armKSASIDTable[asid_base >> asidLowBits] = (asid_pool_t *)frame;

    return EXCEPTION_NONE;
}

static exception_t
decodeARMPageDirectoryInvocation(word_t invLabel, word_t length,
                                 cptr_t cptr, cte_t *cte, cap_t cap,
                                 extra_caps_t excaps, word_t *buffer)
{
    switch (invLabel) {
    case ARMPDClean_Data:
    case ARMPDInvalidate_Data:
    case ARMPDCleanInvalidate_Data:
    case ARMPDUnify_Instruction: {
        vptr_t start, end;
        paddr_t pstart;
        findPDForASID_ret_t find_ret;
        asid_t asid;
        pde_t *pd;
        resolve_ret_t resolve_ret;

        if (length < 2) {
            userError("PD Flush: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        start = getSyscallArg(0, buffer);
        end =   getSyscallArg(1, buffer);

        /* Check sanity of arguments */
        if (end <= start) {
            userError("PD Flush: Invalid range");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Don't let applications flush kernel regions. */
        if (start >= kernelBase || end > kernelBase) {
            userError("PD Flush: Overlaps kernel region.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(cap_get_capType(cap) != cap_page_directory_cap ||
                     !cap_page_directory_cap_get_capPDIsMapped(cap))) {
            userError("PD Flush: Invalid cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Make sure that the supplied pd is ok */
        pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(cap));
        asid = cap_page_directory_cap_get_capPDMappedASID(cap);

        find_ret = findPDForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            userError("PD Flush: No PD for ASID");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(find_ret.pd != pd)) {
            userError("PD Flush: Invalid PD Cap");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Look up the frame containing 'start'. */
        resolve_ret = resolveVAddr(pd, start);

        /* Check that there's actually something there. */
        if (!resolve_ret.valid) {
            /* Fail silently, as there can't be any stale cached data (for the
             * given address space), and getting a syscall error because the
             * relevant page is non-resident would be 'astonishing'. */
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return EXCEPTION_NONE;
        }

        /* Refuse to cross a page boundary. */
        if (pageBase(start, resolve_ret.frameSize) !=
                pageBase(end - 1, resolve_ret.frameSize)) {
            userError("PD Flush: Range is across page boundary.");
            current_syscall_error.type = seL4_RangeError;
            current_syscall_error.rangeErrorMin = start;
            current_syscall_error.rangeErrorMax =
                pageBase(start, resolve_ret.frameSize) +
                MASK(pageBitsForSize(resolve_ret.frameSize));
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Calculate the physical start address. */
        pstart = resolve_ret.frameBase
                 + (start & MASK(pageBitsForSize(resolve_ret.frameSize)));

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPDFlush(invLabel, pd, asid, start, end - 1, pstart);
    }

    default:
        userError("PD: Invalid invocation number");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}
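
/* Illustrative user-level usage of the flush path decoded above, assuming
 * the standard libsel4 stubs (start inclusive, end exclusive, both virtual
 * addresses within the flushed address space):
 *
 *     seL4_ARM_PageDirectory_Clean_Data(pd_cap, buf_vaddr,
 *                                       buf_vaddr + buf_size);
 */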

static exception_t
decodeARMPageTableInvocation(word_t invLabel, word_t length,
                             cte_t *cte, cap_t cap, extra_caps_t excaps,
                             word_t *buffer)
{
    word_t vaddr, pdIndex;

#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    vm_attributes_t attr;
#endif
    cap_t pdCap;
    pde_t *pd, *pdSlot;
    pde_t pde;
    asid_t asid;
    paddr_t paddr;

    if (invLabel == ARMPageTableUnmap) {
        if (unlikely(!isFinalCapability(cte))) {
            userError("ARMPageTableUnmap: Cannot unmap if more than one cap exists.");
            current_syscall_error.type = seL4_RevokeFirst;
            return EXCEPTION_SYSCALL_ERROR;
        }
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageTableInvocationUnmap (cap, cte);
    }

    if (unlikely(invLabel != ARMPageTableMap)) {
        userError("ARMPageTable: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(length < 2 || excaps.excaprefs[0] == NULL)) {
        userError("ARMPageTableMap: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (unlikely(cap_page_table_cap_get_capPTIsMapped(cap))) {
        userError("ARMPageTableMap: Page table is already mapped to a page directory.");
        current_syscall_error.type =
            seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    vaddr = getSyscallArg(0, buffer);
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    attr = vmAttributesFromWord(getSyscallArg(1, buffer));
#endif
    pdCap = excaps.excaprefs[0]->cap;

    if (unlikely(cap_get_capType(pdCap) != cap_page_directory_cap ||
                 !cap_page_directory_cap_get_capPDIsMapped(pdCap))) {
        userError("ARMPageTableMap: Invalid PD cap.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;

        return EXCEPTION_SYSCALL_ERROR;
    }

    pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pdCap));
    asid = cap_page_directory_cap_get_capPDMappedASID(pdCap);

    if (unlikely(vaddr >= kernelBase)) {
        userError("ARMPageTableMap: Virtual address cannot be in kernel window. vaddr: 0x%08lx, kernelBase: 0x%08x", vaddr, kernelBase);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 0;

        return EXCEPTION_SYSCALL_ERROR;
    }

    {
        findPDForASID_ret_t find_ret;

        find_ret = findPDForASID(asid);
        if (unlikely(find_ret.status != EXCEPTION_NONE)) {
            userError("ARMPageTableMap: ASID lookup failed.");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(find_ret.pd != pd)) {
            userError("ARMPageTableMap: ASID lookup failed.");
            current_syscall_error.type =
                seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }
    }
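
    /* With the short-descriptor format assumed here (PAGE_BITS == 12,
     * PT_INDEX_BITS == 8), each PDE covers 1MB, so e.g. vaddr 0x00345000
     * selects pdIndex 0x3. */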
    pdIndex = vaddr >> (PAGE_BITS + PT_INDEX_BITS);
    pdSlot = &pd[pdIndex];
    if (unlikely(pde_ptr_get_pdeType(pdSlot) != pde_pde_invalid)) {
        userError("ARMPageTableMap: Page directory already has entry for supplied address.");
        current_syscall_error.type = seL4_DeleteFirst;

        return EXCEPTION_SYSCALL_ERROR;
    }

    paddr = addrFromPPtr(
                PTE_PTR(cap_page_table_cap_get_capPTBasePtr(cap)));
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    pde = pde_pde_coarse_new(
              paddr,
              vm_attributes_get_armParityEnabled(attr),
              0 /* Domain */
          );
#else
    pde = pde_pde_coarse_new(paddr);
#endif

    cap = cap_page_table_cap_set_capPTIsMapped(cap, 1);
    cap = cap_page_table_cap_set_capPTMappedASID(cap, asid);
    cap = cap_page_table_cap_set_capPTMappedAddress(cap, vaddr);

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return performPageTableInvocationMap(cap, cte, pde, pdSlot);
}
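
/* Illustrative user-level counterpart, assuming the standard libsel4 stubs:
 *
 *     seL4_ARM_PageTable_Map(pt_cap, pd_cap, 0x00345000,
 *                            seL4_ARM_Default_VMAttributes);
 *
 * installs the page table so that it serves the section-sized region
 * containing the given virtual address. */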

static exception_t
decodeARMFrameInvocation(word_t invLabel, word_t length,
                         cte_t *cte, cap_t cap, extra_caps_t excaps,
                         word_t *buffer)
{
    switch (invLabel) {
    case ARMPageMap: {
        word_t vaddr, vtop, w_rightsMask;
        paddr_t capFBasePtr;
        cap_t pdCap;
        pde_t *pd;
        asid_t asid;
        vm_rights_t capVMRights, vmRights;
        vm_page_size_t frameSize;
        vm_attributes_t attr;

        if (unlikely(length < 3 || excaps.excaprefs[0] == NULL)) {
            userError("ARMPageMap: Truncated message.");
            current_syscall_error.type =
                seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        vaddr = getSyscallArg(0, buffer);
        w_rightsMask = getSyscallArg(1, buffer);
        attr = vmAttributesFromWord(getSyscallArg(2, buffer));
        pdCap = excaps.excaprefs[0]->cap;

        frameSize = generic_frame_cap_get_capFSize(cap);
        capVMRights = generic_frame_cap_get_capFVMRights(cap);

        if (unlikely(generic_frame_cap_get_capFIsMapped(cap))) {
            userError("ARMPageMap: Cap already has mapping.");
            current_syscall_error.type =
                seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(cap_get_capType(pdCap) != cap_page_directory_cap ||
                     !cap_page_directory_cap_get_capPDIsMapped(pdCap))) {
            userError("ARMPageMap: Bad PageDirectory cap.");
            current_syscall_error.type =
                seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }
        pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(
                         pdCap));
        asid = cap_page_directory_cap_get_capPDMappedASID(pdCap);

        {
            findPDForASID_ret_t find_ret;

            find_ret = findPDForASID(asid);
            if (unlikely(find_ret.status != EXCEPTION_NONE)) {
                userError("ARMPageMap: No PD for ASID");
                current_syscall_error.type =
                    seL4_FailedLookup;
                current_syscall_error.failedLookupWasSource =
                    false;

                return EXCEPTION_SYSCALL_ERROR;
            }

            if (unlikely(find_ret.pd != pd)) {
                userError("ARMPageMap: ASID lookup failed.");
                current_syscall_error.type =
                    seL4_InvalidCapability;
                current_syscall_error.invalidCapNumber = 1;

                return EXCEPTION_SYSCALL_ERROR;
            }
        }

        vtop = vaddr + BIT(pageBitsForSize(frameSize)) - 1;

        if (unlikely(vtop >= kernelBase)) {
            userError("ARMPageMap: Cannot map frame over kernel window. vaddr: 0x%08lx, kernelBase: 0x%08x", vaddr, kernelBase);
            current_syscall_error.type =
                seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 0;

            return EXCEPTION_SYSCALL_ERROR;
        }

        vmRights =
            maskVMRights(capVMRights, rightsFromWord(w_rightsMask));

        if (unlikely(!checkVPAlignment(frameSize, vaddr))) {
            userError("ARMPageMap: Virtual address has incorrect alignment.");
            current_syscall_error.type =
                seL4_AlignmentError;

            return EXCEPTION_SYSCALL_ERROR;
        }

        capFBasePtr = addrFromPPtr((void *)
                                   generic_frame_cap_get_capFBasePtr(cap));

        cap = generic_frame_cap_set_capFMappedAddress(cap, asid,
                                                      vaddr);
        if (frameSize == ARMSmallPage || frameSize == ARMLargePage) {
            create_mappings_pte_return_t map_ret;
            map_ret = createSafeMappingEntries_PTE(capFBasePtr, vaddr,
                                                   frameSize, vmRights,
                                                   attr, pd);
            if (unlikely(map_ret.status != EXCEPTION_NONE)) {
#ifdef CONFIG_PRINTING
                if (current_syscall_error.type == seL4_DeleteFirst) {
                    userError("ARMPageMap: Page table entry was not free.");
                }
#endif
                return map_ret.status;
            }

            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationMapPTE(asid, cap, cte,
                                               map_ret.pte,
                                               map_ret.pte_entries);
        } else {
            create_mappings_pde_return_t map_ret;
            map_ret = createSafeMappingEntries_PDE(capFBasePtr, vaddr,
                                                   frameSize, vmRights,
                                                   attr, pd);
            if (unlikely(map_ret.status != EXCEPTION_NONE)) {
#ifdef CONFIG_PRINTING
                if (current_syscall_error.type == seL4_DeleteFirst) {
                    userError("ARMPageMap: Page directory entry was not free.");
                }
#endif
                return map_ret.status;
            }

            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationMapPDE(asid, cap, cte,
                                               map_ret.pde,
                                               map_ret.pde_entries);
        }
    }

    case ARMPageRemap: {
        word_t vaddr, w_rightsMask;
        paddr_t capFBasePtr;
        cap_t pdCap;
        pde_t *pd;
        asid_t mappedASID;
        vm_rights_t capVMRights, vmRights;
        vm_page_size_t frameSize;
        vm_attributes_t attr;

#ifdef CONFIG_ARM_SMMU
        if (isIOSpaceFrameCap(cap)) {
            userError("ARMPageRemap: Attempting to remap frame mapped into an IOSpace");
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }
#endif

        if (unlikely(length < 2 || excaps.excaprefs[0] == NULL)) {
            userError("ARMPageRemap: Truncated message.");
            current_syscall_error.type =
                seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        w_rightsMask = getSyscallArg(0, buffer);
        attr = vmAttributesFromWord(getSyscallArg(1, buffer));
        pdCap = excaps.excaprefs[0]->cap;

        if (unlikely(cap_get_capType(pdCap) != cap_page_directory_cap ||
                     !cap_page_directory_cap_get_capPDIsMapped(pdCap))) {
            userError("ARMPageRemap: Invalid pd cap.");
            current_syscall_error.type =
                seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(!generic_frame_cap_get_capFIsMapped(cap))) {
            userError("ARMPageRemap: Cap is not mapped");
            current_syscall_error.type =
                seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;

            return EXCEPTION_SYSCALL_ERROR;
        }

        pd = PDE_PTR(cap_page_directory_cap_get_capPDBasePtr(pdCap));
        vaddr = generic_frame_cap_get_capFMappedAddress(cap);

        {
            findPDForASID_ret_t find_ret;

            mappedASID = generic_frame_cap_get_capFMappedASID(cap);

            find_ret = findPDForASID(mappedASID);
            if (unlikely(find_ret.status != EXCEPTION_NONE)) {
                userError("ARMPageRemap: No PD for ASID");
                current_syscall_error.type =
                    seL4_FailedLookup;
                current_syscall_error.failedLookupWasSource = false;

                return EXCEPTION_SYSCALL_ERROR;
            }

            if (unlikely(find_ret.pd != pd ||
                         cap_page_directory_cap_get_capPDMappedASID(pdCap) !=
                         mappedASID)) {
                userError("ARMPageRemap: Failed ASID lookup.");
                current_syscall_error.type =
                    seL4_InvalidCapability;
                current_syscall_error.invalidCapNumber = 1;

                return EXCEPTION_SYSCALL_ERROR;
            }
        }

        frameSize = generic_frame_cap_get_capFSize(cap);
        capVMRights = generic_frame_cap_get_capFVMRights(cap);
        vmRights =
            maskVMRights(capVMRights, rightsFromWord(w_rightsMask));

        if (unlikely(!checkVPAlignment(frameSize, vaddr))) {
            userError("ARMPageRemap: Virtual address has incorrect alignment.");
            current_syscall_error.type =
                seL4_AlignmentError;

            return EXCEPTION_SYSCALL_ERROR;
        }

        capFBasePtr = addrFromPPtr((void *)
                                   generic_frame_cap_get_capFBasePtr(cap));

        if (frameSize == ARMSmallPage || frameSize == ARMLargePage) {
            create_mappings_pte_return_t map_ret;
            map_ret = createSafeMappingEntries_PTE(capFBasePtr, vaddr,
                                                   frameSize, vmRights,
                                                   attr, pd);
            if (map_ret.status != EXCEPTION_NONE) {
#ifdef CONFIG_PRINTING
                if (current_syscall_error.type == seL4_FailedLookup) {
                    userError("ARMPageRemap: Page directory entry did not contain a page table.");
                } else if (current_syscall_error.type == seL4_DeleteFirst) {
                    userError("ARMPageRemap: Page table entry was not free.");
                }
#endif
                return map_ret.status;
            }

            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationRemapPTE(mappedASID, map_ret.pte,
                                                 map_ret.pte_entries);
        } else {
            create_mappings_pde_return_t map_ret;
            map_ret = createSafeMappingEntries_PDE(capFBasePtr, vaddr,
                                                   frameSize, vmRights,
                                                   attr, pd);
            if (map_ret.status != EXCEPTION_NONE) {
#ifdef CONFIG_PRINTING
                if (current_syscall_error.type == seL4_DeleteFirst) {
                    userError("ARMPageRemap: Page directory entry was not free.");
                }
#endif
                return map_ret.status;
            }

            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationRemapPDE(mappedASID, map_ret.pde,
                                                 map_ret.pde_entries);
        }
    }

    case ARMPageUnmap: {
#ifdef CONFIG_ARM_SMMU
        if (isIOSpaceFrameCap(cap)) {
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationUnmapIO(cap, cte);
        } else
#endif
        {
            setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
            return performPageInvocationUnmap(cap, cte);
        }
    }

#ifdef CONFIG_ARM_SMMU
    case ARMPageMapIO: {
        return decodeARMIOMapInvocation(invLabel, length, cte, cap, excaps, buffer);
    }
#endif

    case ARMPageClean_Data:
    case ARMPageInvalidate_Data:
    case ARMPageCleanInvalidate_Data:
    case ARMPageUnify_Instruction: {
        asid_t asid;
        vptr_t vaddr;
        findPDForASID_ret_t pd;
        vptr_t start, end;
        paddr_t pstart;
        word_t page_size;
        word_t page_base;

        if (length < 2) {
            userError("Page Flush: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;
            return EXCEPTION_SYSCALL_ERROR;
        }

        asid = generic_frame_cap_get_capFMappedASID(cap);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        /* Must use kernel vaddr in hyp mode. */
        vaddr = generic_frame_cap_get_capFBasePtr(cap);
#else
        vaddr = generic_frame_cap_get_capFMappedAddress(cap);
#endif

        if (unlikely(!generic_frame_cap_get_capFIsMapped(cap))) {
            userError("Page Flush: Frame is not mapped.");
            current_syscall_error.type = seL4_IllegalOperation;
            return EXCEPTION_SYSCALL_ERROR;
        }

        pd = findPDForASID(asid);
        if (unlikely(pd.status != EXCEPTION_NONE)) {
            userError("Page Flush: No PD for ASID");
            current_syscall_error.type =
                seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            return EXCEPTION_SYSCALL_ERROR;
        }

        start = getSyscallArg(0, buffer);
        end =   getSyscallArg(1, buffer);

        /* check that the range is sane */
        if (end <= start) {
            userError("Page Flush: Invalid range");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 1;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* start and end are currently relative inside this page */
        page_size = BIT(pageBitsForSize(generic_frame_cap_get_capFSize(cap)));
        page_base = addrFromPPtr((void*)generic_frame_cap_get_capFBasePtr(cap));

        if (start >= page_size || end > page_size) {
            userError("Page Flush: Requested range not inside page");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 0;
            return EXCEPTION_SYSCALL_ERROR;
        }

        /* turn start and end into absolute addresses */
        pstart = page_base + start;
        start += vaddr;
        end += vaddr;

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageFlush(invLabel, pd.pd, asid, start, end - 1, pstart);
    }

    case ARMPageGetAddress: {
        /* Check that there are enough message registers */
        assert(n_msgRegisters >= 1);

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performPageGetAddress((void*)generic_frame_cap_get_capFBasePtr(cap));
    }

    default:
        userError("ARMPage: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;

        return EXCEPTION_SYSCALL_ERROR;
    }
}
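
/* Illustrative user-level counterpart of the ARMPageMap case above, assuming
 * the standard libsel4 stubs:
 *
 *     seL4_ARM_Page_Map(frame_cap, pd_cap, 0x10000000, seL4_ReadWrite,
 *                       seL4_ARM_Default_VMAttributes);
 *
 * For small and large pages, a page table covering 0x10000000 must already
 * be mapped (via seL4_ARM_PageTable_Map), or the decode above fails with
 * seL4_FailedLookup. */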

exception_t
decodeARMMMUInvocation(word_t invLabel, word_t length, cptr_t cptr,
                       cte_t *cte, cap_t cap, extra_caps_t excaps,
                       word_t *buffer)
{
    switch (cap_get_capType(cap)) {
    case cap_page_directory_cap:
        return decodeARMPageDirectoryInvocation(invLabel, length, cptr, cte,
                                                cap, excaps, buffer);

    case cap_page_table_cap:
        return decodeARMPageTableInvocation (invLabel, length, cte,
                                             cap, excaps, buffer);

    case cap_small_frame_cap:
    case cap_frame_cap:
        return decodeARMFrameInvocation (invLabel, length, cte,
                                         cap, excaps, buffer);

    case cap_asid_control_cap: {
        word_t i;
        asid_t asid_base;
        word_t index, depth;
        cap_t untyped, root;
        cte_t *parentSlot, *destSlot;
        lookupSlot_ret_t lu_ret;
        void *frame;
        exception_t status;

        if (unlikely(invLabel != ARMASIDControlMakePool)) {
            userError("ASIDControl: Illegal operation.");
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(length < 2 || excaps.excaprefs[0] == NULL
                     || excaps.excaprefs[1] == NULL)) {
            userError("ASIDControlMakePool: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        index = getSyscallArg(0, buffer);
        depth = getSyscallArg(1, buffer);
        parentSlot = excaps.excaprefs[0];
        untyped = parentSlot->cap;
        root = excaps.excaprefs[1]->cap;

        /* Find first free pool */
        for (i = 0; i < nASIDPools && armKSASIDTable[i]; i++);

        if (unlikely(i == nASIDPools)) { /* If no unallocated pool is found */
            userError("ASIDControlMakePool: No free pools found.");
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        asid_base = i << asidLowBits;

        if (unlikely(cap_get_capType(untyped) != cap_untyped_cap ||
                     cap_untyped_cap_get_capBlockSize(untyped) != seL4_ASIDPoolBits ||
                     cap_untyped_cap_get_capIsDevice(untyped))) {
            userError("ASIDControlMakePool: Invalid untyped cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        status = ensureNoChildren(parentSlot);
        if (unlikely(status != EXCEPTION_NONE)) {
            userError("ASIDControlMakePool: Untyped has children. Revoke first.");
            return status;
        }

        frame = WORD_PTR(cap_untyped_cap_get_capPtr(untyped));

        lu_ret = lookupTargetSlot(root, index, depth);
        if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
            userError("ASIDControlMakePool: Failed to lookup destination slot.");
            return lu_ret.status;
        }
        destSlot = lu_ret.slot;

        status = ensureEmptySlot(destSlot);
        if (unlikely(status != EXCEPTION_NONE)) {
            userError("ASIDControlMakePool: Destination slot not empty.");
            return status;
        }

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDControlInvocation(frame, destSlot,
                                            parentSlot, asid_base);
    }

    case cap_asid_pool_cap: {
        cap_t pdCap;
        cte_t *pdCapSlot;
        asid_pool_t *pool;
        word_t i;
        asid_t asid;

        if (unlikely(invLabel != ARMASIDPoolAssign)) {
            userError("ASIDPool: Illegal operation.");
            current_syscall_error.type = seL4_IllegalOperation;

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(excaps.excaprefs[0] == NULL)) {
            userError("ASIDPoolAssign: Truncated message.");
            current_syscall_error.type = seL4_TruncatedMessage;

            return EXCEPTION_SYSCALL_ERROR;
        }

        pdCapSlot = excaps.excaprefs[0];
        pdCap = pdCapSlot->cap;

        if (unlikely(
                    cap_get_capType(pdCap) != cap_page_directory_cap ||
                    cap_page_directory_cap_get_capPDIsMapped(pdCap))) {
            userError("ASIDPoolAssign: Invalid page directory cap.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 1;

            return EXCEPTION_SYSCALL_ERROR;
        }

        pool = armKSASIDTable[cap_asid_pool_cap_get_capASIDBase(cap) >>
                              asidLowBits];
        if (unlikely(!pool)) {
            userError("ASIDPoolAssign: Failed to lookup pool.");
            current_syscall_error.type = seL4_FailedLookup;
            current_syscall_error.failedLookupWasSource = false;
            current_lookup_fault = lookup_fault_invalid_root_new();

            return EXCEPTION_SYSCALL_ERROR;
        }

        if (unlikely(pool != ASID_POOL_PTR(cap_asid_pool_cap_get_capASIDPool(cap)))) {
            userError("ASIDPoolAssign: Failed to lookup pool.");
            current_syscall_error.type = seL4_InvalidCapability;
            current_syscall_error.invalidCapNumber = 0;

            return EXCEPTION_SYSCALL_ERROR;
        }

        /* Find first free ASID */
        asid = cap_asid_pool_cap_get_capASIDBase(cap);
        for (i = 0; i < BIT(asidLowBits) && (asid + i == 0 || pool->array[i]); i++);

        if (unlikely(i == BIT(asidLowBits))) {
            userError("ASIDPoolAssign: No free ASID.");
            current_syscall_error.type = seL4_DeleteFirst;

            return EXCEPTION_SYSCALL_ERROR;
        }

        asid += i;

        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return performASIDPoolInvocation(asid, pool, pdCapSlot);
    }

    default:
        fail("Invalid ARM arch cap type");
    }
}
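
/* Illustrative user-level sequence for the two ASID invocations decoded
 * above, assuming the standard libsel4 stubs:
 *
 *     seL4_ARM_ASIDControl_MakePool(asid_control_cap, untyped_cap,
 *                                   cnode_root, slot_index, depth);
 *     seL4_ARM_ASIDPool_Assign(asid_pool_cap, pd_cap);
 *
 * MakePool consumes a non-device untyped of size seL4_ASIDPoolBits; Assign
 * then binds the page directory to the first free ASID in the pool. */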

#ifdef CONFIG_BENCHMARK_USE_KERNEL_LOG_BUFFER
exception_t benchmark_arch_map_logBuffer(word_t frame_cptr)
{
    lookupCapAndSlot_ret_t lu_ret;
    vm_page_size_t frameSize;
    pptr_t  frame_pptr;

    /* Look up the cap; failures are reported as cap faults. */
    lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), frame_cptr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invalid cap #%lu.", frame_cptr);
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(lu_ret.cap) != cap_frame_cap) {
        userError("Invalid cap. The log buffer must be a frame cap.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frameSize = generic_frame_cap_get_capFSize(lu_ret.cap);

    if (frameSize != ARMSection) {
        userError("Invalid frame size. The kernel expects a section-sized (1MB) log buffer.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frame_pptr = generic_frame_cap_get_capFBasePtr(lu_ret.cap);

    ksUserLogBuffer = pptr_to_paddr((void *) frame_pptr);

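    /* The loop below installs BIT(PT_INDEX_BITS) small-page entries; with
     * the 256-entry page tables of the short-descriptor format this is
     * 256 * 4KB, i.e. exactly the section-sized buffer demanded above. */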
    for (int idx = 0; idx < BIT(PT_INDEX_BITS); idx++) {
        paddr_t physical_address = ksUserLogBuffer + (idx << seL4_PageBits);

        armKSGlobalLogPT[idx] =
            pte_pte_small_new(
                physical_address,
                0, /* global */
                0, /* Not shared */
                0, /* APX = 0, privileged full access */
                0, /* TEX = 0 */
                1, /* VMKernelOnly */
                1, /* Cacheable */
                0, /* Write-through to minimise perf hit */
                0  /* executable */
            );

        cleanCacheRange_PoU((pptr_t) &armKSGlobalLogPT[idx],
                            (pptr_t) &armKSGlobalLogPT[idx + 1],
                            addrFromPPtr((void *)&armKSGlobalLogPT[idx]));

        invalidateTranslationSingle(pptr_to_paddr((void *) physical_address));
    }

    return EXCEPTION_NONE;
}
#endif /* CONFIG_BENCHMARK_USE_KERNEL_LOG_BUFFER */

#ifdef CONFIG_DEBUG_BUILD
void kernelPrefetchAbort(word_t pc) VISIBLE;
void kernelDataAbort(word_t pc) VISIBLE;

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

void kernelUndefinedInstruction(word_t pc) VISIBLE;

void
kernelPrefetchAbort(word_t pc)
{
    word_t UNUSED sr = getHSR();

    printf("\n\nKERNEL PREFETCH ABORT!\n");
    printf("Faulting instruction: 0x%x\n", (unsigned int)pc);
    printf("HSR: 0x%x\n", (unsigned int)sr);

    halt();
}

void
kernelDataAbort(word_t pc)
{
    word_t UNUSED far = getHDFAR();
    word_t UNUSED sr = getHSR();

    printf("\n\nKERNEL DATA ABORT!\n");
    printf("Faulting instruction: 0x%x\n", (unsigned int)pc);
    printf("HDFAR: 0x%x HSR: 0x%x\n", (unsigned int)far, (unsigned int)sr);

    halt();
}

void
kernelUndefinedInstruction(word_t pc)
{
    word_t UNUSED sr = getHSR();

    printf("\n\nKERNEL UNDEFINED INSTRUCTION!\n");
    printf("Faulting instruction: 0x%x\n", (unsigned int)pc);
    printf("HSR: 0x%x\n", (unsigned int)sr);

    halt();
}

#else /* CONFIG_ARM_HYPERVISOR_SUPPORT */

void
kernelPrefetchAbort(word_t pc)
{
    word_t UNUSED ifsr = getIFSR();

    printf("\n\nKERNEL PREFETCH ABORT!\n");
    printf("Faulting instruction: 0x%x\n", (unsigned int)pc);
    printf("IFSR: 0x%x\n", (unsigned int)ifsr);

    halt();
}

void
kernelDataAbort(word_t pc)
{
    word_t UNUSED dfsr = getDFSR();
    word_t UNUSED far = getFAR();

    printf("\n\nKERNEL DATA ABORT!\n");
    printf("Faulting instruction: 0x%x\n", (unsigned int)pc);
    printf("FAR: 0x%x DFSR: 0x%x\n", (unsigned int)far, (unsigned int)dfsr);

    halt();
}
#endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */

#endif

#ifdef CONFIG_PRINTING
typedef struct readWordFromVSpace_ret {
    exception_t status;
    word_t value;
} readWordFromVSpace_ret_t;

static readWordFromVSpace_ret_t
readWordFromVSpace(pde_t *pd, word_t vaddr)
{
    readWordFromVSpace_ret_t ret;
    lookupPTSlot_ret_t ptSlot;
    pde_t *pdSlot;
    paddr_t paddr;
    word_t offset;
    pptr_t kernel_vaddr;
    word_t *value;

    pdSlot = lookupPDSlot(pd, vaddr);
    if (pde_ptr_get_pdeType(pdSlot) == pde_pde_section) {
        paddr = pde_pde_section_ptr_get_address(pdSlot);
        offset = vaddr & MASK(ARMSectionBits);
    } else {
        ptSlot = lookupPTSlot(pd, vaddr);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
        if (ptSlot.status == EXCEPTION_NONE && pte_ptr_get_pteType(ptSlot.ptSlot) == pte_pte_small) {
            paddr = pte_pte_small_ptr_get_address(ptSlot.ptSlot);
            if (pte_pte_small_ptr_get_contiguous_hint(ptSlot.ptSlot)) {
                offset = vaddr & MASK(ARMLargePageBits);
            } else {
                offset = vaddr & MASK(ARMSmallPageBits);
            }
#else
        if (ptSlot.status == EXCEPTION_NONE && pte_ptr_get_pteType(ptSlot.ptSlot) == pte_pte_small) {
            paddr = pte_pte_small_ptr_get_address(ptSlot.ptSlot);
            offset = vaddr & MASK(ARMSmallPageBits);
        } else if (ptSlot.status == EXCEPTION_NONE && pte_ptr_get_pteType(ptSlot.ptSlot) == pte_pte_large) {
            paddr = pte_pte_large_ptr_get_address(ptSlot.ptSlot);
            offset = vaddr & MASK(ARMLargePageBits);
#endif
        } else {
            ret.status = EXCEPTION_LOOKUP_FAULT;
            return ret;
        }
    }

    kernel_vaddr = (word_t)paddr_to_pptr(paddr);
    value = (word_t*)(kernel_vaddr + offset);
    ret.status = EXCEPTION_NONE;
    ret.value = *value;
    return ret;
}

void
Arch_userStackTrace(tcb_t *tptr)
{
    cap_t threadRoot;
    pde_t *pd;
    word_t sp;
    int i;

    threadRoot = TCB_PTR_CTE_PTR(tptr, tcbVTable)->cap;

    /* lookup the PD */
    if (cap_get_capType(threadRoot) != cap_page_directory_cap) {
        printf("Invalid vspace\n");
        return;
    }

    pd = (pde_t*)pptr_of_cap(threadRoot);

    sp = getRegister(tptr, SP);
    /* check for alignment so we don't have to worry about accessing
     * words that might be on two different pages */
    if (!IS_ALIGNED(sp, seL4_WordSizeBits)) {
        printf("SP not aligned\n");
        return;
    }

    for (i = 0; i < CONFIG_USER_STACK_TRACE_LENGTH; i++) {
        word_t address = sp + (i * sizeof(word_t));
        readWordFromVSpace_ret_t result;
        result = readWordFromVSpace(pd, address);
        if (result.status == EXCEPTION_NONE) {
            printf("0x%lx: 0x%lx\n", (long)address, (long)result.value);
        } else {
            printf("0x%lx: INVALID\n", (long)address);
        }
    }
}
#endif