/*
 * Copyright (c) 2009-2012,2015,2016 ETH Zurich.
 * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <kernel.h>
#include <dispatch.h>
#include <sysreg.h>
#include <paging_kernel_arch.h>
#include <string.h>
#include <exceptions.h>
#include <arm_hal.h>
#include <cap_predicates.h>
#include <mdb/mdb_tree.h>
#include <dev/armv8_dev.h>
#include <useraccess.h>


// ------------------------------------------------------------------------
// Utility declarations

inline static uintptr_t paging_round_down(uintptr_t address, uintptr_t size)
{
    return address & ~(size - 1);
}

inline static uintptr_t paging_round_up(uintptr_t address, uintptr_t size)
{
    return (address + size - 1) & ~(size - 1);
}

inline static int aligned(uintptr_t address, uintptr_t bytes)
{
    return (address & (bytes - 1)) == 0;
}
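
/*
 * Worked example (illustration only, not part of the build): these helpers
 * rely on `size`/`bytes` being a power of two, since they mask with
 * (size - 1). A few sample values, assuming 4K pages:
 *
 *   paging_round_down(0x12345, 0x1000) == 0x12000
 *   paging_round_up  (0x12345, 0x1000) == 0x13000
 *   aligned          (0x12000, 0x1000) == 1
 *   aligned          (0x12345, 0x1000) == 0
 */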

static void
paging_write_l0_entry(union armv8_ttable_entry *l0_table, lvaddr_t va, union armv8_ttable_entry l0)
{
    assert(l0_table);

    l0_table[VMSAv8_64_L0_BASE(va)] = l0;
}

static void
paging_write_l1_entry(union armv8_ttable_entry *l1_table, lvaddr_t va, union armv8_ttable_entry l1)
{
    assert(l1_table);

    l1_table[VMSAv8_64_L1_BASE(va)] = l1;
}

static void
paging_write_l2_entry(union armv8_ttable_entry *l2_table, lvaddr_t va, union armv8_ttable_entry l2)
{
    assert(l2_table);
    l2_table[VMSAv8_64_L2_BASE(va)] = l2;
}

static void
paging_write_l3_entry(union armv8_ttable_entry *l3_table, lvaddr_t va, union armv8_ttable_entry l3)
{
    assert(l3_table);
    l3_table[VMSAv8_64_L3_BASE(va)] = l3;
}

// ------------------------------------------------------------------------
// Exported functions


/**
 * \brief Return whether we have enabled the MMU. Useful for
 * initialization assertions.
 */
bool paging_mmu_enabled(void)
{
    return true;
}
101
102void paging_map_kernel_section(union armv8_ttable_entry *ttbase, lvaddr_t va, lpaddr_t pa)
103{
104    union armv8_ttable_entry l2;
105
106    l2.raw = 0;
107    l2.block_l2.valid = 1;
108    l2.block_l2.mb0 = 0;
109    l2.block_l2.af = 1;
110    l2.block_l2.base = pa >> 21u;
111    paging_write_l2_entry(ttbase, va, l2);
112}
113
114void paging_map_kernel_l1_block(union armv8_ttable_entry *ttbase, lvaddr_t va, lpaddr_t pa)
115{
116    union armv8_ttable_entry l1;
117
118    l1.raw = 0;
119    l1.block_l1.valid = 1;
120    l1.block_l1.mb0 = 0;
121    l1.block_l1.af = 1;
122    l1.block_l1.base = pa >> 30u;
123    paging_write_l1_entry(ttbase, va, l1);
124}
125
126void paging_map_memory(union armv8_ttable_entry *ttbase, lpaddr_t paddr, size_t bytes)
127{
128    lpaddr_t pend  = paging_round_up(paddr + bytes, VMSAv8_64_L1_BLOCK_SIZE);
129    while (paddr < pend) {
130        paging_map_kernel_l1_block(ttbase,
131                (paddr + MEMORY_OFFSET) - KERNEL_OFFSET,
132                paddr);
133        paddr += VMSAv8_64_L1_BLOCK_SIZE;
134    }
135}
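
/*
 * Sketch of what the loop above does (illustration only): with a 1 GB L1
 * block size, a request such as
 *
 *   paging_map_memory(ttbase, 0x80000000, 0x60000000);   // 1.5 GB
 *
 * is rounded up to two block mappings covering [0x80000000, 0x100000000),
 * each placed at the kernel-window virtual address
 * (paddr + MEMORY_OFFSET) - KERNEL_OFFSET.
 */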

lvaddr_t paging_map_device(lpaddr_t device_base, size_t device_bytes)
{
    // TODO: Implement
    if (device_base < KERNEL_OFFSET) {
        return device_base + KERNEL_OFFSET;
    } else {
        return device_base;
    }
}

void paging_map_table_l0(union armv8_ttable_entry *table_base, lvaddr_t va, lpaddr_t pa)
{
    assert(aligned((uintptr_t)table_base, VMSAv8_64_PTABLE_SIZE));
    assert(aligned(pa, VMSAv8_64_PTABLE_SIZE));

    union armv8_ttable_entry e;

    e.raw = 0;
    e.d.valid = 1;
    e.d.mb1 = 1;
    e.d.base = (pa >> BASE_PAGE_BITS);

    paging_write_l0_entry(table_base, va, e);
}

void paging_map_table_l1(union armv8_ttable_entry *table_base, lvaddr_t va, lpaddr_t pa)
{
    assert(aligned((uintptr_t)table_base, VMSAv8_64_PTABLE_SIZE));
    assert(aligned(pa, VMSAv8_64_PTABLE_SIZE));

    union armv8_ttable_entry e;

    e.raw = 0;
    e.d.valid = 1;
    e.d.mb1 = 1;
    e.d.base = (pa >> BASE_PAGE_BITS);

    paging_write_l1_entry(table_base, va, e);
}

void paging_map_block_l1(union armv8_ttable_entry *table_base, lvaddr_t va, lpaddr_t pa, uintptr_t flags)
{
    assert(aligned((uintptr_t)table_base, VMSAv8_64_PTABLE_SIZE));
    assert(aligned(pa, VMSAv8_64_PTABLE_SIZE));

    union armv8_ttable_entry e;

    e.raw = flags;
    e.block_l1.valid = 1;
    e.block_l1.mb0 = 0;
    e.block_l1.af = 1;
    e.block_l1.base = (pa >> BASE_PAGE_BITS);

    paging_write_l1_entry(table_base, va, e);
}


void paging_map_table_l2(union armv8_ttable_entry *table_base, lvaddr_t va, lpaddr_t pa)
{
    assert(aligned((uintptr_t)table_base, VMSAv8_64_PTABLE_SIZE));
    assert(aligned(pa, VMSAv8_64_PTABLE_SIZE));
    assert(pa);

    union armv8_ttable_entry e;

    e.raw = 0;
    e.d.valid = 1;
    e.d.mb1 = 1;
    e.d.base = (pa >> BASE_PAGE_BITS);

    paging_write_l2_entry(table_base, va, e);
}

void paging_map_block_l2(union armv8_ttable_entry *table_base, lvaddr_t va, lpaddr_t pa, uintptr_t flags)
{
    assert(aligned((uintptr_t)table_base, VMSAv8_64_PTABLE_SIZE));
    assert(aligned(pa, VMSAv8_64_PTABLE_SIZE));

    union armv8_ttable_entry e;

    e.raw = 0;
    e.block_l2.valid = 1;
    e.block_l2.mb0 = 0;
    e.block_l2.af = 1;
    e.block_l2.base = (pa >> BASE_PAGE_BITS);

    paging_write_l2_entry(table_base, va, e);
}


void paging_map_page_l3(union armv8_ttable_entry *table_base, lvaddr_t va, lpaddr_t pa, uintptr_t flags)
{
    assert(aligned((uintptr_t)table_base, VMSAv8_64_PTABLE_SIZE));
    assert(aligned(pa, VMSAv8_64_PTABLE_SIZE));

    assert(0 == (flags & 0xfffff000));
    assert(0 == (flags & 0x3));
    assert(0 == (pa & 0xfff));

    union armv8_ttable_entry e;
    e.raw = flags;

    e.page.valid = 1;
    e.page.mb1 = 1;
    e.page.af = 1;
    e.page.base = (pa >> BASE_PAGE_BITS);

    paging_write_l3_entry(table_base, va, e);
}
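
/*
 * Putting the pieces together (sketch under assumptions, not kernel code):
 * assuming the table frames `l0`..`l3` below are already allocated and
 * zeroed, and that mem_to_local_phys() is available as the inverse of
 * local_phys_to_mem(), mapping one 4K page at `va` chains the four levels
 * top-down:
 *
 *   paging_map_table_l0(l0, va, mem_to_local_phys((lvaddr_t)l1));
 *   paging_map_table_l1(l1, va, mem_to_local_phys((lvaddr_t)l2));
 *   paging_map_table_l2(l2, va, mem_to_local_phys((lvaddr_t)l3));
 *   paging_map_page_l3(l3, va, pa, flags);
 */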

void paging_set_l3_entry(union armv8_ttable_entry *l3_entry, lpaddr_t pa, uintptr_t flags)
{
    assert(0 == (flags & 0xfffff000));
    assert(0 == (flags & 0x3));
    assert(0 == (pa & 0xfff));

    union armv8_ttable_entry e;
    e.raw = flags;

    e.page.valid = 1;
    e.page.mb1 = 1;
    e.page.af = 1;
    e.page.base = (pa >> BASE_PAGE_BITS);

    *l3_entry = e;
}

static void
paging_set_flags(union armv8_ttable_entry *entry, uintptr_t kpi_paging_flags)
{
    entry->page.sh = 3;     // inner shareable

    // Select the memory attribute index: MAIR slot 1 is used here for
    // uncached (nocache/device) mappings, slot 0 for normal cached memory.
    if (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE) {
        entry->page.attrindex = 1;
    } else {
        entry->page.attrindex = 0;
    }

    // Access permissions: EL0 read/write, EL0 read-only, or kernel-only.
    if (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE) {
        entry->page.ap = 1;
    } else if (kpi_paging_flags & KPI_PAGING_FLAGS_READ) {
        entry->page.ap = 3;
    } else {
        entry->page.ap = 0;
    }

    entry->page.af = 1;     // pre-set the access flag to avoid a fault on first use
}
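
/*
 * Flag translation example (illustration only): a mapping requested with
 * KPI_PAGING_FLAGS_READ | KPI_PAGING_FLAGS_WRITE ends up with ap == 1
 * (EL0 read/write), attrindex == 0 (normal cached memory), sh == 3 and
 * af == 1; the WRITE bit takes precedence over READ in the chain above.
 */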

static errval_t
caps_map_l0(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    //
    // Note:
    //
    // We have a chicken-and-egg problem in initializing resources, so
    // instead of treating an L3 table as its actual 1K size, we treat
    // it as being 4K. As a result, when we map an "L3" table we actually
    // map a page of memory as if it were 4 consecutive L3 tables.
    //
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    //

    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
        printf("slot = %"PRIuCSLOT"\n", slot);
        panic("oops: slot id >= %d", VMSAv8_64_PTABLE_NUM_ENTRIES);
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count != 1) {
        printf("pte_count = %zu\n", (size_t)pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_AARCH64_l1) {
        char buf[128];
        sprint_cap(buf, 128, src);
        printf("src: %s\n", buf);
        panic("oops: l0 wrong src type");
        return SYS_ERR_WRONG_MAPPING;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union armv8_ttable_entry* entry = (union armv8_ttable_entry*) dest_lvaddr + slot;

    // Source
    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr);

    assert(offset == 0);
    assert(aligned(src_lpaddr, 1u << 12));
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 32));

    if (entry->d.valid) {
        // cleanup mapping info
        debug(SUBSYS_PAGING, "slot in use\n");
        return SYS_ERR_VNODE_SLOT_INUSE;
    }

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    entry->raw = 0;
    entry->d.valid = 1;
    entry->d.mb1 = 1;
    entry->d.base = (src_lpaddr) >> 12;
    debug(SUBSYS_PAGING, "L0 mapping %"PRIuCSLOT". @%p = %08"PRIx64"\n",
              slot, entry, entry->raw);

    sysreg_invalidate_tlb();

    return SYS_ERR_OK;
}

static errval_t
caps_map_l1(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    //
    // Note:
    //
    // We have a chicken-and-egg problem in initializing resources, so
    // instead of treating an L3 table as its actual 1K size, we treat
    // it as being 4K. As a result, when we map an "L3" table we actually
    // map a page of memory as if it were 4 consecutive L3 tables.
    //
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    //

    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
        printf("slot = %"PRIuCSLOT"\n", slot);
        panic("oops: slot id >= %d", VMSAv8_64_PTABLE_NUM_ENTRIES);
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count != 1) {
        printf("pte_count = %zu\n", (size_t)pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_AARCH64_l2) {
        panic("oops: l1 wrong src type");
        return SYS_ERR_WRONG_MAPPING;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union armv8_ttable_entry* entry = (union armv8_ttable_entry*) dest_lvaddr + slot;

    // Source
    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr);

    assert(offset == 0);
    assert(aligned(src_lpaddr, 1u << 12));
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 32));

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    entry->raw = 0;
    entry->d.valid = 1;
    entry->d.mb1 = 1;
    entry->d.base = (src_lpaddr) >> 12;
    debug(SUBSYS_PAGING, "L1 mapping %"PRIuCSLOT". @%p = %08"PRIx64"\n",
              slot, entry, entry->raw);

    sysreg_invalidate_tlb();

    return SYS_ERR_OK;
}

static errval_t
caps_map_l2(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    //
    // Note:
    //
    // We have a chicken-and-egg problem in initializing resources, so
    // instead of treating an L3 table as its actual 1K size, we treat
    // it as being 4K. As a result, when we map an "L3" table we actually
    // map a page of memory as if it were 4 consecutive L3 tables.
    //
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    //
    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
        printf("slot = %"PRIuCSLOT"\n", slot);
        panic("oops: slot id >= %d", VMSAv8_64_PTABLE_NUM_ENTRIES);
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count != 1) {
        printf("pte_count = %zu\n", (size_t)pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_AARCH64_l3) {
        panic("oops: l2 wrong src type");
        return SYS_ERR_WRONG_MAPPING;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union armv8_ttable_entry* entry = (union armv8_ttable_entry*) dest_lvaddr + slot;

    // Source
    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr);

    assert(offset == 0);
    assert(aligned(src_lpaddr, 1u << 12));
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 4096));

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    entry->raw = 0;
    entry->d.valid = 1;
    entry->d.mb1 = 1;
    entry->d.base = (src_lpaddr) >> 12;
    debug(SUBSYS_PAGING, "L2 mapping %"PRIuCSLOT". @%p = %08"PRIx64"\n",
              slot, entry, entry->raw);

    sysreg_invalidate_tlb();

    return SYS_ERR_OK;
}


static errval_t
caps_map_l3(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    // A VMSAv8-64 L3 table with 4K granule has 512 8-byte entries and
    // fills exactly one 4K page.
    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
        panic("oops: slot >= %d", VMSAv8_64_PTABLE_NUM_ENTRIES);
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
        panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
        return SYS_ERR_WRONG_MAPPING;
    }

    // check offset within frame
    if ((offset + BASE_PAGE_SIZE > get_size(src)) ||
        ((offset % BASE_PAGE_SIZE) != 0)) {
        panic("oops: frame offset invalid");
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }

    // check that the mapping does not overrun the leaf page table
    if (slot + pte_count > VMSAv8_64_PTABLE_NUM_ENTRIES) {
        return SYS_ERR_VM_MAP_SIZE;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union armv8_ttable_entry *entry = (union armv8_ttable_entry *)dest_lvaddr + slot;
    if (entry->page.valid) {
        panic("Remapping valid page.");
    }

    lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
    if ((src_lpaddr & (BASE_PAGE_SIZE - 1))) {
        panic("Invalid target");
    }

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    for (uintptr_t i = 0; i < pte_count; i++) {
        entry->raw = 0;

        entry->page.valid = 1;
        entry->page.mb1 = 1;
        paging_set_flags(entry, kpi_paging_flags);
        entry->page.base = (src_lpaddr + i * BASE_PAGE_SIZE) >> 12;

        debug(SUBSYS_PAGING, "L3 mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx64"\n",
               dest_lvaddr, slot, entry, entry->raw);

        entry++;
    }

    // Flush the TLB so stale translations for these slots are dropped.
    sysreg_invalidate_tlb();

    return SYS_ERR_OK;
}

typedef errval_t (*mapping_handler_t)(struct capability *dest_cap,
                                      cslot_t dest_slot,
                                      struct capability *src_cap,
                                      uintptr_t flags, uintptr_t offset,
                                      size_t pte_count,
                                      struct cte *mapping_cte);

/// Dispatcher table for the type of mapping to create
static mapping_handler_t handler[ObjType_Num] = {
        [ObjType_VNode_AARCH64_l0]   = caps_map_l0,
        [ObjType_VNode_AARCH64_l1]   = caps_map_l1,
        [ObjType_VNode_AARCH64_l2]   = caps_map_l2,
        [ObjType_VNode_AARCH64_l3]   = caps_map_l3,
};
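
/*
 * Dispatch sketch (illustration only): the destination capability's type
 * selects the handler, so a caller never names a table level explicitly.
 * For example, copying a frame capability into an L3 vnode ends up in
 * caps_map_l3:
 *
 *   mapping_handler_t h = handler[ObjType_VNode_AARCH64_l3]; // caps_map_l3
 *   errval_t err = h(dest_cap, slot, src_cap, flags, offset, 1, mapping_cte);
 */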

/// Create page mappings
errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
                            struct cte *src_cte, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
{
    struct capability *src_cap  = &src_cte->cap;
    struct capability *dest_cap = &dest_vnode_cte->cap;
    assert(mapping_cte->cap.type == ObjType_Null);
    mapping_handler_t handler_func = handler[dest_cap->type];

    assert(handler_func != NULL);

    errval_t err;

    err = handler_func(dest_cap, dest_slot, src_cap, flags, offset, pte_count,
            mapping_cte);

    if (err_is_fail(err)) {
        assert(mapping_cte->cap.type == ObjType_Null);
        debug(SUBSYS_PAGING,
                "caps_copy_to_vnode: handler func returned %"PRIuERRV"\n", err);
        return err;
    }

    /* insert mapping cap into mdb */
    err = mdb_insert(mapping_cte);
    if (err_is_fail(err)) {
        printk(LOG_ERR, "%s: mdb_insert: %"PRIuERRV"\n", __FUNCTION__, err);
    }

    TRACE_CAP_MSG("created", mapping_cte);

    return err;
}

size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
{
    size_t unmapped_pages = 0;
    union armv8_ttable_entry *ptentry = (union armv8_ttable_entry *)pt + slot;
    for (size_t i = 0; i < num_pages; i++) {
        // Zeroing the raw entry clears the valid bit, unmapping the page.
        ptentry->raw = 0;
        ptentry++;
        unmapped_pages++;
    }
    return unmapped_pages;
}
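
/*
 * Usage sketch (illustration only): do_unmap only clears the entries; the
 * caller is responsible for invalidating the TLB afterwards, e.g.:
 *
 *   size_t n = do_unmap(pt_lvaddr, slot, num_pages);
 *   sysreg_invalidate_tlb();
 */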

static inline void read_pt_entry(struct capability *pgtable, size_t slot, genpaddr_t *paddr)
{
    assert(type_is_vnode(pgtable->type));
    assert(paddr);

    genpaddr_t gp = get_address(pgtable);
    lpaddr_t lp = gen_phys_to_local_phys(gp);
    lvaddr_t lv = local_phys_to_mem(lp);

    // Index the requested slot; the table-descriptor layout is identical
    // for L0-L2, only L3 uses the page-descriptor layout.
    union armv8_ttable_entry *e = (union armv8_ttable_entry *)lv + slot;

    switch (pgtable->type) {
        case ObjType_VNode_AARCH64_l0:
        case ObjType_VNode_AARCH64_l1:
        case ObjType_VNode_AARCH64_l2:
            *paddr = (genpaddr_t)(e->d.base) << 12;
            return;
        case ObjType_VNode_AARCH64_l3:
            *paddr = (genpaddr_t)(e->page.base) << 12;
            return;
        default:
            assert(!"Should not get here");
    }
}

errval_t paging_modify_flags(struct capability *mapping, uintptr_t offset,
                             uintptr_t pages, uintptr_t kpi_paging_flags)
{
    assert(type_is_mapping(mapping->type));
    struct Frame_Mapping *info = &mapping->u.frame_mapping;

    // check flags
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    /* Calculate the location of the page table entries we need to modify */
    lvaddr_t base = local_phys_to_mem(get_address(&info->ptable->cap)) +
        (info->entry + offset) * sizeof(union armv8_ttable_entry);

    for (uintptr_t i = 0; i < pages; i++) {
        union armv8_ttable_entry *entry =
            (union armv8_ttable_entry *)base + i;
        paging_set_flags(entry, kpi_paging_flags);
    }

    return paging_tlb_flush_range(cte_for_cap(mapping), 0, pages);
}

errval_t paging_copy_remap(struct cte *dest_vnode_cte, cslot_t dest_slot,
                           struct cte *src_cte, uintptr_t flags,
                           uintptr_t offset, uintptr_t pte_count,
                           struct cte *mapping_cte)
{
    printk(LOG_ERR, "%s called on ARMv8: NYI!\n", __FUNCTION__);
    return SYS_ERR_NOT_IMPLEMENTED;
}

bool paging_is_region_valid(lvaddr_t buffer, size_t size, uint8_t type)
{
    // XXX: ARMv8 does not seem to have ro mappings? -SG,2018-10-25.
    // this currently ignores type
    lvaddr_t root_pt = local_phys_to_mem(dcb_current->vspace);

    // Walk the region [buffer, buffer+size); `end` is the last byte that
    // must be mapped, so all index bounds below are inclusive.
    lvaddr_t end = buffer + size - 1;

    uint16_t first_l0idx = VMSAv8_64_L0_BASE(buffer);
    uint16_t last_l0idx  = VMSAv8_64_L0_BASE(end);

    uint16_t first_l1idx, first_l2idx, first_l3idx;
    uint16_t last_l1idx, last_l2idx, last_l3idx;

    union armv8_ttable_entry *pte;

    for (uint16_t l0idx = first_l0idx; l0idx <= last_l0idx; l0idx++) {
        pte = (union armv8_ttable_entry *)root_pt + l0idx;
        if (!pte->d.valid) { return false; }
        // calculate which part of the l1 table to check
        first_l1idx = l0idx == first_l0idx ? VMSAv8_64_L1_BASE(buffer) : 0;
        last_l1idx  = l0idx == last_l0idx  ? VMSAv8_64_L1_BASE(end) : PTABLE_ENTRIES - 1;
        // read l1 table base
        lvaddr_t l1_pt = local_phys_to_mem((genpaddr_t)pte->d.base << BASE_PAGE_BITS);
        for (uint16_t l1idx = first_l1idx; l1idx <= last_l1idx; l1idx++) {
            pte = (union armv8_ttable_entry *)l1_pt + l1idx;
            if (!pte->d.valid) { return false; }
            // calculate which part of the l2 table to check
            first_l2idx = l1idx == first_l1idx ? VMSAv8_64_L2_BASE(buffer) : 0;
            last_l2idx  = l1idx == last_l1idx  ? VMSAv8_64_L2_BASE(end) : PTABLE_ENTRIES - 1;
            // read l2 table base
            lvaddr_t l2_pt = local_phys_to_mem((genpaddr_t)pte->d.base << BASE_PAGE_BITS);
            for (uint16_t l2idx = first_l2idx; l2idx <= last_l2idx; l2idx++) {
                pte = (union armv8_ttable_entry *)l2_pt + l2idx;
                if (!pte->d.valid) { return false; }
                // calculate which part of the l3 table to check
                first_l3idx = l2idx == first_l2idx ? VMSAv8_64_L3_BASE(buffer) : 0;
                last_l3idx  = l2idx == last_l2idx  ? VMSAv8_64_L3_BASE(end) : PTABLE_ENTRIES - 1;
                // read l3 table base
                lvaddr_t l3_pt = local_phys_to_mem((genpaddr_t)pte->d.base << BASE_PAGE_BITS);
                for (uint16_t l3idx = first_l3idx; l3idx <= last_l3idx; l3idx++) {
                    pte = (union armv8_ttable_entry *)l3_pt + l3idx;
                    if (!pte->page.valid) { return false; }
                }
            }
        }
    }
    // if we never bailed early, the access is fine.
    return true;
}
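
/*
 * Example (illustration only): for a two-page buffer starting at virtual
 * address 0x...001000 with a 4K granule, the l0/l1/l2 indices are the same
 * for both pages, so the walk touches one entry per upper level and two L3
 * entries (indices 1 and 2), bailing out at the first invalid descriptor.
 */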

void paging_dump_tables(struct dcb *dispatcher)
{
    if (!local_phys_is_valid(dispatcher->vspace)) {
        printk(LOG_ERR, "dispatcher->vspace = 0x%"PRIxLPADDR": too high!\n",
               dispatcher->vspace);
        return;
    }
    lvaddr_t l0 = local_phys_to_mem(dispatcher->vspace);

    for (int l0_index = 0; l0_index < VMSAv8_64_PTABLE_NUM_ENTRIES; l0_index++) {
        // get level 0 table
        union armv8_ttable_entry *l0_e = (union armv8_ttable_entry *) l0 + l0_index;
        if (!l0_e->raw) {
            continue;
        }
        genpaddr_t l1_gp = (genpaddr_t)(l0_e->d.base) << BASE_PAGE_BITS;
        lvaddr_t l1 = local_phys_to_mem(gen_phys_to_local_phys(l1_gp));
        printf("l0 %d -> %p\n", l0_index, l1);

        for (int l1_index = 0; l1_index < VMSAv8_64_PTABLE_NUM_ENTRIES; l1_index++) {
            // get level 1 table
            union armv8_ttable_entry *l1_e = (union armv8_ttable_entry *)l1 + l1_index;
            if (!l1_e->raw) { continue; }
            genpaddr_t l2_gp = (genpaddr_t)(l1_e->d.base) << BASE_PAGE_BITS;
            lvaddr_t l2 = local_phys_to_mem(gen_phys_to_local_phys(l2_gp));
            printf("  l1 %d -> %p\n", l1_index, l2);

            for (int l2_index = 0; l2_index < VMSAv8_64_PTABLE_NUM_ENTRIES; l2_index++) {
                // get level 2 table
                union armv8_ttable_entry *l2_e = (union armv8_ttable_entry *)l2 + l2_index;
                if (!l2_e->raw) { continue; }
                genpaddr_t l3_gp = (genpaddr_t)(l2_e->d.base) << BASE_PAGE_BITS;
                lvaddr_t l3 = local_phys_to_mem(gen_phys_to_local_phys(l3_gp));
                printf("    l2 %d -> %p\n", l2_index, l3);

                for (int entry = 0; entry < VMSAv8_64_PTABLE_NUM_ENTRIES; entry++) {
                    union armv8_ttable_entry *e =
                        (union armv8_ttable_entry *)l3 + entry;
                    // skip invalid leaf entries, rather than entries that
                    // happen to map physical address 0
                    if (!e->page.valid) {
                        continue;
                    }
                    genpaddr_t paddr = (genpaddr_t)(e->page.base) << BASE_PAGE_BITS;
                    printf("%d.%d.%d.%d: 0x%"PRIxGENPADDR"\n", l0_index, l1_index, l2_index, entry, paddr);
                }
            }
        }
    }
}

/* XXX - rewrite this. */
void paging_context_switch(lpaddr_t ttbr)
{
    assert(ttbr < MEMORY_OFFSET);
    //assert((ttbr & 0x3fff) == 0);

    lpaddr_t old_ttbr = armv8_TTBR0_EL1_rd(NULL);
    if (ttbr != old_ttbr)
    {
        armv8_TTBR0_EL1_wr(NULL, ttbr);
        sysreg_invalidate_tlb();
        // This isn't necessary on gem5, since gem5 doesn't implement the
        // cache maintenance instructions, but ensures coherency by itself.
        //sysreg_invalidate_i_and_d_caches();
    }
}