/*
 * Copyright (c) 2009-2012,2015,2016 ETH Zurich.
 * Copyright (c) 2015, Hewlett Packard Enterprise Development LP.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <kernel.h>
#include <dispatch.h>
#include <sysreg.h>
#include <paging_kernel_arch.h>
#include <string.h>
#include <exceptions.h>
#include <arm_hal.h>
#include <cap_predicates.h>
#include <mdb/mdb_tree.h>
#include <dev/armv8_dev.h>

// ------------------------------------------------------------------------
// Utility declarations

inline static uintptr_t paging_round_down(uintptr_t address, uintptr_t size)
{
    return address & ~(size - 1);
}

inline static uintptr_t paging_round_up(uintptr_t address, uintptr_t size)
{
    return (address + size - 1) & ~(size - 1);
}

inline static int aligned(uintptr_t address, uintptr_t bytes)
{
    return (address & (bytes - 1)) == 0;
}
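
/*
 * Worked example (illustrative values): with a 4 KiB granule,
 *   paging_round_down(0x12345, BASE_PAGE_SIZE) == 0x12000
 *   paging_round_up  (0x12345, BASE_PAGE_SIZE) == 0x13000
 *   aligned          (0x12000, BASE_PAGE_SIZE) == 1
 * All three helpers assume the size argument is a power of two.
 */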

static void
paging_write_l0_entry(union armv8_ttable_entry *l0_table, lvaddr_t va, union armv8_ttable_entry l0)
{
    assert(l0_table);
    l0_table[VMSAv8_64_L0_BASE(va)] = l0;
}

static void
paging_write_l1_entry(union armv8_ttable_entry *l1_table, lvaddr_t va, union armv8_ttable_entry l1)
{
    assert(l1_table);
    l1_table[VMSAv8_64_L1_BASE(va)] = l1;
}

static void
paging_write_l2_entry(union armv8_ttable_entry *l2_table, lvaddr_t va, union armv8_ttable_entry l2)
{
    assert(l2_table);
    l2_table[VMSAv8_64_L2_BASE(va)] = l2;
}

static void
paging_write_l3_entry(union armv8_ttable_entry *l3_table, lvaddr_t va, union armv8_ttable_entry l3)
{
    assert(l3_table);
    l3_table[VMSAv8_64_L3_BASE(va)] = l3;
}

// ------------------------------------------------------------------------
// Exported functions


/**
 * \brief Return whether we have enabled the MMU. Useful for
 * initialization assertions.
 */
bool paging_mmu_enabled(void)
{
    return true;
}

/* Install a 2 MB L2 block mapping for the kernel (base = pa >> 21). */
void paging_map_kernel_section(union armv8_ttable_entry *ttbase, lvaddr_t va, lpaddr_t pa)
{
    union armv8_ttable_entry l2;

    l2.raw = 0;
    l2.block_l2.valid = 1;
    l2.block_l2.mb0 = 0;
    l2.block_l2.af = 1;
    l2.block_l2.base = pa >> 21u;
    paging_write_l2_entry(ttbase, va, l2);
}

/* Install a 1 GB L1 block mapping for the kernel (base = pa >> 30). */
void paging_map_kernel_l1_block(union armv8_ttable_entry *ttbase, lvaddr_t va, lpaddr_t pa)
{
    union armv8_ttable_entry l1;

    l1.raw = 0;
    l1.block_l1.valid = 1;
    l1.block_l1.mb0 = 0;
    l1.block_l1.af = 1;
    l1.block_l1.base = pa >> 30u;
    paging_write_l1_entry(ttbase, va, l1);
}

/* Map the physical range [paddr, paddr + bytes) into the kernel window
 * using 1 GB L1 block mappings. */
void paging_map_memory(union armv8_ttable_entry *ttbase, lpaddr_t paddr, size_t bytes)
{
    lpaddr_t pend = paging_round_up(paddr + bytes, VMSAv8_64_L1_BLOCK_SIZE);
    while (paddr < pend) {
        paging_map_kernel_l1_block(ttbase,
                (paddr + MEMORY_OFFSET) - KERNEL_OFFSET,
                paddr);
        paddr += VMSAv8_64_L1_BLOCK_SIZE;
    }
}
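
/*
 * Illustrative call (hypothetical table name and addresses): mapping the
 * first 2 GiB of RAM into the kernel window could look like
 *
 *   paging_map_memory(kernel_l1_table, 0x80000000UL, 0x80000000UL);
 *
 * which installs two 1 GiB L1 block mappings, one per
 * VMSAv8_64_L1_BLOCK_SIZE step of the range.
 */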

lvaddr_t paging_map_device(lpaddr_t device_base, size_t device_bytes)
{
    // TODO: Implement
    if (device_base < KERNEL_OFFSET) {
        return device_base + KERNEL_OFFSET;
    } else {
        return device_base;
    }
}

void paging_map_table_l0(union armv8_ttable_entry *table_base, lvaddr_t va, lpaddr_t pa)
{
    assert(aligned((uintptr_t)table_base, VMSAv8_64_PTABLE_SIZE));
    assert(aligned(pa, VMSAv8_64_PTABLE_SIZE));

    union armv8_ttable_entry e;

    e.raw = 0;
    e.d.valid = 1;
    e.d.mb1 = 1;
    e.d.base = (pa >> BASE_PAGE_BITS);

    paging_write_l0_entry(table_base, va, e);
}

void paging_map_table_l1(union armv8_ttable_entry *table_base, lvaddr_t va, lpaddr_t pa)
{
    assert(aligned((uintptr_t)table_base, VMSAv8_64_PTABLE_SIZE));
    assert(aligned(pa, VMSAv8_64_PTABLE_SIZE));

    union armv8_ttable_entry e;

    e.raw = 0;
    e.d.valid = 1;
    e.d.mb1 = 1;
    e.d.base = (pa >> BASE_PAGE_BITS);

    paging_write_l1_entry(table_base, va, e);
}

void paging_map_block_l1(union armv8_ttable_entry *table_base, lvaddr_t va, lpaddr_t pa, uintptr_t flags)
{
    assert(aligned((uintptr_t)table_base, VMSAv8_64_PTABLE_SIZE));
    assert(aligned(pa, VMSAv8_64_PTABLE_SIZE));

    union armv8_ttable_entry e;

    e.raw = flags;
    e.block_l1.valid = 1;
    e.block_l1.mb0 = 0;
    e.block_l1.af = 1;
    e.block_l1.base = (pa >> BASE_PAGE_BITS);

    paging_write_l1_entry(table_base, va, e);
}


void paging_map_table_l2(union armv8_ttable_entry *table_base, lvaddr_t va, lpaddr_t pa)
{
    assert(aligned((uintptr_t)table_base, VMSAv8_64_PTABLE_SIZE));
    assert(aligned(pa, VMSAv8_64_PTABLE_SIZE));
    assert(pa);

    union armv8_ttable_entry e;

    e.raw = 0;
    e.d.valid = 1;
    e.d.mb1 = 1;
    e.d.base = (pa >> BASE_PAGE_BITS);

//    printf("map table l2@%p->l3@%p\n", table_base + VMSAv8_64_L2_BASE(va), pa);

    paging_write_l2_entry(table_base, va, e);
}

void paging_map_block_l2(union armv8_ttable_entry *table_base, lvaddr_t va, lpaddr_t pa, uintptr_t flags)
{
    assert(aligned((uintptr_t)table_base, VMSAv8_64_PTABLE_SIZE));
    assert(aligned(pa, VMSAv8_64_PTABLE_SIZE));

    union armv8_ttable_entry e;

    e.raw = flags;    // apply caller-supplied attribute bits, cf. paging_map_block_l1
    e.block_l2.valid = 1;
    e.block_l2.mb0 = 0;
    e.block_l2.af = 1;
    e.block_l2.base = (pa >> BASE_PAGE_BITS);

    paging_write_l2_entry(table_base, va, e);
}


void paging_map_page_l3(union armv8_ttable_entry *table_base, lvaddr_t va, lpaddr_t pa, uintptr_t flags)
{
    assert(aligned((uintptr_t)table_base, VMSAv8_64_PTABLE_SIZE));
    assert(aligned(pa, VMSAv8_64_PTABLE_SIZE));

    assert(0 == (flags & 0xfffff000));
    assert(0 == (flags & 0x3));
    assert(0 == (pa & 0xfff));

    union armv8_ttable_entry e;
    e.raw = flags;

    e.page.valid = 1;
    e.page.mb1 = 1;
    e.page.af = 1;
    e.page.base = (pa >> BASE_PAGE_BITS);

    paging_write_l3_entry(table_base, va, e);
}

void paging_set_l3_entry(union armv8_ttable_entry *l3_entry, lpaddr_t pa, uintptr_t flags)
{
    assert(0 == (flags & 0xfffff000));
    assert(0 == (flags & 0x3));
    assert(0 == (pa & 0xfff));

    union armv8_ttable_entry e;
    e.raw = flags;

    e.page.valid = 1;
    e.page.mb1 = 1;
    e.page.af = 1;
    e.page.base = (pa >> BASE_PAGE_BITS);

//    printf("Setting l3 entry@%p -> (%p) %p raw: %p\n", l3_entry, e.page.base, e.page.base << BASE_PAGE_BITS, e.raw);

    *l3_entry = e;
}


static void
paging_set_flags(union armv8_ttable_entry *entry, uintptr_t kpi_paging_flags)
{
    entry->page.sh = 3;    // inner shareable
    entry->page.ap = 0;

    if (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE) {
        entry->page.attrindex = 1;
    } else {
        entry->page.attrindex = 0;
    }

    if (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE)
        entry->page.ap = 1;    // read/write at EL0 and EL1
    else if (kpi_paging_flags & KPI_PAGING_FLAGS_READ)
        entry->page.ap = 3;    // read-only at EL0 and EL1
    else
        panic("oops: wrong page flags");

    entry->page.af = 1;    // access flag set: no Access flag fault on first use
}
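
/*
 * Field encodings used by paging_set_flags (ARMv8-A VMSA, stage 1):
 *   sh = 3    -> inner shareable
 *   ap = 1    -> AP[2:1] = 0b01: read/write at EL0 and EL1
 *   ap = 3    -> AP[2:1] = 0b11: read-only at EL0 and EL1
 *   af = 1    -> access flag set, so no Access flag fault is taken
 *   attrindex -> AttrIndx into MAIR_EL1; the code here assumes index 0 is
 *                configured as normal cacheable memory and index 1 as
 *                non-cacheable/device memory.
 */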

static errval_t
caps_map_l0(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    //
    // Note:
    //
    // We have a chicken-and-egg problem in initializing resources, so
    // instead of treating an L3 table as its actual 1 KB size, we treat
    // it as being 4 KB. As a result, when we map an "L3" table we actually
    // map a page of memory as if it were 4 consecutive L3 tables.
    //
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    //

    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
        printf("slot = %"PRIuCSLOT"\n", slot);
        panic("oops: slot id >= %d", VMSAv8_64_PTABLE_NUM_ENTRIES);
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count != 1) {
        printf("pte_count = %zu\n", (size_t)pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_AARCH64_l1) {
        char buf[128];
        sprint_cap(buf, 128, src);
        printf("src: %s\n", buf);
        panic("oops: l0 wrong src type");
        return SYS_ERR_WRONG_MAPPING;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union armv8_ttable_entry* entry = (union armv8_ttable_entry*) dest_lvaddr + slot;

    // Source
    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr);

    assert(offset == 0);
    assert(aligned(src_lpaddr, 1u << 12));
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 32));

    if (entry->d.valid) {
        // cleanup mapping info
        debug(SUBSYS_PAGING, "slot in use\n");
        return SYS_ERR_VNODE_SLOT_INUSE;
    }

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    entry->raw = 0;
    entry->d.valid = 1;
    entry->d.mb1 = 1;
    entry->d.base = (src_lpaddr) >> 12;
    debug(SUBSYS_PAGING, "L0 mapping %"PRIuCSLOT". @%p = %08"PRIx64"\n",
              slot, entry, entry->raw);

    sysreg_invalidate_tlb();

    return SYS_ERR_OK;
}

static errval_t
caps_map_l1(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    //
    // Note:
    //
    // We have a chicken-and-egg problem in initializing resources, so
    // instead of treating an L3 table as its actual 1 KB size, we treat
    // it as being 4 KB. As a result, when we map an "L3" table we actually
    // map a page of memory as if it were 4 consecutive L3 tables.
    //
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    //

    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
        printf("slot = %"PRIuCSLOT"\n", slot);
        panic("oops: slot id >= %d", VMSAv8_64_PTABLE_NUM_ENTRIES);
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count != 1) {
        printf("pte_count = %zu\n", (size_t)pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_AARCH64_l2) {
        panic("oops: l1 wrong src type");
        return SYS_ERR_WRONG_MAPPING;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union armv8_ttable_entry* entry = (union armv8_ttable_entry*) dest_lvaddr + slot;

    // Source
    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr);

    assert(offset == 0);
    assert(aligned(src_lpaddr, 1u << 12));
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 32));

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    entry->raw = 0;
    entry->d.valid = 1;
    entry->d.mb1 = 1;
    entry->d.base = (src_lpaddr) >> 12;
    debug(SUBSYS_PAGING, "L1 mapping %"PRIuCSLOT". @%p = %08"PRIx64"\n",
              slot, entry, entry->raw);

    sysreg_invalidate_tlb();

    return SYS_ERR_OK;
}

static errval_t
caps_map_l2(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    //
    // Note:
    //
    // We have a chicken-and-egg problem in initializing resources, so
    // instead of treating an L3 table as its actual 1 KB size, we treat
    // it as being 4 KB. As a result, when we map an "L3" table we actually
    // map a page of memory as if it were 4 consecutive L3 tables.
    //
    // See lib/barrelfish/arch/arm/pmap_arch.c for more discussion.
    //
    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
        printf("slot = %"PRIuCSLOT"\n", slot);
        panic("oops: slot id >= %d", VMSAv8_64_PTABLE_NUM_ENTRIES);
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (pte_count != 1) {
        printf("pte_count = %zu\n", (size_t)pte_count);
        panic("oops: pte_count");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_VNode_AARCH64_l3) {
        panic("oops: l2 wrong src type");
        return SYS_ERR_WRONG_MAPPING;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union armv8_ttable_entry* entry = (union armv8_ttable_entry*) dest_lvaddr + slot;

    // Source
    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr);

    assert(offset == 0);
    assert(aligned(src_lpaddr, 1u << 12));
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 4096));

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    entry->raw = 0;
    entry->d.valid = 1;
    entry->d.mb1 = 1;
    entry->d.base = (src_lpaddr) >> 12;
    debug(SUBSYS_PAGING, "L2 mapping %"PRIuCSLOT". @%p = %08"PRIx64"\n",
              slot, entry, entry->raw);

    sysreg_invalidate_tlb();

    return SYS_ERR_OK;
}


static errval_t
caps_map_l3(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    // A VMSAv8-64 page table holds VMSAv8_64_PTABLE_NUM_ENTRIES (512)
    // eight-byte entries, so the slot must fit.
    if (slot >= VMSAv8_64_PTABLE_NUM_ENTRIES) {
        panic("oops: slot >= %d", VMSAv8_64_PTABLE_NUM_ENTRIES);
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
        panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
        return SYS_ERR_WRONG_MAPPING;
    }

    // check offset within frame
    if ((offset + BASE_PAGE_SIZE > get_size(src)) ||
        ((offset % BASE_PAGE_SIZE) != 0)) {
        panic("oops: frame offset invalid");
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }

    // check mapping does not overlap leaf page table
    if (slot + pte_count > VMSAv8_64_PTABLE_NUM_ENTRIES) {
        return SYS_ERR_VM_MAP_SIZE;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union armv8_ttable_entry *entry = (union armv8_ttable_entry *)dest_lvaddr + slot;
    if (entry->page.valid) {
        panic("Remapping valid page.");
    }

    lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
    if ((src_lpaddr & (BASE_PAGE_SIZE - 1))) {
        panic("Invalid target");
    }

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    for (size_t i = 0; i < pte_count; i++) {
        entry->raw = 0;

        entry->page.valid = 1;
        entry->page.mb1 = 1;
        paging_set_flags(entry, kpi_paging_flags);
        entry->page.base = (src_lpaddr + i * BASE_PAGE_SIZE) >> 12;

        debug(SUBSYS_PAGING, "L3 mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx64"\n",
               dest_lvaddr, slot, entry, entry->raw);

        entry++;
    }

    // Flush TLB if remapping.
    sysreg_invalidate_tlb();

    return SYS_ERR_OK;
}

typedef errval_t (*mapping_handler_t)(struct capability *dest_cap,
                                      cslot_t dest_slot,
                                      struct capability *src_cap,
                                      uintptr_t flags, uintptr_t offset,
                                      size_t pte_count,
                                      struct cte *mapping_cte);

/// Dispatcher table for the type of mapping to create
static mapping_handler_t handler[ObjType_Num] = {
        [ObjType_VNode_AARCH64_l0]   = caps_map_l0,
        [ObjType_VNode_AARCH64_l1]   = caps_map_l1,
        [ObjType_VNode_AARCH64_l2]   = caps_map_l2,
        [ObjType_VNode_AARCH64_l3]   = caps_map_l3,
};
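
/*
 * Example (illustrative): mapping a frame cap into an L3 page table
 * dispatches through this table, roughly as
 *
 *   mapping_handler_t h = handler[ObjType_VNode_AARCH64_l3]; // caps_map_l3
 *   errval_t err = h(dest_cap, slot, frame_cap, flags, 0, 1, mapping_cte);
 *
 * Capability types without an entry dispatch to NULL, which
 * caps_copy_to_vnode() below asserts against.
 */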


/// Create page mappings
errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
                            struct cte *src_cte, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
{
    struct capability *src_cap  = &src_cte->cap;
    struct capability *dest_cap = &dest_vnode_cte->cap;
    assert(mapping_cte->cap.type == ObjType_Null);
    mapping_handler_t handler_func = handler[dest_cap->type];

    assert(handler_func != NULL);

    errval_t err;

    err = handler_func(dest_cap, dest_slot, src_cap, flags, offset, pte_count,
            mapping_cte);

    if (err_is_fail(err)) {
        assert(mapping_cte->cap.type == ObjType_Null);
        debug(SUBSYS_PAGING,
                "caps_copy_to_vnode: handler func returned %"PRIuERRV"\n", err);
        return err;
    }

    /* insert mapping cap into mdb */
    err = mdb_insert(mapping_cte);
    if (err_is_fail(err)) {
        printk(LOG_ERR, "%s: mdb_insert: %"PRIuERRV"\n", __FUNCTION__, err);
    }

    TRACE_CAP_MSG("created", mapping_cte);

    return err;
}

size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
{
    size_t unmapped_pages = 0;
    union armv8_ttable_entry *ptentry = (union armv8_ttable_entry *)pt + slot;
    for (size_t i = 0; i < num_pages; i++) {
        ptentry++->raw = 0;
        unmapped_pages++;
    }
    return unmapped_pages;
}

static inline void read_pt_entry(struct capability *pgtable, size_t slot, genpaddr_t *paddr)
{
    assert(type_is_vnode(pgtable->type));
    assert(paddr);

    genpaddr_t gp = get_address(pgtable);
    lpaddr_t lp = gen_phys_to_local_phys(gp);
    lvaddr_t lv = local_phys_to_mem(lp);

    // index into the table at the requested slot
    union armv8_ttable_entry *e = (union armv8_ttable_entry *)lv + slot;

    switch (pgtable->type) {
        case ObjType_VNode_AARCH64_l0:
        case ObjType_VNode_AARCH64_l1:
        case ObjType_VNode_AARCH64_l2:
            *paddr = (genpaddr_t) (e->d.base) << 12;
            return;
        case ObjType_VNode_AARCH64_l3:
            *paddr = (genpaddr_t) (e->page.base) << 12;
            return;
        default:
            assert(!"Should not get here");
    }
}

errval_t paging_modify_flags(struct capability *mapping, uintptr_t offset,
                             uintptr_t pages, uintptr_t kpi_paging_flags)
{
    assert(type_is_mapping(mapping->type));
    struct Frame_Mapping *info = &mapping->u.frame_mapping;

    // check flags
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    /* Calculate location of page table entries we need to modify */
    lvaddr_t base = local_phys_to_mem(get_address(&info->ptable->cap)) +
        (info->entry + offset) * sizeof(union armv8_ttable_entry);

    for (size_t i = 0; i < pages; i++) {
        union armv8_ttable_entry *entry =
            (union armv8_ttable_entry *)base + i;
        paging_set_flags(entry, kpi_paging_flags);
    }

    return paging_tlb_flush_range(cte_for_cap(mapping), 0, pages);
}

void paging_dump_tables(struct dcb *dispatcher)
{
    if (!local_phys_is_valid(dispatcher->vspace)) {
        printk(LOG_ERR, "dispatcher->vspace = 0x%"PRIxLPADDR": too high!\n",
               dispatcher->vspace);
        return;
    }
    lvaddr_t l0 = local_phys_to_mem(dispatcher->vspace);

    for (int l0_index = 0; l0_index < VMSAv8_64_PTABLE_NUM_ENTRIES; l0_index++) {
        // get level 0 table
        union armv8_ttable_entry *l0_e = (union armv8_ttable_entry *) l0 + l0_index;
        if (!l0_e->raw) {
            continue;
        }
        genpaddr_t l1_gp = (genpaddr_t)(l0_e->d.base) << BASE_PAGE_BITS;
        lvaddr_t l1 = local_phys_to_mem(gen_phys_to_local_phys(l1_gp));
        printf("l0 %d -> %p\n", l0_index, (void *)l1);

        for (int l1_index = 0; l1_index < VMSAv8_64_PTABLE_NUM_ENTRIES; l1_index++) {
            // get level 1 table
            union armv8_ttable_entry *l1_e = (union armv8_ttable_entry *)l1 + l1_index;
            if (!l1_e->raw) { continue; }
            genpaddr_t l2_gp = (genpaddr_t)(l1_e->d.base) << BASE_PAGE_BITS;
            lvaddr_t l2 = local_phys_to_mem(gen_phys_to_local_phys(l2_gp));
            printf("  l1 %d -> %p\n", l1_index, (void *)l2);

            for (int l2_index = 0; l2_index < VMSAv8_64_PTABLE_NUM_ENTRIES; l2_index++) {
                // get level 2 table
                union armv8_ttable_entry *l2_e = (union armv8_ttable_entry *)l2 + l2_index;
                if (!l2_e->raw) { continue; }
                genpaddr_t l3_gp = (genpaddr_t)(l2_e->d.base) << BASE_PAGE_BITS;
                lvaddr_t l3 = local_phys_to_mem(gen_phys_to_local_phys(l3_gp));
                printf("    l2 %d -> %p\n", l2_index, (void *)l3);

                for (int entry = 0; entry < VMSAv8_64_PTABLE_NUM_ENTRIES; entry++) {
                    union armv8_ttable_entry *e =
                        (union armv8_ttable_entry *)l3 + entry;
                    genpaddr_t paddr = (genpaddr_t)(e->page.base) << BASE_PAGE_BITS;
                    if (!paddr) {
                        continue;
                    }
                    printf("%d.%d.%d.%d: 0x%"PRIxGENPADDR"\n",
                           l0_index, l1_index, l2_index, entry, paddr);
                }
            }
        }
    }
}

/* XXX - rewrite this. */
void paging_context_switch(lpaddr_t ttbr)
{
    assert(ttbr < MEMORY_OFFSET);
    //assert((ttbr & 0x3fff) == 0);

    lpaddr_t old_ttbr = armv8_TTBR0_EL1_rd(NULL);
    if (ttbr != old_ttbr)
    {
        armv8_TTBR0_EL1_wr(NULL, ttbr);
        sysreg_invalidate_tlb();
        // This isn't necessary on gem5, since gem5 doesn't implement the
        // cache maintenance instructions, but instead ensures coherency
        // by itself.
        //sysreg_invalidate_i_and_d_caches();
    }
}