/*
 * Copyright (c) 2009-2013,2016 ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <kernel.h>
#include <dispatch.h>
#include <cache.h>
#include <cp15.h>
#include <paging_kernel_arch.h>
#include <string.h>
#include <exceptions.h>
#include <platform.h>
#include <cap_predicates.h>
#include <mdb/mdb_tree.h>
#include <barrelfish_kpi/paging_arch.h>

#define MSG(format, ...) printk( LOG_NOTE, "ARMv7-A: "format, ## __VA_ARGS__ )

inline static uintptr_t paging_round_down(uintptr_t address, uintptr_t size)
{
    return address & ~(size - 1);
}

inline static uintptr_t paging_round_up(uintptr_t address, uintptr_t size)
{
    return (address + size - 1) & ~(size - 1);
}

inline static int aligned(uintptr_t address, uintptr_t bytes)
{
    return (address & (bytes - 1)) == 0;
}
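
/* These helpers assume the size/alignment argument is a power of two.  For
 * example (illustration only), with a 4kB granule:
 *   paging_round_down(0x12345, 0x1000) == 0x12000
 *   paging_round_up  (0x12345, 0x1000) == 0x13000
 *   aligned          (0x12000, 0x1000) == 1
 */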

union arm_l2_entry;
static void
paging_set_flags(union arm_l2_entry *entry, uintptr_t kpi_paging_flags)
{
        entry->small_page.tex = 1; /* Write-allocate. */
        entry->small_page.shareable = 1; /* Coherent. */
        entry->small_page.bufferable = 1;
        entry->small_page.cacheable =
            (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE) ? 0 : 1;
        entry->small_page.ap10  =
            (kpi_paging_flags & KPI_PAGING_FLAGS_READ)  ? 2 : 0;
        entry->small_page.ap10 |=
            (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE) ? 3 : 0;
        entry->small_page.ap2 = 0;
}
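
/* For reference (a sketch assuming the classic AP encoding, i.e. SCTLR.AFE == 0),
 * the access permissions produced above with AP2 = 0 are:
 *   AP[1:0] = 0b00 : no access
 *   AP[1:0] = 0b10 : privileged RW, user read-only   (READ)
 *   AP[1:0] = 0b11 : privileged RW, user RW          (WRITE, or READ|WRITE)
 * TEX=1, C=1, B=1 selects Normal, write-back, write-allocate memory. */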

static void map_kernel_section_hi(lvaddr_t va, union arm_l1_entry l1);
static union arm_l1_entry make_dev_section(lpaddr_t pa);
static void paging_print_l1_pte(lvaddr_t va, union arm_l1_entry pte);

void paging_print_l1(void);

/* In the non-boot paging code, these are pointers to be set to the values
 * passed from the boot driver. */
union arm_l1_entry *l1_low;
union arm_l1_entry *l1_high;
union arm_l2_entry *l2_vec;

void paging_load_pointers(struct arm_core_data *boot_core_data) {
    l1_low= (union arm_l1_entry *)
        local_phys_to_mem(boot_core_data->kernel_l1_low);
    l1_high= (union arm_l1_entry *)
        local_phys_to_mem(boot_core_data->kernel_l1_high);
    l2_vec= (union arm_l2_entry *)
        local_phys_to_mem(boot_core_data->kernel_l2_vec);
}

static void map_kernel_section_hi(lvaddr_t va, union arm_l1_entry l1)
{
    assert( va >= MEMORY_OFFSET );
    l1_high[ARM_L1_OFFSET(va)] = l1;
}

/**
 * \brief Return an L1 page table entry to map a 1MB 'section' of
 * device memory located at physical address 'pa'.
 */
static union arm_l1_entry make_dev_section(lpaddr_t pa)
{
    union arm_l1_entry l1;

    l1.raw = 0;
    l1.section.type = L1_TYPE_SECTION_ENTRY;
    // l1.section.tex       = 1;
    l1.section.bufferable   = 0;
    l1.section.cacheable    = 0;
    l1.section.ap10         = 3;    // Privileged RW, user RW
    // l1.section.ap10      = 1;    // Privileged RW, user no access
    l1.section.ap2          = 0;
    l1.section.base_address = ARM_L1_SECTION_NUMBER(pa);
    return l1;
}
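
/* Note: with the raw entry zeroed, TEX, C and B all stay 0, so (assuming
 * SCTLR.TRE is disabled) the section is mapped as Strongly-ordered memory,
 * which is a conservative choice for device registers. */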

/**
 * \brief Return whether we have enabled the MMU. Useful for
 * initialization assertions
 */
bool paging_mmu_enabled(void)
{
    return true;
}

/**
 * \brief Perform a context switch.  Reload TTBR0 with the new
 * address, and invalidate the TLBs and caches.
 */
void paging_context_switch(lpaddr_t ttbr)
{
    assert(ttbr >= phys_memory_start &&
           ttbr <  phys_memory_start + RAM_WINDOW_SIZE);
    lpaddr_t old_ttbr = cp15_read_ttbr0();
    if (ttbr != old_ttbr)
    {
        dsb(); isb(); /* Make sure any page table updates have completed. */
        cp15_write_ttbr0(ttbr);
        isb(); /* The update must occur before we invalidate. */
        /* With no ASIDs, we've got to flush everything. */
        invalidate_tlb();
        /* Clean and invalidate. */
        invalidate_data_caches_pouu(true);
        invalidate_instruction_cache();
        /* Make sure the invalidates are completed and visible before any
         * user-level code can execute. */
        dsb(); isb();
    }
}

/* Map the exception vectors at VECTORS_BASE. */
void
paging_map_vectors(void) {
    /* The addresses installed into the page tables must be physical. */
    lpaddr_t vectors_phys= mem_to_local_phys((lvaddr_t)exception_vectors);
    lpaddr_t l2_vec_phys=  mem_to_local_phys((lvaddr_t)l2_vec);

    MSG("Mapping vectors at P:%"PRIxLPADDR" to %"PRIxLVADDR
        " using L2 table at P:%"PRIxLPADDR"\n",
        vectors_phys, VECTORS_BASE, l2_vec_phys);

    /**
     * Install a single small page mapping to cover the vectors.
     *
     * The mapping fields are set exactly as for the kernel's RAM sections -
     * see make_ram_section() for details.
     */
    union arm_l2_entry *e_l2= &l2_vec[ARM_L2_OFFSET(VECTORS_BASE)];
    e_l2->small_page.type= L2_TYPE_SMALL_PAGE;
    e_l2->small_page.tex=        1;
    e_l2->small_page.cacheable=  1;
    e_l2->small_page.bufferable= 1;
    e_l2->small_page.not_global= 0;
    e_l2->small_page.shareable=  1;
    e_l2->small_page.ap10=       1;
    e_l2->small_page.ap2=        0;

    /* The vectors must be at the beginning of a frame. */
    assert((vectors_phys & BASE_PAGE_MASK) == 0);
    e_l2->small_page.base_address= vectors_phys >> BASE_PAGE_BITS;

    /* Clean the modified entry to L2 cache. */
    clean_to_pou(e_l2);

    /**
     * Map the L2 table to hold the high vectors mapping.
     */
    union arm_l1_entry *e_l1= &l1_high[ARM_L1_OFFSET(VECTORS_BASE)];
    e_l1->page_table.type= L1_TYPE_PAGE_TABLE_ENTRY;
    e_l1->page_table.base_address= l2_vec_phys >> ARM_L2_TABLE_BITS;

    /* Clean the modified entry to L2 cache. */
    clean_to_pou(e_l1);

    /* We shouldn't need to invalidate any TLB entries, as this entry has
     * never been mapped. */
}

/**
 * \brief Map a device into the kernel's address space.
 *
 * \param dev_base is the physical address of the device
 * \param dev_size is the number of bytes of physical address space
 * the device occupies.
 *
 * \return the kernel virtual address of the mapped device, or panic.
 */
lvaddr_t paging_map_device(lpaddr_t dev_base, size_t dev_size)
{
    // We map all hardware devices in the kernel using sections in the
    // top quarter (0xC0000000-0xFE000000) of the address space, just
    // below the exception vectors.
    //
    // It makes sense to use sections since (1) we don't map many
    // devices in the CPU driver anyway, and (2) if we did, it might
    // save a wee bit of TLB space.
    //

    // First, we make sure that the device fits into a single
    // section.
    if (ARM_L1_SECTION_NUMBER(dev_base) != ARM_L1_SECTION_NUMBER(dev_base+dev_size-1)) {
        panic("Attempt to map device spanning >1 section 0x%"PRIxLPADDR"+0x%zx\n",
              dev_base, dev_size );
    }

    // Now, walk down the page table looking for either (a) an
    // existing mapping, in which case return the address the device
    // is already mapped to, or (b) an invalid mapping, in which case
    // map it.
    uint32_t dev_section = ARM_L1_SECTION_NUMBER(dev_base);
    uint32_t dev_offset  = ARM_L1_SECTION_OFFSET(dev_base);
    lvaddr_t dev_virt    = 0;

    for( size_t i = ARM_L1_OFFSET( DEVICE_OFFSET - 1); i > ARM_L1_MAX_ENTRIES / 4 * 3; i-- ) {

        // Work out the virtual address we're looking at
        dev_virt = (lvaddr_t)(i << ARM_L1_SECTION_BITS);

        // If we already have a mapping for that address, return it.
        if ( L1_TYPE(l1_high[i].raw) == L1_TYPE_SECTION_ENTRY &&
             l1_high[i].section.base_address == dev_section ) {
            return dev_virt + dev_offset;
        }

        // Otherwise, if it's free, map it.
        if ( L1_TYPE(l1_high[i].raw) == L1_TYPE_INVALID_ENTRY ) {
            map_kernel_section_hi(dev_virt, make_dev_section(dev_base));
            invalidate_data_caches_pouu(true);
            invalidate_tlb(); /* XXX selective */
            return dev_virt + dev_offset;
        }
    }
    // We're all out of section entries :-(
    panic("Ran out of section entries to map a kernel device");
}
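
/* Usage sketch (the device address below is hypothetical): mapping a 4kB UART
 * register block returns a kernel-virtual pointer at the matching offset
 * within the containing 1MB device section, reusing an existing section
 * mapping if one is already present:
 *
 *     lvaddr_t uart = paging_map_device(0x48020000, 0x1000);
 */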

/**
 * \brief Print out an L1 page table entry 'pte', interpreted relative
 * to a given virtual address 'va'.
 */
static void paging_print_l1_pte(lvaddr_t va, union arm_l1_entry pte)
{
    printf("(memory offset=%"PRIxLVADDR"):\n", va);
    if ( L1_TYPE(pte.raw) == L1_TYPE_INVALID_ENTRY) {
        return;
    }
    printf( " %"PRIxLVADDR"-%"PRIxLVADDR": ", va, va + ARM_L1_SECTION_BYTES - 1);
    switch( L1_TYPE(pte.raw) ) {
    case L1_TYPE_INVALID_ENTRY:
        printf("INVALID\n");
        break;
    case L1_TYPE_PAGE_TABLE_ENTRY:
        printf("L2 PT 0x%"PRIxLPADDR" pxn=%d ns=%d sbz=%d dom=0x%04x sbz1=%d \n",
               pte.page_table.base_address << 10,
               pte.page_table.pxn,
               pte.page_table.ns,
               pte.page_table.sbz0,
               pte.page_table.domain,
               pte.page_table.sbz1 );
        break;
    case L1_TYPE_SECTION_ENTRY:
        printf("SECTION 0x%"PRIxLPADDR" buf=%d cache=%d xn=%d dom=0x%04x\n",
               pte.section.base_address << 20,
               pte.section.bufferable,
               pte.section.cacheable,
               pte.section.execute_never,
               pte.section.domain );
        printf("      sbz0=%d ap=0x%03x tex=0x%03x shr=%d ng=%d mbz0=%d ns=%d\n",
               pte.section.sbz0,
               (pte.section.ap2) << 2 | pte.section.ap10,
               pte.section.tex,
               pte.section.shareable,
               pte.section.not_global,
               pte.section.mbz0,
               pte.section.ns );
        break;
    case L1_TYPE_SUPER_SECTION_ENTRY:
        printf("SUPERSECTION 0x%"PRIxLPADDR" buf=%d cache=%d xn=%d dom=0x%04x\n",
               pte.super_section.base_address << 24,
               pte.super_section.bufferable,
               pte.super_section.cacheable,
               pte.super_section.execute_never,
               pte.super_section.domain );
        printf("      sbz0=%d ap=0x%03x tex=0x%03x shr=%d ng=%d mbz0=%d ns=%d\n",
               pte.super_section.sbz0,
               (pte.super_section.ap2) << 2 | pte.super_section.ap10,
               pte.super_section.tex,
               pte.super_section.shareable,
               pte.super_section.not_global,
               pte.super_section.mbz0,
               pte.super_section.ns );
        break;
    }
}

/**
 * \brief Print out the CPU driver's two static page tables.  Note:
 *
 * 1) This is a lot of output.  Each table has 4096 entries, each of
 *    which takes one or two lines of output.
 * 2) The first half of the TTBR1 table is unused, and is probably
 *    (hopefully) all empty.
 * 3) The second half of the TTBR0 table is similarly unused, and
 *    hopefully empty.
 * 4) The TTBR0 table is only used at boot anyway, since thereafter it
 *    is replaced by a user page table.
 * Otherwise, go ahead and knock yourself out.
 */
void paging_print_l1(void)
{
    size_t i;
    lvaddr_t base = 0;
    printf("TTBR1 table:\n");
    for(i = 0; i < ARM_L1_MAX_ENTRIES; i++, base += ARM_L1_SECTION_BYTES ) {
        paging_print_l1_pte(base, l1_high[i]);
    }
    printf("TTBR0 table:\n");
    base = 0;
    for(i = 0; i < ARM_L1_MAX_ENTRIES; i++, base += ARM_L1_SECTION_BYTES ) {
        paging_print_l1_pte(base, l1_low[i]);
    }
}

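/**
 * \brief Install mappings into an L1 page table.
 *
 * If 'src' is an L2 page-table capability, 'pte_count' L1 page-table entries
 * are installed starting at 'slot'.  Otherwise 'src' must be a Frame or
 * DevFrame, and each entry becomes a 1MB section mapping.  'offset' is the
 * byte offset into 'src' at which the mapping starts.
 */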
static errval_t
caps_map_l1(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    if (src->type != ObjType_VNode_ARM_l2) {
        // Large page (1MB section) mappings are handled here.
        assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

        // The ARM L1 table has 4096 entries; each 1MB section is mapped by
        // filling in an individual entry.
        if (slot >= ARM_L1_MAX_ENTRIES) {
            panic("oops: slot >= ARM_L1_MAX_ENTRIES");
            return SYS_ERR_VNODE_SLOT_INVALID;
        }

        if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
            panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
            return SYS_ERR_WRONG_MAPPING;
        }

        // check offset within frame
        if ((offset + pte_count * BYTES_PER_SECTION > get_size(src)) ||
            ((offset % BYTES_PER_SECTION) != 0)) {
            printf("offset = %"PRIuPTR", pte_count=%"PRIuPTR
                   ", src->size = %"PRIuGENSIZE", src->type = %d\n",
                    offset, pte_count, get_size(src), src->type);
            panic("oops: frame offset invalid");
            return SYS_ERR_FRAME_OFFSET_INVALID;
        }

        // check that the mapping does not run off the end of the L1 table
        if (slot + pte_count > ARM_L1_MAX_ENTRIES) {
            return SYS_ERR_VM_MAP_SIZE;
        }

        // Destination
        lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
        lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

        union arm_l1_entry* entry = ((union arm_l1_entry*)dest_lvaddr) + slot;
        if (entry->invalid.type != L1_TYPE_INVALID_ENTRY) {
            panic("Remapping valid page.");
        }

        lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
        if ((src_lpaddr & (LARGE_PAGE_SIZE - 1))) {
            panic("Invalid target");
        }

        create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

        for (int i = 0; i < pte_count; i++) {
            entry->raw = 0;

            entry->section.type = L1_TYPE_SECTION_ENTRY;
            entry->section.bufferable = 1;
            entry->section.cacheable = (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE)? 0: 1;
            entry->section.ap10 = (kpi_paging_flags & KPI_PAGING_FLAGS_READ)? 2:0;
            entry->section.ap10 |= (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE)? 3:0;
            entry->section.ap2 = 0;
            entry->section.base_address = (src_lpaddr + i * BYTES_PER_SECTION) >> 20;

            /* Clean the modified entry to L2 cache. */
            clean_to_pou(entry);

            debug(SUBSYS_PAGING, "L1 section mapping %08"PRIxLVADDR"[%"PRIuCSLOT
                                 "] @%p = %08"PRIx32"\n",
                   dest_lvaddr, slot, entry, entry->raw);

            entry++;
        }

        // Flush TLB if remapping.
        invalidate_tlb(); /* XXX selective */
        return SYS_ERR_OK;
    }

    if (slot >= ARM_L1_MAX_ENTRIES) {
        printf("slot = %"PRIuCSLOT"\n",slot);
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    // check offset within frame
    if ((offset + pte_count * ARM_L2_TABLE_BYTES > get_size(src)) ||
            ((offset % ARM_L2_TABLE_BYTES) != 0)) {
        printf("offset = %"PRIuPTR", pte_count=%"PRIuPTR
                ", src->size = %"PRIuGENSIZE", src->type = %d\n",
                offset, pte_count, get_size(src), src->type);
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }

    // check that the mapping does not run off the end of the L1 table
    if (slot + pte_count > ARM_L1_MAX_ENTRIES) {
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (slot >= ARM_L1_OFFSET(MEMORY_OFFSET)) {
        printf("slot = %"PRIuCSLOT"\n",slot);
        return SYS_ERR_VNODE_SLOT_RESERVED;
    }

    debug(SUBSYS_PAGING, "caps_map_l1: mapping %"PRIuPTR" L2 tables @%"PRIuCSLOT"\n",
            pte_count, slot);
    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union arm_l1_entry* entry = (union arm_l1_entry*)dest_lvaddr + slot;

    // Source
    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr) + offset;

    assert(aligned(src_lpaddr, 1u << 10));
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 16384));

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    for (int i = 0; i < pte_count; i++, entry++)
    {
        entry->raw = 0;
        entry->page_table.type   = L1_TYPE_PAGE_TABLE_ENTRY;
        entry->page_table.domain = 0;
        entry->page_table.base_address =
            (src_lpaddr + i * ARM_L2_TABLE_BYTES) >> 10;

        /* Clean the modified entry to L2 cache. */
        clean_to_pou(entry);

        debug(SUBSYS_PAGING, "L1 mapping %"PRIuCSLOT". @%p = %08"PRIx32"\n",
              slot + i, entry, entry->raw);
    }

    invalidate_tlb(); /* XXX selective */

    return SYS_ERR_OK;
}

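/**
 * \brief Install 'pte_count' small-page mappings of frame 'src' into the L2
 * page table 'dest', starting at entry 'slot' and byte offset 'offset' into
 * the frame.
 */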
static errval_t
caps_map_l2(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    if (slot >= ARM_L2_MAX_ENTRIES) {
        panic("oops: slot >= 256");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (src->type != ObjType_Frame && src->type != ObjType_DevFrame) {
        panic("oops: src->type != ObjType_Frame && src->type != ObjType_DevFrame");
        return SYS_ERR_WRONG_MAPPING;
    }

    // check offset within frame
    if ((offset + pte_count * BASE_PAGE_SIZE > get_size(src)) ||
        ((offset % BASE_PAGE_SIZE) != 0)) {
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }

    // check mapping does not overlap leaf page table
    if (slot + pte_count > ARM_L2_MAX_ENTRIES) {
        return SYS_ERR_VM_MAP_SIZE;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union arm_l2_entry* entry = (union arm_l2_entry*)dest_lvaddr + slot;
    if (entry->small_page.type != L2_TYPE_INVALID_PAGE) {
        panic("Remapping valid page.");
    }

    lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
    if ((src_lpaddr & (BASE_PAGE_SIZE - 1))) {
        panic("Invalid target");
    }

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    for (int i = 0; i < pte_count; i++) {
        entry->raw = 0;

        entry->small_page.type = L2_TYPE_SMALL_PAGE;
        paging_set_flags(entry, kpi_paging_flags);
        entry->small_page.base_address = (src_lpaddr + i * BASE_PAGE_SIZE) >> 12;

        /* Clean the modified entry to L2 cache. */
        clean_to_pou(entry);

        debug(SUBSYS_PAGING, "L2 mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx32"\n",
               dest_lvaddr, slot, entry, entry->raw);

        entry++;
    }

    // Flush TLB if remapping.
    invalidate_tlb(); /* XXX selective */

    return SYS_ERR_OK;
}

/// Create page mappings
errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
                            struct cte *src_cte, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
{
    struct capability *src_cap  = &src_cte->cap;
    struct capability *dest_cap = &dest_vnode_cte->cap;
    assert(mapping_cte->cap.type == ObjType_Null);
    errval_t err;

    if (ObjType_VNode_ARM_l1 == dest_cap->type) {
        //printf("caps_map_l1: %zu\n", (size_t)pte_count);
        err = caps_map_l1(dest_cap, dest_slot, src_cap,
                           flags,
                           offset,
                           pte_count,
                           mapping_cte
                          );
    }
    else if (ObjType_VNode_ARM_l2 == dest_cap->type) {
        //printf("caps_map_l2: %zu\n", (size_t)pte_count);
        err = caps_map_l2(dest_cap, dest_slot, src_cap,
                           flags,
                           offset,
                           pte_count,
                           mapping_cte
                          );
    }
    else {
        panic("ObjType not VNode");
    }

    if (err_is_fail(err)) {
        memset(mapping_cte, 0, sizeof(*mapping_cte));
        return err;
    }

    assert(type_is_mapping(mapping_cte->cap.type));
    err = mdb_insert(mapping_cte);
    if (err_is_fail(err)) {
        printk(LOG_ERR, "%s: mdb_insert: %"PRIuERRV"\n", __FUNCTION__, err);
    }

    TRACE_CAP_MSG("created", mapping_cte);

    return err;
}

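/**
 * \brief Clear 'num_pages' page table entries starting at 'slot' in the page
 * table mapped at kernel-virtual address 'pt', returning the number cleared.
 *
 * Note: this only zeroes the entries; cleaning them to the point of
 * unification and flushing the TLB is presumably left to the caller (compare
 * paging_modify_flags() below, which uses clean_to_pou() and
 * paging_tlb_flush_range()).
 */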
size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
{
    size_t unmapped_pages = 0;
    union arm_l2_entry *ptentry = (union arm_l2_entry *)pt + slot;
    for (size_t i = 0; i < num_pages; i++) {
        ptentry++->raw = 0;
        unmapped_pages++;
    }
    return unmapped_pages;
}

errval_t paging_modify_flags(struct capability *mapping, uintptr_t offset,
                             uintptr_t pages, uintptr_t kpi_paging_flags)
{
    // XXX: modify flags for sections?
    assert(type_is_mapping(mapping->type));
    // check flags
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    struct Frame_Mapping *info = &mapping->u.frame_mapping;

    /* Calculate location of page table entries we need to modify */
    lvaddr_t base = local_phys_to_mem(get_address(&info->ptable->cap)) +
        (info->entry + offset) * sizeof(union arm_l2_entry);

    for (int i = 0; i < pages; i++) {
        union arm_l2_entry *entry =
            (union arm_l2_entry *)base + i;
        paging_set_flags(entry, kpi_paging_flags);

        /* Clean the modified entry to L2 cache. */
        clean_to_pou(entry);
    }

    return paging_tlb_flush_range(cte_for_cap(mapping), offset, pages);
}

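/**
 * \brief Walk and print a dispatcher's page tables: every L1 section entry
 * and L2 table pointer, and the small-page entries of each mapped L2 table.
 */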
void paging_dump_tables(struct dcb *dispatcher)
{
    if (!local_phys_is_valid(dispatcher->vspace)) {
        printk(LOG_ERR, "dispatcher->vspace = 0x%"PRIxLPADDR": too high!\n" ,
               dispatcher->vspace);
        return;
    }
    lvaddr_t l1 = local_phys_to_mem(dispatcher->vspace);

    for (int l1_index = 0; l1_index < ARM_L1_MAX_ENTRIES; l1_index++) {
        // get level2 table
        union arm_l1_entry *l1_e = (union arm_l1_entry *)l1 + l1_index;
        if (!l1_e->raw) { continue; }
        if (l1_e->invalid.type == L1_TYPE_SECTION_ENTRY) { // section
            genpaddr_t paddr = (genpaddr_t)(l1_e->section.base_address) << 20;
            printf("%d: (section) 0x%"PRIxGENPADDR"\n", l1_index, paddr);
        } else if (l1_e->invalid.type == L1_TYPE_PAGE_TABLE_ENTRY) { // l2 table
            genpaddr_t ptable_gp = (genpaddr_t)(l1_e->page_table.base_address) << 10;
            lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));

            printf("%d: (l2table) 0x%"PRIxGENPADDR"\n", l1_index, ptable_gp);

            for (int entry = 0; entry < ARM_L2_MAX_ENTRIES; entry++) {
                union arm_l2_entry *e =
                    (union arm_l2_entry *)ptable_lv + entry;
                genpaddr_t paddr = (genpaddr_t)(e->small_page.base_address) << BASE_PAGE_BITS;
                if (!paddr) {
                    continue;
                }
                printf("%d.%d: 0x%"PRIxGENPADDR" (rw=%d%d)\n", l1_index, entry, paddr,
                        (e->small_page.ap10 >> 1) & 1, e->small_page.ap10 & 1);
            }
        }
    }
}

/**
 * \brief Install a page table pointer in a level 1 page table
 * located at 'table_base' to map addresses starting at virtual
 * address 'va'.  The level 2 page table to be used is assumed to be
 * located at physical address 'pa'.
 */
void paging_map_user_pages_l1(lvaddr_t table_base, lvaddr_t va, lpaddr_t pa)
{
    assert(aligned(table_base, ARM_L1_ALIGN));
    assert(aligned(pa, BYTES_PER_SMALL_PAGE));

    union arm_l1_entry e;
    union arm_l1_entry *l1_table;

    e.raw                     = 0;
    e.page_table.type         = L1_TYPE_PAGE_TABLE_ENTRY;
    e.page_table.domain       = 0;
    e.page_table.base_address = ARM_L2_TABLE_PPN(pa);

    if (table_base == 0) {
        if(va < MEMORY_OFFSET) {
            table_base = cp15_read_ttbr0() + MEMORY_OFFSET;
        } else {
            table_base = cp15_read_ttbr1() + MEMORY_OFFSET;
        }
    }
    l1_table = (union arm_l1_entry *) table_base;
    l1_table[ARM_L1_OFFSET(va)] = e;

    /* Clean the modified entry to L2 cache. */
    clean_to_pou(&l1_table[ARM_L1_OFFSET(va)]);
}

/**
 * \brief Install a level 2 page table entry located at 'l2e', to map
 * physical address 'addr', with flags 'flags'.  'flags' here is in the
 * form of a prototype 32-bit L2 *invalid* PTE with address 0.
 */
void paging_set_l2_entry(uintptr_t* l2e, lpaddr_t addr, uintptr_t flags)
{
    union arm_l2_entry e;
    e.raw = flags;

    assert( L2_TYPE(e.raw) == L2_TYPE_INVALID_PAGE );
    assert( e.small_page.base_address == 0);
    assert( ARM_PAGE_OFFSET(addr) == 0 );

    e.small_page.type = L2_TYPE_SMALL_PAGE;
    e.small_page.base_address = (addr >> 12);

    *l2e = e.raw;

    /* Clean the modified entry to L2 cache. */
    clean_to_pou(l2e);
}

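/* Usage sketch for paging_set_l2_entry(): the 'flags' prototype is an
 * otherwise-invalid small-page PTE carrying only attribute bits.  Here
 * 'slot' (a uintptr_t * pointing at the target L2 entry) and 'frame_pa'
 * (a page-aligned physical address) are placeholders, not kernel symbols:
 *
 *     union arm_l2_entry proto = { .raw = 0 };
 *     proto.small_page.bufferable = 1;
 *     proto.small_page.cacheable  = 1;
 *     proto.small_page.ap10       = 1;   // privileged RW, user no access
 *     paging_set_l2_entry(slot, frame_pa, proto.raw);
 */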