/*
 * Copyright (c) 2009-2013,2016 ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <kernel.h>
#include <dispatch.h>
#include <cache.h>
#include <cp15.h>
#include <paging_kernel_arch.h>
#include <string.h>
#include <exceptions.h>
#include <arch/arm/platform.h>
#include <cap_predicates.h>
#include <mdb/mdb_tree.h>
#include <barrelfish_kpi/paging_arch.h>
#include <useraccess.h>

#define MSG(format, ...) printk( LOG_NOTE, "ARMv7-A: "format, ## __VA_ARGS__ )

inline static uintptr_t paging_round_down(uintptr_t address, uintptr_t size)
{
    return address & ~(size - 1);
}

inline static uintptr_t paging_round_up(uintptr_t address, uintptr_t size)
{
    return (address + size - 1) & ~(size - 1);
}

inline static int aligned(uintptr_t address, uintptr_t bytes)
{
    return (address & (bytes - 1)) == 0;
}

union arm_l2_entry;
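/*
 * Translate generic KPI paging flags into ARMv7 short-descriptor small-page
 * attributes.  TEX=0b001 with C=1 and B=1 selects Normal, write-back,
 * write-allocate memory, and S=1 makes the mapping coherent.  With AP[2]=0,
 * AP[1:0]=0b10 gives kernel read/write but user read-only access, and
 * AP[1:0]=0b11 gives read/write access to both.
 */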
static void
paging_set_flags(union arm_l2_entry *entry, uintptr_t kpi_paging_flags)
{
    entry->small_page.tex = 1; /* Write-allocate. */
    entry->small_page.shareable = 1; /* Coherent. */
    entry->small_page.bufferable = 1;
    entry->small_page.cacheable =
        (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE) ? 0 : 1;
    entry->small_page.ap10  =
        (kpi_paging_flags & KPI_PAGING_FLAGS_READ)  ? 2 : 0;
    entry->small_page.ap10 |=
        (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE) ? 3 : 0;
    entry->small_page.ap2 = 0;
}

static void map_kernel_section_hi(lvaddr_t va, union arm_l1_entry l1);
static union arm_l1_entry make_dev_section(lpaddr_t pa);
static void paging_print_l1_pte(lvaddr_t va, union arm_l1_entry pte);

void paging_print_l1(void);

/* In the non-boot paging code, these are pointers to be set to the values
 * passed from the boot driver. */
union arm_l1_entry *l1_low;
union arm_l1_entry *l1_high;
union arm_l2_entry *l2_vec;
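/* The L1 tables have 4096 word-sized entries (16kB each), each entry mapping
 * 1MB of the virtual address space; l2_vec is the single 1kB L2 table (256
 * entries of 4kB each) used below to map the exception vectors page. */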

void paging_load_pointers(struct arm_core_data *boot_core_data) {
    l1_low= (union arm_l1_entry *)
        local_phys_to_mem(boot_core_data->kernel_l1_low);
    l1_high= (union arm_l1_entry *)
        local_phys_to_mem(boot_core_data->kernel_l1_high);
    l2_vec= (union arm_l2_entry *)
        local_phys_to_mem(boot_core_data->kernel_l2_vec);
}

static void map_kernel_section_hi(lvaddr_t va, union arm_l1_entry l1)
{
    assert( va >= MEMORY_OFFSET );
    l1_high[ARM_L1_OFFSET(va)] = l1;
}

/**
 * \brief Return an L1 page table entry to map a 1MB 'section' of
 * device memory located at physical address 'pa'.
 */
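/* Devices are mapped with TEX=0 (the tex assignment below is commented out,
 * so the field stays zero), C=0 and B=0, i.e. as Strongly-ordered memory,
 * which is appropriate for memory-mapped device registers. */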
static union arm_l1_entry make_dev_section(lpaddr_t pa)
{
    union arm_l1_entry l1;

    l1.raw = 0;
    l1.section.type = L1_TYPE_SECTION_ENTRY;
    // l1.section.tex       = 1;
    l1.section.bufferable   = 0;
    l1.section.cacheable    = 0;
    l1.section.ap10         = 3;    // AP[1:0] = 0b11: kernel RW, user RW
    // l1.section.ap10         = 1;    // AP[1:0] = 0b01: kernel RW, user no access
    l1.section.ap2          = 0;
    l1.section.base_address = ARM_L1_SECTION_NUMBER(pa);
    return l1;
}

/**
 * \brief Return whether we have enabled the MMU. Useful for
 * initialization assertions
 */
bool paging_mmu_enabled(void)
{
    return true;
}

/**
 * \brief Perform a context switch.  Reload TTBR0 with the new
 * address, and invalidate the TLBs and caches.
 */
void paging_context_switch(lpaddr_t ttbr)
{
    assert(ttbr >= phys_memory_start &&
           ttbr <  phys_memory_start + RAM_WINDOW_SIZE);
    lpaddr_t old_ttbr = cp15_read_ttbr0();
    if (ttbr != old_ttbr)
    {
        dsb(); isb(); /* Make sure any page table updates have completed. */
        cp15_write_ttbr0(ttbr);
        isb(); /* The update must occur before we invalidate. */
        /* With no ASIDs, we've got to flush everything. */
        invalidate_tlb();
        /* Clean and invalidate. */
        invalidate_data_caches_pouu(true);
        invalidate_instruction_cache();
        /* Make sure the invalidates are completed and visible before any
         * user-level code can execute. */
        dsb(); isb();
    }
}

/* Map the exception vectors at VECTORS_BASE. */
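/* VECTORS_BASE is expected to be the ARMv7 high-vectors address (0xFFFF0000,
 * selected via SCTLR.V); a single 4kB small-page mapping is enough to cover
 * the vectors page. */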
void
paging_map_vectors(void) {
    /* The addresses installed into the page tables must be physical. */
    lpaddr_t vectors_phys= mem_to_local_phys((lvaddr_t)exception_vectors);
    lpaddr_t l2_vec_phys=  mem_to_local_phys((lvaddr_t)l2_vec);

    MSG("Mapping vectors at P:%"PRIxLPADDR" to %"PRIxLVADDR
        " using L2 table at P:%"PRIxLPADDR"\n",
        vectors_phys, VECTORS_BASE, l2_vec_phys);

    /**
     * Install a single small page mapping to cover the vectors.
     *
     * The mapping fields are set exactly as for the kernel's RAM sections -
     * see make_ram_section() for details.
     */
    union arm_l2_entry *e_l2= &l2_vec[ARM_L2_OFFSET(VECTORS_BASE)];
    e_l2->small_page.type= L2_TYPE_SMALL_PAGE;
    e_l2->small_page.tex=        1;
    e_l2->small_page.cacheable=  1;
    e_l2->small_page.bufferable= 1;
    e_l2->small_page.not_global= 0;
    e_l2->small_page.shareable=  1;
    e_l2->small_page.ap10=       1;
    e_l2->small_page.ap2=        0;

    /* The vectors must be at the beginning of a frame. */
    assert((vectors_phys & BASE_PAGE_MASK) == 0);
    e_l2->small_page.base_address= vectors_phys >> BASE_PAGE_BITS;

    /* Clean the modified entry to L2 cache. */
    clean_to_pou(e_l2);

    /**
     * Map the L2 table to hold the high vectors mapping.
     */
    union arm_l1_entry *e_l1= &l1_high[ARM_L1_OFFSET(VECTORS_BASE)];
    e_l1->page_table.type= L1_TYPE_PAGE_TABLE_ENTRY;
    e_l1->page_table.base_address= l2_vec_phys >> ARM_L2_TABLE_BITS;

    /* Clean the modified entry to L2 cache. */
    clean_to_pou(e_l1);

    /* We shouldn't need to invalidate any TLB entries, as this entry has
     * never been mapped. */
}

/**
 * \brief Map a device into the kernel's address space.
 *
 * \param dev_base is the physical address of the device.
 * \param dev_size is the number of bytes of physical address space
 * the device occupies.
 *
 * \return the kernel virtual address of the mapped device; panics on failure.
 */
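/* Note that the returned address preserves the device's offset within its 1MB
 * section: for a (hypothetical) device at physical address 0x48020000, the
 * result is the virtual section base chosen below plus 0x20000. */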
lvaddr_t paging_map_device(lpaddr_t dev_base, size_t dev_size)
{
    // We map all hardware devices in the kernel using sections in the
    // top quarter (0xC0000000-0xFE000000) of the address space, just
    // below the exception vectors.
    //
    // It makes sense to use sections since (1) we don't map many
    // devices in the CPU driver anyway, and (2) if we did, it might
    // save a wee bit of TLB space.
    //

    // First, we make sure that the device fits into a single
    // section.
    if (ARM_L1_SECTION_NUMBER(dev_base) != ARM_L1_SECTION_NUMBER(dev_base+dev_size-1)) {
        panic("Attempt to map device spanning >1 section 0x%"PRIxLPADDR"+0x%zx\n",
              dev_base, dev_size );
    }

    // Now, walk down the page table looking for either (a) an
    // existing mapping, in which case return the address the device
    // is already mapped to, or (b) an invalid mapping, in which case
    // map it.
    uint32_t dev_section = ARM_L1_SECTION_NUMBER(dev_base);
    uint32_t dev_offset  = ARM_L1_SECTION_OFFSET(dev_base);
    lvaddr_t dev_virt    = 0;

    for( size_t i = ARM_L1_OFFSET( DEVICE_OFFSET - 1); i > ARM_L1_MAX_ENTRIES / 4 * 3; i-- ) {

        // Work out the virtual address we're looking at
        dev_virt = (lvaddr_t)(i << ARM_L1_SECTION_BITS);

        // If we already have a mapping for that address, return it.
        if ( L1_TYPE(l1_high[i].raw) == L1_TYPE_SECTION_ENTRY &&
             l1_high[i].section.base_address == dev_section ) {
            return dev_virt + dev_offset;
        }

        // Otherwise, if it's free, map it.
        if ( L1_TYPE(l1_high[i].raw) == L1_TYPE_INVALID_ENTRY ) {
            map_kernel_section_hi(dev_virt, make_dev_section(dev_base));
            invalidate_data_caches_pouu(true);
            invalidate_tlb(); /* XXX selective */
            return dev_virt + dev_offset;
        }
    }
    // We're all out of section entries :-(
    panic("Ran out of section entries to map a kernel device");
}

/**
 * \brief Print out an L1 page table entry 'pte', interpreted relative
 * to a given virtual address 'va'.
 */
static void paging_print_l1_pte(lvaddr_t va, union arm_l1_entry pte)
{
    printf("(memory offset=%"PRIxLVADDR"):\n", va);
    if ( L1_TYPE(pte.raw) == L1_TYPE_INVALID_ENTRY) {
        return;
    }
    printf(" %"PRIxLVADDR"-%"PRIxLVADDR": ", va, va + ARM_L1_SECTION_BYTES - 1);
    switch( L1_TYPE(pte.raw) ) {
    case L1_TYPE_INVALID_ENTRY:
        printf("INVALID\n");
        break;
    case L1_TYPE_PAGE_TABLE_ENTRY:
        printf("L2 PT 0x%"PRIxLPADDR" pxn=%d ns=%d sbz=%d dom=0x%04x sbz1=%d \n",
               pte.page_table.base_address << 10,
               pte.page_table.pxn,
               pte.page_table.ns,
               pte.page_table.sbz0,
               pte.page_table.domain,
               pte.page_table.sbz1 );
        break;
    case L1_TYPE_SECTION_ENTRY:
        printf("SECTION 0x%"PRIxLPADDR" buf=%d cache=%d xn=%d dom=0x%04x\n",
               pte.section.base_address << 20,
               pte.section.bufferable,
               pte.section.cacheable,
               pte.section.execute_never,
               pte.section.domain );
        printf("      sbz0=%d ap=0x%03x tex=0x%03x shr=%d ng=%d mbz0=%d ns=%d\n",
               pte.section.sbz0,
               (pte.section.ap2) << 2 | pte.section.ap10,
               pte.section.tex,
               pte.section.shareable,
               pte.section.not_global,
               pte.section.mbz0,
               pte.section.ns );
        break;
    case L1_TYPE_SUPER_SECTION_ENTRY:
        printf("SUPERSECTION 0x%"PRIxLPADDR" buf=%d cache=%d xn=%d dom=0x%04x\n",
               pte.super_section.base_address << 24,
               pte.super_section.bufferable,
               pte.super_section.cacheable,
               pte.super_section.execute_never,
               pte.super_section.domain );
        printf("      sbz0=%d ap=0x%03x tex=0x%03x shr=%d ng=%d mbz0=%d ns=%d\n",
               pte.super_section.sbz0,
               (pte.super_section.ap2) << 2 | pte.super_section.ap10,
               pte.super_section.tex,
               pte.super_section.shareable,
               pte.super_section.not_global,
               pte.super_section.mbz0,
               pte.super_section.ns );
        break;
    }
}

/**
 * \brief Print out the CPU driver's two static page tables.  Note:
 *
 * 1) This is a lot of output.  Each table has 4096 entries, each of
 *    which takes one or two lines of output.
 * 2) The first half of the TTBR1 table is never used, and is
 *    probably (hopefully) all empty.
 * 3) The second half of the TTBR0 table is similarly never used, and
 *    hopefully empty.
 * 4) The TTBR0 table is only used at boot anyway, since it is
 *    thereafter replaced by a user page table.
 * Otherwise, go ahead and knock yourself out.
 */
void paging_print_l1(void)
{
    size_t i;
    lvaddr_t base = 0;
    printf("TTBR1 table:\n");
    for(i = 0; i < ARM_L1_MAX_ENTRIES; i++, base += ARM_L1_SECTION_BYTES ) {
        paging_print_l1_pte(base, l1_high[i]);
    }
    printf("TTBR0 table:\n");
    base = 0;
    for(i = 0; i < ARM_L1_MAX_ENTRIES; i++, base += ARM_L1_SECTION_BYTES ) {
        paging_print_l1_pte(base, l1_low[i]);
    }
}


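/*
 * Install entries into an L1 page table (capability 'dest'), starting at
 * 'slot': either pointers to L2 tables (when 'src' is a VNode_ARM_l2
 * capability), or 1MB section mappings taken directly from a frame-type
 * capability at 'offset'.  A mapping capability describing the result is
 * constructed in 'mapping_cte'.
 */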
static errval_t
caps_map_l1(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    if (src->type != ObjType_VNode_ARM_l2) {
        // Superpage (1MB section) mapping goes here.
        assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

        // ARM L1 has 4K entries, we need to fill in individual entries for
        // 1M sections
        // XXX: magic constant
        if (slot >= 4096) {
            panic("oops: slot >= 4096");
            return SYS_ERR_VNODE_SLOT_INVALID;
        }

        if (src->type != ObjType_Frame && src->type != ObjType_DevFrame &&
            src->type != ObjType_EndPointUMP) {
            panic("oops: src->type != ObjType_Frame && "
                  "src->type != ObjType_DevFrame &&"
                  "src->type != ObjType_EndPointUMP");
            return SYS_ERR_WRONG_MAPPING;
        }

        // check offset within frame
        if ((offset + pte_count * BYTES_PER_SECTION > get_size(src)) ||
            ((offset % BYTES_PER_SECTION) != 0)) {
            printf("offset = %"PRIuPTR", pte_count=%"PRIuPTR
                   ", src->size = %"PRIuGENSIZE", src->type = %d\n",
                    offset, pte_count, get_size(src), src->type);
            panic("oops: frame offset invalid");
            return SYS_ERR_FRAME_OFFSET_INVALID;
        }

        // check that the mapping does not overrun the L1 table
        if (slot + pte_count > 4096) {
            return SYS_ERR_VM_MAP_SIZE;
        }

        // Destination
        lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
        lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

        union arm_l1_entry* entry = ((union arm_l1_entry*)dest_lvaddr) + slot;
        if (entry->invalid.type != L1_TYPE_INVALID_ENTRY) {
            panic("Remapping valid page.");
        }

        lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
        if ((src_lpaddr & (LARGE_PAGE_SIZE - 1))) {
            panic("Invalid target");
        }

        create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

        for (int i = 0; i < pte_count; i++) {
            entry->raw = 0;

            entry->section.type = L1_TYPE_SECTION_ENTRY;
            entry->section.bufferable = 1;
            entry->section.cacheable = (kpi_paging_flags & KPI_PAGING_FLAGS_NOCACHE)? 0: 1;
            entry->section.ap10 = (kpi_paging_flags & KPI_PAGING_FLAGS_READ)? 2:0;
            entry->section.ap10 |= (kpi_paging_flags & KPI_PAGING_FLAGS_WRITE)? 3:0;
            entry->section.ap2 = 0;
            entry->section.base_address = (src_lpaddr + i * BYTES_PER_SECTION) >> 20;

            /* Clean the modified entry to L2 cache. */
            clean_to_pou(entry);

            debug(SUBSYS_PAGING, "L1 section mapping %08"PRIxLVADDR"[%"PRIuCSLOT
                                 "] @%p = %08"PRIx32"\n",
                   dest_lvaddr, slot, entry, entry->raw);

            entry++;
        }

        // Flush TLB if remapping.
        invalidate_tlb(); /* XXX selective */
        return SYS_ERR_OK;
    }

    // XXX: magic constant
    if (slot >= ARM_L1_MAX_ENTRIES) {
        printf("slot = %"PRIuCSLOT"\n",slot);
        return SYS_ERR_VNODE_SLOT_INVALID;
    }


    // check offset within frame
    if ((offset + pte_count * ARM_L2_TABLE_BYTES > get_size(src)) ||
            ((offset % ARM_L2_TABLE_BYTES) != 0)) {
        printf("offset = %"PRIuPTR", pte_count=%"PRIuPTR
                ", src->size = %"PRIuGENSIZE", src->type = %d\n",
                offset, pte_count, get_size(src), src->type);
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }

    // check that the mapping does not overrun the L1 table
    if (slot + pte_count > 4096) {
        return SYS_ERR_VM_MAP_SIZE;
    }


    if (slot >= ARM_L1_OFFSET(MEMORY_OFFSET)) {
        printf("slot = %"PRIuCSLOT"\n",slot);
        return SYS_ERR_VNODE_SLOT_RESERVED;
    }

    debug(SUBSYS_PAGING, "caps_map_l1: mapping %"PRIuPTR" L2 tables @%"PRIuCSLOT"\n",
            pte_count, slot);
    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union arm_l1_entry* entry = (union arm_l1_entry*)dest_lvaddr + slot;

    // Source
    genpaddr_t src_gpaddr = get_address(src);
    lpaddr_t   src_lpaddr = gen_phys_to_local_phys(src_gpaddr) + offset;

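    /* The L2 tables being installed must be 1kB-aligned, and must not lie
     * inside the destination L1 table itself (an ARMv7 L1 table is
     * 4096 entries * 4 bytes = 16kB, hence the 16384 below). */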
    assert(aligned(src_lpaddr, 1u << 10));
    assert((src_lpaddr < dest_lpaddr) || (src_lpaddr >= dest_lpaddr + 16384));

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    for (int i = 0; i < pte_count; i++, entry++)
    {
        entry->raw = 0;
        entry->page_table.type   = L1_TYPE_PAGE_TABLE_ENTRY;
        entry->page_table.domain = 0;
        entry->page_table.base_address =
            (src_lpaddr + i * ARM_L2_TABLE_BYTES) >> 10;

        /* Clean the modified entry to L2 cache. */
        clean_to_pou(entry);

        debug(SUBSYS_PAGING, "L1 mapping %"PRIuCSLOT". @%p = %08"PRIx32"\n",
              slot + i, entry, entry->raw);
    }

    invalidate_tlb(); /* XXX selective */

    return SYS_ERR_OK;
}

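/*
 * Install 4kB small-page mappings of frame-type capability 'src' (starting at
 * 'offset') into the L2 page table 'dest', beginning at entry 'slot'.
 */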
static errval_t
caps_map_l2(struct capability* dest,
            cslot_t            slot,
            struct capability* src,
            uintptr_t          kpi_paging_flags,
            uintptr_t          offset,
            uintptr_t          pte_count,
            struct cte*        mapping_cte)
{
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    if (slot >= ARM_L2_MAX_ENTRIES) {
        panic("oops: slot >= 256");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (src->type != ObjType_Frame && src->type != ObjType_DevFrame &&
        src->type != ObjType_EndPointUMP) {
        panic("oops: src->type != ObjType_Frame &&"
              "src->type != ObjType_DevFrame &&"
              "src->type != ObjType_EndPointUMP");
        return SYS_ERR_WRONG_MAPPING;
    }

    // check offset within frame
    if ((offset + pte_count * BASE_PAGE_SIZE > get_size(src)) ||
        ((offset % BASE_PAGE_SIZE) != 0)) {
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }

    // check mapping does not overlap leaf page table
    if (slot + pte_count > ARM_L2_MAX_ENTRIES) {
        return SYS_ERR_VM_MAP_SIZE;
    }

    // Destination
    lpaddr_t dest_lpaddr = gen_phys_to_local_phys(get_address(dest));
    lvaddr_t dest_lvaddr = local_phys_to_mem(dest_lpaddr);

    union arm_l2_entry* entry = (union arm_l2_entry*)dest_lvaddr + slot;
    if (entry->small_page.type != L2_TYPE_INVALID_PAGE) {
        panic("Remapping valid page.");
    }

    lpaddr_t src_lpaddr = gen_phys_to_local_phys(get_address(src) + offset);
    if ((src_lpaddr & (BASE_PAGE_SIZE - 1))) {
        panic("Invalid target");
    }

    create_mapping_cap(mapping_cte, src, cte_for_cap(dest), slot, pte_count);

    for (int i = 0; i < pte_count; i++) {
        entry->raw = 0;

        entry->small_page.type = L2_TYPE_SMALL_PAGE;
        paging_set_flags(entry, kpi_paging_flags);
        entry->small_page.base_address = (src_lpaddr + i * BASE_PAGE_SIZE) >> 12;

        /* Clean the modified entry to L2 cache. */
        clean_to_pou(entry);

        debug(SUBSYS_PAGING, "L2 mapping %08"PRIxLVADDR"[%"PRIuCSLOT"] @%p = %08"PRIx32"\n",
               dest_lvaddr, slot, entry, entry->raw);

        entry++;
    }

    // Flush TLB if remapping.
    invalidate_tlb(); /* XXX selective */

    return SYS_ERR_OK;
}

/// Create page mappings
errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
                            struct cte *src_cte, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
{
    struct capability *src_cap  = &src_cte->cap;
    struct capability *dest_cap = &dest_vnode_cte->cap;
    assert(mapping_cte->cap.type == ObjType_Null);
    errval_t err;

    if (ObjType_VNode_ARM_l1 == dest_cap->type) {
        //printf("caps_map_l1: %zu\n", (size_t)pte_count);
        err = caps_map_l1(dest_cap, dest_slot, src_cap,
                           flags,
                           offset,
                           pte_count,
                           mapping_cte
                          );
    }
    else if (ObjType_VNode_ARM_l2 == dest_cap->type) {
        //printf("caps_map_l2: %zu\n", (size_t)pte_count);
        err = caps_map_l2(dest_cap, dest_slot, src_cap,
                           flags,
                           offset,
                           pte_count,
                           mapping_cte
                          );
    }
    else {
        panic("ObjType not VNode");
    }

    if (err_is_fail(err)) {
        memset(mapping_cte, 0, sizeof(*mapping_cte));
        return err;
    }

    assert(type_is_mapping(mapping_cte->cap.type));
    err = mdb_insert(mapping_cte);
    if (err_is_fail(err)) {
        printk(LOG_ERR, "%s: mdb_insert: %"PRIuERRV"\n", __FUNCTION__, err);
    }

    TRACE_CAP_MSG("created", mapping_cte);

    return err;
}

size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
{
    size_t unmapped_pages = 0;
    union arm_l2_entry *ptentry = (union arm_l2_entry *)pt + slot;
    for (int i = 0; i < num_pages; i++) {
        ptentry++->raw = 0;
        unmapped_pages++;
    }
    return unmapped_pages;
}

errval_t paging_modify_flags(struct capability *mapping, uintptr_t offset,
                             uintptr_t pages, uintptr_t kpi_paging_flags)
{
    // XXX: modify flags for sections?
    assert(type_is_mapping(mapping->type));
    // check flags
    assert(0 == (kpi_paging_flags & ~KPI_PAGING_FLAGS_MASK));

    struct Frame_Mapping *info = &mapping->u.frame_mapping;

    /* Calculate location of page table entries we need to modify */
    lvaddr_t base = local_phys_to_mem(get_address(&info->ptable->cap)) +
        (info->entry + offset) * sizeof(union arm_l2_entry);

    for (int i = 0; i < pages; i++) {
        union arm_l2_entry *entry =
            (union arm_l2_entry *)base + i;
        paging_set_flags(entry, kpi_paging_flags);

        /* Clean the modified entry to L2 cache. */
        clean_to_pou(entry);
    }

    return paging_tlb_flush_range(cte_for_cap(mapping), offset, pages);
}

errval_t paging_copy_remap(struct cte *dest_vnode_cte, cslot_t dest_slot,
                           struct cte *src_cte, uintptr_t flags,
                           uintptr_t offset, uintptr_t pte_count,
                           struct cte *mapping_cte)
{
    printk(LOG_ERR, "%s called on ARMv7: NYI!\n", __FUNCTION__);
    return SYS_ERR_NOT_IMPLEMENTED;
}

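/*
 * Walk the current dispatcher's page tables and check that the user buffer
 * [buffer, buffer+size) is completely mapped, and writable if 'type' is
 * ACCESS_WRITE, so that the kernel can safely access user-supplied memory
 * (see useraccess.h).
 */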
bool paging_is_region_valid(lvaddr_t buffer, size_t size, uint8_t type)
{
    lvaddr_t root_pt = local_phys_to_mem(dcb_current->vspace);

    lvaddr_t end = buffer + size;

    uint16_t first_l1idx = ARM_L1_OFFSET(buffer);
    uint16_t last_l1idx = ARM_L1_OFFSET(end);

    uint16_t first_l2idx, last_l2idx;

    union arm_l1_entry *l1e;
    union arm_l2_entry *l2e;

    for (uint16_t l1idx = first_l1idx; l1idx <= last_l1idx; l1idx++) {
        l1e = (union arm_l1_entry *)root_pt + l1idx;
        if (!l1e->invalid.type) { // invalid
            return false;
        } else if (l1e->section.type == 2) { // section
            // check if we have write access
            if (type == ACCESS_WRITE && !(l1e->section.ap10 & 1)) { return false; }
            continue;
        } else if (l1e->super_section.type == 3) { // supersection
            // check if we have write access
            if (type == ACCESS_WRITE && !(l1e->super_section.ap10 & 1)) { return false; }
        } else if (l1e->page_table.type == 1) { // ptable
            // calculate which part of ptable to check
            first_l2idx = l1idx == first_l1idx ? ARM_L2_OFFSET(buffer) : 0;
            last_l2idx  = l1idx == last_l1idx  ? ARM_L2_OFFSET(end)  : ARM_L2_MAX_ENTRIES;
            // read ptable base
            lvaddr_t ptable = local_phys_to_mem((genpaddr_t)l1e->page_table.base_address << 10);
            for (uint16_t l2idx = first_l2idx; l2idx < last_l2idx; l2idx++) {
                l2e = (union arm_l2_entry *)ptable + l2idx;
                if (!l2e->invalid.type) { // invalid entry
                    return false;
                } else if (l2e->large_page.type == 1) { // large page
                    // check if we have write access
                    if (type == ACCESS_WRITE && !(l2e->large_page.ap10 & 1)) { return false; }
                } else if (l2e->small_page.type >= 2) { // type == 2 or 3 - small page
                    // check if we have write access
                    if (type == ACCESS_WRITE && !(l2e->small_page.ap10 & 1)) { return false; }
                }
            }
        }
    }
    // if we never bailed early, the access is fine.
    return true;
}

void paging_dump_tables(struct dcb *dispatcher)
{
    if (!local_phys_is_valid(dispatcher->vspace)) {
        printk(LOG_ERR, "dispatcher->vspace = 0x%"PRIxLPADDR": too high!\n",
               dispatcher->vspace);
        return;
    }
    lvaddr_t l1 = local_phys_to_mem(dispatcher->vspace);

    for (int l1_index = 0; l1_index < ARM_L1_MAX_ENTRIES; l1_index++) {
        // get level2 table
        union arm_l1_entry *l1_e = (union arm_l1_entry *)l1 + l1_index;
        if (!l1_e->raw) { continue; }
        if (l1_e->invalid.type == 2) { // section
            genpaddr_t paddr = (genpaddr_t)(l1_e->section.base_address) << 20;
            printf("%d: (section) 0x%"PRIxGENPADDR"\n", l1_index, paddr);
        } else if (l1_e->invalid.type == 1) { // l2 table
            genpaddr_t ptable_gp = (genpaddr_t)(l1_e->page_table.base_address) << 10;
            lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));

            printf("%d: (l2table) 0x%"PRIxGENPADDR"\n", l1_index, ptable_gp);

            for (int entry = 0; entry < ARM_L2_MAX_ENTRIES; entry++) {
                union arm_l2_entry *e =
                    (union arm_l2_entry *)ptable_lv + entry;
                genpaddr_t paddr = (genpaddr_t)(e->small_page.base_address) << BASE_PAGE_BITS;
                if (!paddr) {
                    continue;
                }
                printf("%d.%d: 0x%"PRIxGENPADDR" (rw=%d%d)\n", l1_index, entry, paddr,
                        (e->small_page.ap10 >> 1) & 1, e->small_page.ap10 & 1);
            }
        }
    }
}

/**
 * \brief Install a page table pointer in a level 1 page table
 * located at 'table_base' to map addresses starting at virtual
 * address 'va'.  The level 2 page table to be used is assumed to be
 * located at physical address 'pa'.
 */
void paging_map_user_pages_l1(lvaddr_t table_base, lvaddr_t va, lpaddr_t pa)
{
    assert(aligned(table_base, ARM_L1_ALIGN));
    assert(aligned(pa, BYTES_PER_SMALL_PAGE));

    union arm_l1_entry e;
    union arm_l1_entry *l1_table;

    e.raw                     = 0;
    e.page_table.type         = L1_TYPE_PAGE_TABLE_ENTRY;
    e.page_table.domain       = 0;
    e.page_table.base_address = ARM_L2_TABLE_PPN(pa);

    if (table_base == 0) {
        if(va < MEMORY_OFFSET) {
            table_base = cp15_read_ttbr0() + MEMORY_OFFSET;
        } else {
            table_base = cp15_read_ttbr1() + MEMORY_OFFSET;
        }
    }
    l1_table = (union arm_l1_entry *) table_base;
    l1_table[ARM_L1_OFFSET(va)] = e;

    /* Clean the modified entry to L2 cache. */
    clean_to_pou(&l1_table[ARM_L1_OFFSET(va)]);
}

/**
 * \brief Install a level 2 page table entry located at l2e, to map
 * physical address 'addr', with flags 'flags'.  'flags' here is in the
 * form of a prototype 32-bit L2 *invalid* PTE with address 0.
 */
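/*
 * Illustrative (hypothetical) caller, not taken from this file: 'flags' would
 * be built as a raw L2 entry with the attribute bits set but the type left
 * invalid and the base address zero, e.g.
 *
 *     union arm_l2_entry proto = { .raw = 0 };
 *     proto.small_page.cacheable  = 1;
 *     proto.small_page.bufferable = 1;
 *     proto.small_page.ap10       = 1;   // kernel RW, user no access
 *     paging_set_l2_entry(&pte_slot, frame_pa, proto.raw);
 *
 * where 'pte_slot' (a uintptr_t holding the L2 entry) and 'frame_pa' (a
 * page-aligned physical address) are hypothetical.
 */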
void paging_set_l2_entry(uintptr_t* l2e, lpaddr_t addr, uintptr_t flags)
{
    union arm_l2_entry e;
    e.raw = flags;

    assert( L2_TYPE(e.raw) == L2_TYPE_INVALID_PAGE );
    assert( e.small_page.base_address == 0);
    assert( ARM_PAGE_OFFSET(addr) == 0 );

    e.small_page.type = L2_TYPE_SMALL_PAGE;
    e.small_page.base_address = (addr >> 12);

    *l2e = e.raw;

    /* Clean the modified entry to L2 cache. */
    clean_to_pou(l2e);
}