/**
 * \file
 * \brief pmap management
 */

/*
 * Copyright (c) 2010-2015 ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

/*
 * There was some minor difficulty here in mapping the CPU's native
 * page table arrangement onto Barrelfish. The problem lies with
 * resource bootstrapping: the bootstrap RAM allocator allocates whole pages.
 *
 * After reworking retype to be range based, we can now choose to create a
 * single 1kB vnode from a 4kB frame, so we currently waste 3kB when creating
 * ARM L2 vnodes before we have a connection to the memory server.
 */

#include <barrelfish/barrelfish.h>
#include <barrelfish/caddr.h>
#include <barrelfish/invocations_arch.h>
#include <stdio.h>

// Location of the VSpace region managed by this pmap.
#define VSPACE_BEGIN ((lvaddr_t)(256UL << 20) * (disp_get_core_id() + 1))

// Amount of virtual address space reserved for mapping frames backing
// refill_slabs: 1024 base pages (4 MiB with 4 kB pages); increased from
// 128 pages for the Pandaboard port.
//#define META_DATA_RESERVED_SPACE (BASE_PAGE_SIZE * 128) // 64
#define META_DATA_RESERVED_SPACE (BASE_PAGE_SIZE * 1024)

static inline uintptr_t
vregion_flags_to_kpi_paging_flags(vregion_flags_t flags)
{
    STATIC_ASSERT(0x1ff == VREGION_FLAGS_MASK, "");
    STATIC_ASSERT(0x0f == KPI_PAGING_FLAGS_MASK, "");
    STATIC_ASSERT(VREGION_FLAGS_READ    == KPI_PAGING_FLAGS_READ,    "");
    STATIC_ASSERT(VREGION_FLAGS_WRITE   == KPI_PAGING_FLAGS_WRITE,   "");
    STATIC_ASSERT(VREGION_FLAGS_EXECUTE == KPI_PAGING_FLAGS_EXECUTE, "");
    STATIC_ASSERT(VREGION_FLAGS_NOCACHE == KPI_PAGING_FLAGS_NOCACHE, "");
    if ((flags & VREGION_FLAGS_MPB) != 0) {
        // XXX: ignore MPB flag on ARM, otherwise the assert below fires -AB
        flags &= ~VREGION_FLAGS_MPB;
    }
    if ((flags & VREGION_FLAGS_WRITE_COMBINING) != 0) {
        // XXX mask out write-combining flag on ARM
        flags &= ~VREGION_FLAGS_WRITE_COMBINING;
    }
    if ((flags & VREGION_FLAGS_VTD_SNOOP) != 0) {
        // XXX mask out vtd-snooping flag on ARM
        flags &= ~VREGION_FLAGS_VTD_SNOOP;
    }
    if ((flags & VREGION_FLAGS_GUARD) != 0) {
        flags = 0;
    }
    assert(0 == (~KPI_PAGING_FLAGS_MASK & (uintptr_t)flags));
    return (uintptr_t)flags;
}
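
/*
 * Illustrative note (not part of the original code): because the static
 * asserts above pin VREGION_FLAGS_READ/WRITE/EXECUTE/NOCACHE to the same bit
 * values as their KPI_PAGING_FLAGS_* counterparts, a typical request such as
 *
 *     vregion_flags_to_kpi_paging_flags(VREGION_FLAGS_READ_WRITE)
 *
 * simply returns KPI_PAGING_FLAGS_READ | KPI_PAGING_FLAGS_WRITE, while a
 * guard mapping (VREGION_FLAGS_GUARD) collapses to 0, i.e. no access.
 */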

static void
set_mapping_capref(struct capref *mapping, struct vnode *root, uint32_t entry)
{
#ifdef GLOBAL_MCN
    mapping->cnode = root->u.vnode.mcnode[entry / L2_CNODE_SLOTS];
    mapping->slot  = entry % L2_CNODE_SLOTS;
#else
    errval_t err = slot_alloc(mapping);
    assert(err_is_ok(err));
#endif
    assert(!cnoderef_is_null(mapping->cnode));
}

// debug print preprocessor flag for this file
//#define LIBBARRELFISH_DEBUG_PMAP

/**
 * \brief check whether region A = [start_a .. end_a) overlaps
 * region B = [start_b .. end_b).
 * \return true iff A overlaps B
 */
static bool is_overlapping(uint16_t start_a, uint16_t end_a, uint16_t start_b, uint16_t end_b)
{
    return
        // B contained in A (A starts strictly before B)
        (start_a < start_b && end_a >= end_b)
        // start_a inside B
        || (start_a >= start_b && start_a < end_b)
        // end_a inside B
        || (end_a > start_b && end_a < end_b);
}
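
/*
 * Illustrative examples (half-open intervals, not part of the original code):
 *   is_overlapping(0, 4, 2, 6)  -> true   (end_a falls inside B)
 *   is_overlapping(1, 3, 0, 8)  -> true   (A lies entirely inside B)
 *   is_overlapping(0, 2, 2, 4)  -> false  (the regions merely touch)
 */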

/**
 * \brief Check whether vnode `root' has entries in between [entry ..
 * entry+len).
 * \param root the vnode to look at
 * \param entry first entry of the region to check
 * \param len   length of the region to check
 * \param only_pages true == do not report previously allocated lower-level
 *                   page tables that are empty
 * \return true iff entries exist in region.
 */
#if defined(LIBBARRELFISH_DEBUG_PMAP)
#define DEBUG_HAS_VNODE
#endif
static bool has_vnode(struct vnode *root, uint32_t entry, size_t len,
               bool only_pages)
{
    assert(root != NULL);
    assert(root->is_vnode);
    struct vnode *n;

    uint32_t end_entry = entry + len;
#ifdef DEBUG_HAS_VNODE
    debug_printf("%s: checking region [%"PRIu32"--%"PRIu32"], only_pages = %d\n",
            __FUNCTION__, entry, end_entry, only_pages);
#endif

    for (n = root->u.vnode.children; n; n = n->next) {
        // region to check [entry .. end_entry)
        if (n->is_vnode && n->entry >= entry && n->entry < end_entry) {
            if (only_pages) {
                return has_vnode(n, 0, ARM_L2_TABLE_BYTES, true);
            }
#ifdef LIBBARRELFISH_DEBUG_PMAP
            debug_printf("1: found page table inside our region\n");
#endif
            return true;
        } else if (n->is_vnode) {
            // all other vnodes do not overlap with us, so go to next
            assert(n->entry < entry || n->entry >= end_entry);
            continue;
        } else {
            // not vnode
            uint32_t end = n->entry + n->u.frame.pte_count;
#ifdef DEBUG_HAS_VNODE
            debug_printf("%s: looking at region: [%"PRIu32"--%"PRIu32"]\n",
                    __FUNCTION__, n->entry, end);
#endif

            // do checks
            if (is_overlapping(entry, end_entry, n->entry, end)) {
                return true;
            }
        }
    }

    return false;
}
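
/*
 * Usage note (illustrative, not part of the original code): do_single_map()
 * below calls this twice. With only_pages == false it asks "is anything at
 * all present here?"; if so, it asks again with only_pages == true to
 * distinguish a real page mapping (a hard error) from a leftover empty page
 * table, which it then reclaims via remove_empty_vnodes().
 */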

/**
 * \brief Starting at a given root, return the vnode with entry equal to #entry
 * \return vnode at index `entry` or NULL
 */
#ifdef LIBBARRELFISH_DEBUG_PMAP
#define DEBUG_FIND_VNODE
#endif
static struct vnode *find_vnode(struct vnode *root, uint16_t entry)
{
    assert(root != NULL);
    assert(root->is_vnode);
    struct vnode *n;

#ifdef DEBUG_FIND_VNODE
    debug_printf("%s: looking for %"PRIu16"\n", __FUNCTION__, entry);
#endif

    for (n = root->u.vnode.children; n != NULL; n = n->next) {
        if (n->is_vnode &&
            is_overlapping(entry, entry + 1, n->entry, n->entry + 1)) {
#ifdef DEBUG_FIND_VNODE
            debug_printf("%s: found ptable at [%"PRIu16"--%"PRIu16"]\n",
                    __FUNCTION__, n->entry, n->entry + 1);
#endif
            return n;
        }
        else if (n->is_vnode) {
            assert(!is_overlapping(entry, entry + 1, n->entry, n->entry + 1));
            // ignore all other vnodes
            continue;
        }

        // not vnode
        assert(!n->is_vnode);
        uint16_t end = n->entry + n->u.frame.pte_count;
#ifdef DEBUG_FIND_VNODE
        debug_printf("%s: looking at section [%"PRIu16"--%"PRIu16"]\n", __FUNCTION__, n->entry, end);
#endif
        if (n->entry <= entry && entry < end) {
#ifdef DEBUG_FIND_VNODE
            debug_printf("%d \\in [%d, %d]\n", entry, n->entry, end);
#endif
            return n;
        }
    }
    return NULL;
}

/**
 * \brief check whether region [entry, entry+npages) is contained in a child
 * of `root`.
 */
static bool inside_region(struct vnode *root, uint32_t entry, uint32_t npages)
{
    assert(root != NULL);
    assert(root->is_vnode);

    struct vnode *n;

    for (n = root->u.vnode.children; n; n = n->next) {
        if (!n->is_vnode) {
            uint16_t end = n->entry + n->u.frame.pte_count;
            if (n->entry <= entry && entry + npages <= end) {
                return true;
            }
        }
    }

    return false;
}
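
/*
 * Illustrative example (not part of the original code): if `root` has a
 * frame child with entry == 16 and pte_count == 8 (covering entries
 * [16..24)), then inside_region(root, 18, 4) is true, whereas
 * inside_region(root, 20, 8) is false because the queried range would
 * extend past entry 24.
 */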

/**
 * \brief remove vnode `item` from linked list of children of `root`
 */
static void remove_vnode(struct vnode *root, struct vnode *item)
{
    assert(root->is_vnode);
    struct vnode *walk = root->u.vnode.children;
    struct vnode *prev = NULL;
    while (walk) {
        if (walk == item) {
            if (prev) {
                prev->next = walk->next;
                return;
            } else {
                root->u.vnode.children = walk->next;
                return;
            }
        }
        prev = walk;
        walk = walk->next;
    }
    USER_PANIC("Should not get here");
}

/**
 * \brief (recursively) remove empty page tables in region [entry ..
 * entry+len) in vnode `root`.
 */
#ifdef LIBBARRELFISH_DEBUG_PMAP
#define DEBUG_REMOVE_EMPTY_VNODES
#endif
static void remove_empty_vnodes(struct slab_allocator *vnode_alloc, struct vnode *root,
                         uint32_t entry, size_t len)
{
    // precondition: root does not have pages in [entry, entry+len)
    assert(!has_vnode(root, entry, len, true));

    errval_t err;
    uint32_t end_entry = entry + len;
    for (struct vnode *n = root->u.vnode.children; n; n = n->next) {
        // sanity check and skip leaf entries
        if (!n->is_vnode) {
            continue;
        }
        // here we know that all vnodes we're interested in are
        // page tables
        assert(n->is_vnode);

        // Unmap vnode if it is in range [entry .. entry+len)
        if (n->entry >= entry && n->entry < end_entry) {
            err = vnode_unmap(root->u.vnode.invokable, n->mapping);
            assert(err_is_ok(err));

            if (!capcmp(n->u.vnode.cap, n->u.vnode.invokable)) {
                // delete invokable pt cap if it's a real copy
                err = cap_destroy(n->u.vnode.invokable);
                assert(err_is_ok(err));
            }

            // delete last copy of pt cap
            err = cap_destroy(n->u.vnode.cap);
            assert(err_is_ok(err));

            // remove vnode from list
            remove_vnode(root, n);
            slab_free(vnode_alloc, n);
        }
    }
}

/**
 * \brief Allocates a new VNode, adding it to the page table and our metadata
 */
static errval_t alloc_vnode(struct pmap_arm *pmap_arm, struct vnode *root,
                            enum objtype type, uint32_t entry,
                            struct vnode **retvnode)
{
    assert(root->is_vnode);
    errval_t err;

    struct vnode *newvnode = slab_alloc(&pmap_arm->slab);
    if (newvnode == NULL) {
        return LIB_ERR_SLAB_ALLOC_FAIL;
    }
    newvnode->is_vnode = true;

    // The VNode capability
    err = slot_alloc(&newvnode->u.vnode.cap);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    err = vnode_create(newvnode->u.vnode.cap, type);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_CREATE);
    }

    // XXX: do we need to put master copy in other cspace?
    newvnode->u.vnode.invokable = newvnode->u.vnode.cap;

    // The VNode meta data
    newvnode->entry            = entry;
    newvnode->next             = root->u.vnode.children;
    root->u.vnode.children     = newvnode;
    newvnode->u.vnode.children = NULL;

    err = slot_alloc(&newvnode->mapping);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_SLOT_ALLOC);
    }

    err = vnode_map(root->u.vnode.invokable, newvnode->u.vnode.cap,
            entry, KPI_PAGING_FLAGS_READ | KPI_PAGING_FLAGS_WRITE, 0, 1,
            newvnode->mapping);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_MAP);
    }

#ifdef GLOBAL_MCN
    /* allocate mapping cnodes */
    for (int i = 0; i < MCN_COUNT; i++) {
        err = cnode_create_l2(&newvnode->u.vnode.mcn[i], &newvnode->u.vnode.mcnode[i]);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_ALLOC_CNODE);
        }
    }
#endif

    if (retvnode) {
        *retvnode = newvnode;
    }
    return SYS_ERR_OK;
}
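
/*
 * Usage sketch (illustrative, mirrors get_ptable() below): to back one L1
 * entry with a fresh L2 table one would call, e.g.
 *
 *     struct vnode *pt = NULL;
 *     err = alloc_vnode(pmap, &pmap->root, ObjType_VNode_ARM_l2,
 *                       ARM_L1_OFFSET(vaddr), &pt);
 *
 * Note that the error paths above do not roll back the slab entry or the
 * already-allocated capability slots; callers simply propagate the error.
 */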

/**
 * \brief Returns the vnode for the pagetable mapping a given vspace address
 */
#ifdef LIBBARRELFISH_DEBUG_PMAP
#define DEBUG_GET_PTABLE
#endif
static errval_t get_ptable(struct pmap_arm  *pmap,
                           genvaddr_t        vaddr,
                           struct vnode    **ptable)
{
    // NB Strictly there are 12 bits in the ARM L1, but the allocation unit
    // of L2 tables is one page of L2 entries (4 tables), so we use 10 bits
    // for the L1 index here
    uintptr_t idx = ARM_L1_OFFSET(vaddr);
    if ((*ptable = find_vnode(&pmap->root, idx)) == NULL)
    {
        // L1 table entries point to L2 tables so allocate an L2
        // table for this L1 entry.

        struct vnode *tmp = NULL; // Tmp variable for passing to alloc_vnode

        errval_t err = alloc_vnode(pmap, &pmap->root, ObjType_VNode_ARM_l2,
                                   idx, &tmp);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "alloc_vnode");
            return err_push(err, LIB_ERR_PMAP_ALLOC_VNODE);
        }
        assert(tmp != NULL);
        *ptable = tmp; // Set argument to received value
    }
    assert(ptable);
    struct vnode *pt = *ptable;
    if (!pt->is_vnode) {
        debug_printf("found section @%d, trying to get ptable for %zu\n",
                pt->entry, (size_t)idx);
    }
    assert(pt->is_vnode);
#ifdef DEBUG_GET_PTABLE
    debug_printf("have ptable: %p\n", pt);
#endif

    return SYS_ERR_OK;
}
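
/*
 * Index-split example (illustrative; it assumes the 10-bit scheme described
 * above, i.e. ARM_L1_OFFSET() takes bits [31:22] and ARM_L2_OFFSET() bits
 * [21:12] of the virtual address): vaddr 0x40001000 would then yield L1
 * index 0x100 (256) and L2 index 1, so the page is tracked as entry 1 of
 * the 1024-entry L2 block hanging off L1 slot 256.
 */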

static struct vnode *find_ptable(struct pmap_arm  *pmap,
                                 genvaddr_t vaddr)
{
    // NB Strictly there are 12 bits in the ARM L1, but the allocation unit
    // of L2 tables is one page of L2 entries (4 tables), so we use 10 bits
    // for the L1 index here as well
    uintptr_t idx = ARM_L1_OFFSET(vaddr);
    return find_vnode(&pmap->root, idx);
}

static errval_t do_single_map(struct pmap_arm *pmap, genvaddr_t vaddr, genvaddr_t vend,
                              struct capref frame, size_t offset, size_t pte_count,
                              vregion_flags_t flags)
{
    errval_t err = SYS_ERR_OK;
    // Get the page table
    struct vnode *ptable;
    uintptr_t entry;
    bool is_large = false;

    struct frame_identity fi;
    err = frame_identify(frame, &fi);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_FRAME_IDENTIFY);
    }

    if (flags & VREGION_FLAGS_LARGE &&
        (vaddr & LARGE_PAGE_MASK) == 0 &&
        fi.bytes >= LARGE_PAGE_SIZE &&
        (fi.base & LARGE_PAGE_MASK) == 0) {
        // section mapping (1MB), mapped in the L1 table at root
        ptable = &pmap->root;
        entry = ARM_L1_OFFSET(vaddr);
        is_large = true;
#ifdef LIBBARRELFISH_DEBUG_PMAP
        debug_printf("do_single_map: large path: entry=%zu\n", entry);
#endif
    } else {
#ifdef LIBBARRELFISH_DEBUG_PMAP
        debug_printf("%s: 4k path: mapping %"PRIxGENVADDR", %zu entries\n", __FUNCTION__, vaddr, pte_count);
        debug_printf("4k path: L1 entry: %zu\n", ARM_L1_OFFSET(vaddr));
#endif
        // 4k mapping
        // XXX: reassess the following note -SG
        // NOTE: strictly speaking an L2 entry only has 8 bits while an L1
        // entry has 12 bits, but due to the way Barrelfish allocates L1 and
        // L2 tables, we use 10 bits for the entry here and in the map syscall
        err = get_ptable(pmap, vaddr, &ptable);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "get_ptable() in do_single_map");
            return err_push(err, LIB_ERR_PMAP_GET_PTABLE);
        }
        entry = ARM_L2_OFFSET(vaddr);
#ifdef LIBBARRELFISH_DEBUG_PMAP
        debug_printf("%s: 4k path: L2 entry=%zu\n", __FUNCTION__, entry);
        debug_printf("%s: ptable->is_vnode = %d\n",
                __FUNCTION__, ptable->is_vnode);
#endif
    }

    // convert flags
    flags &= ~(VREGION_FLAGS_LARGE | VREGION_FLAGS_HUGE);
    uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags);

    // check if there is an overlapping mapping
    if (has_vnode(ptable, entry, pte_count, false)) {
#ifdef LIBBARRELFISH_DEBUG_PMAP
        debug_printf("has_vnode, only_pages=false  returned true\n");
#endif
        if (has_vnode(ptable, entry, pte_count, true)) {
            printf("page already exists in 0x%"
                    PRIxGENVADDR"--0x%"PRIxGENVADDR"\n", vaddr, vend);
            return LIB_ERR_PMAP_EXISTING_MAPPING;
        } else {
#ifdef LIBBARRELFISH_DEBUG_PMAP
            debug_printf("has_vnode, only_pages=true  returned false, cleaning up empty ptables\n");
#endif
            // clean out empty page tables. We do this here because we benefit
            // from having the page tables in place when doing lots of small
            // mappings
            // XXX: TODO: fix this + mapping of L2 to work on single 1k
            // chunks
            remove_empty_vnodes(&pmap->slab, ptable, entry, pte_count);
        }
    }

    // Create user level datastructure for the mapping
    struct vnode *page = slab_alloc(&pmap->slab);
    assert(page);
    page->is_vnode = false;
    page->entry = entry;
    page->next  = ptable->u.vnode.children;
    ptable->u.vnode.children = page;
    page->u.frame.cap = frame;
    page->u.frame.flags = flags;
    page->u.frame.pte_count = pte_count;

    set_mapping_capref(&page->mapping, ptable, entry);

    // Map entry into the page table
    err = vnode_map(ptable->u.vnode.invokable, frame, entry,
                    pmap_flags, offset, pte_count,
                    page->mapping);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_VNODE_MAP);
    }
    return SYS_ERR_OK;
}
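
/*
 * Summary (illustrative, not part of the original code): a frame that is
 * 1MB-aligned, at least LARGE_PAGE_SIZE big and requested with
 * VREGION_FLAGS_LARGE is installed directly as a section entry in the root
 * L1 table; anything else goes through get_ptable() and is installed as
 * pte_count consecutive small-page entries starting at ARM_L2_OFFSET(vaddr).
 */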

static errval_t do_map(struct pmap_arm *pmap, genvaddr_t vaddr,
                       struct capref frame, size_t offset, size_t size,
                       vregion_flags_t flags, size_t *retoff, size_t *retsize)
{
    errval_t err;
    size_t page_size;
    size_t offset_level;

    // get base address and size of frame
    struct frame_identity fi;
    err = frame_identify(frame, &fi);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_DO_MAP);
    }

    // determine mapping specific parts
    if (flags & VREGION_FLAGS_LARGE &&
        (vaddr & LARGE_PAGE_MASK) == 0 &&
        fi.bytes >= LARGE_PAGE_SIZE &&
        (fi.base & LARGE_PAGE_MASK) == 0) {
        // section mapping (1MB)
        page_size = LARGE_PAGE_SIZE;
        offset_level = ARM_L1_OFFSET(vaddr);
#ifdef LIBBARRELFISH_DEBUG_PMAP
        printf("do_map: large path\n");
        printf("page_size: %zx, size: %zx\n", page_size, size);
#endif
    } else {
        // normal 4k mapping
        page_size = BASE_PAGE_SIZE;
        offset_level = ARM_L2_OFFSET(vaddr);
    }

    size = ROUND_UP(size, page_size);
    size_t pte_count = DIVIDE_ROUND_UP(size, page_size);
    if (flags & VREGION_FLAGS_LARGE) {
#ifdef LIBBARRELFISH_DEBUG_PMAP
        printf("#pages: %zu\n", pte_count);
#endif
    }
    genvaddr_t vend = vaddr + size;

    if (fi.bytes < size) {
        return LIB_ERR_PMAP_FRAME_SIZE;
    }

#ifdef LIBBARRELFISH_DEBUG_PMAP
        printf("do_map: mapping %zu pages (size=%zx), from %zu.%zu\n",
                pte_count, page_size, ARM_L1_OFFSET(vaddr), ARM_L2_OFFSET(vaddr));
        printf("page_size: %zx, size: %zx\n", page_size, size);
#endif

    // should be trivially true for section mappings
    if ((ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend)) ||
        flags & VREGION_FLAGS_LARGE) {
        // fast path
        err = do_single_map(pmap, vaddr, vend, frame, offset, pte_count, flags);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "[do_map] in fast path");
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }
    } else { // multiple leaf page tables
        // first leaf
        uint32_t c = ARM_L2_MAX_ENTRIES - offset_level;
        genvaddr_t temp_end = vaddr + c * page_size;
        err = do_single_map(pmap, vaddr, temp_end, frame, offset, c, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }

        // map full leaves
        while (ARM_L1_OFFSET(temp_end) < ARM_L1_OFFSET(vend)) { // update vars
            vaddr = temp_end;
            temp_end = vaddr + ARM_L2_MAX_ENTRIES * page_size;
            offset += c * page_size;
            c = ARM_L2_MAX_ENTRIES;

            // do mapping
            err = do_single_map(pmap, vaddr, temp_end, frame, offset, ARM_L2_MAX_ENTRIES, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
        }

        // map remaining part
        offset += c * page_size;
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(temp_end);
        if (c) {
            // do mapping
            err = do_single_map(pmap, temp_end, vend, frame, offset, c, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_DO_MAP);
            }
        }
    }
    if (retoff) {
        *retoff = offset;
    }
    if (retsize) {
        *retsize = size;
    }
    //has_vnode_debug = false;
    return SYS_ERR_OK;
#if 0
    errval_t err;
    uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags);

    for (size_t i = offset; i < offset + size; i += BASE_PAGE_SIZE) {

        vaddr += BASE_PAGE_SIZE;
    }

    if (retoff) {
        *retoff = offset;
    }
    if (retsize) {
        *retsize = size;
    }
    return SYS_ERR_OK;
#endif
}

static size_t
max_slabs_required(size_t bytes)
{
    // Perform a slab allocation for every page (do_map -> slab_alloc)
    size_t pages     = DIVIDE_ROUND_UP(bytes, BASE_PAGE_SIZE);
    // Perform a slab allocation for every L2 (get_ptable -> find_vnode)
    size_t l2entries = DIVIDE_ROUND_UP(pages, ARM_L2_MAX_ENTRIES);

    return 2 * l2entries;
}
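
/*
 * Worked example (illustrative, assuming 4kB base pages): mapping 1MB
 * touches 256 pages, which all fit in a single L2 leaf, so the function
 * returns 2 -- one slab for the struct vnode tracking the mapping itself
 * and one for a possibly newly allocated L2 page table.
 */
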
static size_t max_slabs_required_large(size_t bytes)
{
    // always need only 1 slab, as we can represent any size section mapping
    // in a single struct vnode.
    return 1;
}

/**
 * \brief Refill slabs used for metadata
 *
 * \param pmap     The pmap to refill in
 * \param request  The number of slabs the allocator must have
 * when the function returns
 *
 * When the current pmap is initialized,
 * it reserves some virtual address space for metadata.
 * This reserved address space is used here.
 *
 * Can only be called for the current pmap.
 * Will recursively call into itself until it has enough slabs.
 */
static errval_t refill_slabs(struct pmap_arm *pmap, size_t request)
{
    errval_t err;

    /* Keep looping until we have #request slabs */
    while (slab_freecount(&pmap->slab) < request) {
        // Amount of bytes required for #request
        size_t bytes = SLAB_STATIC_SIZE(request - slab_freecount(&pmap->slab),
                                        sizeof(struct vnode));

        /* Get a frame of that size */
        struct capref cap;
        err = frame_alloc(&cap, bytes, &bytes);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_ALLOC);
        }

        /* If we do not have enough slabs to map the frame in, recurse */
        size_t required_slabs_for_frame = max_slabs_required(bytes);
        if (slab_freecount(&pmap->slab) < required_slabs_for_frame) {
            // If we recurse, we require more slabs than to map a single page
            assert(required_slabs_for_frame > 4);

            err = refill_slabs(pmap, required_slabs_for_frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            }
        }

        /* Perform mapping */
        genvaddr_t genvaddr = pmap->vregion_offset;
        pmap->vregion_offset += (genvaddr_t)bytes;

        // if this assert fires, increase META_DATA_RESERVED_SPACE
        assert(pmap->vregion_offset < (vregion_get_base_addr(&pmap->vregion) +
               vregion_get_size(&pmap->vregion)));

        err = do_map(pmap, genvaddr, cap, 0, bytes,
                     VREGION_FLAGS_READ_WRITE, NULL, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }

        /* Grow the slab */
        lvaddr_t buf = vspace_genvaddr_to_lvaddr(genvaddr);
        slab_grow(&pmap->slab, (void*)buf, bytes);
    }

    return SYS_ERR_OK;
}
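
/*
 * Bootstrap note (illustrative, not part of the original code): the very
 * first slabs come from the static slab_buffer handed to slab_grow() in
 * pmap_init() below, so this function is only reached once frame_alloc()
 * works. Every refill then maps its frame into the VSPACE_BEGIN /
 * META_DATA_RESERVED_SPACE window reserved by pmap_current_init(), and the
 * recursive call covers the case where mapping that frame itself needs more
 * vnode slabs than are currently free.
 */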

/**
 * \brief Create page mappings
 *
 * \param pmap     The pmap object
 * \param vaddr    The virtual address to create the mapping for
 * \param frame    The frame cap to map in
 * \param offset   Offset into the frame cap
 * \param size     Size of the mapping
 * \param flags    Flags for the mapping
 * \param retoff   If non-NULL, filled in with adjusted offset of mapped region
 * \param retsize  If non-NULL, filled in with adjusted size of mapped region
 */
static errval_t
map(struct pmap     *pmap,
    genvaddr_t       vaddr,
    struct capref    frame,
    size_t           offset,
    size_t           size,
    vregion_flags_t  flags,
    size_t          *retoff,
    size_t          *retsize)
{
    struct pmap_arm *pmap_arm = (struct pmap_arm *)pmap;

    errval_t err;
    size_t base;
    size_t page_size;
    size_t slabs_required;

    struct frame_identity fi;
    err = frame_identify(frame, &fi);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_FRAME_IDENTIFY);
    }

    // adjust the mapping to be on page boundaries
    if (flags & VREGION_FLAGS_LARGE &&
        (vaddr & LARGE_PAGE_MASK) == 0 &&
        fi.bytes >= LARGE_PAGE_SIZE &&
        (fi.base & LARGE_PAGE_MASK) == 0) {
        // section mapping (1MB)
        base = LARGE_PAGE_OFFSET(offset);
        page_size = LARGE_PAGE_SIZE;
        slabs_required = max_slabs_required_large(size);
#ifdef LIBBARRELFISH_DEBUG_PMAP
        printf("map: large path, page_size: %zu, base: %zu, slabs: %zu, size: %zu, "
                "frame size: %zu\n", page_size, base, slabs_required, size, fi.bytes);
#endif
    } else {
        // 4k mapping
        base = BASE_PAGE_OFFSET(offset);
        page_size = BASE_PAGE_SIZE;
        slabs_required = max_slabs_required(size);
    }
    size   += base;
    size    = ROUND_UP(size, page_size);
    offset -= base;

    const size_t slabs_reserve = 3; // >= max_slabs_required(1)
    uint64_t  slabs_free       = slab_freecount(&pmap_arm->slab);

    slabs_required += slabs_reserve;

    if (slabs_required > slabs_free) {
        if (get_current_pmap() == pmap) {
            err = refill_slabs(pmap_arm, slabs_required);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            }
        }
        else {
            size_t bytes = SLAB_STATIC_SIZE(slabs_required - slabs_free,
                                            sizeof(struct vnode));
            void *buf = malloc(bytes);
            if (!buf) {
                return LIB_ERR_MALLOC_FAIL;
            }
            slab_grow(&pmap_arm->slab, buf, bytes);
        }
    }

    return do_map(pmap_arm, vaddr, frame, offset, size, flags,
                  retoff, retsize);
}

static errval_t do_single_unmap(struct pmap_arm *pmap, genvaddr_t vaddr,
                                size_t pte_count)
{
#ifdef LIBBARRELFISH_DEBUG_PMAP
    debug_printf("%s: vaddr=0x%"PRIxGENVADDR", pte_count=%zu\n",
             __FUNCTION__, vaddr, pte_count);
#endif
    errval_t err;
    struct vnode *pt = find_ptable(pmap, vaddr);
    // pt->is_vnode == non-large mapping
    if (pt && pt->is_vnode) {
        // analogous to do_single_map, we use 10 bits for tracking pages in user space -SG
        struct vnode *page = find_vnode(pt, ARM_L2_OFFSET(vaddr));
        if (page && page->u.frame.pte_count == pte_count) {
#ifdef LIBBARRELFISH_DEBUG_PMAP
            debug_printf("page unmap: pt entry: %zu, entry = %zu, pte_count = %hu\n",
                    pt->entry, page->entry, page->u.frame.pte_count);
#endif
            err = vnode_unmap(pt->u.vnode.cap, page->mapping);
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "vnode_unmap");
                return err_push(err, LIB_ERR_VNODE_UNMAP);
            }

            // cleanup mapping cap
            err = cap_delete(page->mapping);
            if (err_is_fail(err)) {
                DEBUG_ERR(err, "cap_delete");
                return err_push(err, LIB_ERR_CAP_DELETE);
            }
#ifndef GLOBAL_MCN
            err = slot_free(page->mapping);
            if (err_is_fail(err)) {
                debug_printf("do_single_unmap: slot_free (mapping): %s\n",
                        err_getstring(err));
            }
#endif

            remove_vnode(pt, page);
            slab_free(&pmap->slab, page);
        }
        else {
            return LIB_ERR_PMAP_FIND_VNODE;
        }
    } else if (pt) {
#ifdef LIBBARRELFISH_DEBUG_PMAP
        debug_printf("section unmap: entry = %zu\n", pt->entry);
#endif
        err = vnode_unmap(pmap->root.u.vnode.cap, pt->mapping);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "vnode_unmap");
            return err_push(err, LIB_ERR_VNODE_UNMAP);
        }

        // cleanup mapping cap
        err = cap_delete(pt->mapping);
        if (err_is_fail(err)) {
            DEBUG_ERR(err, "cap_delete");
            return err_push(err, LIB_ERR_CAP_DELETE);
        }
        // we need to free slots when unmapping page tables, as we don't have
        // mapping cnodes for the root vnode on ARMv7
        err = slot_free(pt->mapping);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_SLOT_FREE);
        }

        remove_vnode(&pmap->root, pt);
        slab_free(&pmap->slab, pt);
    } else {
        return LIB_ERR_PMAP_FIND_VNODE;
    }

    return SYS_ERR_OK;
}

/**
 * \brief Remove page mappings
 *
 * \param pmap     The pmap object
 * \param vaddr    The start of the virtual address range to remove
 * \param size     The size of the virtual address range to remove
 * \param retsize  If non-NULL, filled in with the actual size removed
 */
static errval_t
unmap(struct pmap *pmap,
      genvaddr_t   vaddr,
      size_t       size,
      size_t      *retsize)
{
    errval_t err, ret = SYS_ERR_OK;
    struct pmap_arm *pmap_arm = (struct pmap_arm*)pmap;
    size = ROUND_UP(size, BASE_PAGE_SIZE);
    size_t pte_count = size / BASE_PAGE_SIZE;
    genvaddr_t vend = vaddr + size;

    if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) {
        // fast path
#ifdef LIBBARRELFISH_DEBUG_PMAP
        debug_printf("%s: fast path vaddr=0x%"PRIxGENVADDR", pte_count=%zu\n",
                __FUNCTION__, vaddr, pte_count);
#endif
        err = do_single_unmap(pmap_arm, vaddr, pte_count);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
        }
    } else { // slow path
        // unmap first leaf
        uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
#ifdef LIBBARRELFISH_DEBUG_PMAP
        debug_printf("%s: slow path 1st leaf vaddr=0x%"PRIxGENVADDR", pte_count=%zu\n",
                __FUNCTION__, vaddr, c);
#endif
        err = do_single_unmap(pmap_arm, vaddr, c);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
        }

        // unmap full leaves
        vaddr += c * BASE_PAGE_SIZE;
        while (ARM_L1_OFFSET(vaddr) < ARM_L1_OFFSET(vend)) {
            c = ARM_L2_MAX_ENTRIES;
#ifdef LIBBARRELFISH_DEBUG_PMAP
            debug_printf("%s: slow path full leaf vaddr=0x%"PRIxGENVADDR", pte_count=%zu\n",
                    __FUNCTION__, vaddr, c);
#endif
            err = do_single_unmap(pmap_arm, vaddr, c);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            }
            vaddr += c * BASE_PAGE_SIZE;
        }

        // unmap remaining part
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(vaddr);
        if (c) {
#ifdef LIBBARRELFISH_DEBUG_PMAP
            debug_printf("%s: slow path last leaf vaddr=0x%"PRIxGENVADDR", pte_count=%zu\n",
                    __FUNCTION__, vaddr, c);
#endif
            err = do_single_unmap(pmap_arm, vaddr, c);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            }
        }
    }

    if (retsize) {
        *retsize = size;
    }

    return ret;
}
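
/*
 * Split example (illustrative; it takes ARM_L2_MAX_ENTRIES = 512 purely for
 * concreteness): unmapping 5 pages starting at L2 offset 510 first removes
 * the 2 entries left in the current leaf (510 and 511), finds no further
 * full leaf before vend, and then removes the remaining 3 entries at the
 * start of the next leaf via the "remaining part" branch above.
 */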

/**
 * \brief Determine a suitable address for a given memory object
 *
 * \param pmap    The pmap object
 * \param memobj  The memory object to determine the address for
 * \param alignment Minimum alignment
 * \param vaddr   Pointer to return the determined address
 *
 * Relies on vspace.c code maintaining an ordered list of vregions
 */
static errval_t
determine_addr(struct pmap   *pmap,
               struct memobj *memobj,
               size_t        alignment,
               genvaddr_t    *vaddr)
{
    assert(pmap->vspace->head);

    if (alignment == 0) {
        alignment = BASE_PAGE_SIZE;
    } else {
        alignment = ROUND_UP(alignment, BASE_PAGE_SIZE);
    }
    size_t size = ROUND_UP(memobj->size, alignment);

    struct vregion *walk = pmap->vspace->head;
    while (walk->next) { // Try to insert between existing mappings
        genvaddr_t walk_base = vregion_get_base_addr(walk);
        genvaddr_t walk_size = ROUND_UP(vregion_get_size(walk), BASE_PAGE_SIZE);
        genvaddr_t walk_end  = ROUND_UP(walk_base + walk_size, alignment);
        genvaddr_t next_base = vregion_get_base_addr(walk->next);

        if (next_base > walk_end + size &&
            walk_base + walk_size > VSPACE_BEGIN) { // Ensure mappings are placed above VSPACE_BEGIN
            *vaddr = walk_end;
            return SYS_ERR_OK;
        }
        walk = walk->next;
    }

    *vaddr = ROUND_UP((vregion_get_base_addr(walk)
                       + ROUND_UP(vregion_get_size(walk), alignment)),
                       alignment);
    return SYS_ERR_OK;
}

/**
 * \brief Retrieve an address that can currently be used for large mappings
 */
static errval_t determine_addr_raw(struct pmap *pmap, size_t size,
                                   size_t alignment, genvaddr_t *retvaddr)
{
    struct pmap_arm *pmap_arm = (struct pmap_arm *)pmap;

    struct vnode *walk_pdir = pmap_arm->root.u.vnode.children;
    assert(walk_pdir != NULL); // assume there's always at least one existing entry

    if (alignment == 0) {
        alignment = BASE_PAGE_SIZE;
    } else {
        alignment = ROUND_UP(alignment, BASE_PAGE_SIZE);
    }
    size = ROUND_UP(size, alignment);

    size_t free_count = DIVIDE_ROUND_UP(size, LARGE_PAGE_SIZE);
    //debug_printf("need %zu contiguous free pdirs\n", free_count);

    // compile pdir free list
    // Barrelfish treats the L1 as 1024 entries
    bool f[ARM_L1_MAX_ENTRIES];
    for (int i = 0; i < ARM_L1_MAX_ENTRIES; i++) {
        f[i] = true;
    }
    f[walk_pdir->entry] = false;
    while (walk_pdir) {
        assert(walk_pdir->is_vnode);
        f[walk_pdir->entry] = false;
        walk_pdir = walk_pdir->next;
    }
    genvaddr_t first_free = 384;
    for (; first_free < 512; first_free++) {
        if (f[first_free]) {
            for (int i = 1; i < free_count; i++) {
                if (!f[first_free + i]) {
                    // advance pointer
                    first_free = first_free + i;
                    goto next;
                }
            }
            break;
        }
next:
        assert(1 == 1); // make compiler shut up about label
    }
    //printf("first free: %li\n", (uint32_t)first_free);
    if (first_free + free_count <= 512) {
        *retvaddr = first_free << 22;
        return SYS_ERR_OK;
    } else {
        return LIB_ERR_OUT_OF_VIRTUAL_ADDR;
    }
}
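
/*
 * Range note (illustrative, derived from the constants above): the search
 * only considers L1 slots 384..511, and with the << 22 conversion each slot
 * corresponds to 4MB of virtual address space, so returned addresses fall
 * in the window 0x60000000--0x7FFFFFFF.
 */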

static errval_t do_single_modify_flags(struct pmap_arm *pmap, genvaddr_t vaddr,
                                       size_t pages, vregion_flags_t flags)
{
    errval_t err = SYS_ERR_OK;
    struct vnode *ptable = find_ptable(pmap, vaddr);
    uint16_t ptentry = ARM_L2_OFFSET(vaddr);
    if (ptable) {
        struct vnode *page = find_vnode(ptable, ptentry);
        if (page) {
            if (inside_region(ptable, ptentry, pages)) {
                // we're modifying part of a valid mapped region
                // arguments to invocation: invoke frame cap, first affected
                // page (as offset from first page in mapping), #affected
                // pages, new flags. Invocation should check compatibility of
                // new set of flags with cap permissions.
                size_t off = ptentry - page->entry;
                uintptr_t pmap_flags = vregion_flags_to_kpi_paging_flags(flags);
                // VA hinting NYI on ARM, so we always pass 0 for va_hint
                err = invoke_mapping_modify_flags(page->mapping,
                        off, pages, pmap_flags, 0);
                if (err_is_fail(err)) {
                    printf("invoke_mapping_modify_flags returned error: %s (%"PRIuERRV")\n",
                            err_getstring(err), err);
                }
                return err;
            } else {
                // overlaps some region border
                return LIB_ERR_PMAP_EXISTING_MAPPING;
            }
        }
    }
    return SYS_ERR_OK;
}

/**
 * \brief Modify the flags of an existing page mapping
 *
 * \param pmap     The pmap object
 * \param vaddr    The virtual address of the mapping to modify
 * \param size     The size of the region to modify
 * \param flags    New flags for the mapping
 * \param retsize  If non-NULL, filled in with the actual size modified
 */
static errval_t
modify_flags(struct pmap     *pmap,
             genvaddr_t       vaddr,
             size_t           size,
             vregion_flags_t  flags,
             size_t          *retsize)
{
    errval_t err, ret = SYS_ERR_OK;
    struct pmap_arm *pmap_arm = (struct pmap_arm*)pmap;
    size = ROUND_UP(size, BASE_PAGE_SIZE);
    size_t pte_count = size / BASE_PAGE_SIZE;
    genvaddr_t vend = vaddr + size;

    if (ARM_L1_OFFSET(vaddr) == ARM_L1_OFFSET(vend-1)) {
        // fast path
        err = do_single_modify_flags(pmap_arm, vaddr, pte_count, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
        }
    }
    else { // slow path
        // modify flags in first leaf
        uint32_t c = ARM_L2_MAX_ENTRIES - ARM_L2_OFFSET(vaddr);
        err = do_single_modify_flags(pmap_arm, vaddr, c, flags);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
        }

        // modify flags in full leaves
        vaddr += c * BASE_PAGE_SIZE;
        while (ARM_L1_OFFSET(vaddr) < ARM_L1_OFFSET(vend)) {
            c = ARM_L2_MAX_ENTRIES;
            err = do_single_modify_flags(pmap_arm, vaddr, c, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            }
            vaddr += c * BASE_PAGE_SIZE;
        }

        // modify flags in remaining part
        c = ARM_L2_OFFSET(vend) - ARM_L2_OFFSET(vaddr);
        if (c) {
            err = do_single_modify_flags(pmap_arm, vaddr, c, flags);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_PMAP_UNMAP);
            }
        }
    }

    if (retsize) {
        *retsize = size;
    }

    return ret;
}

/**
 * \brief Query an existing page mapping
 *
 * \param pmap   The pmap object
 * \param vaddr  The virtual address to query
 * \param info   If non-NULL, filled in with the mapping's base address, size,
 *               cap, offset within the cap, and flags
 */
static errval_t lookup(struct pmap *pmap, genvaddr_t vaddr,
                       struct pmap_mapping_info *info)
{
    USER_PANIC("NYI");
    return 0;
}


static errval_t
serialise(struct pmap *pmap, void *buf, size_t buflen)
{
    // Unimplemented: ignored
    return SYS_ERR_OK;
}

static errval_t
deserialise(struct pmap *pmap, void *buf, size_t buflen)
{
    // Unimplemented: we start with an empty pmap, and avoid the bottom of the A/S
    return SYS_ERR_OK;
}

static struct pmap_funcs pmap_funcs = {
    .determine_addr = determine_addr,
    .determine_addr_raw = determine_addr_raw,
    .map = map,
    .unmap = unmap,
    .modify_flags = modify_flags,
    .lookup = lookup,
    .serialise = serialise,
    .deserialise = deserialise,
};

/**
 * \brief Initialize the pmap object
 */
errval_t
pmap_init(struct pmap   *pmap,
          struct vspace *vspace,
          struct capref  vnode,
          struct slot_allocator *opt_slot_alloc)
{
    struct pmap_arm* pmap_arm = (struct pmap_arm*)pmap;

    /* Generic portion */
    pmap->f = pmap_funcs;
    pmap->vspace = vspace;

    // Slab allocator for vnodes
    slab_init(&pmap_arm->slab, sizeof(struct vnode), NULL);
    slab_grow(&pmap_arm->slab,
              pmap_arm->slab_buffer,
              sizeof(pmap_arm->slab_buffer));

    pmap_arm->root.is_vnode         = true;
    pmap_arm->root.u.vnode.cap      = vnode;
    if (get_croot_addr(vnode) != CPTR_ROOTCN) {
        /* non-invokable root cnode; make a copy */
        errval_t err = slot_alloc(&pmap_arm->root.u.vnode.invokable);
        assert(err_is_ok(err));
        err = cap_copy(pmap_arm->root.u.vnode.invokable, vnode);
        assert(err_is_ok(err));
    } else {
        pmap_arm->root.u.vnode.invokable = vnode;
    }
    pmap_arm->root.next             = NULL;
    pmap_arm->root.u.vnode.children = NULL;

    return SYS_ERR_OK;
}

errval_t pmap_current_init(bool init_domain)
{
    struct pmap_arm *pmap_arm = (struct pmap_arm*)get_current_pmap();

    // To reserve a block of virtual address space,
    // a vregion representing the address space is required.
    // We construct a superficial one here and add it to the vregion list.
    struct vregion *vregion = &pmap_arm->vregion;
    assert((void*)vregion > (void*)pmap_arm);
    assert((void*)vregion < (void*)(pmap_arm + 1));
    vregion->vspace = NULL;
    vregion->memobj = NULL;
    vregion->base   = VSPACE_BEGIN;
    vregion->offset = 0;
    vregion->size   = META_DATA_RESERVED_SPACE;
    vregion->flags  = 0;
    vregion->next = NULL;

    struct vspace *vspace = pmap_arm->p.vspace;
    assert(!vspace->head);
    vspace->head = vregion;

    pmap_arm->vregion_offset = pmap_arm->vregion.base;

    return SYS_ERR_OK;
}