/**
 * \file
 * \brief pmap management -- architecture-independent slab management.
 */

/*
 * Copyright (c) 2018, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */


#include <barrelfish/barrelfish.h>
#include <pmap_priv.h>
#include <pmap_ds.h>

// Size of the virtual region mapped by a single root page-table entry
// (a PML4 entry on x86-64)
#define VROOT_ENTRY_MAPPING_SIZE \
    ((genvaddr_t)PTABLE_ENTRIES*PTABLE_ENTRIES*PTABLE_ENTRIES*BASE_PAGE_SIZE)
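// For example, with 512-entry page tables and 4 KiB base pages (the values of
// PTABLE_ENTRIES and BASE_PAGE_SIZE on x86-64), a single root entry covers
// 512 * 512 * 512 * 4 KiB = 512 GiB of virtual address space.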

// Where to put metadata: this currently assumes a four-level ptable tree in
// which every level has the same number of table entries.
#define META_DATA_RESERVED_BASE (VROOT_ENTRY_MAPPING_SIZE * (disp_get_core_id() + 1))
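// Using the example values above, core 0 reserves its metadata region at
// 512 GiB, core 1 at 1 TiB, and so on, so the per-core metadata regions
// never overlap.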
// Amount of virtual address space reserved for mapping the frames that back
// refill_slabs. Considerably more is needed with pmap_array.
#define META_DATA_RESERVED_SIZE (BASE_PAGE_SIZE * 256000)
// (the value above was increased from 128 for the PandaBoard port)
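// With 4 KiB base pages this reserves 256000 * 4 KiB, i.e. just under 1 GiB of
// virtual address space. No frames are committed up front; they are only
// allocated and mapped as the slab allocators grow (see refill_slabs below).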

/**
 * \brief Refill a slab allocator using pages from the fixed allocator.
 *
 * Used if we need to refill the slab allocator before we have established a
 * connection to the memory server.
 *
 * \param pmap  the pmap whose slab allocator we want to refill
 * \param slab  the slab allocator to refill
 * \param bytes the refill buffer size in bytes
 */
static errval_t refill_slabs_fixed_allocator(struct pmap *pmap,
                                             struct slab_allocator *slab, size_t bytes)
{
    size_t pages = DIVIDE_ROUND_UP(bytes, BASE_PAGE_SIZE);

    struct pmap_vnode_mgmt *m = &pmap->m;

    genvaddr_t vbase = m->vregion_offset;

    // Allocate and map buffer using base pages
    for (size_t i = 0; i < pages; i++) {
        struct capref cap;
        size_t retbytes;
        // Get page
        errval_t err = frame_alloc(&cap, BASE_PAGE_SIZE, &retbytes);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_FRAME_ALLOC);
        }
        assert(retbytes == BASE_PAGE_SIZE);

        // Map page
        genvaddr_t genvaddr = m->vregion_offset;
        m->vregion_offset += (genvaddr_t)BASE_PAGE_SIZE;
        assert(m->vregion_offset < vregion_get_base_addr(&m->vregion) +
                vregion_get_size(&m->vregion));

        err = do_map(pmap, genvaddr, cap, 0, BASE_PAGE_SIZE,
                VREGION_FLAGS_READ_WRITE, NULL, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }
    }

    /* Grow the slab */
    lvaddr_t buf = vspace_genvaddr_to_lvaddr(vbase);
    //debug_printf("%s: Calling slab_grow with %#zx bytes\n", __FUNCTION__, bytes);
    slab_grow(slab, (void*)buf, bytes);

    return SYS_ERR_OK;
}

bool debug_refill = false;

/**
 * \brief Refill a slab allocator used for pmap metadata.
 *
 * \param pmap     The pmap to refill in
 * \param slab     The slab allocator to refill
 * \param request  The number of free slabs the allocator must have when the
 *                 function returns
 *
 * When the current pmap is initialized, it reserves some virtual address
 * space for metadata; that reserved address space is used here.
 *
 * Can only be called for the current pmap.
 * Will recursively call into itself until it has enough slabs.
 */
static errval_t refill_slabs(struct pmap *pmap, struct slab_allocator *slab, size_t request)
{
    errval_t err;

    struct pmap_vnode_mgmt *m = &pmap->m;

    /* Keep looping until we have at least #request free slabs */
    while (slab_freecount(slab) < request) {
        // Number of bytes required for the missing slabs
        size_t slabs_req = request - slab_freecount(slab);
        size_t bytes = SLAB_STATIC_SIZE(slabs_req,
                                        slab->blocksize);
        bytes = ROUND_UP(bytes, BASE_PAGE_SIZE);

        if (debug_refill) {
            debug_printf("%s: req=%zu, bytes=%zu, slab->blocksize=%zu, slab->freecount=%zu\n",
                         __FUNCTION__, slabs_req, bytes, slab->blocksize, slab_freecount(slab));
        }

        /* Get a frame of that size */
        struct capref cap;
        size_t retbytes = 0;
        err = frame_alloc(&cap, bytes, &retbytes);
        if (err_is_fail(err)) {
            if (err_no(err) == LIB_ERR_RAM_ALLOC_MS_CONSTRAINTS &&
                err_no(err_pop(err)) == LIB_ERR_RAM_ALLOC_WRONG_SIZE) {
                /* Only retry with the fixed allocator if we got
                 * LIB_ERR_RAM_ALLOC_WRONG_SIZE.
                 */
                return refill_slabs_fixed_allocator(pmap, slab, bytes);
            }
            return err_push(err, LIB_ERR_FRAME_ALLOC);
        }
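        // frame_alloc reports the actual size of the frame it returned, which
        // may be larger than what we asked for; grow the slab over all of it.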
        bytes = retbytes;

        /* If we do not have enough slabs to map the frame in, recurse */
        size_t required_slabs_for_frame = max_slabs_required(bytes);
        // Here we need to check that we have enough vnode slabs, not slabs of
        // whatever allocator we are currently refilling.
        if (slab_freecount(&m->slab) < required_slabs_for_frame) {
            if (debug_refill) {
                debug_printf("%s: called from %p -- need to recurse\n", __FUNCTION__,
                        __builtin_return_address(0));
            }
            // If we recurse, we must require more slabs than are needed to
            // map a single page
            assert(required_slabs_for_frame > max_slabs_required(BASE_PAGE_SIZE));
            if (required_slabs_for_frame <= max_slabs_required(BASE_PAGE_SIZE)) {
                USER_PANIC(
                    "%s: cannot handle this recursion: required slabs (%zu) <= slabs required for a single-page mapping (%zu)\n",
                    __FUNCTION__, required_slabs_for_frame, max_slabs_required(BASE_PAGE_SIZE));
            }

            err = refill_slabs(pmap, slab, required_slabs_for_frame);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            }
        }

        /* Perform mapping */
        genvaddr_t genvaddr = m->vregion_offset;
        m->vregion_offset += (genvaddr_t)bytes;
        assert(m->vregion_offset < vregion_get_base_addr(&m->vregion) +
               vregion_get_size(&m->vregion));

        err = do_map(pmap, genvaddr, cap, 0, bytes,
                     VREGION_FLAGS_READ_WRITE, NULL, NULL);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_DO_MAP);
        }

        /* Grow the slab */
        lvaddr_t buf = vspace_genvaddr_to_lvaddr(genvaddr);
        slab_grow(slab, (void*)buf, bytes);
    }

    return SYS_ERR_OK;
}

errval_t pmap_slab_refill(struct pmap *pmap, struct slab_allocator *slab,
                          size_t max_slabs)
{
    errval_t err;

    // Refill slab allocator if necessary
    size_t slabs_free = slab_freecount(slab);

    if (slabs_free < max_slabs) {
        struct pmap *mypmap = get_current_pmap();
        if (pmap == mypmap) {
            err = refill_slabs(pmap, slab, max_slabs);
            if (err_is_fail(err)) {
                return err_push(err, LIB_ERR_SLAB_REFILL);
            }
        } else {
            size_t bytes = SLAB_STATIC_SIZE(max_slabs - slabs_free,
                                            slab->blocksize);
            void *buf = malloc(bytes);
            if (!buf) {
                return LIB_ERR_MALLOC_FAIL;
            }
            slab_grow(slab, buf, bytes);
        }
    }
    return SYS_ERR_OK;
}
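
/*
 * Usage sketch (a hypothetical call site, not part of this file): an
 * architecture-specific mapping path would top up the vnode slab allocator
 * before it starts installing page tables, along the lines of
 *
 *     struct pmap *pmap = get_current_pmap();
 *     errval_t err = pmap_slab_refill(pmap, &pmap->m.slab,
 *                                     max_slabs_required(map_size));
 *     if (err_is_fail(err)) {
 *         return err_push(err, LIB_ERR_SLAB_REFILL);
 *     }
 *
 * where map_size is a placeholder for the size of the mapping about to be
 * created. Refilling up front keeps do_map() from running out of vnode
 * metadata in the middle of an operation.
 */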

errval_t pmap_vnode_mgmt_current_init(struct pmap *pmap)
{
    // To reserve a block of virtual address space, a vregion representing
    // that address space is required. We construct a skeleton vregion here
    // (it has no vspace or memobj backing) and add it to the vregion list.
    struct vregion *vregion = &pmap->m.vregion;
    vregion->vspace = NULL;
    vregion->memobj = NULL;
    vregion->base   = META_DATA_RESERVED_BASE;
    vregion->offset = 0;
    vregion->size   = META_DATA_RESERVED_SIZE;
    vregion->flags  = 0;
    vregion->next = NULL;

    struct vspace *vspace = pmap->vspace;
    assert(!vspace->head);
    vspace->head = vregion;

    pmap->m.vregion_offset = pmap->m.vregion.base;
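    // vregion_offset tracks the next unmapped address inside the reserved
    // region; the refill functions above advance it as they map refill frames.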

    return SYS_ERR_OK;
}