/**
 * \file
 * \brief memory object of fixed type.
 * The object maintains a fixed number of equally sized frames.
 *
 * The frame caps and their offsets into the frames are kept in
 * malloc'd arrays. Frames are inserted with fill() and mapped in
 * lazily by the page fault handler. The object can be mapped into
 * at most one vregion at a time.
 */

/*
 * Copyright (c) 2009, 2010, 2011, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <string.h>

#include <barrelfish/barrelfish.h>
#include "vspace_internal.h"

/**
 * \brief Map the memory object into a region
 *
 * \param memobj   The memory object
 * \param vregion  The region to add
 */
static errval_t map_region(struct memobj *memobj, struct vregion *vregion)
{
    struct memobj_fixed *fixed = (struct memobj_fixed*) memobj;

    /* make sure we are not overshooting the end */
    assert(memobj->size >= (vregion->offset + vregion->size));

    /* the vregion must start at one of the backed frames */
    if (vregion->offset % fixed->chunk_size) {
        return LIB_ERR_MEMOBJ_MAP_REGION;
    }

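    /* a fixed memobj can be attached to at most one vregion */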
    if (fixed->vregion) {
        return LIB_ERR_MEMOBJ_VREGION_ALREADY_MAPPED;
    }

    fixed->vregion = vregion;

    return SYS_ERR_OK;
}

/**
 * \brief Unmap the memory object from a region
 *
 * \param memobj   The memory object
 * \param vregion  The region to remove
 */
static errval_t unmap_region(struct memobj *memobj, struct vregion *vregion)
{
    struct memobj_fixed *fixed = (struct memobj_fixed*) memobj;
    errval_t err;

    if (fixed->vregion != vregion) {
        return LIB_ERR_VSPACE_VREGION_NOT_FOUND;
    }

    struct vspace *vspace = vregion_get_vspace(vregion);
    struct pmap *pmap = vspace_get_pmap(vspace);

    genvaddr_t vregion_base = vregion_get_base_addr(vregion);
    genvaddr_t vregion_offset = vregion_get_offset(vregion);

    err = pmap->f.unmap(pmap, vregion_base + vregion_offset, vregion->size,
                        NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_UNMAP);
    }

    fixed->vregion = NULL;

    return SYS_ERR_OK;
}

/**
 * \brief Set the protection on a range
 *
 * \param memobj   The memory object
 * \param vregion  The vregion to modify the mappings on
 * \param offset   Offset into the memory object
 * \param range    The range of space to set the protection for
 * \param flags    The protection flags
 */
static errval_t protect(struct memobj *memobj,
                        struct vregion *vregion,
                        genvaddr_t offset,
                        size_t range,
                        vs_prot_flags_t flags)
{
    struct vspace *vspace = vregion_get_vspace(vregion);
    struct pmap *pmap = vspace_get_pmap(vspace);
    genvaddr_t base = vregion_get_base_addr(vregion);
    genvaddr_t vregion_offset = vregion_get_offset(vregion);
    errval_t err;
    size_t ret_size;
    err = pmap->f.modify_flags(pmap, base + offset + vregion_offset, range,
                               flags, &ret_size);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_MODIFY_FLAGS);
    }

    return SYS_ERR_OK;
}

/**
 * \brief Pin a range
 *
 * \param memobj   The memory object
 * \param vregion  The vregion to modify the state on
 * \param offset   Offset into the memory object
 * \param range    The range of space to pin
 */
static errval_t pin(struct memobj *memobj,
                    struct vregion *vregion,
                    genvaddr_t offset,
                    size_t range)
{
    USER_PANIC("NYI");
}

/**
 * \brief Unpin a range
 *
 * \param memobj   The memory object
 * \param vregion  The vregion to modify the state on
 * \param offset   Offset into the memory object
 * \param range    The range of space to unpin
 */
static errval_t unpin(struct memobj *memobj,
                      struct vregion *vregion,
                      genvaddr_t offset,
                      size_t range)
{
    USER_PANIC("NYI");
}

/**
 * \brief Set a frame for an offset into the memobj
 *
 * \param memobj        The memory object
 * \param offset        Offset into the memory object
 * \param frame         The frame cap for the offset
 * \param frame_offset  The offset into the frame cap
 *
 * The page fault handler relies on frames being inserted in order.
 */
static errval_t fill(struct memobj *memobj,
                     genvaddr_t offset,
                     struct capref frame,
                     size_t frame_offset)
{
    struct memobj_fixed *fixed = (struct memobj_fixed*) memobj;

    if (offset % fixed->chunk_size) {
        return LIB_ERR_MEMOBJ_FILL;
    }

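    /* each chunk_size-aligned offset corresponds to one slot in the frames array */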
    size_t slot = offset / fixed->chunk_size;
    if (slot >= fixed->count) {
        return LIB_ERR_MEMOBJ_WRONG_OFFSET;
    }

    if (!capref_is_null(fixed->frames[slot])) {
        return LIB_ERR_MEMOBJ_DUPLICATE_FILL;
    }

    fixed->frames[slot] = frame;
    fixed->offsets[slot] = frame_offset;

    return SYS_ERR_OK;
}

/**
 * \brief Unmap and remove the frame backing a given offset of the memobj
 *
 * \param memobj      The memory object
 * \param offset      The offset whose backing frame is removed
 * \param ret_frame   Pointer to return the removed frame
 * \param ret_offset  Pointer to return the offset at which the frame was mapped
 */
static errval_t unfill(struct memobj *memobj,
                       genvaddr_t offset,
                       struct capref *ret_frame,
                       genvaddr_t *ret_offset)
{
    errval_t err;
    struct memobj_fixed *fixed = (struct memobj_fixed*) memobj;

    size_t slot = offset / fixed->chunk_size;
    if (slot >= fixed->count || capref_is_null(fixed->frames[slot])) {
        return LIB_ERR_MEMOBJ_UNFILL_TOO_HIGH_OFFSET;
    }

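    /* if the memobj is attached to a vregion, remove the frame's mapping first */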
    if (fixed->vregion) {
        struct vregion *vregion = fixed->vregion;
        size_t retsize;
        struct vspace *vspace = vregion_get_vspace(vregion);
        struct pmap *pmap = vspace_get_pmap(vspace);
        genvaddr_t vregion_base = vregion_get_base_addr(vregion);
        genvaddr_t vregion_offset = vregion_get_offset(vregion);

        err = pmap->f.unmap(pmap, vregion_base + vregion_offset + offset,
                            fixed->chunk_size, &retsize);
        if (err_is_fail(err)) {
            return err_push(err, LIB_ERR_PMAP_UNMAP);
        }

        assert(retsize == fixed->chunk_size);
        // return the offset at which the frame was mapped
        if (ret_offset) {
            *ret_offset = vregion_offset + offset;
        }
    }

    // return the frame itself
    if (ret_frame) {
        *ret_frame = fixed->frames[slot];
    }

    fixed->frames[slot] = NULL_CAP;

    return SYS_ERR_OK;
}

/**
 * \brief Page fault handler
 *
 * \param memobj   The memory object
 * \param vregion  The associated vregion
 * \param offset   Offset into the memory object of the page fault
 * \param type     The fault type
 *
 * Locates the frame for the offset and maps it in.
 * Relies on fill inserting frames in order.
 */
static errval_t pagefault(struct memobj *memobj,
                          struct vregion *vregion,
                          genvaddr_t offset,
                          vm_fault_type_t type)
{
    errval_t err;
    struct memobj_fixed *fixed = (struct memobj_fixed*) memobj;

    assert(!(offset % fixed->chunk_size));

    size_t slot = (vregion->offset + offset) / fixed->chunk_size;

    if (slot >= fixed->count) {
        return LIB_ERR_MEMOBJ_WRONG_OFFSET;
    }

    if (capref_is_null(fixed->frames[slot])) {
        return LIB_ERR_MEMOBJ_PAGEFAULT_HANDLER;
    }

    struct vspace *vspace = vregion_get_vspace(vregion);
    struct pmap *pmap = vspace_get_pmap(vspace);

    genvaddr_t base = vregion_get_base_addr(vregion);
    genvaddr_t vregion_offset = vregion_get_offset(vregion);
    vregion_flags_t flags = vregion_get_flags(vregion);

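    /* map the frame for this slot, at the offset into the frame recorded by fill() */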
    err = pmap->f.map(pmap, base + vregion_offset + offset, fixed->frames[slot],
                      fixed->offsets[slot], fixed->chunk_size, flags,
                      NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, LIB_ERR_PMAP_MAP);
    }

    return SYS_ERR_OK;
}

/**
 * \brief Free up some pages by placing them in the backing storage
 *
 * \param memobj      The memory object
 * \param size        The amount of space to free up
 * \param frames      Array in which to return the caps of the freed frames
 * \param num_frames  The number of frames returned
 *
 * This will affect all the vregions that are associated with the object.
 */
static errval_t pager_free(struct memobj *memobj,
                           size_t size,
                           struct capref *frames,
                           size_t num_frames)
{
    USER_PANIC("NYI");
}

/**
 * \brief Initialize a memory object of type fixed
 *
 * \param fixed       The memory object
 * \param size        Size of the memory region
 * \param flags       Memory object specific flags
 * \param count       The number of backing frames
 * \param chunk_size  The size of each backing frame
 *
 * This object handles multiple frames.
 * The frames are mapped in on demand.
 */
errval_t memobj_create_fixed(struct memobj_fixed *fixed,
                             size_t size,
                             memobj_flags_t flags,
                             size_t count,
                             size_t chunk_size)
{
    struct memobj *memobj = &fixed->m;

    /* Generic portion */
    memobj->f.map_region = map_region;
    memobj->f.unmap_region = unmap_region;
    memobj->f.protect = protect;
    memobj->f.pin = pin;
    memobj->f.unpin = unpin;
    memobj->f.fill = fill;
    memobj->f.unfill = unfill;
    memobj->f.pagefault = pagefault;
    memobj->f.pager_free = pager_free;

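    /* the object covers exactly count chunks of chunk_size bytes each */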
    assert(size == count * chunk_size);
    assert((chunk_size % BASE_PAGE_SIZE) == 0);

    memobj->size = size;
    memobj->flags = flags;

    memobj->type = MEMOBJ_FIXED;

    /* specific portion */
    fixed->count = count;
    fixed->chunk_size = chunk_size;
    fixed->vregion = NULL;

    fixed->frames = malloc(count * sizeof(struct capref));
    if (!fixed->frames) {
        return LIB_ERR_MALLOC_FAIL;
    }
    memset(fixed->frames, 0, count * sizeof(struct capref));

    fixed->offsets = malloc(count * sizeof(lpaddr_t));
    if (!fixed->offsets) {
        free(fixed->frames);
        return LIB_ERR_MALLOC_FAIL;
    }
    memset(fixed->offsets, 0, count * sizeof(lpaddr_t));

    return SYS_ERR_OK;
}

/**
 * \brief Destroy the object
 */
errval_t memobj_destroy_fixed(struct memobj *memobj)
{
    struct memobj_fixed *m = (struct memobj_fixed *) memobj;

    errval_t err = SYS_ERR_OK;

    /* only destroy the vregion if one was actually attached */
    if (m->vregion) {
        err = vregion_destroy(m->vregion);
    }
    free(m->frames);
    free(m->offsets);
    return err;
}
