1/*
2 * Copyright (c) 2017, ETH Zurich.
3 * All rights reserved.
4 *
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
8 */
9
10#include <sys/mman.h>
11#include <barrelfish/barrelfish.h>
12#include <sys/errno.h>
13
// Copied from usr/monitor/capops/internal.h
// Evaluates 'err'; on failure, logs the error (annotated with the enclosing
// function name and line number) and jumps to 'label'. The do/while(0)
// wrapper makes the macro behave as a single statement, so it is safe after
// an unbraced if/else.
#define GOTO_IF_ERR(err, label) do { \
    if (err_is_fail(err)) { \
        DEBUG_ERR(err, "%s:%u -> goto err", __FUNCTION__, __LINE__); \
        goto label; \
    } \
} while (0)
21
22static vregion_flags_t prot_to_vregion_flags(int flags)
23{
24    vregion_flags_t f = VREGION_FLAGS_NONE;
25    if (flags & PROT_READ) {
26        f |= VREGION_FLAGS_READ;
27    }
28    if (flags & PROT_WRITE) {
29        f |= VREGION_FLAGS_WRITE;
30    }
31    if (flags & PROT_EXEC) {
32        f |= VREGION_FLAGS_EXECUTE;
33    }
34    return f;
35}
36
37static const char *flag_to_str(int flag)
38{
39    switch(flag) {
40#ifdef __x86_64__
41        case MAP_32BIT        : return "MAP_32BIT";
42#endif
43        case MAP_ALIGNED_SUPER: return "MAP_ALIGNED_SUPER";
44        case MAP_ANON         : return "MAP_ANON|MAP_ANONYMOUS";
45        case MAP_EXCL         : return "MAP_EXCL";
46        case MAP_FIXED        : return "MAP_FIXED";
47        case MAP_HASSEMAPHORE : return "MAP_HASSEMAPHORE";
48        case MAP_NOCORE       : return "MAP_NOCORE";
49        case MAP_NOSYNC       : return "MAP_NOSYNC";
50        case MAP_PREFAULT_READ: return "MAP_PREFAULT_READ";
51        case MAP_PRIVATE      : return "MAP_PRIVATE";
52        case MAP_SHARED       : return "MAP_SHARED";
53        case MAP_STACK        : return "MAP_STACK";
54        default: return "Unknown Flag";
55    }
56}
57
58// If you implement one of the following flags, update this list to avoid
59// unnecessary warnings! -SG,2017-07-28
60static int NYI_FLAGS = MAP_HASSEMAPHORE | MAP_NOCORE | MAP_NOSYNC |
61                       MAP_PREFAULT_READ | MAP_PRIVATE | MAP_SHARED | MAP_STACK;
62
// True iff at least one bit of 'flag' is present in 'flags'.
static inline bool flag_is_set(int flags, int flag)
{
    return (flags & flag) != 0;
}
67
68static inline bool is_anon_mapping(int flags)
69{
70    return flag_is_set(flags, MAP_ANONYMOUS) || flag_is_set(flags, MAP_ANON);
71}
72
// Caller requested a mapping at exactly the supplied address (MAP_FIXED).
static inline bool is_fixed_mapping(int flags)
{
    return (flags & MAP_FIXED) != 0;
}
77
78void * mmap(void *addr, size_t len, int prot, int flags, int fd, off_t offset)
79{
80    // Hard-fail on non anonymous mappings for now
81    if (!is_anon_mapping(flags)) {
82        debug_printf("mmap() without MAP_ANON NYI!\n");
83        *__error() = EBADF;
84        return MAP_FAILED;
85    }
86
87#ifdef CONFIG_PRINT_MMAP_NYI_FLAGS
88    // Warn user about NYI flags
89    if (flags & NYI_FLAGS) {
90        debug_printf("mmap(): user provided NYI flags, ignoring the following:\n");
91        for (int i = 0; i < 32; i++) {
92            if (NYI_FLAGS & (1 << i)) {
93                debug_printf("     * %s\n", flag_to_str(1<<i));
94            }
95        }
96    }
97#endif // CONFIG_PRINT_MMAP_NYI_FLAGS
98
99    // According to manpage, for anonymous mappings mmap() MUST be called with
100    // fd = -1 and offset = 0.
101    if (is_anon_mapping(flags) && (fd != -1 || offset != 0)) {
102        *__error() = EINVAL;
103        return MAP_FAILED;
104    }
105
106    genvaddr_t vaddr = (genvaddr_t)(lvaddr_t) addr;
107
108    if (flag_is_set(flags, MAP_FIXED) && !flag_is_set(flags, MAP_EXCL)) {
109        // We currently do not support MAP_FIXED without MAP_EXCL, as we do
110        // not have a clean way of replacing existing mapped pages.
111        debug_printf("mmap(): MAP_FIXED without MAP_EXCL NYI!\n");
112        *__error() = EINVAL;
113        return MAP_FAILED;
114    }
115
116    if (len == 0) {
117        *__error() = EINVAL;
118        return MAP_FAILED;
119    }
120
121    errval_t err;
122    // Translate prot to vregion flags
123    vregion_flags_t vflags = prot_to_vregion_flags(prot);
124
125    // Set large page flag, if MAP_ALIGNED_SUPER is set
126    if (flag_is_set(flags, MAP_ALIGNED_SUPER)) {
127        vflags |= VREGION_FLAGS_LARGE;
128    }
129
130    // Returned from vspace_map* call
131    struct vregion *vregion;
132    struct memobj *memobj;
133
134    if (is_fixed_mapping(flags)) {
135        // According to manpage, calls with MAP_FIXED must have aligned base
136        // address.
137        if (vaddr & BASE_PAGE_MASK) {
138            *__error() = EINVAL;
139            return MAP_FAILED;
140        }
141
142        err = vspace_map_anon_fixed(vaddr, len, vflags, &vregion, &memobj);
143#ifdef __x86_64__
144    } else if (is_anon_mapping(flags) && !flag_is_set(flags, MAP_32BIT)) {
145        // For anonymous mappings without constraints we let libbarrelfish
146        // find a suitable region of virtual address space.
147        err = vspace_map_anon_attr(&addr, &memobj, &vregion, len, NULL, vflags);
148#endif
149    } else {
150        // Fail on other combinations of flags for now
151        debug_printf("mmap(): mapping with given set of flags NYI!\n");
152        *__error() = EBADF;
153        return MAP_FAILED;
154    }
155    if (err_is_fail(err)) {
156        *__error() = ENOMEM;
157        return MAP_FAILED;
158    }
159
160    // Contrary to mmap() we do not support lazy page allocation on Barrelfish
161    // at the moment, and rather than requiring all mmap() calls to explicitly
162    // specify MAP_PREFAULT_READ we allocate RAM here and prefault the new
163    // mapping like all other libbarrelfish high-level functions.
164    // -SG,2017-07-28
165
166    size_t retbytes;
167    struct capref frame;
168    err = frame_alloc(&frame, len, &retbytes);
169    GOTO_IF_ERR(err, cleanup);
170    assert(len <= retbytes);
171
172    // Attach Frame to memobj
173    err = memobj->f.fill(memobj, 0, frame, len);
174    GOTO_IF_ERR(err, cleanup);
175
176    // Prefault
177    err = memobj->f.pagefault(memobj, vregion, 0, 0);
178    GOTO_IF_ERR(err, cleanup);
179
180    return (void *)vregion_get_base_addr(vregion);
181
182cleanup:
183    *__error() = ENOMEM;
184    vregion_destroy(vregion);
185    memobj_destroy_anon(memobj, true);
186    return MAP_FAILED;
187}
188
189int munmap(void *addr, size_t len)
190{
191    genvaddr_t vaddr = (genvaddr_t)addr;
192    // Catch unaligned base addr or invalid length
193    if ((vaddr & BASE_PAGE_MASK) || len <= 0) {
194        *__error() = EINVAL;
195        return -1;
196    }
197
198    // Get vregion containing the region to unmap
199    struct vregion *vr = vspace_get_region(get_current_vspace(), addr);
200
201    if (vregion_get_base_addr(vr) != vaddr || vregion_get_size(vr) != len) {
202        // As Barrelfish does not cleanly support unmapping parts of an existing
203        // vregion, we fail on such requests. -SG,2017-07-28.
204        debug_printf("munmap(): user requested unmapping of part of existing vregion, NYI on Barrelfish\n");
205        *__error() = EINVAL;
206        return -1;
207    }
208
209    errval_t err = vregion_destroy(vr);
210    if (err_is_fail(err)) {
211        *__error() = EINVAL;
212        return -1;
213    }
214
215    return 0;
216}
217