1/*
2 * Copyright 2017, Data61
3 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
4 * ABN 41 687 119 230.
5 *
6 * This software may be distributed and modified according to the terms of
7 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
8 * See "LICENSE_BSD2.txt" for details.
9 *
10 * @TAG(DATA61_BSD)
11 */
12
13/* defining _GNU_SOURCE to make certain constants appear in muslc. This is rather hacky */
14#define _GNU_SOURCE
15
16#include <autoconf.h>
17#include <sel4muslcsys/gen_config.h>
18#include <stdio.h>
19#include <stdint.h>
20#include <stdarg.h>
21#include <sys/mman.h>
22#include <errno.h>
23#include <assert.h>
24
25#include <vspace/vspace.h>
26
27#include <sel4utils/util.h>
28#include <sel4utils/mapping.h>
29
30/* If we have a nonzero static morecore then we are just doing dodgy hacky morecore */
31#if CONFIG_LIB_SEL4_MUSLC_SYS_MORECORE_BYTES > 0
32
33/*
34 * Statically allocated morecore area.
35 *
36 * This is rather terrible, but is the simplest option without a
37 * huge amount of infrastructure.
38 */
/* Page-aligned backing storage for the statically allocated heap. */
char __attribute__((aligned(PAGE_SIZE_4K))) morecore_area[CONFIG_LIB_SEL4_MUSLC_SYS_MORECORE_BYTES];

/* Total size of the morecore area in bytes. */
size_t morecore_size = CONFIG_LIB_SEL4_MUSLC_SYS_MORECORE_BYTES;
/* Pointer to free space in the morecore area: the current program break,
 * moved upwards by sys_brk. */
static uintptr_t morecore_base = (uintptr_t) &morecore_area;
/* One past the last usable byte; sys_mmap_impl carves anonymous mappings
 * downwards from here. */
uintptr_t morecore_top = (uintptr_t) &morecore_area[CONFIG_LIB_SEL4_MUSLC_SYS_MORECORE_BYTES];
45
46/* Actual morecore implementation
47   returns 0 if failure, returns newbrk if success.
48*/
49
50long sys_brk(va_list ap)
51{
52
53    uintptr_t ret;
54    uintptr_t newbrk = va_arg(ap, uintptr_t);
55
56    /*if the newbrk is 0, return the bottom of the heap*/
57    if (!newbrk) {
58        ret = morecore_base;
59    } else if (newbrk < morecore_top && newbrk > (uintptr_t)&morecore_area[0]) {
60        ret = morecore_base = newbrk;
61    } else {
62        ret = 0;
63    }
64
65    return ret;
66}
67
68/* Large mallocs will result in muslc calling mmap, so we do a minimal implementation
69   here to support that. We make a bunch of assumptions in the process */
70long sys_mmap_impl(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
71{
72    if (flags & MAP_ANONYMOUS) {
73        /* Check that we don't try and allocate more than exists */
74        if (length > morecore_top - morecore_base) {
75            return -ENOMEM;
76        }
77        /* Steal from the top */
78        morecore_top -= length;
79        return morecore_top;
80    }
81    assert(!"not implemented");
82    return -ENOMEM;
83}
84
/* mremap() is not supported for the static morecore area. */
long sys_mremap(va_list ap)
{
    (void) ap; /* no arguments are consumed */
    assert(!"not implemented");
    return -ENOMEM;
}
90
91#else
92
/* dynamic morecore based on a vspace. These need to be defined somewhere (probably in the
 * main function of your app), and set up something like
95    sel4utils_reserve_range_no_alloc(&vspace, &muslc_brk_reservation_memory, BRK_VIRTUAL_SIZE, seL4_AllRights, 1, &muslc_brk_reservation_start);
96    muslc_this_vspace = &vspace;
97    muslc_brk_reservation.res = &muslc_brk_reservation_memory;
98
99    In the case that you need dynamic morecore for some apps and static for others, just
    define the morecore area in your app itself and set the global morecore_area before calling malloc.
101
102    We have to do this because the build system builds the same library for each app.
103  */
104
/* Globals the application must set before malloc can use the dynamic
 * (vspace-backed) morecore: the vspace itself, a reservation covering the
 * brk region, and the reservation's start address. */
vspace_t *muslc_this_vspace = NULL;
reservation_t muslc_brk_reservation = {.res = NULL};
void *muslc_brk_reservation_start = NULL;

/* Alternatively the application may supply its own static morecore area
 * by pointing morecore_area at a buffer and setting morecore_size. */
char *morecore_area = NULL;
size_t morecore_size = 0;
/* Bottom (current break) and top of the static area; derived lazily from
 * morecore_area/morecore_size by init_morecore_region(). */
static uintptr_t morecore_base = 0;
uintptr_t morecore_top = 0;

/* Current break within the dynamic brk reservation; initialised on the
 * first brk(0) query and advanced as pages are mapped. */
static uintptr_t brk_start;
115
116static void init_morecore_region(void)
117{
118    if (morecore_base == 0) {
119        if (morecore_size == 0) {
120            ZF_LOGE("Warning: static morecore size is 0");
121        }
122        morecore_base = (uintptr_t) morecore_area;
123        morecore_top = (uintptr_t) &morecore_area[morecore_size];
124    }
125}
126
127static long sys_brk_static(va_list ap)
128{
129    uintptr_t ret;
130    uintptr_t newbrk = va_arg(ap, uintptr_t);
131
132    /* ensure the morecore region is initialized */
133    init_morecore_region();
134    /*if the newbrk is 0, return the bottom of the heap*/
135    if (!newbrk) {
136        ret = morecore_base;
137    } else if (newbrk < morecore_top && newbrk > (uintptr_t)&morecore_area[0]) {
138        ret = morecore_base = newbrk;
139    } else {
140        ret = 0;
141    }
142
143    return ret;
144}
145
/* brk() backed by a vspace reservation.
 *
 * Requires muslc_this_vspace, muslc_brk_reservation and
 * muslc_brk_reservation_start to have been set by the application (see
 * the comment at the top of this section).  Pages are mapped on demand
 * as the break grows; they are never unmapped again.
 *
 * NOTE(review): brk_start is only initialised by a brk(0) query.  This
 * relies on muslc always issuing brk(0) before any non-zero brk -- TODO
 * confirm.  A request below the current break unmaps nothing and simply
 * returns the current break.
 *
 * Returns the resulting break on success, 0 on failure. */
static long sys_brk_dynamic(va_list ap)
{

    uintptr_t ret;
    uintptr_t newbrk = va_arg(ap, uintptr_t);
    if (!muslc_this_vspace || !muslc_brk_reservation.res || !muslc_brk_reservation_start) {
        ZF_LOGE("Need to assign vspace for sys_brk to work!\n");
        assert(muslc_this_vspace && muslc_brk_reservation.res && muslc_brk_reservation_start);
        return 0;
    }

    /*if the newbrk is 0, return the bottom of the heap*/
    if (newbrk == 0) {
        brk_start = (uintptr_t)muslc_brk_reservation_start;
        ret = brk_start;
    } else {
        /* try and map pages until this point */
        while (brk_start < newbrk) {
            int error = vspace_new_pages_at_vaddr(muslc_this_vspace, (void *) brk_start, 1,
                                                  seL4_PageBits, muslc_brk_reservation);
            if (error) {
                ZF_LOGE("Mapping new pages to extend brk region failed\n");
                return 0;
            }
            brk_start += PAGE_SIZE_4K;
        }
        ret = brk_start;
    }
    return ret;
}
176
177long sys_brk(va_list ap)
178{
179    if (morecore_area != NULL) {
180        return sys_brk_static(ap);
181    } else if (muslc_this_vspace != NULL) {
182        return sys_brk_dynamic(ap);
183    } else {
184        ZF_LOGE("You need to define either morecore_area or the muslc*");
185        ZF_LOGE("global variables to use malloc\n");
186        assert(morecore_area != NULL || muslc_this_vspace != NULL);
187        return 0;
188    }
189}
190
191/* Large mallocs will result in muslc calling mmap, so we do a minimal implementation
192   here to support that. We make a bunch of assumptions in the process */
193static long sys_mmap_impl_static(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
194{
195    if (flags & MAP_ANONYMOUS) {
196        /* ensure the morecore region is initialized */
197        init_morecore_region();
198        /* Steal from the top */
199        uintptr_t base = morecore_top - length;
200        if (base < morecore_base) {
201            return -ENOMEM;
202        }
203        morecore_top = base;
204        return base;
205    }
206    assert(!"not implemented");
207    return -ENOMEM;
208}
209
210static long sys_mmap_impl_dynamic(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
211{
212    if (!muslc_this_vspace) {
213        ZF_LOGE("Need to assign vspace for mmap to work!\n");
214        assert(muslc_this_vspace);
215        return 0;
216    }
217    if (flags & MAP_ANONYMOUS) {
218        /* determine how many pages we need */
219        uint32_t pages = BYTES_TO_4K_PAGES(length);
220        void *ret = vspace_new_pages(muslc_this_vspace, seL4_AllRights, pages, seL4_PageBits);
221        return (long)ret;
222    }
223    assert(!"not implemented");
224    return -ENOMEM;
225}
226
227long sys_mmap_impl(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
228{
229    if (morecore_area != NULL) {
230        return sys_mmap_impl_static(addr, length, prot, flags, fd, offset);
231    } else if (muslc_this_vspace != NULL) {
232        return sys_mmap_impl_dynamic(addr, length, prot, flags, fd, offset);
233    } else {
234        ZF_LOGE("mmap requires morecore_area or muslc* vars to be initialised\n");
235        assert(morecore_area != NULL || muslc_this_vspace != NULL);
236        return 0;
237    }
238}
239
/* mremap() for the vspace-backed heap.
 *
 * Only supports growing an existing 4K-aligned anonymous mapping with
 * MREMAP_MAYMOVE: the old frame caps are looked up, the old region is
 * unmapped (preserving the caps), a fresh region of new_size is reserved,
 * the old frames are re-mapped at its start, and new pages are created
 * for the remainder.  On any failure the original mapping is reinstated
 * and -ENOMEM is returned; on success the new address is returned. */
static long sys_mremap_dynamic(va_list ap)
{

    void *old_address = va_arg(ap, void *);
    size_t old_size = va_arg(ap, size_t);
    size_t new_size = va_arg(ap, size_t);
    int flags = va_arg(ap, int);
    UNUSED void *new_address_arg;

    assert(flags == MREMAP_MAYMOVE);
    assert(IS_ALIGNED_4K(old_size));
    assert(IS_ALIGNED_4K((uintptr_t) old_address));
    assert(IS_ALIGNED_4K(new_size));
    /* we currently only support remapping to size >= original */
    assert(new_size >= old_size);

    /* NOTE(review): with asserts enabled this branch is unreachable, since
     * flags was just asserted equal to MREMAP_MAYMOVE; it keeps the
     * varargs consumed in order if MREMAP_FIXED ever arrives under
     * NDEBUG -- confirm intent. */
    if (flags & MREMAP_FIXED) {
        new_address_arg = va_arg(ap, void *);
    }

    /* first find all the old caps */
    int num_pages = old_size >> seL4_PageBits;
    seL4_CPtr caps[num_pages];
    uintptr_t cookies[num_pages];
    int i;
    for (i = 0; i < num_pages; i++) {
        void *vaddr = old_address + i * BIT(seL4_PageBits);
        caps[i] = vspace_get_cap(muslc_this_vspace, vaddr);
        cookies[i] = vspace_get_cookie(muslc_this_vspace, vaddr);
    }
    /* unmap the previous mapping, keeping the frame caps alive so they
     * can be re-mapped at the new location */
    vspace_unmap_pages(muslc_this_vspace, old_address, num_pages, seL4_PageBits, VSPACE_PRESERVE);
    /* reserve a new region */
    int error;
    void *new_address;
    int new_pages = new_size >> seL4_PageBits;
    reservation_t reservation = vspace_reserve_range(muslc_this_vspace, new_pages * PAGE_SIZE_4K, seL4_AllRights, 1,
                                                     &new_address);
    if (!reservation.res) {
        ZF_LOGE("Failed to make reservation for remap\n");
        goto restore;
    }
    /* map all the existing pages into the reservation */
    error = vspace_map_pages_at_vaddr(muslc_this_vspace, caps, cookies, new_address, num_pages, seL4_PageBits, reservation);
    if (error) {
        ZF_LOGE("Mapping existing pages into new reservation failed\n");
        vspace_free_reservation(muslc_this_vspace, reservation);
        goto restore;
    }
    /* create any new pages for the grown tail of the region */
    error = vspace_new_pages_at_vaddr(muslc_this_vspace, new_address + num_pages * PAGE_SIZE_4K, new_pages - num_pages,
                                      seL4_PageBits, reservation);
    if (error) {
        ZF_LOGE("Creating new pages for remap region failed\n");
        vspace_unmap_pages(muslc_this_vspace, new_address, num_pages, seL4_PageBits, VSPACE_PRESERVE);
        vspace_free_reservation(muslc_this_vspace, reservation);
        goto restore;
    }
    /* free the reservation book keeping */
    vspace_free_reservation(muslc_this_vspace, reservation);
    return (long)new_address;
restore:
    /* try and recreate the original mapping from the saved caps */
    reservation = vspace_reserve_range_at(muslc_this_vspace, old_address, num_pages * PAGE_SIZE_4K, seL4_AllRights, 1);
    assert(reservation.res);
    error = vspace_map_pages_at_vaddr(muslc_this_vspace, caps, cookies, old_address,
                                      num_pages, seL4_PageBits, reservation);
    assert(!error);
    return -ENOMEM;
}
310
/* mremap() is not supported for the static morecore area. */
static long sys_mremap_static(va_list ap)
{
    (void) ap; /* no arguments are consumed */
    assert(!"not implemented");
    return -ENOMEM;
}
316
317long sys_mremap(va_list ap)
318{
319    if (morecore_area != NULL) {
320        return sys_mremap_static(ap);
321    } else if (muslc_this_vspace != NULL) {
322        return sys_mremap_dynamic(ap);
323    } else {
324        ZF_LOGE("mrepmap requires morecore_area or muslc* vars to be initialised\n");
325        assert(morecore_area != NULL || muslc_this_vspace != NULL);
326        return 0;
327    }
328}
329
330#endif
331
/* Dummy implementation of sys_madvise() to satisfy free() in muslc:
 * the advice is ignored and success is always reported. */
long sys_madvise(va_list ap)
{
    (void) ap; /* advice arguments are ignored */
    ZF_LOGV("calling dummy version of sys_madvise()\n");
    return 0;
}
338
/* mmap() entry point: unpack the six arguments in calling order and
 * forward to the shared implementation. */
long sys_mmap(va_list ap)
{
    void *vaddr = va_arg(ap, void *);
    size_t len = va_arg(ap, size_t);
    int protection = va_arg(ap, int);
    int map_flags = va_arg(ap, int);
    int file_desc = va_arg(ap, int);
    off_t file_offset = va_arg(ap, off_t);

    return sys_mmap_impl(vaddr, len, protection, map_flags, file_desc, file_offset);
}
349
/* mmap2() entry point: identical to mmap except the offset is given in
 * 4096-byte units. */
long sys_mmap2(va_list ap)
{
    void *vaddr = va_arg(ap, void *);
    size_t len = va_arg(ap, size_t);
    int protection = va_arg(ap, int);
    int map_flags = va_arg(ap, int);
    int file_desc = va_arg(ap, int);
    off_t page_offset = va_arg(ap, off_t);

    /* Redirect to mmap with a byte offset.  muslc always defines off_t
     * as an int64, so the multiplication will not overflow. */
    return sys_mmap_impl(vaddr, len, protection, map_flags, file_desc, page_offset * 4096);
}
362