1/*
2 * Copyright 2017, Data61
3 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
4 * ABN 41 687 119 230.
5 *
6 * This software may be distributed and modified according to the terms of
7 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
8 * See "LICENSE_BSD2.txt" for details.
9 *
10 * @TAG(DATA61_BSD)
11 */
12
13#include <autoconf.h>
14#include <sel4utils/gen_config.h>
15
16#include <stdlib.h>
17#include <string.h>
18#include <vka/vka.h>
19#include <vka/capops.h>
20#include <sel4utils/vspace.h>
21#include <sel4utils/vspace_internal.h>
22
23/* For the initial vspace, we must always guarantee we have virtual memory available
24 * for each bottom level page table. Future vspaces can then use the initial vspace
25 * to allocate bottom level page tables until memory runs out.
26 * We have 1 + k + k^2 + ... + k^n number of intermediate paging structures. Where
27 * k = VSPACE_LEVEL_SIZE and n = (VSPACE_NUM_LEVELS - 2)
 * We want to calculate this using a geometric summation. Fortunately we know that
29 * VSPACE_LEVEL_SIZE = 2^VSPACE_LEVEL_BITS so when calculating k^n we can simplify to
30 * (2^VSPACE_LEVEL_BITS)^n = (2^(VSPACE_LEVEL_BITS * n)) = 1 << (VSPACE_LEVEL_BITS * n) */
31#define NUM_MID_LEVEL_STRUCTURES ( (1 - BIT(VSPACE_LEVEL_BITS * (VSPACE_NUM_LEVELS - 1))) / (1 - BIT(VSPACE_LEVEL_BITS)))
32/* Number of bottom level structures is just the next term in the previous geometric
33 * series, i.e. k^(n + 1) */
34#define NUM_BOTTOM_LEVEL_STRUCTURES (BIT(VSPACE_LEVEL_BITS * (VSPACE_NUM_LEVELS - 1)))
35/* We need to reserve a range of virtual memory such that we have somewhere to put all of
36 * our tables */
37#define MID_LEVEL_STRUCTURES_SIZE (NUM_MID_LEVEL_STRUCTURES * sizeof(vspace_mid_level_t))
38#define BOTTOM_LEVEL_STRUCTURES_SIZE (NUM_BOTTOM_LEVEL_STRUCTURES * sizeof(vspace_bottom_level_t))
39#define VSPACE_RESERVE_SIZE (MID_LEVEL_STRUCTURES_SIZE + BOTTOM_LEVEL_STRUCTURES_SIZE + sizeof(vspace_mid_level_t))
40#define VSPACE_RESERVE_START (KERNEL_RESERVED_START - VSPACE_RESERVE_SIZE)
41
42static int common_init(vspace_t *vspace, vka_t *vka, seL4_CPtr vspace_root,
43                       vspace_allocated_object_fn allocated_object_fn, void *cookie)
44{
45    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
46    data->vka = vka;
47    data->last_allocated = 0x10000000;
48    data->reservation_head = NULL;
49    data->is_empty = false;
50
51    data->vspace_root = vspace_root;
52    vspace->allocated_object = allocated_object_fn;
53    vspace->allocated_object_cookie = cookie;
54
55    return 0;
56}
57
58static void common_init_post_bootstrap(vspace_t *vspace, sel4utils_map_page_fn map_page)
59{
60    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
61    /* reserve the kernel region, we do this by marking the
62     * top level entry as RESERVED */
63    if (!data->is_empty) {
64        for (int i = TOP_LEVEL_INDEX(KERNEL_RESERVED_START);
65             i < VSPACE_LEVEL_SIZE; i++) {
66            data->top_level->table[i] = RESERVED;
67        }
68    }
69
70    data->map_page = map_page;
71
72    /* initialise the rest of the functions now that they are usable */
73    vspace->new_pages = sel4utils_new_pages;
74    vspace->map_pages = sel4utils_map_pages;
75    vspace->new_pages_at_vaddr = sel4utils_new_pages_at_vaddr;
76    vspace->map_pages_at_vaddr = sel4utils_map_pages_at_vaddr;
77    vspace->deferred_rights_map_pages_at_vaddr = sel4utils_deferred_rights_map_pages_at_vaddr;
78    vspace->unmap_pages = sel4utils_unmap_pages;
79
80    vspace->reserve_range_aligned = sel4utils_reserve_range_aligned;
81    vspace->reserve_range_at = sel4utils_reserve_range_at;
82    vspace->reserve_deferred_rights_range_at = sel4utils_reserve_deferred_rights_range_at;
83    vspace->free_reservation = sel4utils_free_reservation;
84    vspace->free_reservation_by_vaddr = sel4utils_free_reservation_by_vaddr;
85
86    vspace->get_cap = sel4utils_get_cap;
87    vspace->get_cookie = sel4utils_get_cookie;
88    vspace->get_root = sel4utils_get_root;
89
90    vspace->tear_down = sel4utils_tear_down;
91    vspace->share_mem_at_vaddr = sel4utils_share_mem_at_vaddr;
92}
93
94static void *alloc_and_map(vspace_t *vspace, size_t size)
95{
96    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
97    if ((size % PAGE_SIZE_4K) != 0) {
98        ZF_LOGE("Object must be multiple of base page size");
99        return NULL;
100    }
101    if (data->next_bootstrap_vaddr) {
102        void *first_addr = (void *)data->next_bootstrap_vaddr;
103        while (size > 0) {
104            void *vaddr = (void *)data->next_bootstrap_vaddr;
105            vka_object_t frame;
106            int error = vka_alloc_frame(data->vka, seL4_PageBits, &frame);
107            if (error) {
108                LOG_ERROR("Failed to allocate bootstrap frame, error: %d", error);
109                return NULL;
110            }
111
112            vka_object_t objects[VSPACE_MAP_PAGING_OBJECTS];
113            int num = VSPACE_MAP_PAGING_OBJECTS;
114
115            error = sel4utils_map_page(data->vka, data->vspace_root, frame.cptr, vaddr,
116                                       seL4_AllRights, 1, objects, &num);
117
118            if (error) {
119                vka_free_object(data->vka, &frame);
120                LOG_ERROR("Failed to map bootstrap frame at %p, error: %d", vaddr, error);
121                return NULL;
122            }
123
124            /* Zero the memory */
125            memset(vaddr, 0, PAGE_SIZE_4K);
126
127            for (int i = 0; i < num; i++) {
128                vspace_maybe_call_allocated_object(vspace, objects[i]);
129            }
130
131            data->next_bootstrap_vaddr += PAGE_SIZE_4K;
132            size -= PAGE_SIZE_4K;
133        }
134        return first_addr;
135    } else {
136        assert(!"not implemented");
137    }
138    return NULL;
139}
140
141static int reserve_range_bottom(vspace_t *vspace, vspace_bottom_level_t *level, uintptr_t start, uintptr_t end)
142{
143    while (start < end) {
144        int index = INDEX_FOR_LEVEL(start, 0);
145        uintptr_t cap = level->cap[index];
146        switch (cap) {
147        case RESERVED:
148            /* nothing to be done */
149            break;
150        case EMPTY:
151            level->cap[index] = RESERVED;
152            break;
153        default:
154            ZF_LOGE("Cannot reserve allocated region");
155            return -1;
156        }
157        start += BYTES_FOR_LEVEL(0);
158    }
159    return 0;
160}
161
/* Mark [start, end) as RESERVED within a mid-level table, recursing into
 * child tables only where the range partially covers an entry. Child tables
 * are allocated on demand via alloc_and_map; entries fully covered by the
 * range are marked RESERVED directly without allocating a child.
 * Returns 0 on success, -1 on allocation failure or conflict. */
static int reserve_range_mid(vspace_t *vspace, vspace_mid_level_t *level, int level_num, uintptr_t start, uintptr_t end)
{
    /* walk entries at this level until we complete this range */
    while (start < end) {
        int index = INDEX_FOR_LEVEL(start, level_num);
        /* align the start so we can check for alignment later */
        uintptr_t aligned_start = start & ALIGN_FOR_LEVEL(level_num);
        /* calculate the start of the next index */
        uintptr_t next_start = aligned_start + BYTES_FOR_LEVEL(level_num);
        int must_recurse = 0;
        if (next_start > end) {
            /* range ends inside this entry: only part of the child is
             * reserved, so we must descend */
            next_start = end;
            must_recurse = 1;
        } else if (start != aligned_start) {
            /* range starts inside this entry: same reason to descend */
            must_recurse = 1;
        }
        uintptr_t next_table = level->table[index];
        if (next_table == EMPTY) {
            if (must_recurse) {
                /* allocate new level */
                if (level_num == 1) {
                    next_table = (uintptr_t)alloc_and_map(vspace, sizeof(vspace_bottom_level_t));
                } else {
                    next_table = (uintptr_t)alloc_and_map(vspace, sizeof(vspace_mid_level_t));
                }
                /* NOTE: relies on EMPTY comparing equal to NULL from
                 * alloc_and_map on failure */
                if (next_table == EMPTY) {
                    ZF_LOGE("Failed to allocate and map book keeping frames during bootstrapping");
                    return -1;
                }
            } else {
                /* entry fully covered: mark without allocating a child */
                next_table = RESERVED;
            }
            level->table[index] = next_table;
        }
        /* at this point table is either RESERVED or needs recursion */
        if (next_table != RESERVED) {
            int error;
            if (level_num == 1) {
                error = reserve_range_bottom(vspace, (vspace_bottom_level_t *)next_table, start, next_start);
            } else {
                error = reserve_range_mid(vspace, (vspace_mid_level_t *)next_table, level_num - 1, start, next_start);
            }
            if (error) {
                return error;
            }
        }
        start = next_start;
    }
    return 0;
}
212
213static int reserve_range(vspace_t *vspace, uintptr_t start, uintptr_t end)
214{
215    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
216    return reserve_range_mid(vspace, data->top_level, VSPACE_NUM_LEVELS - 1, start, end);
217}
218
219/**
220 * Symbols in this function need to be provided by your
221 * crt0.S or your linker script, such that we can figure out
222 * what virtual addresses are taken up by the current task
223 */
224void sel4utils_get_image_region(uintptr_t *va_start, uintptr_t *va_end)
225{
226    extern char __executable_start[];
227    extern char _end[];
228
229    *va_start = (uintptr_t) __executable_start;
230    *va_end = (uintptr_t) _end;
231    *va_end = (uintptr_t) ROUND_UP(*va_end, PAGE_SIZE_4K);
232}
233
234static int reserve_initial_task_regions(vspace_t *vspace, void *existing_frames[])
235{
236
237    /* mark the code and data segments as used */
238    uintptr_t va_start, va_end;
239
240    sel4utils_get_image_region(&va_start, &va_end);
241
242    /* this is the scope of the virtual memory used by the image, including
243     * data, text and stack */
244    if (reserve_range(vspace, va_start, va_end)) {
245        ZF_LOGE("Error reserving code/data segment");
246        return -1;
247    }
248
249    /* mark boot info as used */
250    if (existing_frames != NULL) {
251        for (int i = 0; existing_frames[i] != NULL; i++) {
252            if (reserve_range(vspace, (uintptr_t) existing_frames[i], (uintptr_t) existing_frames[i]
253                              + PAGE_SIZE_4K)) {
254                ZF_LOGE("Error reserving frame at %p", existing_frames[i]);
255                return -1;
256            }
257        }
258    }
259
260    return 0;
261}
262
263/* What we need to do is bootstrap the book keeping information for our page tables.
264 * The whole goal here is that at some point we need to allocate book keeping information
265 * and put it somewhere. Putting it somewhere is fine, but we then need to make sure that
266 * we track (i.e. mark as used) wherever we ended up putting it. In order to do this we
267 * need to allocate memory to create structures to mark etc etc. To prevent this recursive
268 * dependency we will mark, right now, as reserved a region large enough such that we could
269 * allocate all possible book keeping tables from it */
270static int bootstrap_page_table(vspace_t *vspace)
271{
272    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
273
274    data->next_bootstrap_vaddr = VSPACE_RESERVE_START;
275    /* Allocate top level paging structure */
276    data->top_level = alloc_and_map(vspace, sizeof(vspace_mid_level_t));
277
278    /* Try and mark reserved our entire reserve region */
279    if (reserve_range(vspace, VSPACE_RESERVE_START, VSPACE_RESERVE_START + VSPACE_RESERVE_SIZE)) {
280        return -1;
281    }
282
283    return 0;
284}
285
286void *bootstrap_create_level(vspace_t *vspace, size_t size)
287{
288    return alloc_and_map(vspace, size);
289}
290
291
292static int get_vspace_bootstrap(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
293                                sel4utils_map_page_fn map_page)
294{
295    data->bootstrap = loader;
296    /* create the top level page table from the loading vspace */
297    data->top_level = vspace_new_pages(loader, seL4_AllRights, sizeof(vspace_mid_level_t) / PAGE_SIZE_4K, seL4_PageBits);
298    if (data->top_level == NULL) {
299        return -1;
300    }
301    memset(data->top_level, 0, sizeof(vspace_mid_level_t));
302
303    common_init_post_bootstrap(new_vspace, map_page);
304    return 0;
305}
306
307/* Interface functions */
308int sel4utils_get_vspace_with_map(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
309                                  vka_t *vka, seL4_CPtr vspace_root,
310                                  vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie, sel4utils_map_page_fn map_page)
311{
312    new_vspace->data = (void *) data;
313
314    if (common_init(new_vspace, vka, vspace_root, allocated_object_fn, allocated_object_cookie)) {
315        return -1;
316    }
317
318    return get_vspace_bootstrap(loader, new_vspace, data, map_page);
319}
320
321int sel4utils_get_empty_vspace_with_map(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
322                                        vka_t *vka, seL4_CPtr vspace_root,
323                                        vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie, sel4utils_map_page_fn map_page)
324{
325
326    new_vspace->data = (void *) data;
327
328    if (common_init(new_vspace, vka, vspace_root, allocated_object_fn, allocated_object_cookie)) {
329        return -1;
330    }
331    data->is_empty = true;
332
333    return get_vspace_bootstrap(loader, new_vspace, data, map_page);
334}
335
336int sel4utils_get_vspace(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
337                         vka_t *vka, seL4_CPtr vspace_root,
338                         vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie)
339{
340    return sel4utils_get_vspace_with_map(loader, new_vspace, data, vka, vspace_root, allocated_object_fn,
341                                         allocated_object_cookie, sel4utils_map_page_pd);
342}
343
344int sel4utils_get_empty_vspace(vspace_t *loader, vspace_t *new_vspace, sel4utils_alloc_data_t *data,
345                               vka_t *vka, seL4_CPtr vspace_root,
346                               vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie)
347{
348    new_vspace->data = (void *) data;
349
350    if (common_init(new_vspace, vka, vspace_root, allocated_object_fn, allocated_object_cookie)) {
351        return -1;
352    }
353    data->is_empty = true;
354
355    return get_vspace_bootstrap(loader, new_vspace, data, sel4utils_map_page_pd);
356}
357
358
359#ifdef CONFIG_VTX
360int sel4utils_get_vspace_ept(vspace_t *loader, vspace_t *new_vspace, vka_t *vka,
361                             seL4_CPtr ept, vspace_allocated_object_fn allocated_object_fn, void *allocated_object_cookie)
362{
363    sel4utils_alloc_data_t *data = malloc(sizeof(*data));
364    if (!data) {
365        return -1;
366    }
367
368    return sel4utils_get_vspace_with_map(loader, new_vspace, data, vka, ept, allocated_object_fn, allocated_object_cookie,
369                                         sel4utils_map_page_ept);
370}
371#endif /* CONFIG_VTX */
372
373int sel4utils_bootstrap_vspace(vspace_t *vspace, sel4utils_alloc_data_t *data,
374                               seL4_CPtr vspace_root, vka_t *vka,
375                               vspace_allocated_object_fn allocated_object_fn, void *cookie, void *existing_frames[])
376{
377    vspace->data = (void *) data;
378
379    if (common_init(vspace, vka, vspace_root, allocated_object_fn, cookie)) {
380        return -1;
381    }
382
383    data->bootstrap = NULL;
384
385    if (bootstrap_page_table(vspace)) {
386        return -1;
387    }
388
389    if (reserve_initial_task_regions(vspace, existing_frames)) {
390        return -1;
391    }
392
393    common_init_post_bootstrap(vspace, sel4utils_map_page_pd);
394
395    return 0;
396}
397
398int sel4utils_bootstrap_vspace_with_bootinfo(vspace_t *vspace, sel4utils_alloc_data_t *data,
399                                             seL4_CPtr vspace_root,
400                                             vka_t *vka, seL4_BootInfo *info, vspace_allocated_object_fn allocated_object_fn,
401                                             void *allocated_object_cookie)
402{
403    size_t extra_pages = BYTES_TO_4K_PAGES(info->extraLen);
404    uintptr_t extra_base = (uintptr_t)info + PAGE_SIZE_4K;
405    void *existing_frames[extra_pages + 3];
406    existing_frames[0] = info;
407    /* We assume the IPC buffer is less than a page and fits into one page */
408    existing_frames[1] = (void *)(seL4_Word)ROUND_DOWN(((seL4_Word)(info->ipcBuffer)), PAGE_SIZE_4K);
409    size_t i;
410    for (i = 0; i < extra_pages; i++) {
411        existing_frames[i + 2] = (void *)(extra_base + i * PAGE_SIZE_4K);
412    }
413    existing_frames[i + 2] = NULL;
414
415    return sel4utils_bootstrap_vspace(vspace, data, vspace_root, vka, allocated_object_fn,
416                                      allocated_object_cookie, existing_frames);
417}
418
/* Clone the contents of the reservation `image` from `current` into `clone`
 * by allocating fresh pages in `clone` at the same vaddrs, temporarily
 * mapping each new page into `current`, and memcpy'ing the data across.
 * Returns 0 on success, -1 on failure; on mid-loop failure the clone vspace
 * is left inconsistent. */
int sel4utils_bootstrap_clone_into_vspace(vspace_t *current, vspace_t *clone, reservation_t image)
{
    sel4utils_res_t *res = reservation_to_res(image);
    seL4_CPtr slot;
    /* scratch cslot reused for each page's duplicated frame cap */
    int error = vka_cspace_alloc(get_alloc_data(current)->vka, &slot);

    if (error) {
        return -1;
    }

    cspacepath_t dest;
    vka_cspace_make_path(get_alloc_data(current)->vka, slot, &dest);

    /* NOTE(review): the `res->end - 1` bound looks like it could skip the
     * final page when the reservation end is exactly page aligned — confirm
     * against reservation semantics before changing. */
    for (uintptr_t page = res->start; page < res->end - 1; page += PAGE_SIZE_4K) {
        /* we don't know if the current vspace has caps to its mappings -
         * it probably doesn't.
         *
         * So we map the page in and copy the data across instead :( */

        /* create the page in the clone vspace */
        error = vspace_new_pages_at_vaddr(clone, (void *) page, 1, seL4_PageBits, image);
        if (error) {
            /* vspace will be left inconsistent */
            /* deliberately best-effort: log and continue rather than abort */
            ZF_LOGE("Error %d while trying to map page at %"PRIuPTR, error, page);
        }

        seL4_CPtr cap = vspace_get_cap(clone, (void *) page);
        /* copy the cap */
        cspacepath_t src;

        vka_cspace_make_path(get_alloc_data(clone)->vka, cap, &src);
        error = vka_cnode_copy(&dest, &src, seL4_AllRights);
        /* only checked in debug builds; release builds proceed regardless */
        assert(error == 0);

        /* map a copy of it the current vspace */
        void *dest_addr = vspace_map_pages(current, &dest.capPtr, NULL, seL4_AllRights,
                                           1, seL4_PageBits, 1);
        if (dest_addr == NULL) {
            /* vspace will be left inconsistent */
            ZF_LOGE("Error! Vspace mapping failed, bailing\n");
            return -1;
        }

        /* copy the data */
        memcpy(dest_addr, (void *) page, PAGE_SIZE_4K);

#ifdef CONFIG_ARCH_ARM
        /* keep instruction caches coherent with the freshly written data */
        seL4_ARM_Page_Unify_Instruction(dest.capPtr, 0, PAGE_SIZE_4K);
        seL4_ARM_Page_Unify_Instruction(cap, 0, PAGE_SIZE_4K);
#endif /* CONFIG_ARCH_ARM */

        /* unmap our copy */
        vspace_unmap_pages(current, dest_addr, 1, seL4_PageBits, VSPACE_PRESERVE);
        vka_cnode_delete(&dest);
    }

    /* TODO swap out fault handler temporarily to ignore faults here */
    vka_cspace_free(get_alloc_data(current)->vka, slot);
    return 0;
}
479