/*
 * Copyright 2017, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
 * See "LICENSE_BSD2.txt" for details.
 *
 * @TAG(DATA61_BSD)
 */

/* see sel4utils/vspace.h for details */
#include <autoconf.h>
#include <sel4utils/gen_config.h>

#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <sel4utils/vspace.h>
#include <sel4utils/page.h>

#include <sel4utils/vspace_internal.h>
#include <vka/capops.h>

#include <utils/util.h>

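/* Allocate backing memory for a level of the vspace bookkeeping structure.
 * While this vspace is still being bootstrapped the memory must come from the
 * bootstrapping implementation; afterwards it is allocated out of the
 * bootstrapper vspace, which is where all bookkeeping is mapped. */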
void *create_level(vspace_t *vspace, size_t size)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);

    /* We need a level in the bootstrapper vspace */
    if (data->bootstrap == NULL) {
        return bootstrap_create_level(vspace, size);
    }

    /* Otherwise we allocate our level out of the bootstrapper vspace -
     * which is where bookkeeping is mapped */
    void *level = vspace_new_pages(data->bootstrap, seL4_AllRights,
                                   size / PAGE_SIZE_4K, seL4_PageBits);
    if (level == NULL) {
        return NULL;
    }
    memset(level, 0, size);

    return level;
}

/* check that the range [start, end) lies entirely within the reservation */
static int check_reservation_bounds(sel4utils_res_t *reservation, uintptr_t start, uintptr_t end)
{
    return start >= reservation->start &&
           end <= reservation->end;
}

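/* A range is valid to map into a reservation if it lies within the
 * reservation's bounds and the corresponding bookkeeping entries are
 * actually marked as reserved. */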
static int check_reservation(vspace_mid_level_t *top_level, sel4utils_res_t *reservation, uintptr_t start,
                             uintptr_t end)
{
    return check_reservation_bounds(reservation, start, end) &&
           is_reserved_range(top_level, start, end);
}

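/* Reservations are kept in a singly linked list. The insertion logic below
 * orders the list by descending start address: a new reservation is placed
 * before the first existing reservation with a smaller start address. */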
static void insert_reservation(sel4utils_alloc_data_t *data, sel4utils_res_t *reservation)
{
    assert(data != NULL);
    assert(reservation != NULL);

    reservation->next = NULL;

    /* insert at head */
    if (data->reservation_head == NULL || reservation->start > data->reservation_head->start) {
        reservation->next = data->reservation_head;
        data->reservation_head = reservation;
        return;
    }

    /* insert elsewhere */
    sel4utils_res_t *prev = data->reservation_head;
    sel4utils_res_t *current = prev->next;

    while (current != NULL) {
        /* insert in the middle */
        if (reservation->start > current->start) {
            reservation->next = current;
            prev->next = reservation;
            return;
        }
        prev = current;
        current = current->next;
    }

    /* insert at the end */
    prev->next = reservation;
}

static void remove_reservation(sel4utils_alloc_data_t *data, sel4utils_res_t *reservation)
{
    /* remove head */
    if (reservation == data->reservation_head) {
        data->reservation_head = data->reservation_head->next;
        reservation->next = NULL;
        return;
    }

    sel4utils_res_t *prev = data->reservation_head;
    sel4utils_res_t *current = prev->next;

    while (current != NULL) {
        /* remove middle */
        if (current == reservation) {
            prev->next = reservation->next;
            reservation->next = NULL;
            return;
        }
        prev = current;
        current = current->next;
    }

    /* remove tail */
    prev->next = NULL;
    reservation->next = NULL;
}

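/* Record a reservation: round the requested range out to 4K page boundaries,
 * mark the covered bookkeeping entries as reserved and insert the reservation
 * into the sorted list. The caller must have already checked that the range
 * is free. */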
static void perform_reservation(vspace_t *vspace, sel4utils_res_t *reservation, uintptr_t vaddr, size_t bytes,
                                seL4_CapRights_t rights, int cacheable)
{
    assert(reservation != NULL);

    UNUSED int error;
    reservation->start = ROUND_DOWN(vaddr, PAGE_SIZE_4K);
    reservation->end = ROUND_UP(vaddr + bytes, PAGE_SIZE_4K);

    reservation->rights = rights;
    reservation->cacheable = cacheable;

    error = reserve_entries_range(vspace, reservation->start, reservation->end, true);

    /* we only support reserving ranges that have already been checked as reservable */
    assert(error == seL4_NoError);

    /* insert the reservation ordered */
    insert_reservation(get_alloc_data(vspace), reservation);
}

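/* Map a frame into the hardware page directory vspace, allocating any
 * intermediate paging structures required, and report each newly created
 * paging object through the vspace's allocated-object callback. */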
int sel4utils_map_page_pd(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights_t rights,
                          int cacheable, size_t size_bits)
{
    vka_object_t objects[VSPACE_MAP_PAGING_OBJECTS];
    int num = VSPACE_MAP_PAGING_OBJECTS;
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);

    int error = sel4utils_map_page(data->vka, data->vspace_root, cap, vaddr,
                                   rights, cacheable, objects, &num);
    if (error) {
        /* everything has gone to hell. Do not clean up. */
        ZF_LOGE("Error mapping pages, bailing: %d", error);
        return -1;
    }

    for (int i = 0; i < num; i++) {
        vspace_maybe_call_allocated_object(vspace, objects[i]);
    }

    return seL4_NoError;
}

#ifdef CONFIG_VTX
int sel4utils_map_page_ept(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights_t rights,
                           int cacheable, size_t size_bits)
{
    struct sel4utils_alloc_data *data = get_alloc_data(vspace);
    vka_object_t pagetable = {0};
    vka_object_t pagedir = {0};
    vka_object_t pdpt = {0};

    int error = sel4utils_map_ept_page(data->vka, data->vspace_root, cap,
                                       (seL4_Word) vaddr, rights, cacheable, size_bits, &pagetable, &pagedir, &pdpt);
    if (error) {
        ZF_LOGE("Error mapping pages, bailing: %d", error);
        return -1;
    }

    if (pagetable.cptr != 0) {
        vspace_maybe_call_allocated_object(vspace, pagetable);
        pagetable.cptr = 0;
    }

    if (pagedir.cptr != 0) {
        vspace_maybe_call_allocated_object(vspace, pagedir);
        pagedir.cptr = 0;
    }

    if (pdpt.cptr != 0) {
        vspace_maybe_call_allocated_object(vspace, pdpt);
        pdpt.cptr = 0;
    }

    return seL4_NoError;
}
#endif /* CONFIG_VTX */

#ifdef CONFIG_IOMMU
int sel4utils_map_page_iommu(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights_t rights,
                             int cacheable, size_t size_bits)
{
    struct sel4utils_alloc_data *data = get_alloc_data(vspace);
    int num_pts = 0;
    /* The maximum number of page table levels current Intel hardware implements is 6 */
    vka_object_t pts[7];

    int error = sel4utils_map_iospace_page(data->vka, data->vspace_root, cap,
                                           (seL4_Word) vaddr, rights, cacheable, size_bits, pts, &num_pts);
    if (error) {
        ZF_LOGE("Error mapping pages, bailing");
        return -1;
    }

    for (int i = 0; i < num_pts; i++) {
        vspace_maybe_call_allocated_object(vspace, pts[i]);
    }

    return seL4_NoError;
}
#endif /* CONFIG_IOMMU */

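/* Dispatch to whichever map_page implementation this vspace was configured
 * with (page directory, EPT or IOSpace). */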
static int map_page(vspace_t *vspace, seL4_CPtr cap, void *vaddr, seL4_CapRights_t rights,
                    int cacheable, size_t size_bits)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    return data->map_page(vspace, cap, vaddr, rights, cacheable, size_bits);
}

static sel4utils_res_t *find_reserve(sel4utils_alloc_data_t *data, uintptr_t vaddr)
{
    sel4utils_res_t *current = data->reservation_head;

    while (current != NULL) {
        if (vaddr >= current->start && vaddr < current->end) {
            return current;
        }

        current = current->next;
    }

    return NULL;
}

static void *find_range(sel4utils_alloc_data_t *data, size_t num_pages, size_t size_bits)
{
    /* look for a contiguous range that is free.
     * We use first-fit with the optimisation that we store
     * a pointer to the last thing we freed/allocated */
    size_t contiguous = 0;
    uintptr_t start = ALIGN_UP(data->last_allocated, SIZE_BITS_TO_BYTES(size_bits));
    uintptr_t current = start;

    assert(IS_ALIGNED(start, size_bits));
    while (contiguous < num_pages) {

        bool available = is_available(data->top_level, current, size_bits);
        current += SIZE_BITS_TO_BYTES(size_bits);

        if (available) {
            /* keep going! */
            contiguous++;
        } else {
            /* reset start and try again */
            start = current;
            contiguous = 0;
        }

        if (current >= KERNEL_RESERVED_START) {
            ZF_LOGE("Out of virtual memory");
            return NULL;
        }

    }

    data->last_allocated = current;

    return (void *) start;
}

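/* Map a set of frame caps at contiguous virtual addresses, recording each cap
 * and its cookie in the bookkeeping on success. Stops at the first failure
 * and returns the error. */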
static int map_pages_at_vaddr(vspace_t *vspace, seL4_CPtr caps[], uintptr_t cookies[],
                              void *vaddr, size_t num_pages,
                              size_t size_bits, seL4_CapRights_t rights, int cacheable)
{
    int error = seL4_NoError;

    for (int i = 0; i < num_pages && error == seL4_NoError; i++) {
        error = map_page(vspace, caps[i], vaddr, rights, cacheable, size_bits);

        if (error == seL4_NoError) {
            uintptr_t cookie = cookies == NULL ? 0 : cookies[i];
            error = update_entries(vspace, (uintptr_t) vaddr, caps[i], size_bits, cookie);
            vaddr = (void *)((uintptr_t) vaddr + (BIT(size_bits)));
        }
    }
    return error;
}

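/* Allocate frames from the vka and map them at contiguous virtual addresses.
 * On failure, any pages that were successfully allocated and mapped are
 * unmapped and freed again. */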
static int new_pages_at_vaddr(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits,
                              seL4_CapRights_t rights, int cacheable, bool can_use_dev)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    int i;
    int error = seL4_NoError;
    void *start_vaddr = vaddr;

    for (i = 0; i < num_pages; i++) {
        vka_object_t object;
        if (vka_alloc_frame_maybe_device(data->vka, size_bits, can_use_dev, &object) != 0) {
            /* abort! */
            ZF_LOGE("Failed to allocate page number: %d out of %zu", i, num_pages);
            error = seL4_NotEnoughMemory;
            break;
        }

        error = map_page(vspace, object.cptr, vaddr, rights, cacheable, size_bits);

        if (error == seL4_NoError) {
            error = update_entries(vspace, (uintptr_t) vaddr, object.cptr, size_bits, object.ut);
            vaddr = (void *)((uintptr_t) vaddr + (BIT(size_bits)));
        } else {
            vka_free_object(data->vka, &object);
            break;
        }
    }

    if (i < num_pages) {
        /* we failed, clean up successfully allocated pages */
        sel4utils_unmap_pages(vspace, start_vaddr, i, size_bits, data->vka);
    }

    return error;
}

/* VSPACE INTERFACE FUNCTIONS */

int sel4utils_map_pages_at_vaddr(vspace_t *vspace, seL4_CPtr caps[], uintptr_t cookies[], void *vaddr,
                                 size_t num_pages, size_t size_bits, reservation_t reservation)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    sel4utils_res_t *res = reservation_to_res(reservation);

    if (!sel4_valid_size_bits(size_bits)) {
        ZF_LOGE("Invalid size_bits %zu", size_bits);
        return -1;
    }

    if (!check_reservation(data->top_level, res, (uintptr_t) vaddr, (uintptr_t)vaddr + num_pages * BIT(size_bits))) {
        ZF_LOGE("Invalid reservation");
        return -1;
    }

    if (res->rights_deferred) {
        ZF_LOGE("Reservation has no rights associated with it");
        return -1;
    }

    return map_pages_at_vaddr(vspace, caps, cookies, vaddr, num_pages, size_bits,
                              res->rights, res->cacheable);
}

int sel4utils_deferred_rights_map_pages_at_vaddr(vspace_t *vspace, seL4_CPtr caps[], uintptr_t cookies[], void *vaddr,
                                                 size_t num_pages, size_t size_bits,
                                                 seL4_CapRights_t rights, reservation_t reservation)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    sel4utils_res_t *res = reservation_to_res(reservation);

    if (!sel4_valid_size_bits(size_bits)) {
        ZF_LOGE("Invalid size_bits %zu", size_bits);
        return -1;
    }

    if (!check_reservation(data->top_level, res, (uintptr_t) vaddr, (uintptr_t)vaddr + num_pages * BIT(size_bits))) {
        ZF_LOGE("Invalid reservation");
        return -1;
    }

    if (!res->rights_deferred) {
        ZF_LOGE("Invalid rights: rights already given to reservation");
        return -1;
    }

    return map_pages_at_vaddr(vspace, caps, cookies, vaddr, num_pages, size_bits,
                              rights, res->cacheable);
}

void *sel4utils_map_pages(vspace_t *vspace, seL4_CPtr caps[], uintptr_t cookies[],
                          seL4_CapRights_t rights, size_t num_pages, size_t size_bits,
                          int cacheable)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    int error;
    void *ret_vaddr;

    assert(num_pages > 0);

    ret_vaddr = find_range(data, num_pages, size_bits);
    if (ret_vaddr == NULL) {
        return NULL;
    }

    error = map_pages_at_vaddr(vspace, caps, cookies,
                               ret_vaddr, num_pages, size_bits,
                               rights, cacheable);
    if (error != 0) {
        if (clear_entries(vspace, (uintptr_t)ret_vaddr, size_bits) != 0) {
            ZF_LOGE("FATAL: Failed to clear VMM metadata for vmem @%p, %zu pages of size 2^%zu.",
                    ret_vaddr, num_pages, size_bits);
            /* This is probably cause for a panic, but continue anyway. */
        }
        return NULL;
    }
    return ret_vaddr;
}

seL4_CPtr sel4utils_get_cap(vspace_t *vspace, void *vaddr)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    seL4_CPtr cap = get_cap(data->top_level, (uintptr_t) vaddr);

    if (cap == RESERVED) {
        cap = 0;
    }
    return cap;
}

uintptr_t sel4utils_get_cookie(vspace_t *vspace, void *vaddr)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    return get_cookie(data->top_level, (uintptr_t) vaddr);
}

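/* Unmap num_pages pages of size 2^size_bits starting at vaddr. If a vka is
 * supplied (VSPACE_FREE selects the vspace's own vka), the frame caps are
 * deleted, their cslots freed and the backing untypeds returned. Entries
 * inside a live reservation revert to reserved; all others are cleared. */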
void sel4utils_unmap_pages(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits, vka_t *vka)
{
    uintptr_t v = (uintptr_t) vaddr;
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    sel4utils_res_t *reserve = find_reserve(data, v);

    if (!sel4_valid_size_bits(size_bits)) {
        ZF_LOGE("Invalid size_bits %zu", size_bits);
        return;
    }

    if (vka == VSPACE_FREE) {
        vka = data->vka;
    }

    for (int i = 0; i < num_pages; i++) {
        seL4_CPtr cap = get_cap(data->top_level, v);

        /* unmap */
        if (cap != 0) {
            int error = seL4_ARCH_Page_Unmap(cap);
            if (error != seL4_NoError) {
                ZF_LOGE("Failed to unmap page at vaddr %p", vaddr);
            }
        }

        if (vka) {
            cspacepath_t path;
            vka_cspace_make_path(vka, cap, &path);
            vka_cnode_delete(&path);
            vka_cspace_free(vka, cap);
            if (sel4utils_get_cookie(vspace, vaddr)) {
                vka_utspace_free(vka, kobject_get_type(KOBJECT_FRAME, size_bits),
                                 size_bits, sel4utils_get_cookie(vspace, vaddr));
            }
        }

        if (reserve == NULL) {
            clear_entries(vspace, v, size_bits);
        } else {
            reserve_entries(vspace, v, size_bits);
        }
        assert(get_cap(data->top_level, v) != cap);
        assert(get_cookie(data->top_level, v) == 0);

        v += (BIT(size_bits));
        vaddr = (void *) v;
    }
}

int sel4utils_new_pages_at_vaddr(vspace_t *vspace, void *vaddr, size_t num_pages,
                                 size_t size_bits, reservation_t reservation, bool can_use_dev)
{
    struct sel4utils_alloc_data *data = get_alloc_data(vspace);
    sel4utils_res_t *res = reservation_to_res(reservation);

    if (!check_reservation(data->top_level, res, (uintptr_t) vaddr, (uintptr_t)vaddr + num_pages * BIT(size_bits))) {
        ZF_LOGE("Range for vaddr %p with %zu pages of size 2^%zu not reserved!", vaddr, num_pages, size_bits);
        return -1;
    }

    return new_pages_at_vaddr(vspace, vaddr, num_pages, size_bits, res->rights, res->cacheable, can_use_dev);
}

void *sel4utils_new_pages(vspace_t *vspace, seL4_CapRights_t rights,
                          size_t num_pages, size_t size_bits)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    int error;
    void *ret_vaddr;

    assert(num_pages > 0);

    ret_vaddr = find_range(data, num_pages, size_bits);
    if (ret_vaddr == NULL) {
        return NULL;
    }

    /* Since sel4utils_new_pages() is an implementation of vspace_new_pages(),
     * it should ideally be preferring to allocate device untypeds and leaving
     * the non-device untypeds for VKA to use when it's allocating kernel objects.
     *
     * Unfortunately it currently has to prefer to allocate non-device untypeds
     * to maintain compatibility with code that uses it incorrectly, such as
     * code that calls vspace_new_pages() to allocate an IPC buffer.
     */
    error = new_pages_at_vaddr(vspace, ret_vaddr, num_pages, size_bits, rights,
                               (int)true, false);
    if (error != 0) {
        if (clear_entries(vspace, (uintptr_t)ret_vaddr, size_bits) != 0) {
            ZF_LOGE("FATAL: Failed to clear VMM metadata for vmem @%p, %zu pages of size 2^%zu.",
                    ret_vaddr, num_pages, size_bits);
            /* This is probably cause for a panic, but continue anyway. */
        }
        return NULL;
    }

    return ret_vaddr;
}

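/* Find a free, suitably aligned range of the requested size and reserve it,
 * using caller-provided storage for the reservation bookkeeping. */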
int sel4utils_reserve_range_no_alloc_aligned(vspace_t *vspace, sel4utils_res_t *reservation,
                                             size_t size, size_t size_bits, seL4_CapRights_t rights, int cacheable, void **result)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    void *vaddr = find_range(data, BYTES_TO_SIZE_BITS_PAGES(size, size_bits), size_bits);

    if (vaddr == NULL) {
        return -1;
    }

    *result = vaddr;
    reservation->malloced = 0;
    reservation->rights_deferred = false;
    perform_reservation(vspace, reservation, (uintptr_t) vaddr, size, rights, cacheable);
    return 0;
}

int sel4utils_reserve_range_no_alloc(vspace_t *vspace, sel4utils_res_t *reservation, size_t size,
                                     seL4_CapRights_t rights, int cacheable, void **result)
{
    return sel4utils_reserve_range_no_alloc_aligned(vspace, reservation, size, seL4_PageBits,
                                                    rights, cacheable, result);
}

reservation_t sel4utils_reserve_range_aligned(vspace_t *vspace, size_t bytes, size_t size_bits, seL4_CapRights_t rights,
                                              int cacheable, void **result)
{
    reservation_t reservation = {
        .res = NULL,
    };

    if (!sel4_valid_size_bits(size_bits)) {
        ZF_LOGE("Invalid size bits %zu", size_bits);
        return reservation;
    }

    sel4utils_res_t *res = malloc(sizeof(sel4utils_res_t));

    if (res == NULL) {
        ZF_LOGE("Malloc failed");
        return reservation;
    }

    reservation.res = res;

    int error = sel4utils_reserve_range_no_alloc_aligned(vspace, res, bytes, size_bits, rights, cacheable, result);
    if (error) {
        /* return before touching res again: it is freed here */
        free(res);
        reservation.res = NULL;
        return reservation;
    }

    res->malloced = 1;
    return reservation;
}

int sel4utils_reserve_range_at_no_alloc(vspace_t *vspace, sel4utils_res_t *reservation, void *vaddr,
                                        size_t size, seL4_CapRights_t rights, int cacheable)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    if (!is_available_range(data->top_level, (uintptr_t) vaddr, (uintptr_t)vaddr + size)) {
        ZF_LOGE("Range not available at %p, size %zu", vaddr, size);
        return -1;
    }
    reservation->malloced = 0;
    reservation->rights_deferred = false;
    perform_reservation(vspace, reservation, (uintptr_t) vaddr, size, rights, cacheable);
    return 0;
}

reservation_t sel4utils_reserve_range_at(vspace_t *vspace, void *vaddr, size_t size, seL4_CapRights_t
                                         rights, int cacheable)
{
    reservation_t reservation;
    reservation.res = malloc(sizeof(sel4utils_res_t));

    if (reservation.res == NULL) {
        ZF_LOGE("Malloc failed");
        return reservation;
    }

    int error = sel4utils_reserve_range_at_no_alloc(vspace, reservation.res, vaddr, size, rights, cacheable);

    if (error) {
        free(reservation.res);
        reservation.res = NULL;
    } else {
        ((sel4utils_res_t *)reservation.res)->malloced = 1;
    }

    return reservation;
}

reservation_t sel4utils_reserve_deferred_rights_range_at(vspace_t *vspace, void *vaddr, size_t size, int cacheable)
{
    reservation_t reservation = sel4utils_reserve_range_at(vspace, vaddr, size, seL4_NoRights, cacheable);
    if (reservation.res != NULL) {
        ((sel4utils_res_t *)reservation.res)->rights_deferred = true;
    }
    return reservation;
}

void sel4utils_free_reservation(vspace_t *vspace, reservation_t reservation)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    sel4utils_res_t *res = reservation.res;

    clear_entries_range(vspace, res->start, res->end, true);
    remove_reservation(data, res);
    if (res->malloced) {
        free(reservation.res);
    }
}

void sel4utils_free_reservation_by_vaddr(vspace_t *vspace, void *vaddr)
{
    reservation_t reservation;
    reservation.res = find_reserve(get_alloc_data(vspace), (uintptr_t) vaddr);
    sel4utils_free_reservation(vspace, reservation);
}

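/* Move and/or resize an existing reservation to cover [vaddr, vaddr + bytes).
 * Newly covered pages must currently be free; pages no longer covered are
 * released. Pages mapped inside the overlap are left untouched. */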
int sel4utils_move_resize_reservation(vspace_t *vspace, reservation_t reservation, void *vaddr,
                                      size_t bytes)
{
    assert(reservation.res != NULL);
    sel4utils_res_t *res = reservation.res;
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);

    uintptr_t new_start = ROUND_DOWN((uintptr_t) vaddr, PAGE_SIZE_4K);
    uintptr_t new_end = ROUND_UP(((uintptr_t)(vaddr)) + bytes, PAGE_SIZE_4K);
    uintptr_t v = 0;

    /* Sanity check that the newly requested reservation space is available. */
    if (new_start < res->start) {
        if (!is_available_range(data->top_level, new_start, res->start)) {
            return -1;
        }
    }
    if (new_end > res->end) {
        if (!is_available_range(data->top_level, res->end, new_end)) {
            return -2;
        }
    }

    for (v = new_start; v < new_end; v += PAGE_SIZE_4K) {
        if (v < res->start || v >= res->end) {
            /* Any page outside the old reservation must be newly reserved. */
            int error UNUSED = reserve_entries_range(vspace, v, v + PAGE_SIZE_4K, true);
            /* Should not cause any errors as we have just checked the regions are free. */
            assert(!error);
        } else {
            /* skip over the pages the old reservation already covers */
            v = res->end - PAGE_SIZE_4K;
        }
    }

    for (v = res->start; v < res->end; v += PAGE_SIZE_4K) {
        if (v < new_start || v >= new_end) {
            /* Clear any pages that are no longer covered by the new region. */
            if (get_cap(data->top_level, v) == RESERVED) {
                clear_entries_range(vspace, v, v + PAGE_SIZE_4K, true);
            }
        } else {
            /* skip over the pages the new reservation still covers */
            v = new_end - PAGE_SIZE_4K;
        }
    }

    bool need_reinsert = false;
    if (res->start != new_start) {
        need_reinsert = true;
    }

    res->start = new_start;
    res->end = new_end;

    /* We may need to re-insert the reservation into the list to keep it sorted by start address. */
    if (need_reinsert) {
        remove_reservation(data, res);
        insert_reservation(data, res);
    }

    return 0;
}

seL4_CPtr sel4utils_get_root(vspace_t *vspace)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    return data->vspace_root;
}

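/* Unmap and free the page mapped at vaddr, if any. A page larger than 4K
 * occupies several consecutive bookkeeping entries that share the same
 * cookie, so walk forward first to determine the actual page size. */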
static void free_page(vspace_t *vspace, vka_t *vka, uintptr_t vaddr)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    vspace_mid_level_t *level = data->top_level;
    /* see if we should free the thing here or not */
    uintptr_t cookie = get_cookie(level, vaddr);
    int num_4k_entries = 1;
    if (cookie != 0) {
        /* walk along and see just how big this page is */
        uintptr_t test_vaddr = vaddr + PAGE_SIZE_4K;
        while (get_cookie(level, test_vaddr) == cookie) {
            test_vaddr += PAGE_SIZE_4K;
            num_4k_entries++;
        }
        /* a page spanning 2^n 4K entries has size (PAGE_BITS_4K + n) bits */
        sel4utils_unmap_pages(vspace, (void *)vaddr, 1, PAGE_BITS_4K + LOG_BASE_2(num_4k_entries), vka);
    }
}

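/* Recursively walk the bookkeeping tables below vaddr at the given level,
 * freeing any mapped pages found and then the bookkeeping tables themselves,
 * which live in the bootstrapper vspace. */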
static void free_pages_at_level(vspace_t *vspace, vka_t *vka, int table_level, uintptr_t vaddr)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);
    vspace_mid_level_t *level = data->top_level;
    /* walk down to the level that we want */
    for (int i = VSPACE_NUM_LEVELS - 1; i > table_level && i > 1; i--) {
        int index = INDEX_FOR_LEVEL(vaddr, i);
        switch (level->table[index]) {
        case RESERVED:
        case EMPTY:
            return;
        }
        level = (vspace_mid_level_t *)level->table[index];
    }
    if (table_level == 0) {
        int index = INDEX_FOR_LEVEL(vaddr, 1);
        switch (level->table[index]) {
        case RESERVED:
        case EMPTY:
            return;
        }
        vspace_bottom_level_t *bottom = (vspace_bottom_level_t *)level->table[index];
        index = INDEX_FOR_LEVEL(vaddr, 0);
        if (bottom->cap[index] != EMPTY && bottom->cap[index] != RESERVED) {
            free_page(vspace, vka, vaddr);
        }
    } else {
        int index = INDEX_FOR_LEVEL(vaddr, table_level);
        switch (level->table[index]) {
        case RESERVED:
        case EMPTY:
            return;
        }
        /* recurse to the sub level */
        for (int j = 0; j < VSPACE_LEVEL_SIZE; j++) {
            free_pages_at_level(vspace, vka,
                                table_level - 1,
                                vaddr + j * BYTES_FOR_LEVEL(table_level - 1));
        }
        vspace_unmap_pages(data->bootstrap, (void *)level->table[index],
                           (table_level == 1 ? sizeof(vspace_bottom_level_t) : sizeof(vspace_mid_level_t)) / PAGE_SIZE_4K, PAGE_BITS_4K,
                           VSPACE_FREE);
    }
}

void sel4utils_tear_down(vspace_t *vspace, vka_t *vka)
{
    sel4utils_alloc_data_t *data = get_alloc_data(vspace);

    if (data->bootstrap == NULL) {
        ZF_LOGE("Not implemented: sel4utils cannot currently tear down a self-bootstrapped vspace");
        return;
    }

    if (vka == VSPACE_FREE) {
        vka = data->vka;
    }

    /* free all the reservations */
    while (data->reservation_head != NULL) {
        reservation_t res = { .res = data->reservation_head };
        sel4utils_free_reservation(vspace, res);
    }

    /* walk each level and find any pages / large pages */
    if (data->top_level) {
        for (int i = 0; i < BIT(VSPACE_LEVEL_BITS); i++) {
            free_pages_at_level(vspace, vka, VSPACE_NUM_LEVELS - 1, BYTES_FOR_LEVEL(VSPACE_NUM_LEVELS - 1) * i);
        }
        vspace_unmap_pages(data->bootstrap, data->top_level, sizeof(vspace_mid_level_t) / PAGE_SIZE_4K, PAGE_BITS_4K,
                           VSPACE_FREE);
    }
}

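/* Share the pages mapped at start in the from vspace into the to vspace at
 * vaddr: each frame cap is duplicated into the destination cspace with the
 * reservation's rights and mapped there. On failure, pages already mapped
 * into the destination are unmapped again. */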
int sel4utils_share_mem_at_vaddr(vspace_t *from, vspace_t *to, void *start, int num_pages,
                                 size_t size_bits, void *vaddr, reservation_t reservation)
{
    int error = 0; /* no error */
    sel4utils_alloc_data_t *from_data = get_alloc_data(from);
    sel4utils_alloc_data_t *to_data = get_alloc_data(to);
    cspacepath_t from_path, to_path;
    int page;
    sel4utils_res_t *res = reservation_to_res(reservation);

    if (!sel4_valid_size_bits(size_bits)) {
        ZF_LOGE("Invalid size bits %zu", size_bits);
        return -1;
    }

    /* go through, page by page, and duplicate the page cap into the to cspace and
     * map it into the to vspace */
    size_t size_bytes = BIT(size_bits);
    for (page = 0; page < num_pages; page++) {
        uintptr_t from_vaddr = (uintptr_t) start + page * size_bytes;
        uintptr_t to_vaddr = (uintptr_t) vaddr + (uintptr_t) page * size_bytes;

        /* get the frame cap to be copied */
        seL4_CPtr cap = get_cap(from_data->top_level, from_vaddr);
        if (cap == seL4_CapNull) {
            ZF_LOGE("No cap to copy in the from vspace at vaddr %"PRIuPTR, from_vaddr);
            error = -1;
            break;
        }

        /* create a path to the cap */
        vka_cspace_make_path(from_data->vka, cap, &from_path);

        /* allocate a path to put the copy in the destination */
        error = vka_cspace_alloc_path(to_data->vka, &to_path);
        if (error) {
            ZF_LOGE("Failed to allocate slot in to cspace, error: %d", error);
            break;
        }

        /* copy the frame cap into the to cspace */
        error = vka_cnode_copy(&to_path, &from_path, res->rights);
        if (error) {
            ZF_LOGE("Failed to copy cap, error %d", error);
            break;
        }

        /* now finally map the page */
        error = map_page(to, to_path.capPtr, (void *) to_vaddr, res->rights, res->cacheable, size_bits);
        if (error) {
            ZF_LOGE("Failed to map page into target vspace at vaddr %"PRIuPTR, to_vaddr);
            break;
        }

        update_entries(to, to_vaddr, to_path.capPtr, size_bits, 0);
    }

    if (error) {
        /* we didn't finish, undo any pages we did map */
        vspace_unmap_pages(to, vaddr, page, size_bits, VSPACE_FREE);
    }

    return error;
}

uintptr_t sel4utils_get_paddr(vspace_t *vspace, void *vaddr, seL4_Word type, seL4_Word size_bits)
{
    vka_t *vka = get_alloc_data(vspace)->vka;
    return vka_utspace_paddr(vka, vspace_get_cookie(vspace, vaddr), type, size_bits);
}