1/*
2 * Copyright 2017, Data61
3 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
4 * ABN 41 687 119 230.
5 *
6 * This software may be distributed and modified according to the terms of
7 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
8 * See "LICENSE_BSD2.txt" for details.
9 *
10 * @TAG(DATA61_BSD)
11 */
12
13#pragma once
14
15#include <sel4utils/gen_config.h>
16
17#include <assert.h>
18#include <stddef.h>
19#include <vka/object.h>
20#include <vspace/page.h>
21
/* Forward declaration; the full struct vspace is defined at the bottom of this file. */
typedef struct vspace vspace_t;

/* Opaque handle to a reserved virtual address range.
 * A NULL `res` field denotes an invalid/empty reservation. */
typedef struct reservation {
    void *res;
} reservation_t;
27
28/**
29 * Configuration for vspace_new_pages functions
30 */
31typedef struct vspace_new_pages_config {
32    /* If `NULL` then the mapping will be created at any available virtual
33       address.  If vaddr is not `NULL` than the mapping will be at
34       that virtual address if successful. */
35    void *vaddr;
36    /* Number of pages to be created and mapped */
37    size_t num_pages;
38    /* Number of bits for each page */
39    size_t size_bits;
40    /* Whether frames used to create pages can be device untyped or regular untyped */
41    bool can_use_dev;
42} vspace_new_pages_config_t;
43
44/**
45 * Returns a default configuration based on supplied parameters that can be passed to vspace_new_pages_with_config
46 * or vspace_new_pages_at_vaddr_with_config.
47 * @param  num_pages   number of pages in reservation
48 * @param  size_bits   size bits of each page
49 * @param  config      config struct to save configuration into
50 * @return             0 on success.
51 */
52static inline int default_vspace_new_pages_config(size_t num_pages, size_t size_bits,
53                                                  vspace_new_pages_config_t *config)
54{
55    if (num_pages == 0) {
56        ZF_LOGW("attempt to create 0 pages");
57        return -1;
58    }
59
60    config->vaddr = NULL;
61    config->num_pages = num_pages;
62    config->size_bits = size_bits;
63    config->can_use_dev = false;
64    return 0;
65}
66
67/**
68 * Set vaddr of the config
69 * @param  vaddr  vaddr to set. See documentation on vspace_new_pages_config_t.
70 * @param  config config struct to save configuration into
71 * @return        0 on success.
72 */
73static inline int vspace_new_pages_config_set_vaddr(void *vaddr, vspace_new_pages_config_t *config)
74{
75    config->vaddr = vaddr;
76    return 0;
77}
78
79/**
80 * Set whether can use device untyped
81 * @param  can_use_dev  `true` if can use device untyped. See documentation on vspace_new_pages_config_t.
82 * @param  config config struct to save configuration into
83 * @return        0 on success.
84 */
85static inline int vspace_new_pages_config_use_device_ut(bool can_use_dev, vspace_new_pages_config_t *config)
86{
87    config->can_use_dev = can_use_dev;
88    return 0;
89}
90
/* IMPLEMENTATION INDEPENDENT FUNCTIONS - implemented by calling the implementation specific
 * function pointers */
93
94/**
95 * Reserve a range to map memory into later, aligned to 4K.
96 * Regions will be aligned to 4K boundaries.
97 *
98 * @param vspace the virtual memory allocator to use.
99 * @param bytes the size in bytes to map.
100 * @param rights the rights to map the pages in with in this reservation
101 * @param cacheable 1 if the pages should be mapped with cacheable attributes. 0 for DMA.
102 * @param vaddr the virtual address of the reserved range will be returned here.
103 *
104 * @return a reservation to use when mapping pages in the range.
105 */
106reservation_t vspace_reserve_range(vspace_t *vspace, size_t bytes,
107                                   seL4_CapRights_t rights, int cacheable, void **vaddr);
108
109/**
110 * Share memory from one vspace to another.
111 *
112 * Make duplicate mappings of the from vspace in a contiguous region in the
113 * to vspace. Pages are expected to already be mapped in the from vspace, or an error
114 * will be returned.
115 *
116 * @param from      vspace to share memory from
117 * @param to        vspace to share memory to
118 * @param start     address to start sharing at
119 * @param num_pages number of pages to share
120 * @param size_bits size of pages in bits
121 * @param rights    rights to map pages into the to vspace with.
122 * @param cacheable cacheable attribute to map pages into the vspace with
123 *
124 * @return address of shared region in to, NULL on failure.
125 */
126void *vspace_share_mem(vspace_t *from, vspace_t *to, void *start, int num_pages,
127                       size_t size_bits, seL4_CapRights_t rights, int cacheable);
128
129/**
130 * Create a virtually contiguous area of mapped pages.
131 * This could be for shared memory or just allocating some pages.
132 * Depending on the config passed in, this will create a reservation or
133 * use an existing reservation
134 * @param  vspace the virtual memory allocator used.
135 * @param  config configuration for this function. See vspace_new_pages_config_t.
136 * @param  rights the rights to map the pages in with
137 * @return        vaddr at the start of the contiguous region
138 *         NULL on failure.
139 */
140void *vspace_new_pages_with_config(vspace_t *vspace, vspace_new_pages_config_t *config, seL4_CapRights_t rights);
141
142/**
143 * Create a stack. The determines stack size.
144 *
145 * @param vspace the virtual memory allocator used.
146 * @param n_pages number of 4k pages to allocate for the stack.
147 *                A 4k guard page will also be reserved in the address space
148 *                to prevent code from running off the created stack.
149 *
150 * @return virtual address of the top of the created stack.
151 *         NULL on failure.
152 */
153void *vspace_new_sized_stack(vspace_t *vspace, size_t n_pages);
154
155/**
156 * Callback invoked when accessing a page through vspace_access_page_with_callback
157 *
158 * @param access_addr address being accessed in source vspace.
159 * @param vaddr the virtual address of the mapped page in the destination vspace.
160 * @param pointer to cookie/data the caller wants passed onto the callback
161 *
162 * @return integer result defined by the callback implementation
163 */
164typedef int (*vspace_access_callback_fn)(void *access_addr, void *vaddr, void *cookie);
165
166/**
167 * Access a page from one vspace in another.
168 *
169 * Duplicate a page mapping out of the 'from' vspace into the 'to' vspace for subsequent access
170 * by a caller defined callback function. The page will be unmapped after the callback function
171 * has been executed. Pages are expected to already be mapped in the 'from' vspace, or an error
172 * will be returned.
173 *
174 * @param from          vspace to access page from
175 * @param to            vspace to map page into
176 * @param access_addr   address to access
177 * @param size_bits     size of page in bits
178 * @param rights        rights to map page into the 'to' vspace with.
179 * @param cacheable     cacheable attribute to map page into the vspace with
180 * @param callback      callback function to pass mapped vaddr onto
181 * @param cookie        pointer to cookie/data the caller wants passed onto the callback
182 *
183 * @return -1 on error, otherwise the integer result of the callback function
184 */
185int vspace_access_page_with_callback(vspace_t *from, vspace_t *to, void *access_addr, size_t size_bits,
186                                     seL4_CapRights_t rights, int cacheable, vspace_access_callback_fn callback, void *cookie);
187
/* Create a stack of the default size (CONFIG_SEL4UTILS_STACK_SIZE bytes). */
static inline void *vspace_new_stack(vspace_t *vspace)
{
    return vspace_new_sized_stack(vspace, BYTES_TO_4K_PAGES(CONFIG_SEL4UTILS_STACK_SIZE));
}

/**
 * Free a stack. This will only free virtual resources, not physical resources.
 *
 * @param vspace the virtual memory allocator used.
 * @param stack_top the top of the stack as returned by vspace_new_sized_stack.
 * @param n_pages number of 4k pages that were allocated for stack.
 *
 */
void vspace_free_sized_stack(vspace_t *vspace, void *stack_top, size_t n_pages);

/* Free a default-sized stack created with vspace_new_stack. */
static inline void vspace_free_stack(vspace_t *vspace, void *stack_top)
{
    vspace_free_sized_stack(vspace, stack_top, BYTES_TO_4K_PAGES(CONFIG_SEL4UTILS_STACK_SIZE));
}

/**
 * Create an IPC buffer.
 *
 * @param[in] vspace the virtual memory allocator used.
 * @param[out] page returns the capability to the page the IPC buffer was mapped in with
 *
 * @return vaddr of the mapped in IPC buffer
 *         NULL on failure.
 */
void *vspace_new_ipc_buffer(vspace_t *vspace, seL4_CPtr *page);

/**
 * Free an IPC buffer. This will only free virtual resources, not physical resources.
 *
 *
 * @param vspace the virtual memory allocator used.
 * @param addr address the IPC buffer was mapped in to.
 *
 */
void vspace_free_ipc_buffer(vspace_t *vspace, void *addr);
228
/* IMPLEMENTATION SPECIFIC FUNCTIONS - function pointers of the vspace used */

/* Allocate and map num_pages pages of size size_bits at any free virtual address.
 * Returns the vaddr at the start of the region, or NULL on failure. */
typedef void *(*vspace_new_pages_fn)(vspace_t *vspace, seL4_CapRights_t rights,
                                     size_t num_pages, size_t size_bits);

/* Map the given page capabilities (with optional vka allocation cookies) at any
 * free virtual address. Returns the vaddr at the start of the mapping, or NULL
 * on failure. */
typedef void *(*vspace_map_pages_fn)(vspace_t *vspace,
                                     seL4_CPtr caps[], uintptr_t cookies[], seL4_CapRights_t rights,
                                     size_t num_pages, size_t size_bits, int cacheable);
237
238/**
239 * Create a virtually contiguous area of mapped pages, at the specified virtual address.
240 *
241 * This is designed for elf loading, where virtual addresses are chosen for you.
242 * The vspace allocator will not allow this address to be reused unless you free it.
243 *
244 * This function will FAIL if the virtual address range requested is not free.
245 *
246 *
247 * @param vspace the virtual memory allocator used.
248 * @param vaddr the virtual address to start allocation at.
249 * @param num_pages the number of pages to allocate and map.
250 * @param size_bits size of the pages to allocate and map, in bits.
251 * @param reservation reservation to the range the allocation will take place in.
252 * @param can_use_dev whether the underlying allocator can allocate object from ram device_untyped
253 *                    (Setting this to true is normally safe unless when creating IPC buffers.)
254 * @return seL4_NoError on success, -1 otherwise.
255 */
256typedef int (*vspace_new_pages_at_vaddr_fn)(vspace_t *vspace, void *vaddr, size_t num_pages,
257                                            size_t size_bits, reservation_t reservation, bool can_use_dev);
258
259/**
260 * Map in existing page capabilities, using contiguos virtual memory at the specified virtual address.
261 *
262 * This will FAIL if the virtual address is already mapped in.
263 *`
264 * @param vspace the virtual memory allocator used.
265 * @param seL4_CPtr caps array of caps to map in
266 * @param uintptr_t cookies array of allocation cookies. Populate this if you want the vspace to
267 *                         be able to free the caps for you with a vka. NULL acceptable.
268 * @param size_bits size, in bits, of an individual page -- all pages must be the same size.
269 * @param num_pages the number of pages to map in (must correspond to the size of the array).
270 * @param reservation reservation to the range the allocation will take place in.
271 *
272 * @return seL4_NoError on success. -1 on failure.
273 */
274typedef int (*vspace_map_pages_at_vaddr_fn)(vspace_t *vspace, seL4_CPtr caps[], uintptr_t cookies[],
275                                            void *vaddr, size_t num_pages,
276                                            size_t size_bits, reservation_t reservation);
277
278/**
279 * Map in existing page capabilities, using contiguos virtual memory at the specified virtual address.
280 * Mapping is performed with the rights given by the caller if the reservation given has no rights
281 * associated with it.
282 *
283 * This will FAIL if the virtual address is already mapped in.
284 *`
285 * @param vspace the virtual memory allocator used.
286 * @param seL4_CPtr caps array of caps to map in
287 * @param uintptr_t cookies array of allocation cookies. Populate this if you want the vspace to
288 *                         be able to free the caps for you with a vka. NULL acceptable.
289 * @param size_bits size, in bits, of an individual page -- all pages must be the same size.
290 * @param num_pages the number of pages to map in (must correspond to the size of the array).
291 * @param rights the rights to map the pages in.
292 * @param reservation reservation to the range the allocation will take place in.
293 *
294 * @return seL4_NoError on success. -1 on failure.
295 */
296typedef int (*vspace_deferred_rights_map_pages_at_vaddr_fn)(vspace_t *vspace, seL4_CPtr caps[], uintptr_t cookies[],
297                                                            void *vaddr, size_t num_pages,
298                                                            size_t size_bits, seL4_CapRights_t rights, reservation_t reservation);
299
/* Potential values for the `free` argument of vspace_unmap_pages and vspace_tear_down. */
#define VSPACE_FREE ((vka_t *) 0xffffffff)
#define VSPACE_PRESERVE ((vka_t *) 0)

/**
 * Unmap existing page capabilities that use contiguous virtual memory.
 *
 * This function can also free the cslots and frames that back the virtual memory in the region.
 * This can be done by the internal vka that the vspace was created with, or the user can provide
 * a vka to free with. The vka must be the same vka that the frame object and cslot were allocated with.
 *
 * Reservations are preserved.
 *
 * @param vspace the virtual memory allocator used.
 * @param vaddr the start of the contiguous region.
 * @param size_bits size, in bits, of an individual page -- all pages must be the same size.
 * @param num_pages the number of pages to unmap.
 * @param free interface to free frame objects and cslots with, options:
 *             + VSPACE_FREE to free the frames/cslots with the vspace internal vka,
 *             + VSPACE_PRESERVE to not free the frames/cslots or
 *             + a pointer to a custom vka to free the frames/cslots with.
 *
 */
typedef void (*vspace_unmap_pages_fn)(vspace_t *vspace, void *vaddr, size_t num_pages,
                                      size_t size_bits, vka_t *free);
325
326/**
327 * Tear down a vspace, freeing any memory allocated by the vspace itself.
328 *
329 * Like vspace_unmap_pages this function can also free the frames and cslots backing
330 * the vspace, if a vka is provided.
331 *
332 * When using this function to tear down all backing frames/cslots the user MUST make sure
333 * that any frames/cslots not allocated by the vka being used to free have already been unmapped
334 * from the vspace *or* that the cookies for these custom mappings are set to 0.
335 * If this is not done the vspace will attempt to use the wrong vka to free
336 * frames and cslots resulting in allocator corruption.
337 *
338 * To completely free a vspace the user should also free any objects/cslots that the vspace
339 * called vspace_allocated_object_fn on, as the vspace has essentially delegated control
340 * of these objects/cslots to the user.
341 *
342 * @param vspace the vspace to tear down.
343 * @param free vka to use to free the cslots/frames, options:
344 *             + VSPACE_FREE to use the internal vka,
345 *             + VSPACE_PRESERVE to not free the frames/cslots,
346 *             + a pointer to a custom vka to free the frames/cslots with.
347 */
348typedef void (*vspace_tear_down_fn)(vspace_t *vspace, vka_t *free);
349
350/**
351 * Reserve a range to map memory into later.
352 * Regions will be aligned to 4K boundaries.
353 *
354 * @param vspace the virtual memory allocator to use.
355 * @param bytes the size in bytes to map.
356 * @param size_bits size to align the range to
357 * @param rights the rights to map the pages in with in this reservation
358 * @param cacheable 1 if the pages should be mapped with cacheable attributes. 0 for DMA.
359 * @param vaddr the virtual address of the reserved range will be returned here.
360 *
361 * @return a reservation to use when mapping pages in the range.
362 */
363typedef reservation_t (*vspace_reserve_range_aligned_fn)(vspace_t *vspace, size_t bytes, size_t size_bits,
364                                                         seL4_CapRights_t rights, int cacheable, void **vaddr);
365
366/**
367 * Reserve a range to map memory in to later at a specific address.
368 * Regions will be aligned to 4K boundaries.
369 *
370 * @param vspace the virtual memory allocator to use.
371 * @param vaddr the virtual address to start the range at.
372 * @param bytes the size in bytes to map.
373 * @param rights the rights to map the pages in with in this reservation
374 * @param cacheable 1 if the pages should be mapped with cacheable attributes. 0 for DMA.
375 *
376 * @return a reservation to use when mapping pages in the range.
377 */
378typedef reservation_t (*vspace_reserve_range_at_fn)(vspace_t *vspace, void *vaddr,
379                                                    size_t bytes, seL4_CapRights_t rights, int cacheable);
380
381/**
382 * Reserve a range to map memory in to later at a specific address.
383 * The rights of the memory within the range are deferred to when performing the mapping.
384 * Regions will be aligned to 4K boundaries.
385 *
386 * @param vspace the virtual memory allocator to use.
387 * @param vaddr the virtual address to start the range at.
388 * @param bytes the size in bytes to map.
389 * @param rights the rights to map the pages in with in this reservation
390 * @param cacheable 1 if the pages should be mapped with cacheable attributes. 0 for DMA.
391 *
392 * @return a reservation to use when mapping pages in the range.
393 */
394typedef reservation_t (*vspace_reserve_deferred_rights_range_at_fn)(vspace_t *vspace, void *vaddr,
395                                                                    size_t bytes, int cacheable);
396
397/**
398 * Free a reservation.
399 *
400 * This will not touch any pages, but will unreserve any reserved addresses in the reservation.
401 *
402 * @param vspace the virtual memory allocator to use.
403 * @param reservation the reservation to free.
404 */
405typedef void (*vspace_free_reservation_fn)(vspace_t *vspace, reservation_t reservation);
406
407/**
408 * Free a reservation by vaddr.
409 *
410 * This will not touch any pages, but will unreserve any reserved addresses in the reservation.
411 *
412 * @param vspace the virtual memory allocator to use.
413 * @param vaddr a vaddr in the reservation (will free entire reservation).
414 */
415typedef void (*vspace_free_reservation_by_vaddr_fn)(vspace_t *vspace, void *vaddr);
416
417/**
418 * Get the capability mapped at a virtual address.
419 *
420 *
421 * @param vspace the virtual memory allocator to use.
422 * @param vaddr the virtual address to get the cap for.
423 *
424 * @return the cap mapped to this virtual address, 0 otherwise.
425 */
426typedef seL4_CPtr(*vspace_get_cap_fn)(vspace_t *vspace, void *vaddr);
427
428/**
429 * Get the vka allocation cookie for an the frame mapped at a virtual address.
430 *
431 * @param vspace the virtual memory allocator to use.
432 * @param vaddr the virtual address to get the cap for.
433 *
434 * @return the allocation cookie mapped to this virtual address, 0 otherwise.
435 */
436typedef uintptr_t (*vspace_get_cookie_fn)(vspace_t *vspace, void *vaddr);
437
438/**
439 * Function that the vspace allocator will call if it allocates any memory.
440 * This allows the user to clean up the allocation at a later time.
441 *
442 * If this function is null, it will not be called.
443 *
444 * @param vspace the virtual memory space allocator to use.
445 * @param allocated_object_cookie A cookie provided by the user when the vspace allocator is
446 *                                initialised --> vspace->allocated_object_cookie/
447 * @param object the object that was allocated.
448 */
449typedef void (*vspace_allocated_object_fn)(void *allocated_object_cookie, vka_object_t object);
450
451/* @return the page directory for this vspace
452 */
453typedef seL4_CPtr(*vspace_get_root_fn)(vspace_t *vspace);
454
455/**
456 * Share memory from one vspace to another at a specific address. Pages are expected
457 * to already be mapped in the from vspace, or an error will be returned.
458 *
459 * @param from        vspace to share memory from
460 * @param to          vspace to share memory to
461 * @param start       address to start sharing at
462 * @param num_pages   number of pages to share
463 * @param size_bits   size of pages in bits
464 * @param vaddr       vaddr to start sharing memory at
465 * @param reservation reservation for that vaddr.
466 *
467 * @return 0 on success
468 */
469typedef int (*vspace_share_mem_at_vaddr_fn)(vspace_t *from, vspace_t *to, void *start, int num_pages, size_t size_bits,
470                                            void *vaddr, reservation_t res);
471
/* Portable virtual memory allocation interface */
struct vspace {
    /* Implementation-private state. */
    void *data;

    /* Page allocation and mapping. */
    vspace_new_pages_fn new_pages;
    vspace_map_pages_fn map_pages;

    vspace_new_pages_at_vaddr_fn new_pages_at_vaddr;

    vspace_map_pages_at_vaddr_fn map_pages_at_vaddr;
    vspace_deferred_rights_map_pages_at_vaddr_fn deferred_rights_map_pages_at_vaddr;

    /* Unmapping and teardown. */
    vspace_unmap_pages_fn unmap_pages;
    vspace_tear_down_fn tear_down;

    /* Reservation management. */
    vspace_reserve_range_aligned_fn reserve_range_aligned;
    vspace_reserve_range_at_fn reserve_range_at;
    vspace_reserve_deferred_rights_range_at_fn reserve_deferred_rights_range_at;
    vspace_free_reservation_fn free_reservation;
    vspace_free_reservation_by_vaddr_fn free_reservation_by_vaddr;

    /* Queries. */
    vspace_get_cap_fn get_cap;
    vspace_get_root_fn get_root;
    vspace_get_cookie_fn get_cookie;

    vspace_share_mem_at_vaddr_fn share_mem_at_vaddr;

    /* Optional callback invoked whenever the vspace allocates an object;
     * NULL means no callback. */
    vspace_allocated_object_fn allocated_object;
    void *allocated_object_cookie;
};
502
503/* convenient wrappers */
504
505/**
506 * Create a virtually contiguous area of mapped pages.
507 * This could be for shared memory or just allocating some pages.
508 *
509 * @param vspace the virtual memory allocator used.
510 * @param rights the rights to map the pages in with
511 * @param num_pages the number of pages to allocate and map.
512 * @param size_bits size of the pages to allocate and map, in bits.
513 *
514 * @return vaddr at the start of the contiguous region
515 *         NULL on failure.
516 */
517static inline void *vspace_new_pages(vspace_t *vspace, seL4_CapRights_t rights,
518                                     size_t num_pages, size_t size_bits)
519{
520    if (vspace == NULL) {
521        ZF_LOGE("vspace is NULL.");
522        return NULL;
523    }
524    if (vspace->new_pages == NULL) {
525        ZF_LOGE("Supplied vspace doesn't implement new_pages().");
526        return NULL;
527    }
528    if (num_pages == 0) {
529        ZF_LOGE("Called with num_pages == 0. Intentional?");
530        return NULL;
531    }
532    if (!sel4_valid_size_bits(size_bits)) {
533        ZF_LOGE("Invalid size_bits %zu", size_bits);
534        return NULL;
535    }
536
537    return vspace->new_pages(vspace, rights, num_pages, size_bits);
538}
539
540/**
541 * Map in existing page capabilities, using contiguos virtual memory.
542 *
543 * @param vspace the virtual memory allocator used.
544 * @param seL4_CPtr caps array of caps to map in
545 * @param uint32_t cookies array of allocation cookies. Populate this if you want the vspace to
546 *                         be able to free the caps for you with a vka. NULL acceptable.
547 * @param rights the rights to map the pages in with
548 * @param size_bits size, in bits, of an individual page -- all pages must be the same size.
549 * @param num_pages the number of pages to map in (must correspond to the size of the array).
550 * @param cacheable 1 if the pages should be mapped with cacheable attributes. 0 for DMA.
551 *
552 * @return vaddr at the start of the device mapping
553 *         NULL on failure.
554 */
555static inline void *vspace_map_pages(vspace_t *vspace, seL4_CPtr caps[], uintptr_t cookies[],
556                                     seL4_CapRights_t rights, size_t num_pages, size_t size_bits,
557                                     int cacheable)
558{
559    if (vspace == NULL) {
560        ZF_LOGE("vspace is NULL.");
561        return NULL;
562    }
563    if (vspace->new_pages == NULL) {
564        ZF_LOGE("Supplied vspace doesn't implement map_pages().");
565        return NULL;
566    }
567    if (num_pages == 0) {
568        ZF_LOGE("Called with num_pages == 0. Intentional?");
569        return NULL;
570    }
571    if (!sel4_valid_size_bits(size_bits)) {
572        ZF_LOGE("Invalid size_bits %zu", size_bits);
573        return NULL;
574    }
575
576    return vspace->map_pages(vspace, caps, cookies, rights,
577                             num_pages, size_bits, cacheable);
578}
579
580static inline int vspace_new_pages_at_vaddr_with_config(vspace_t *vspace, vspace_new_pages_config_t *config,
581                                                        reservation_t res)
582{
583    if (vspace == NULL) {
584        ZF_LOGE("vspace is NULL");
585        return -1;
586    }
587    if (res.res == NULL) {
588        ZF_LOGE("reservation is required");
589    }
590    return vspace->new_pages_at_vaddr(vspace, config->vaddr, config->num_pages, config->size_bits, res,
591                                      config->can_use_dev);
592}
593
594static inline int vspace_new_pages_at_vaddr(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits,
595                                            reservation_t reservation)
596{
597    if (vspace == NULL) {
598        ZF_LOGE("vspace is NULL");
599        return -1;
600    }
601
602    if (vspace->new_pages_at_vaddr == NULL) {
603        ZF_LOGE("Unimplemented");
604        return -1;
605    }
606    vspace_new_pages_config_t config;
607    if (default_vspace_new_pages_config(num_pages, size_bits, &config)) {
608        ZF_LOGE("Failed to create config");
609        return -1;
610    }
611    if (vspace_new_pages_config_set_vaddr(vaddr, &config)) {
612        ZF_LOGE("Failed to set vaddr");
613        return -1;
614    }
615    return vspace_new_pages_at_vaddr_with_config(vspace, &config, reservation);
616}
617
618static inline int vspace_map_pages_at_vaddr(vspace_t *vspace, seL4_CPtr caps[], uintptr_t cookies[], void *vaddr,
619                                            size_t num_pages, size_t size_bits, reservation_t reservation)
620{
621    if (vspace == NULL) {
622        ZF_LOGE("vspace is NULL");
623        return -1;
624    }
625
626    if (num_pages == 0) {
627        ZF_LOGW("Attempt to map 0 pages");
628        return -1;
629    }
630
631    if (vaddr == NULL) {
632        ZF_LOGW("Mapping NULL");
633    }
634
635    if (vspace->map_pages_at_vaddr == NULL) {
636        ZF_LOGW("Unimplemented\n");
637        return -1;
638    }
639
640    return vspace->map_pages_at_vaddr(vspace, caps, cookies, vaddr, num_pages, size_bits, reservation);
641}
642
643static inline int vspace_deferred_rights_map_pages_at_vaddr(vspace_t *vspace, seL4_CPtr caps[], uintptr_t cookies[],
644                                                            void *vaddr, size_t num_pages, size_t size_bits,
645                                                            seL4_CapRights_t rights, reservation_t reservation)
646{
647    if (vspace == NULL) {
648        ZF_LOGE("vspace is NULL");
649        return -1;
650    }
651
652    if (num_pages == 0) {
653        ZF_LOGW("Attempt to map 0 pages");
654        return -1;
655    }
656
657    if (vaddr == NULL) {
658        ZF_LOGW("Mapping NULL");
659    }
660
661    if (vspace->deferred_rights_map_pages_at_vaddr == NULL) {
662        ZF_LOGW("Unimplemented\n");
663        return -1;
664    }
665
666    return vspace->deferred_rights_map_pages_at_vaddr(vspace, caps, cookies, vaddr, num_pages,
667                                                      size_bits, rights, reservation);
668}
669
670static inline void vspace_unmap_pages(vspace_t *vspace, void *vaddr, size_t num_pages, size_t size_bits, vka_t *vka)
671{
672
673    if (vspace == NULL) {
674        ZF_LOGE("vspace is NULL");
675        return;
676    }
677
678    if (num_pages == 0) {
679        printf("Num pages : %zu\n", num_pages);
680        ZF_LOGW("Attempt to unmap 0 pages");
681        return;
682    }
683
684    if (vaddr == NULL) {
685        ZF_LOGW("Attempt to unmap NULL\n");
686    }
687
688    if (vspace->unmap_pages == NULL) {
689        ZF_LOGE("Not implemented\n");
690        return;
691    }
692
693    vspace->unmap_pages(vspace, vaddr, num_pages, size_bits, vka);
694}
695
696static inline void vspace_tear_down(vspace_t *vspace, vka_t *vka)
697{
698    if (vspace == NULL) {
699        ZF_LOGE("vspace is NULL");
700        return;
701    }
702
703    if (vspace->tear_down == NULL) {
704        ZF_LOGE("Not implemented");
705        return;
706    }
707    vspace->tear_down(vspace, vka);
708}
709
710static inline reservation_t vspace_reserve_range_aligned(vspace_t *vspace, size_t bytes, size_t size_bits,
711                                                         seL4_CapRights_t rights, int cacheable, void **vaddr)
712{
713    reservation_t error = { .res = 0 };
714
715    if (vspace == NULL) {
716        ZF_LOGE("vspace is NULL");
717        return error;
718    }
719
720    if (vspace->reserve_range_aligned == NULL) {
721        ZF_LOGE("Not implemented");
722        return error;
723    }
724
725    if (bytes == 0) {
726        ZF_LOGE("Attempt to reserve 0 length range");
727        return error;
728    }
729
730    if (vaddr == NULL) {
731        ZF_LOGE("Cannot store result at NULL");
732        return error;
733    }
734
735    return vspace->reserve_range_aligned(vspace, bytes, size_bits, rights, cacheable, vaddr);
736}
737
738static inline reservation_t vspace_reserve_range_at(vspace_t *vspace, void *vaddr,
739                                                    size_t bytes, seL4_CapRights_t rights, int cacheable)
740{
741    reservation_t error = { .res = 0 };
742
743    if (vspace == NULL) {
744        ZF_LOGE("vspace is NULL");
745        return error;
746    }
747
748    if (vspace->reserve_range_at == NULL) {
749        ZF_LOGE("Not implemented");
750        return error;
751    }
752
753    if (bytes == 0) {
754        ZF_LOGE("Attempt to reserve 0 length range");
755        return error;
756    }
757
758    return vspace->reserve_range_at(vspace, vaddr, bytes, rights, cacheable);
759}
760
761static inline reservation_t vspace_reserve_deferred_rights_range_at(vspace_t *vspace, void *vaddr,
762                                                                    size_t bytes, int cacheable)
763{
764    reservation_t error = { .res = 0 };
765
766    if (vspace == NULL) {
767        ZF_LOGE("vspace is NULL");
768        return error;
769    }
770
771    if (vspace->reserve_deferred_rights_range_at == NULL) {
772        ZF_LOGE("Not implemented");
773        return error;
774    }
775
776    if (bytes == 0) {
777        ZF_LOGE("Attempt to reserve 0 length range");
778        return error;
779    }
780    return vspace->reserve_deferred_rights_range_at(vspace, vaddr, bytes, cacheable);
781}
782
783static inline void vspace_free_reservation(vspace_t *vspace, reservation_t reservation)
784{
785    if (vspace == NULL) {
786        ZF_LOGE("vspace is NULL");
787        return;
788    }
789
790    if (vspace->free_reservation == NULL) {
791        ZF_LOGE("Not implemented");
792        return;
793    }
794
795    vspace->free_reservation(vspace, reservation);
796}
797
798static inline void vspace_free_reservation_by_vaddr(vspace_t *vspace, void *vaddr)
799{
800    if (vspace == NULL) {
801        ZF_LOGE("vspace is NULL");
802        return;
803    }
804
805    if (vspace->free_reservation_by_vaddr == NULL) {
806        ZF_LOGE("Not implemented");
807        return;
808    }
809
810    vspace->free_reservation_by_vaddr(vspace, vaddr);
811}
812
813static inline seL4_CPtr vspace_get_cap(vspace_t *vspace, void *vaddr)
814{
815
816    if (vspace == NULL) {
817        ZF_LOGE("vspace is NULL");
818        return seL4_CapNull;
819    }
820
821    if (vaddr == NULL) {
822        ZF_LOGW("Warning: null address");
823    }
824
825    if (vspace->get_cap == NULL) {
826        ZF_LOGE("Not implemented\n");
827        return seL4_CapNull;
828    }
829
830    return vspace->get_cap(vspace, vaddr);
831}
832
833static inline uintptr_t vspace_get_cookie(vspace_t *vspace, void *vaddr)
834{
835    if (vspace == NULL) {
836        ZF_LOGE("vspace is NULL");
837        return 0;
838    }
839
840    if (vaddr == NULL) {
841        /* only warn as someone might do this intentionally? */
842        ZF_LOGW("Warning: null address");
843    }
844
845    if (vspace->get_cookie == NULL) {
846        ZF_LOGE("Not implemented");
847        return 0;
848    }
849
850    return vspace->get_cookie(vspace, vaddr);
851}
852
853/* Helper functions */
854
855static inline void vspace_maybe_call_allocated_object(vspace_t *vspace, vka_object_t object)
856{
857    if (vspace == NULL) {
858        ZF_LOGF("vspace is NULL");
859    }
860
861    if (vspace->allocated_object != NULL) {
862        vspace->allocated_object(vspace->allocated_object_cookie, object);
863    }
864}
865
866static inline seL4_CPtr vspace_get_root(vspace_t *vspace)
867{
868    if (vspace == NULL) {
869        ZF_LOGE("vspace is NULL");
870        return seL4_CapNull;
871    }
872    if (vspace->get_root == NULL) {
873        ZF_LOGE("Not implemented");
874        return seL4_CapNull;
875    }
876    return vspace->get_root(vspace);
877}
878
879static inline int vspace_share_mem_at_vaddr(vspace_t *from, vspace_t *to, void *start, int num_pages,
880                                            size_t size_bits, void *vaddr, reservation_t res)
881{
882
883    if (num_pages == 0) {
884        /* nothing to do */
885        return -1;
886    } else if (num_pages < 0) {
887        ZF_LOGE("Attempted to share %d pages\n", num_pages);
888        return -1;
889    }
890
891    if (from == NULL) {
892        ZF_LOGE("From vspace does not exist");
893        return -1;
894    }
895
896    if (to == NULL) {
897        ZF_LOGE("To vspace does not exist");
898        return -1;
899    }
900
901    if (vaddr == NULL) {
902        ZF_LOGE("Cannot share memory at NULL");
903        return -1;
904    }
905
906    if (from->share_mem_at_vaddr == NULL) {
907        ZF_LOGE("Not implemented for this vspace\n");
908        return -1;
909    }
910
911    return from->share_mem_at_vaddr(from, to, start, num_pages, size_bits, vaddr, res);
912}
913
914