/*
 * Copyright 2019, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>

#include <sel4/sel4.h>
#include <utils/util.h>
#include <vka/capops.h>

#include <sel4vm/guest_vm.h>
#include <sel4vm/guest_ram.h>
#include <sel4vm/guest_memory.h>

#include "guest_memory.h"

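/* Bookkeeping handed to the per-page vspace access callback while touching guest RAM */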
struct guest_mem_touch_params {
    void *data;
    size_t size;
    size_t offset;
    uintptr_t current_addr;
    vm_t *vm;
    ram_touch_callback_fn touch_fn;
};

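/* Append a RAM region record to the guest memory bookkeeping list, growing the
 * backing array by one entry. Returns 0 on success, or -1 for a zero-sized
 * region or on allocation failure. */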
static int push_guest_ram_region(vm_mem_t *guest_memory, uintptr_t start, size_t size, int allocated)
{
    int last_region = guest_memory->num_ram_regions;
    if (size == 0) {
        return -1;
    }
    vm_ram_region_t *extended_regions = realloc(guest_memory->ram_regions, sizeof(vm_ram_region_t) * (last_region + 1));
    if (extended_regions == NULL) {
        return -1;
    }
    guest_memory->ram_regions = extended_regions;

    guest_memory->ram_regions[last_region].start = start;
    guest_memory->ram_regions[last_region].size = size;
    guest_memory->ram_regions[last_region].allocated = allocated;
    guest_memory->num_ram_regions++;
    return 0;
}

static int ram_region_cmp(const void *a, const void *b)
{
    const vm_ram_region_t *aa = a;
    const vm_ram_region_t *bb = b;
    /* Compare explicitly: truncating the uintptr_t difference to int can yield the wrong sign */
    return (aa->start > bb->start) - (aa->start < bb->start);
}

static void sort_guest_ram_regions(vm_mem_t *guest_memory)
{
    qsort(guest_memory->ram_regions, guest_memory->num_ram_regions, sizeof(vm_ram_region_t), ram_region_cmp);
}

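/* Drop the region at the given index from the bookkeeping list, shifting later
 * entries down and shrinking the backing array. */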
static void guest_ram_remove_region(vm_mem_t *guest_memory, int region)
{
    if (region >= guest_memory->num_ram_regions) {
        return;
    }
    guest_memory->num_ram_regions--;
    memmove(&guest_memory->ram_regions[region], &guest_memory->ram_regions[region + 1],
            sizeof(vm_ram_region_t) * (guest_memory->num_ram_regions - region));
    /* realloc it smaller; if the shrink fails, keep the old (larger) allocation,
     * which still holds all remaining entries */
    if (guest_memory->num_ram_regions > 0) {
        vm_ram_region_t *shrunk = realloc(guest_memory->ram_regions,
                                          sizeof(vm_ram_region_t) * guest_memory->num_ram_regions);
        if (shrunk != NULL) {
            guest_memory->ram_regions = shrunk;
        }
    }
}

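/* Merge adjacent regions that are physically contiguous and share the same
 * allocation flag. Assumes the region list is sorted by start address. */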
static void collapse_guest_ram_regions(vm_mem_t *guest_memory)
{
    int i;
    for (i = 1; i < guest_memory->num_ram_regions;) {
        /* Only collapse regions with the same allocation flag that are contiguous */
        if (guest_memory->ram_regions[i - 1].allocated == guest_memory->ram_regions[i].allocated &&
            guest_memory->ram_regions[i - 1].start + guest_memory->ram_regions[i - 1].size == guest_memory->ram_regions[i].start) {

            guest_memory->ram_regions[i - 1].size += guest_memory->ram_regions[i].size;
            guest_ram_remove_region(guest_memory, i);
        } else {
            /* We are satisfied that this entry cannot be merged, so move on to
             * the next one */
            i++;
        }
    }
}

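/* Record a newly registered range of guest RAM and fold it into the sorted,
 * collapsed region list. */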
static int expand_guest_ram_region(vm_t *vm, uintptr_t start, size_t bytes)
{
    int err;
    vm_mem_t *guest_memory = &vm->mem;
    /* blindly put a new region at the end */
    err = push_guest_ram_region(guest_memory, start, bytes, 0);
    if (err) {
        ZF_LOGE("Failed to expand guest ram region");
        return err;
    }
    /* sort the regions so the new one ends up in address order */
    sort_guest_ram_regions(guest_memory);
    /* collapse any contiguous regions */
    collapse_guest_ram_regions(guest_memory);
    return 0;
}

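/* Check whether [addr, addr + size) lies entirely within a single registered RAM region */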
static bool is_ram_region(vm_t *vm, uintptr_t addr, size_t size)
{
    vm_mem_t *guest_memory = &vm->mem;
    for (int i = 0; i < guest_memory->num_ram_regions; i++) {
        if (guest_memory->ram_regions[i].start <= addr &&
            guest_memory->ram_regions[i].start + guest_memory->ram_regions[i].size >= addr + size) {
            /* We are within a ram region */
            return true;
        }
    }
    return false;
}

static memory_fault_result_t default_ram_fault_callback(vm_t *vm, vm_vcpu_t *vcpu, uintptr_t fault_addr,
                                                        size_t fault_length, void *cookie)
{
    /* RAM faults are not handled by default unless this callback is specifically overridden, hence we fail here */
    ZF_LOGE("ERROR: UNHANDLED RAM FAULT");
    return FAULT_ERROR;
}

/* Helpers for use with vm_ram_touch below */
int vm_guest_ram_read_callback(vm_t *vm, uintptr_t addr, void *vaddr, size_t size, size_t offset, void *buf)
{
    /* Copy this chunk into the caller's buffer at its offset within the overall access */
    memcpy((char *)buf + offset, vaddr, size);
    return 0;
}

int vm_guest_ram_write_callback(vm_t *vm, uintptr_t addr, void *vaddr, size_t size, size_t offset, void *buf)
{
    memcpy(vaddr, (char *)buf + offset, size);
    return 0;
}

static int touch_access_callback(void *access_addr, void *vaddr, void *cookie)
{
    struct guest_mem_touch_params *guest_touch = (struct guest_mem_touch_params *)cookie;
    uintptr_t vmm_addr = (uintptr_t)vaddr;
    uintptr_t vm_addr = (uintptr_t)access_addr;
    return guest_touch->touch_fn(guest_touch->vm, vm_addr,
                                 (void *)(vmm_addr + (guest_touch->current_addr - vm_addr)),
                                 guest_touch->size, guest_touch->offset, guest_touch->data);
}

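/*
 * Walk [addr, addr + size) one 4K page at a time, mapping each page into the
 * VMM vspace and invoking the supplied callback on the chunk that falls within
 * that page.
 *
 * Example (sketch): reading a guest physical range into a local buffer with the
 * read helper above; 'guest_paddr' and 'len' stand in for caller-provided values.
 *
 *     char buf[len];
 *     int err = vm_ram_touch(vm, guest_paddr, len, vm_guest_ram_read_callback, buf);
 *     if (err) {
 *         ZF_LOGE("Failed to read guest RAM");
 *     }
 */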
int vm_ram_touch(vm_t *vm, uintptr_t addr, size_t size, ram_touch_callback_fn touch_callback, void *cookie)
{
    struct guest_mem_touch_params access_cookie;
    uintptr_t current_addr;
    uintptr_t next_addr;
    uintptr_t end_addr = (uintptr_t)(addr + size);
    if (!is_ram_region(vm, addr, size)) {
        ZF_LOGE("Failed to touch ram region: Not registered RAM region");
        return -1;
    }
    access_cookie.touch_fn = touch_callback;
    access_cookie.data = cookie;
    access_cookie.vm = vm;
    for (current_addr = addr; current_addr < end_addr; current_addr = next_addr) {
        uintptr_t current_aligned = PAGE_ALIGN_4K(current_addr);
        uintptr_t next_page_start = current_aligned + PAGE_SIZE_4K;
        next_addr = MIN(end_addr, next_page_start);
        access_cookie.size = next_addr - current_addr;
        access_cookie.offset = current_addr - addr;
        access_cookie.current_addr = current_addr;
        int result = vspace_access_page_with_callback(&vm->mem.vm_vspace, &vm->mem.vmm_vspace, (void *)current_aligned,
                                                      seL4_PageBits, seL4_AllRights, 1, touch_access_callback, &access_cookie);
        if (result) {
            return result;
        }
    }
    return 0;
}

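/* Find the largest unallocated RAM region and return its start and size.
 * Returns 0 on success, or -1 if there are no free regions. */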
int vm_ram_find_largest_free_region(vm_t *vm, uintptr_t *addr, size_t *size)
{
    vm_mem_t *guest_memory = &vm->mem;
    int largest = -1;
    int i;
    /* find the first free region */
    for (i = 0; i < guest_memory->num_ram_regions && largest == -1; i++) {
        if (!guest_memory->ram_regions[i].allocated) {
            largest = i;
        }
    }
    if (largest == -1) {
        ZF_LOGE("Failed to find free region");
        return -1;
    }
    /* then check whether any later free region is bigger */
    for (; i < guest_memory->num_ram_regions; i++) {
        if (!guest_memory->ram_regions[i].allocated &&
            guest_memory->ram_regions[i].size > guest_memory->ram_regions[largest].size) {
            largest = i;
        }
    }
    *addr = guest_memory->ram_regions[largest].start;
    *size = guest_memory->ram_regions[largest].size;
    return 0;
}

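/* Mark [start, start + bytes) as allocated by splitting its containing free
 * region into an optional free head, the allocated range and an optional free
 * tail. Does nothing if the range is not contained in a single free region. */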
void vm_ram_mark_allocated(vm_t *vm, uintptr_t start, size_t bytes)
{
    vm_mem_t *guest_memory = &vm->mem;
    /* Find the region containing the range */
    int i;
    int region = -1;
    for (i = 0; i < guest_memory->num_ram_regions; i++) {
        if (guest_memory->ram_regions[i].start <= start &&
            guest_memory->ram_regions[i].start + guest_memory->ram_regions[i].size >= start + bytes) {
            region = i;
            break;
        }
    }
    if (region == -1 || guest_memory->ram_regions[region].allocated) {
        return;
    }
    /* Remove the region */
    vm_ram_region_t r = guest_memory->ram_regions[region];
    guest_ram_remove_region(guest_memory, region);
    /* Split the region into up to three pieces and add them back; zero-sized
     * head or tail pieces are rejected by push_guest_ram_region */
    push_guest_ram_region(guest_memory, r.start, start - r.start, 0);
    push_guest_ram_region(guest_memory, start, bytes, 1);
    push_guest_ram_region(guest_memory, start + bytes, r.size - bytes - (start - r.start), 0);
    /* sort and collapse */
    sort_guest_ram_regions(guest_memory);
    collapse_guest_ram_regions(guest_memory);
}

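/* First-fit allocation of 'bytes' from the registered free RAM regions.
 * Returns the guest physical address of the allocation, or 0 on failure. */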
uintptr_t vm_ram_allocate(vm_t *vm, size_t bytes)
{
    vm_mem_t *guest_memory = &vm->mem;
    for (int i = 0; i < guest_memory->num_ram_regions; i++) {
        if (!guest_memory->ram_regions[i].allocated && guest_memory->ram_regions[i].size >= bytes) {
            uintptr_t addr = guest_memory->ram_regions[i].start;
            vm_ram_mark_allocated(vm, addr, bytes);
            return addr;
        }
    }
    ZF_LOGE("Failed to allocate %zu bytes of guest RAM", bytes);
    return 0;
}

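/* Memory map iterator that backs each guest page with a frame freshly allocated
 * (possibly from device untyped memory) by the VMM's vka allocator. */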
static vm_frame_t ram_alloc_iterator(uintptr_t addr, void *cookie)
{
    int ret;
    vka_object_t object;
    vm_frame_t frame_result = { seL4_CapNull, seL4_NoRights, 0, 0 };
    vm_t *vm = (vm_t *)cookie;
    if (!vm) {
        return frame_result;
    }
    int page_size = seL4_PageBits;
    uintptr_t frame_start = ROUND_DOWN(addr, BIT(page_size));
    ret = vka_alloc_frame_maybe_device(vm->vka, page_size, true, &object);
    if (ret) {
        ZF_LOGE("Failed to allocate frame for address 0x%"PRIxPTR, addr);
        return frame_result;
    }
    frame_result.cptr = object.cptr;
    frame_result.rights = seL4_AllRights;
    frame_result.vaddr = frame_start;
    frame_result.size_bits = page_size;
    return frame_result;
}

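/* Memory map iterator that backs each guest page with a frame retyped from
 * untyped memory at the matching physical address, giving a 1-1 mapping. */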
static vm_frame_t ram_ut_alloc_iterator(uintptr_t addr, void *cookie)
{
    int error;
    vm_frame_t frame_result = { seL4_CapNull, seL4_NoRights, 0, 0 };
    vm_t *vm = (vm_t *)cookie;
    if (!vm) {
        return frame_result;
    }
    int page_size = seL4_PageBits;
    uintptr_t frame_start = ROUND_DOWN(addr, BIT(page_size));
    cspacepath_t path;
    error = vka_cspace_alloc_path(vm->vka, &path);
    if (error) {
        ZF_LOGE("Failed to allocate path");
        return frame_result;
    }
    seL4_Word vka_cookie;
    error = vka_utspace_alloc_at(vm->vka, &path, kobject_get_type(KOBJECT_FRAME, page_size), page_size, frame_start,
                                 &vka_cookie);
    if (error) {
        ZF_LOGE("Failed to allocate page");
        vka_cspace_free_path(vm->vka, path);
        return frame_result;
    }
    frame_result.cptr = path.capPtr;
    frame_result.rights = seL4_AllRights;
    frame_result.vaddr = frame_start;
    frame_result.size_bits = page_size;
    return frame_result;
}

static int map_ram_reservation(vm_t *vm, vm_memory_reservation_t *ram_reservation, bool untyped)
{
    int err;
    /* Map the reservation immediately, bypassing the deferred mapping functionality.
     * This allows us to allocate, touch and manipulate VM RAM before the guest
     * first faults on the region */
    if (untyped) {
        err = map_vm_memory_reservation(vm, ram_reservation, ram_ut_alloc_iterator, (void *)vm);
    } else {
        err = map_vm_memory_reservation(vm, ram_reservation, ram_alloc_iterator, (void *)vm);
    }
    if (err) {
        ZF_LOGE("Failed to map new ram reservation");
        return -1;
    }
    return 0;
}

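/*
 * Register 'bytes' of anonymous guest RAM: reserve a guest physical range, map
 * it eagerly and record it in the RAM region list. Returns the guest physical
 * base address of the new RAM, or 0 on failure.
 *
 * Example (sketch, sizes are illustrative only):
 *
 *     uintptr_t ram_base = vm_ram_register(vm, BIT(27));   // 128MiB of guest RAM
 *     uintptr_t scratch = vm_ram_allocate(vm, BIT(12));    // one page carved out of it
 *     if (!ram_base || !scratch) {
 *         ZF_LOGE("Failed to set up guest RAM");
 *     }
 */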
uintptr_t vm_ram_register(vm_t *vm, size_t bytes)
{
    vm_memory_reservation_t *ram_reservation;
    int err;
    uintptr_t base_addr;

    ram_reservation = vm_reserve_anon_memory(vm, bytes, default_ram_fault_callback, NULL, &base_addr);
    if (!ram_reservation) {
        ZF_LOGE("Unable to reserve ram region of size 0x%zx", bytes);
        return 0;
    }
    err = map_ram_reservation(vm, ram_reservation, false);
    if (err) {
        vm_free_reserved_memory(vm, ram_reservation);
        return 0;
    }
    err = expand_guest_ram_region(vm, base_addr, bytes);
    if (err) {
        ZF_LOGE("Failed to register new ram region");
        vm_free_reserved_memory(vm, ram_reservation);
        return 0;
    }

    return base_addr;
}

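/* Register 'bytes' of guest RAM at a fixed guest physical address. When 'untyped'
 * is set, the range is backed 1-1 by untyped memory at the same physical address.
 * Returns 0 on success, -1 on failure. */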
int vm_ram_register_at(vm_t *vm, uintptr_t start, size_t bytes, bool untyped)
{
    vm_memory_reservation_t *ram_reservation;
    int err;

    ram_reservation = vm_reserve_memory_at(vm, start, bytes, default_ram_fault_callback,
                                           NULL);
    if (!ram_reservation) {
        ZF_LOGE("Unable to reserve ram region at addr 0x%"PRIxPTR" of size 0x%zx", start, bytes);
        return -1;
    }
    err = map_ram_reservation(vm, ram_reservation, untyped);
    if (err) {
        vm_free_reserved_memory(vm, ram_reservation);
        return -1;
    }
    err = expand_guest_ram_region(vm, start, bytes);
    if (err) {
        ZF_LOGE("Failed to register new ram region");
        vm_free_reserved_memory(vm, ram_reservation);
        return -1;
    }
    return 0;
}

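/* As vm_ram_register_at, but backing frames are supplied by a caller-provided
 * memory map iterator (for example, to back guest RAM with a dataport). */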
int vm_ram_register_at_custom_iterator(vm_t *vm, uintptr_t start, size_t bytes, memory_map_iterator_fn map_iterator,
                                       void *cookie)
{
    vm_memory_reservation_t *ram_reservation;
    int err;

    ram_reservation = vm_reserve_memory_at(vm, start, bytes, default_ram_fault_callback,
                                           NULL);
    if (!ram_reservation) {
        ZF_LOGE("Unable to reserve ram region at addr 0x%"PRIxPTR" of size 0x%zx", start, bytes);
        return -1;
    }
    err = map_vm_memory_reservation(vm, ram_reservation, map_iterator, cookie);
    if (err) {
        ZF_LOGE("Failed to map vm memory reservation to dataport");
        vm_free_reserved_memory(vm, ram_reservation);
        return -1;
    }
    err = expand_guest_ram_region(vm, start, bytes);
    if (err) {
        ZF_LOGE("Failed to register new ram region");
        vm_free_reserved_memory(vm, ram_reservation);
        return -1;
    }
    return 0;
}

void vm_ram_free(vm_t *vm, uintptr_t start, size_t bytes)
{
    /* Freeing registered guest RAM is not currently implemented; this is a no-op */
}