1/*
2 * Copyright 2017, Data61
3 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
4 * ABN 41 687 119 230.
5 *
6 * This software may be distributed and modified according to the terms of
7 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
8 * See "LICENSE_BSD2.txt" for details.
9 *
10 * @TAG(DATA61_BSD)
11 */
12
13#include <autoconf.h>
14#include <sel4utils/gen_config.h>
15
16#include <sel4utils/page_dma.h>
17#include <vspace/vspace.h>
18#include <stdlib.h>
19#include <vka/capops.h>
20#include <vka/kobject_t.h>
21#include <utils/util.h>
22#include <string.h>
23#include <sel4utils/arch/cache.h>
24
/* Per-instance state for the page DMA manager. Installed as the opaque
 * cookie on the ps_dma_man_t interface and passed back to every callback. */
typedef struct dma_man {
    vka_t vka;        /* copy of the allocator used for untypeds, frames and cslots */
    vspace_t vspace;  /* copy of the vspace DMA memory is mapped into */
} dma_man_t;
29
/* Book-keeping record for a single DMA allocation. A pointer to this struct
 * is stored as the vspace cookie for every page of the allocation, which is
 * how dma_free/dma_pin recover it from just a virtual address. */
typedef struct dma_alloc {
    void *base;       /* virtual base address that was handed out to the caller */
    vka_object_t ut;  /* the untyped the backing frames were retyped from */
    uintptr_t paddr;  /* physical address of the start of that untyped */
} dma_alloc_t;
35
36static void dma_free(void *cookie, void *addr, size_t size)
37{
38    dma_man_t *dma = cookie;
39    dma_alloc_t *alloc = (dma_alloc_t *)vspace_get_cookie(&dma->vspace, addr);
40    assert(alloc);
41    assert(alloc->base == addr);
42    int num_pages = BIT(alloc->ut.size_bits) / PAGE_SIZE_4K;
43    for (int i = 0; i < num_pages; i++) {
44        cspacepath_t path;
45        seL4_CPtr frame = vspace_get_cap(&dma->vspace, addr + i * PAGE_SIZE_4K);
46        vspace_unmap_pages(&dma->vspace, addr + i * PAGE_SIZE_4K, 1, PAGE_BITS_4K, NULL);
47        vka_cspace_make_path(&dma->vka, frame, &path);
48        vka_cnode_delete(&path);
49        vka_cspace_free(&dma->vka, frame);
50    }
51    vka_free_object(&dma->vka, &alloc->ut);
52    free(alloc);
53}
54
55static uintptr_t dma_pin(void *cookie, void *addr, size_t size)
56{
57    dma_man_t *dma = cookie;
58    dma_alloc_t *alloc = (dma_alloc_t *)vspace_get_cookie(&dma->vspace, addr);
59    if (!alloc) {
60        return 0;
61    }
62    uint32_t diff = addr - alloc->base;
63    return alloc->paddr + diff;
64}
65
66static void *dma_alloc(void *cookie, size_t size, int align, int cached, ps_mem_flags_t flags)
67{
68    dma_man_t *dma = cookie;
69    cspacepath_t *frames = NULL;
70    reservation_t res = {NULL};
71    dma_alloc_t *alloc = NULL;
72    unsigned int num_frames = 0;
73    void *base = NULL;
74    /* We align to the 4K boundary, but do not support more */
75    if (align > PAGE_SIZE_4K) {
76        return NULL;
77    }
78    /* Round up to the next page size */
79    size = ROUND_UP(size, PAGE_SIZE_4K);
80    /* Then round up to the next power of 2 size. This is because untypeds are allocated
81     * in powers of 2 */
82    size_t size_bits = LOG_BASE_2(size);
83    if (BIT(size_bits) != size) {
84        size_bits++;
85    }
86    size = BIT(size_bits);
87    /* Allocate an untyped */
88    vka_object_t ut;
89    int error = vka_alloc_untyped(&dma->vka, size_bits, &ut);
90    if (error) {
91        ZF_LOGE("Failed to allocate untyped of size %zu", size_bits);
92        return NULL;
93    }
94    /* Get the physical address */
95    uintptr_t paddr = vka_utspace_paddr(&dma->vka, ut.ut, seL4_UntypedObject, size_bits);
96    if (paddr == VKA_NO_PADDR) {
97        ZF_LOGE("Allocated untyped has no physical address");
98        goto handle_error;
99    }
100    /* Allocate all the frames */
101    num_frames = size / PAGE_SIZE_4K;
102    frames = calloc(num_frames, sizeof(cspacepath_t));
103    if (!frames) {
104        goto handle_error;
105    }
106    for (unsigned i = 0; i < num_frames; i++) {
107        error = vka_cspace_alloc_path(&dma->vka, &frames[i]);
108        if (error) {
109            goto handle_error;
110        }
111        error = seL4_Untyped_Retype(ut.cptr, kobject_get_type(KOBJECT_FRAME, PAGE_BITS_4K), size_bits, frames[i].root,
112                                    frames[i].dest, frames[i].destDepth, frames[i].offset, 1);
113        if (error != seL4_NoError) {
114            goto handle_error;
115        }
116    }
117    /* Grab a reservation */
118    res = vspace_reserve_range(&dma->vspace, size, seL4_AllRights, cached, &base);
119    if (!res.res) {
120        ZF_LOGE("Failed to reserve");
121        return NULL;
122    }
123    alloc = malloc(sizeof(*alloc));
124    if (alloc == NULL) {
125        goto handle_error;
126    }
127    alloc->base = base;
128    alloc->ut = ut;
129    alloc->paddr = paddr;
130    /* Map in all the pages */
131    for (unsigned i = 0; i < num_frames; i++) {
132        error = vspace_map_pages_at_vaddr(&dma->vspace, &frames[i].capPtr, (uintptr_t *)&alloc, base + i * PAGE_SIZE_4K, 1,
133                                          PAGE_BITS_4K, res);
134        if (error) {
135            goto handle_error;
136        }
137    }
138    /* no longer need the reservation */
139    vspace_free_reservation(&dma->vspace, res);
140    return base;
141handle_error:
142    if (alloc) {
143        free(alloc);
144    }
145    if (res.res) {
146        vspace_unmap_pages(&dma->vspace, base, num_frames, PAGE_BITS_4K, NULL);
147        vspace_free_reservation(&dma->vspace, res);
148    }
149    if (frames) {
150        for (int i = 0; i < num_frames; i++) {
151            if (frames[i].capPtr) {
152                vka_cnode_delete(&frames[i]);
153                vka_cspace_free(&dma->vka, frames[i].capPtr);
154            }
155        }
156        free(frames);
157    }
158    vka_free_object(&dma->vka, &ut);
159    return NULL;
160}
161
/* Deliberate no-op: allocations stay pinned (physically resident and
 * contiguous) for their entire lifetime, so there is nothing to undo. */
static void dma_unpin(void *cookie, void *addr, size_t size)
{
}
165
166static void dma_cache_op(void *cookie, void *addr, size_t size, dma_cache_op_t op)
167{
168    dma_man_t *dma = cookie;
169    seL4_CPtr root = vspace_get_root(&dma->vspace);
170    uintptr_t end = (uintptr_t)addr + size;
171    uintptr_t cur = (uintptr_t)addr;
172    while (cur < end) {
173        uintptr_t top = ROUND_UP(cur + 1, PAGE_SIZE_4K);
174        if (top > end) {
175            top = end;
176        }
177        switch (op) {
178        case DMA_CACHE_OP_CLEAN:
179            seL4_ARCH_PageDirectory_Clean_Data(root, (seL4_Word)cur, (seL4_Word)top);
180            break;
181        case DMA_CACHE_OP_INVALIDATE:
182            seL4_ARCH_PageDirectory_Invalidate_Data(root, (seL4_Word)cur, (seL4_Word)top);
183            break;
184        case DMA_CACHE_OP_CLEAN_INVALIDATE:
185            seL4_ARCH_PageDirectory_CleanInvalidate_Data(root, (seL4_Word)cur, (seL4_Word)top);
186            break;
187        }
188        cur = top;
189    }
190}
191
192int sel4utils_new_page_dma_alloc(vka_t *vka, vspace_t *vspace, ps_dma_man_t *dma_man)
193{
194    dma_man_t *dma = calloc(1, sizeof(*dma));
195    if (!dma) {
196        return -1;
197    }
198    dma->vka = *vka;
199    dma->vspace = *vspace;
200    dma_man->cookie = dma;
201    dma_man->dma_alloc_fn = dma_alloc;
202    dma_man->dma_free_fn = dma_free;
203    dma_man->dma_pin_fn = dma_pin;
204    dma_man->dma_unpin_fn = dma_unpin;
205    dma_man->dma_cache_op_fn = dma_cache_op;
206    return 0;
207}
208