1/*
2 * Copyright 2017, Data61
3 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
4 * ABN 41 687 119 230.
5 *
6 * This software may be distributed and modified according to the terms of
7 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
8 * See "LICENSE_BSD2.txt" for details.
9 *
10 * @TAG(DATA61_BSD)
11 */
12
13#include <autoconf.h>
14#include <sel4utils/gen_config.h>
15
16#if defined(CONFIG_IOMMU)
17
18#include <sel4utils/iommu_dma.h>
19#include <sel4utils/vspace.h>
20#include <sel4utils/vspace_internal.h>
21#include <stdlib.h>
22#include <vka/capops.h>
23#include <string.h>
24#include <utils/zf_log.h>
25
/* Per-manager state installed as the ps_dma_man_t cookie. */
typedef struct dma_man {
    /* Allocator used for cap slots and frame cap copies. */
    vka_t vka;
    /* Vspace that backs the malloc region DMA buffers come from. */
    vspace_t vspace;
    /* Number of entries in the iospaces/iospace_data arrays below. */
    int num_iospaces;
    /* One vspace per IOMMU iospace that DMA buffers are mapped into. */
    vspace_t *iospaces;
    /* Bookkeeping for each iospace vspace (parallel to iospaces). */
    sel4utils_alloc_data_t *iospace_data;
} dma_man_t;
33
34static void unmap_range(dma_man_t *dma, uintptr_t addr, size_t size)
35{
36    uintptr_t start = ROUND_DOWN(addr, PAGE_SIZE_4K);
37    uintptr_t end = addr + size;
38    for (uintptr_t addr = start; addr < end; addr += PAGE_SIZE_4K) {
39        for (int i = 0; i < dma->num_iospaces; i++) {
40            uintptr_t *cookie = (uintptr_t *)vspace_get_cookie(dma->iospaces + i, (void *)addr);
41            assert(cookie);
42            (*cookie)--;
43            if (*cookie == 0) {
44                seL4_CPtr page = vspace_get_cap(dma->iospaces + i, (void *)addr);
45                cspacepath_t page_path;
46                assert(page);
47                vspace_unmap_pages(dma->iospaces + i, (void *)addr, 1, seL4_PageBits, NULL);
48                vka_cspace_make_path(&dma->vka, page, &page_path);
49                vka_cnode_delete(&page_path);
50                vka_cspace_free(&dma->vka, page);
51                free(cookie);
52            }
53        }
54    }
55}
56
57int sel4utils_iommu_dma_alloc_iospace(void *cookie, void *vaddr, size_t size)
58{
59    dma_man_t *dma = (dma_man_t *)cookie;
60    int error;
61
62    /* for each page duplicate and map it into all the iospaces */
63    uintptr_t start = ROUND_DOWN((uintptr_t)vaddr, PAGE_SIZE_4K);
64    uintptr_t end = (uintptr_t)vaddr + size;
65    seL4_CPtr last_page = 0;
66    for (uintptr_t addr = start; addr < end; addr += PAGE_SIZE_4K) {
67        cspacepath_t page_path;
68        seL4_CPtr page = vspace_get_cap(&dma->vspace, (void *)addr);
69        if (!page) {
70            ZF_LOGE("Failed to retrieve frame cap for malloc region. "
71                    "Is your malloc backed by the correct vspace? "
72                    "If you allocated your own buffer, does the dma manager's vspace "
73                    "know about the caps to the frames that back the buffer?");
74            unmap_range(dma, start, addr + 1);
75            return -1;
76        }
77        if (page == last_page) {
78            ZF_LOGE("Found the same frame two pages in a row. We only support 4K mappings");
79            unmap_range(dma, start, addr + 1);
80            return -1;
81        }
82        last_page = page;
83        vka_cspace_make_path(&dma->vka, page, &page_path);
84        /* work out the size of this page */
85        for (int i = 0; i < dma->num_iospaces; i++) {
86            /* see if its already mapped */
87            uintptr_t *cookie = (uintptr_t *)vspace_get_cookie(dma->iospaces + i, (void *)addr);
88            if (cookie) {
89                /* increment the counter */
90                (*cookie)++;
91            } else {
92                cspacepath_t copy_path;
93                /* allocate slot for the cap */
94                error = vka_cspace_alloc_path(&dma->vka, &copy_path);
95                if (error) {
96                    ZF_LOGE("Failed to allocate slot");
97                    unmap_range(dma, start, addr + 1);
98                    return -1;
99                }
100                /* copy the cap */
101                error = vka_cnode_copy(&copy_path, &page_path, seL4_AllRights);
102                if (error) {
103                    ZF_LOGE("Failed to copy frame cap");
104                    vka_cspace_free(&dma->vka, copy_path.capPtr);
105                    unmap_range(dma, start, addr + 1);
106                    return -1;
107                }
108                /* now map it in */
109                reservation_t res = vspace_reserve_range_at(dma->iospaces + i, (void *)addr, PAGE_SIZE_4K, seL4_AllRights, 1);
110                if (!res.res) {
111                    ZF_LOGE("Failed to create a reservation");
112                    vka_cnode_delete(&copy_path);
113                    vka_cspace_free(&dma->vka, copy_path.capPtr);
114                    unmap_range(dma, start, addr + 1);
115                    return -1;
116                }
117                cookie = malloc(sizeof(*cookie));
118                if (!cookie) {
119                    ZF_LOGE("Failed to malloc %zu bytes", sizeof(*cookie));
120                    vspace_free_reservation(dma->iospaces + i, res);
121                    vka_cnode_delete(&copy_path);
122                    vka_cspace_free(&dma->vka, copy_path.capPtr);
123                    unmap_range(dma, start, addr + 1);
124                    return -1;
125                }
126                *cookie = 1;
127                error = vspace_map_pages_at_vaddr(dma->iospaces + i, &copy_path.capPtr, (uintptr_t *)&cookie, (void *)addr, 1,
128                                                  seL4_PageBits, res);
129                if (error) {
130                    ZF_LOGE("Failed to map frame into iospace");
131                    free(cookie);
132                    vspace_free_reservation(dma->iospaces + i, res);
133                    vka_cnode_delete(&copy_path);
134                    vka_cspace_free(&dma->vka, copy_path.capPtr);
135                    unmap_range(dma, start, addr + 1);
136                    return -1;
137                }
138                vspace_free_reservation(dma->iospaces + i, res);
139            }
140        }
141    }
142
143    return 0;
144}
145
146static void *dma_alloc(void *cookie, size_t size, int align, int cached, ps_mem_flags_t flags)
147{
148    int error;
149    if (cached || flags != PS_MEM_NORMAL) {
150        /* Going to ignore flags */
151        void *ret;
152        error = posix_memalign(&ret, align, size);
153        if (error) {
154            return NULL;
155        }
156        error = sel4utils_iommu_dma_alloc_iospace(cookie, ret, size);
157        if (error) {
158            free(ret);
159            return NULL;
160        }
161        return ret;
162    } else {
163        /* do not support uncached memory */
164        ZF_LOGE("Only support cached normal memory");
165        return NULL;
166    }
167}
168
169static void dma_free(void *cookie, void *addr, size_t size)
170{
171    dma_man_t *dma = cookie;
172    unmap_range(dma, (uintptr_t)addr, size);
173    free(addr);
174}
175
/* ps_dma_man_t pin callback.  Buffers are mapped into the iospaces at the
 * same virtual address they occupy in the manager's vspace, so the DMA
 * address is simply the virtual address; no extra work is required. */
static uintptr_t dma_pin(void *cookie, void *addr, size_t size)
{
    uintptr_t dma_addr = (uintptr_t)addr;
    return dma_addr;
}
180
/* ps_dma_man_t unpin callback: intentionally a no-op, since dma_pin
 * acquires no resources. */
static void dma_unpin(void *cookie, void *addr, size_t size)
{
    (void)cookie;
    (void)addr;
    (void)size;
}
184
/*
 * ps_dma_man_t cache maintenance callback.  Deliberately a no-op on non-ARM
 * builds -- presumably those IOMMU platforms are cache coherent with respect
 * to DMA, but this is not established here; confirm per platform.
 */
static void dma_cache_op(void *cookie, void *addr, size_t size, dma_cache_op_t op)
{
    /* I have no way of knowing what this function should do on an architecture
     * that is both non cache coherent with respect to DMA, and has an IOMMU.
     * When there is a working implementation of an arm IOMMU this function
     * could be implemented */
#ifdef CONFIG_ARCH_ARM
    /* fail loudly rather than silently skipping required maintenance */
    assert(!"not implemented");
#endif
}
195
196int sel4utils_make_iommu_dma_alloc(vka_t *vka, vspace_t *vspace, ps_dma_man_t *dma_man, unsigned int num_iospaces,
197                                   seL4_CPtr *iospaces)
198{
199    dma_man_t *dma = calloc(1, sizeof(*dma));
200    if (!dma) {
201        return -1;
202    }
203    dma->num_iospaces = num_iospaces;
204    dma->vka = *vka;
205    dma->vspace = *vspace;
206    dma_man->cookie = dma;
207    dma_man->dma_alloc_fn = dma_alloc;
208    dma_man->dma_free_fn = dma_free;
209    dma_man->dma_pin_fn = dma_pin;
210    dma_man->dma_unpin_fn = dma_unpin;
211    dma_man->dma_cache_op_fn = dma_cache_op;
212
213    dma->iospaces = malloc(sizeof(vspace_t) * num_iospaces);
214    if (!dma->iospaces) {
215        goto error;
216    }
217    dma->iospace_data = malloc(sizeof(sel4utils_alloc_data_t) * num_iospaces);
218    if (!dma->iospace_data) {
219        goto error;
220    }
221    for (unsigned int i = 0; i < num_iospaces; i++) {
222        int err = sel4utils_get_vspace_with_map(&dma->vspace, dma->iospaces + i, dma->iospace_data + i, &dma->vka, iospaces[i],
223                                                NULL, NULL, sel4utils_map_page_iommu);
224        if (err) {
225            for (unsigned int j = 0; j < i; j++) {
226                vspace_tear_down(dma->iospaces + i, &dma->vka);
227            }
228            goto error;
229        }
230    }
231    return 0;
232error:
233    if (dma->iospace_data) {
234        free(dma->iospace_data);
235    }
236    if (dma->iospaces) {
237        free(dma->iospaces);
238    }
239    if (dma) {
240        free(dma);
241    }
242    return -1;
243}
244
245#endif /* CONFIG_IOMMU */
246