/*
 * Copyright 2017, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
/* Author: alex.kroh@nicta.com.au */

#include <dma/dma.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#include <autoconf.h>

//#define DMA_DEBUG
#ifdef DMA_DEBUG
#define dprintf(...) do{     \
        printf("DMA| ");     \
        printf(__VA_ARGS__); \
    }while(0)
#else
#define dprintf(...) do{}while(0)
#endif

#if defined(CONFIG_PLAT_IMX6) || defined(IMX6)
#define DMA_MINALIGN_BYTES 32
#elif defined(CONFIG_PLAT_EXYNOS4)
#define DMA_MINALIGN_BYTES 32
#elif defined(CONFIG_PLAT_EXYNOS5)
#define DMA_MINALIGN_BYTES 32
#else
#warning Unknown platform. DMA alignment defaulting to 32 bytes.
#define DMA_MINALIGN_BYTES 32
#endif

#define _malloc malloc
#define _free   free

/* dma_mem_t flag bit that signals that the memory is in use */
#define DMFLAG_ALLOCATED 1

/* Linked list of descriptors */
struct dma_memd_node {
    /* Description of this memory chunk */
    struct dma_mem_descriptor desc;
    /* Number of frames in this region */
    int nframes;
    /* Caps to the underlying frames */
    void **alloc_cookies;
    /* Head of linked list of regions */
    dma_mem_t dma_mem_head;
    /* Chain */
    struct dma_memd_node *next;
};

/* Linked list of memory regions */
struct dma_mem {
    /* Offset within the described memory */
    uintptr_t offset;
    /* Flags */
    int flags;
    /* Parent node */
    struct dma_memd_node *node;
    /* Chain */
    dma_mem_t next;
};

struct dma_allocator {
    dma_morecore_fn morecore;
    struct dma_memd_node *head;
};


/*** Helpers ***/

static inline int _is_free(dma_mem_t m)
{
    return !(m->flags & DMFLAG_ALLOCATED);
}

static inline size_t _node_size(struct dma_memd_node *n)
{
    return (size_t)n->nframes << n->desc.size_bits;
}

static inline size_t _mem_size(dma_mem_t m)
{
    struct dma_memd_node *n = m->node;
    size_t s;
    if (m->next == NULL) {
        /* Last region in the node: it extends to the end of the node */
        s = _node_size(n);
    } else {
        s = m->next->offset;
    }
    return s - m->offset;
}

/* @pre the offset must be contained within the provided node */
static inline dma_mem_t _find_mem(struct dma_memd_node *n, uintptr_t offset)
{
    dma_mem_t m;
    assert(n);
    assert(offset < _node_size(n));
    m = n->dma_mem_head;
    while (m->next != NULL && offset >= m->next->offset) {
        m = m->next;
    }
    return m;
}

/* Merge any free regions that immediately follow this one */
static void _mem_compact(dma_mem_t m)
{
    while (m->next != NULL && _is_free(m->next)) {
        dma_mem_t compact = m->next;
        dprintf("Compacting:\n");
        m->next = compact->next;
        _free(compact);
    }
}

/*** Debug ***/

static void print_dma_mem(dma_mem_t m, const char *prefix)
{
    dprintf("%s{p0x%08lx, v0x%08lx, s0x%zx %s}\n", prefix,
            (unsigned long)dma_paddr(m), (unsigned long)(uintptr_t)dma_vaddr(m),
            _mem_size(m), _is_free(m) ? "FREE" : "USED");
}

static void print_dma_node(struct dma_memd_node *n)
{
    dma_mem_t m;
    dprintf("NODE:\n");
    for (m = n->dma_mem_head; m != NULL; m = m->next) {
        print_dma_mem(m, ">");
    }
}

static void print_dma_allocator(struct dma_allocator *a)
{
    struct dma_memd_node *n;
    dprintf("ALLOC:\n");
    for (n = a->head; n != NULL; n = n->next) {
        print_dma_node(n);
    }
}

/*** Interface ***/


static struct dma_memd_node *do_dma_provide_mem(struct dma_allocator *allocator,
                                                struct dma_mem_descriptor *dma_desc)
{
    struct dma_memd_node *n;
    dma_mem_t m;

    /* The memory size must be sane */
    assert(dma_desc->size_bits > 0 && dma_desc->size_bits < 32);
    /* Allocate some objects */
    m = (dma_mem_t)_malloc(sizeof(*m));
    if (m == NULL) {
        return NULL;
    }
    /* We do not attempt to tack this region onto an existing node */
    n = (struct dma_memd_node *)_malloc(sizeof(*n));
    if (n == NULL) {
        _free(m);
        return NULL;
    }
    /* Initialise free memory */
    m->offset = 0;
    m->flags = 0;
    m->next = NULL;
    m->node = n;
    /* Initialise the pool node */
    n->desc = *dma_desc;
    n->dma_mem_head = m;
    n->next = allocator->head;
    n->nframes = 1;
    n->alloc_cookies = _malloc(sizeof(*n->alloc_cookies) * n->nframes);
    if (n->alloc_cookies == NULL) {
        _free(n);
        _free(m);
        return NULL;
    }
    n->alloc_cookies[0] = dma_desc->alloc_cookie;
    /* Add the node to the allocator */
    allocator->head = n;

    dprintf("DMA memory provided\n");
    print_dma_allocator(allocator);
    return n;
}


struct dma_allocator *
dma_allocator_init(dma_morecore_fn morecore)
{
    struct dma_allocator *alloc;
    alloc = (struct dma_allocator *)_malloc(sizeof(*alloc));
    if (alloc == NULL) {
        return NULL;
    }
    alloc->morecore = morecore;
    alloc->head = NULL;
    return alloc;
}


int dma_provide_mem(struct dma_allocator *allocator,
                    struct dma_mem_descriptor dma_desc)
{
    struct dma_memd_node *n;
    n = do_dma_provide_mem(allocator, &dma_desc);
    /* Returns 0 on success, non-zero on failure */
    return n == NULL;
}

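/*
 * Usage sketch (illustrative only, not part of this file's API): a typical
 * caller creates an allocator and seeds it with a pre-mapped, physically
 * contiguous region. The descriptor fields shown (vaddr, paddr, size_bits,
 * alloc_cookie) are the ones used above; the "region_*" values are
 * hypothetical caller-supplied data.
 *
 *   struct dma_allocator *alloc = dma_allocator_init(NULL);
 *   assert(alloc != NULL);
 *
 *   struct dma_mem_descriptor desc = {0};
 *   desc.vaddr = region_vaddr;          // virtual address of the mapping
 *   desc.paddr = region_paddr;          // physical address of the region
 *   desc.size_bits = 12;                // one 4 KiB frame
 *   desc.alloc_cookie = region_cookie;  // handed back on dma_reclaim_mem()
 *
 *   if (dma_provide_mem(alloc, desc)) {
 *       // out of memory for book-keeping structures
 *   }
 */
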
static dma_mem_t dma_memd_alloc(struct dma_memd_node *n, size_t size, int align)
{
    dma_mem_t m;
    dprintf("Allocating 0x%zx aligned to 0x%x\n", size, align);
    for (m = n->dma_mem_head; m != NULL; m = m->next) {
        if (_is_free(m)) {
            size_t mem_size;
            int mem_align;
            paddr_t paddr;

            _mem_compact(m);

            /* Find the padding required to meet the requested alignment */
            paddr = dma_paddr(m);
            mem_align = align - (paddr % align);
            if (mem_align == align) {
                mem_align = 0;
            }
            mem_size = _mem_size(m) - mem_align;
            /* Check for overflow in the subtraction and check our size */
            if (_mem_size(m) > mem_align && mem_size >= size) {
                m->offset += mem_align;
                /* Split off the free memory if possible */
                if (mem_size > size) {
                    dma_mem_t split;
                    split = (dma_mem_t)_malloc(sizeof(*split));
                    if (split != NULL) {
                        split->offset = m->offset + size;
                        split->flags = m->flags;
                        split->next = m->next;
                        split->node = m->node;

                        m->next = split;
                    } else {
                        /* Just use the oversized region... */
                    }
                }
                m->flags |= DMFLAG_ALLOCATED;
                return m;
            }
        }
    }
    return NULL;
}

vaddr_t dma_alloc(struct dma_allocator *allocator, size_t size, int align,
                  enum dma_flags flags, dma_mem_t *ret_mem)
{
    int cached;
    struct dma_memd_node *n;
    assert(allocator);

    if (align < DMA_MINALIGN_BYTES) {
        align = DMA_MINALIGN_BYTES;
    }
    /* TODO access patterns and cachability */
    cached = 0;
    (void)flags;

    /* Cycle through nodes looking for free memory */
    for (n = allocator->head; n != NULL; n = n->next) {
        dma_mem_t m;
        /* TODO some tricky stuff first to see if an allocation from
         * this node is appropriate (access patterns/cachability) */
        m = dma_memd_alloc(n, size, align);
        if (m != NULL) {
            dprintf("DMA mem allocated\n");
            print_dma_mem(m, "-");
            print_dma_allocator(allocator);
            if (ret_mem) {
                *ret_mem = m;
            }
            return dma_vaddr(m);
        }
    }

    /* Out of memory! Allocate more if we have the ability */
    if (allocator->morecore) {
        struct dma_mem_descriptor dma_desc;
        struct dma_memd_node *node;
        dma_mem_t m;
        int err;
        dprintf("Morecore called for %zu bytes\n", size);
        /* Grab more core */
        err = allocator->morecore(size, cached, &dma_desc);
        if (err) {
            return NULL;
        }
        /* Add the memory to the allocator */
        node = do_dma_provide_mem(allocator, &dma_desc);
        if (node == NULL) {
            return NULL;
        }
        /* Perform the allocation */
        m = dma_memd_alloc(node, size, align);
        assert(m);
        if (m == NULL) {
            return NULL;
        }
        /* Success */
        dprintf("DMA mem allocated\n");
        print_dma_mem(m, "-");
        print_dma_allocator(allocator);
        if (ret_mem) {
            *ret_mem = m;
        }
        return dma_vaddr(m);
    }
    dprintf("Failed to allocate DMA memory\n");
    return NULL;
}

int dma_reclaim_mem(struct dma_allocator *allocator,
                    struct dma_mem_descriptor *dma_desc)
{
    struct dma_memd_node *n;
    struct dma_memd_node **nptr = &allocator->head;
    for (n = allocator->head; n != NULL; n = n->next) {
        dma_mem_t m = n->dma_mem_head;
        /* Only compact when the head region is free, so that free memory is
         * never merged into an allocated region */
        if (_is_free(m)) {
            _mem_compact(m);
        }
        if (_is_free(m) && !m->next) {
            *dma_desc = n->desc;
            /* There is currently no support for coalesced nodes */
            assert(n->nframes == 1);
            dma_desc->alloc_cookie = n->alloc_cookies[0];
            /* Remove the node and free the book-keeping memory */
            *nptr = n->next;
            _free(m);
            _free(n->alloc_cookies);
            _free(n);
            return 0;
        }
        nptr = &n->next;
    }
    return -1;
}

void dma_free(dma_mem_t m)
{
    if (m) {
        m->flags &= ~DMFLAG_ALLOCATED;
        _mem_compact(m);
    }
}

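/*
 * Usage sketch (illustrative only): allocating a buffer for a device and
 * releasing it again. The size, alignment, and choice of DMAF_COHERENT are
 * placeholders; "alloc" is an allocator created as in the sketch above.
 *
 *   dma_mem_t mem;
 *   vaddr_t buf = dma_alloc(alloc, 0x1000, 64, DMAF_COHERENT, &mem);
 *   if (buf == NULL) {
 *       // no free DMA memory and no morecore callback to fall back on
 *   } else {
 *       paddr_t buf_paddr = dma_paddr(mem);  // address to program into the device
 *       // ... use the buffer ...
 *       dma_free(mem);
 *   }
 */
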
/*** Address translation ***/

vaddr_t dma_vaddr(dma_mem_t m)
{
    if (m) {
        assert(m->node);
        return (vaddr_t)((uintptr_t)m->node->desc.vaddr + m->offset);
    } else {
        return (vaddr_t)0;
    }
}

paddr_t dma_paddr(dma_mem_t m)
{
    if (m) {
        assert(m->node);
        return m->node->desc.paddr + m->offset;
    } else {
        return (paddr_t)0;
    }
}

dma_mem_t dma_plookup(struct dma_allocator *dma_allocator, paddr_t paddr)
{
    struct dma_memd_node *n;
    uintptr_t offs;
    /* Search the list for the associated node */
    for (n = dma_allocator->head; n != NULL; n = n->next) {
        if (n->desc.paddr <= paddr && paddr < n->desc.paddr + _node_size(n)) {
            break;
        }
    }
    /* Search the mem list for the associated dma_mem_t */
    if (n == NULL) {
        return NULL;
    } else {
        offs = paddr - n->desc.paddr;
        return _find_mem(n, offs);
    }
}

dma_mem_t dma_vlookup(struct dma_allocator *dma_allocator, vaddr_t vaddr)
{
    struct dma_memd_node *n;
    uintptr_t offs;
    /* Search the list for the associated node */
    for (n = dma_allocator->head; n != NULL; n = n->next) {
        uintptr_t vn = (uintptr_t)n->desc.vaddr;
        uintptr_t vm = (uintptr_t)vaddr;
        if (vn <= vm && vm < vn + _node_size(n)) {
            break;
        }
    }
    /* Search the mem list for the associated dma_mem_t */
    if (n == NULL) {
        return NULL;
    } else {
        offs = (uintptr_t)vaddr - (uintptr_t)n->desc.vaddr;
        return _find_mem(n, offs);
    }
}



/*** Cache ops ***/

/* Cache maintenance is not implemented: these are currently no-op stubs */

void dma_clean(dma_mem_t m, vaddr_t vstart, vaddr_t vend)
{
    (void)m;
    (void)vstart;
    (void)vend;
}


void dma_invalidate(dma_mem_t m, vaddr_t vstart, vaddr_t vend)
{
    (void)m;
    (void)vstart;
    (void)vend;
}

void dma_cleaninvalidate(dma_mem_t m, vaddr_t vstart, vaddr_t vend)
{
    (void)m;
    (void)vstart;
    (void)vend;
}

/******** libplatsupport adapter ********/

static void *dma_dma_alloc(void *cookie, size_t size, int align, int cached, ps_mem_flags_t flags)
{
    struct dma_allocator *dalloc;
    vaddr_t vaddr;
    enum dma_flags dma_flags;
    assert(cookie);
    dalloc = (struct dma_allocator *)cookie;

    if (cached) {
        dma_flags = DMAF_COHERENT;
    } else {
        switch (flags) {
        case PS_MEM_NORMAL:
            dma_flags = DMAF_HRW;
            break;
        case PS_MEM_HR:
            dma_flags = DMAF_HR;
            break;
        case PS_MEM_HW:
            dma_flags = DMAF_HW;
            break;
        default:
            dma_flags = DMAF_COHERENT;
        }
    }

    vaddr = dma_alloc(dalloc, size, align, dma_flags, NULL);
    return vaddr;
}

static void dma_dma_free(void *cookie, void *addr, size_t size UNUSED)
{
    struct dma_allocator *dalloc;
    assert(cookie);
    dalloc = (struct dma_allocator *)cookie;
    dma_free(dma_vlookup(dalloc, addr));
}


static uintptr_t dma_dma_pin(void *cookie, void *addr, size_t size UNUSED)
{
    struct dma_allocator *dalloc;
    assert(cookie);
    /* DMA memory is pinned when allocated */
    dalloc = (struct dma_allocator *)cookie;
    return dma_paddr(dma_vlookup(dalloc, addr));
}

static void dma_dma_unpin(void *cookie UNUSED, void *addr UNUSED, size_t size UNUSED)
{
    /* DMA memory is unpinned when freed */
}


int dma_dmaman_init(dma_morecore_fn morecore, ps_dma_cache_op_fn_t cache_ops,
                    ps_dma_man_t *dma_man)
{
    struct dma_allocator *dalloc;
    assert(dma_man);

    dalloc = dma_allocator_init(morecore);
    if (dalloc != NULL) {
        dma_man->cookie = dalloc;
        dma_man->dma_cache_op_fn = cache_ops;
        dma_man->dma_alloc_fn = &dma_dma_alloc;
        dma_man->dma_free_fn = &dma_dma_free;
        dma_man->dma_pin_fn = &dma_dma_pin;
        dma_man->dma_unpin_fn = &dma_dma_unpin;
        return 0;
    } else {
        return -1;
    }
}
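
/*
 * Usage sketch (illustrative only): handing the adapter to code that expects
 * a libplatsupport DMA manager. "my_morecore" and "my_cache_op" are
 * hypothetical caller-supplied functions matching dma_morecore_fn and
 * ps_dma_cache_op_fn_t respectively.
 *
 *   ps_dma_man_t dma_man;
 *   if (dma_dmaman_init(my_morecore, my_cache_op, &dma_man)) {
 *       // failed to create the underlying allocator
 *   }
 *   // dma_man can now be passed to drivers that take a ps_dma_man_t; their
 *   // allocate/free/pin/unpin requests call back into the adapter functions
 *   // defined above.
 */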