/*
 * Copyright 2017, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
 * See "LICENSE_BSD2.txt" for details.
 *
 * @TAG(DATA61_BSD)
 */

#pragma once

#include <stddef.h>
#include <stdint.h>
#include <assert.h>
#include <utils/util.h>
#include <sys/types.h>
#include <errno.h>
/* For clock.h and mux.h */
typedef struct ps_io_ops ps_io_ops_t;

#ifdef CONFIG_ARCH_ARM
#include <platsupport/clock.h>
#include <platsupport/mux.h>
#endif
#include <platsupport/irq.h>
#include <platsupport/interface_registration.h>

/**
 * Memory usage hints. These indicate how memory is expected to be used,
 * allowing better memory attributes or caching policies to be chosen.
 * For example, memory that is only written to would be best mapped
 * write combining if the architecture supports it.
 */
typedef enum ps_mem_flags {
    PS_MEM_NORMAL, /* No hints, consider 'normal' memory */
    PS_MEM_HR,     /* Host typically reads */
    PS_MEM_HW      /* Host typically writes */
} ps_mem_flags_t;

/**
 * Map the region of memory at the requested physical address
 *
 * @param cookie Cookie for the I/O Mapper
 * @param paddr Physical address to map
 * @param size Number of bytes to map
 * @param cached Whether the region should be mapped cached or not
 * @param flags Memory usage flags
 * @return The virtual address at which the data at paddr can be accessed, or NULL on failure
 */
typedef void *(*ps_io_map_fn_t)(
    void *cookie,
    uintptr_t paddr,
    size_t size,
    int cached,
    ps_mem_flags_t flags);

/**
 * Unmap a previously mapped I/O memory region
 *
 * @param cookie Cookie for the I/O Mapper
 * @param vaddr A virtual address that was returned by io_map
 * @param size The size in bytes that this memory was originally mapped with
 */
typedef void (*ps_io_unmap_fn_t)(
    void *cookie,
    void *vaddr,
    size_t size);

typedef struct ps_io_mapper {
    void *cookie;
    ps_io_map_fn_t io_map_fn;
    ps_io_unmap_fn_t io_unmap_fn;
} ps_io_mapper_t;

static inline void *ps_io_map(
    const ps_io_mapper_t *io_mapper,
    uintptr_t paddr,
    size_t size,
    int cached,
    ps_mem_flags_t flags)
{
    assert(io_mapper);
    assert(io_mapper->io_map_fn);
    return io_mapper->io_map_fn(io_mapper->cookie, paddr, size, cached, flags);
}

static inline void ps_io_unmap(
    const ps_io_mapper_t *io_mapper,
    void *vaddr,
    size_t size)
{
    assert(io_mapper);
    assert(io_mapper->io_unmap_fn);
    io_mapper->io_unmap_fn(io_mapper->cookie, vaddr, size);
}
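
/*
 * Example (sketch): mapping and later unmapping a device register region
 * through an already-initialised ps_io_ops_t. The variable 'io_ops' and the
 * physical address and size below are placeholders, not part of this
 * interface.
 *
 *     void *regs = ps_io_map(&io_ops.io_mapper, 0x30000000, 0x1000,
 *                            0, PS_MEM_NORMAL);
 *     if (regs != NULL) {
 *         ... access the device registers ...
 *         ps_io_unmap(&io_ops.io_mapper, regs, 0x1000);
 *     }
 */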

/**
 * Perform an architectural I/O 'in' operation (aka I/O ports on x86)
 *
 * @param cookie Cookie to the underlying I/O handler
 * @param port Port to perform the 'in' on
 * @param io_size Size in bytes of the I/O operation
 * @param result Location to store the results. If io_size < 4 then unused bytes will be zeroed
 *
 * @return Returns 0 on success
 */
typedef int (*ps_io_port_in_fn_t)(
    void *cookie,
    uint32_t port,
    int io_size,
    uint32_t *result);

/**
 * Perform an architectural I/O 'out' operation (aka I/O ports on x86)
 *
 * @param cookie Cookie to the underlying I/O handler
 * @param port Port to perform the 'out' on
 * @param io_size Size in bytes of the I/O operation
 * @param val Value to send to the I/O port
 *
 * @return Returns 0 on success
 */
typedef int (*ps_io_port_out_fn_t)(
    void *cookie,
    uint32_t port,
    int io_size,
    uint32_t val);

typedef struct ps_io_port_ops {
    void *cookie;
    ps_io_port_in_fn_t io_port_in_fn;
    ps_io_port_out_fn_t io_port_out_fn;
} ps_io_port_ops_t;

static inline int ps_io_port_in(
    const ps_io_port_ops_t *port_ops,
    uint32_t port,
    int io_size,
    uint32_t *result)
{
    assert(port_ops);
    assert(port_ops->io_port_in_fn);
    return port_ops->io_port_in_fn(port_ops->cookie, port, io_size, result);
}

static inline int ps_io_port_out(
    const ps_io_port_ops_t *port_ops,
    uint32_t port,
    int io_size,
    uint32_t val)
{
    assert(port_ops);
    assert(port_ops->io_port_out_fn);
    return port_ops->io_port_out_fn(port_ops->cookie, port, io_size, val);
}
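
/*
 * Example (sketch): a byte-wide port read followed by a write, again assuming
 * an initialised ps_io_ops_t named 'io_ops'. The port number is illustrative
 * only.
 *
 *     uint32_t value = 0;
 *     int error = ps_io_port_in(&io_ops.io_port_ops, 0x60, 1, &value);
 *     if (!error) {
 *         error = ps_io_port_out(&io_ops.io_port_ops, 0x60, 1, value);
 *     }
 */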

typedef enum dma_cache_op {
    DMA_CACHE_OP_CLEAN,
    DMA_CACHE_OP_INVALIDATE,
    DMA_CACHE_OP_CLEAN_INVALIDATE
} dma_cache_op_t;

/**
 * Allocate a dma memory buffer. Must be contiguous in physical and virtual address,
 * but may cross page boundaries. It is also guaranteed that this memory region can
 * be pinned.
 *
 * @param cookie Cookie for the dma manager
 * @param size Size in bytes of the dma memory region
 * @param align Alignment in bytes of the dma region
 * @param cached Whether the region should be mapped cached or not
 * @param flags Memory usage flags
 *
 * @return NULL on failure, otherwise virtual address of allocation
 */
typedef void *(*ps_dma_alloc_fn_t)(
    void *cookie,
    size_t size,
    int align,
    int cached,
    ps_mem_flags_t flags);

/**
 * Free a previously allocated dma memory buffer
 *
 * @param cookie Cookie for the dma manager
 * @param addr Virtual address of the memory buffer as given by the dma_alloc function
 * @param size Original size of the allocated buffer
 */
typedef void (*ps_dma_free_fn_t)(
    void *cookie,
    void *addr,
    size_t size);

/**
 * Pin a piece of memory. This ensures it is resident and has a translation until
 * it is unpinned. You should not pin a memory range that overlaps with another
 * pinned range. If pinning is successful, the memory is guaranteed to be
 * contiguous in physical memory.
 *
 * @param cookie Cookie for the dma manager
 * @param addr Address of the memory to pin
 * @param size Range of memory to pin
 *
 * @return 0 if the memory could not be pinned, otherwise the physical address
 */
typedef uintptr_t (*ps_dma_pin_fn_t)(
    void *cookie,
    void *addr,
    size_t size);

/**
 * Unpin a piece of memory. You should only unpin the exact same range
 * that was pinned; do not partially unpin a range or unpin memory that
 * was never pinned.
 *
 * @param cookie Cookie for the dma manager
 * @param addr Address of the memory to unpin
 * @param size Range of the memory to unpin
 */
typedef void (*ps_dma_unpin_fn_t)(
    void *cookie,
    void *addr,
    size_t size);

/**
 * Perform a cache operation on a dma memory region. Cache operations may be
 * performed on both pinned and unpinned memory.
 *
 * @param cookie Cookie for the dma manager
 * @param addr Start address to perform the cache operation on
 * @param size Size of the range to perform the cache operation on
 * @param op Cache operation to perform
 */
typedef void (*ps_dma_cache_op_fn_t)(
    void *cookie,
    void *addr,
    size_t size,
    dma_cache_op_t op);

typedef struct ps_dma_man {
    void *cookie;
    ps_dma_alloc_fn_t dma_alloc_fn;
    ps_dma_free_fn_t dma_free_fn;
    ps_dma_pin_fn_t dma_pin_fn;
    ps_dma_unpin_fn_t dma_unpin_fn;
    ps_dma_cache_op_fn_t dma_cache_op_fn;
} ps_dma_man_t;

static inline void *ps_dma_alloc(
    const ps_dma_man_t *dma_man,
    size_t size,
    int align,
    int cache,
    ps_mem_flags_t flags)
{
    assert(dma_man);
    assert(dma_man->dma_alloc_fn);
    return dma_man->dma_alloc_fn(dma_man->cookie, size, align, cache, flags);
}

static inline void ps_dma_free(
    const ps_dma_man_t *dma_man,
    void *addr,
    size_t size)
{
    assert(dma_man);
    assert(dma_man->dma_free_fn);
    dma_man->dma_free_fn(dma_man->cookie, addr, size);
}

static inline uintptr_t ps_dma_pin(
    const ps_dma_man_t *dma_man,
    void *addr,
    size_t size)
{
    assert(dma_man);
    assert(dma_man->dma_pin_fn);
    return dma_man->dma_pin_fn(dma_man->cookie, addr, size);
}

static inline void ps_dma_unpin(
    const ps_dma_man_t *dma_man,
    void *addr,
    size_t size)
{
    assert(dma_man);
    assert(dma_man->dma_unpin_fn);
    dma_man->dma_unpin_fn(dma_man->cookie, addr, size);
}

static inline void ps_dma_cache_op(
    const ps_dma_man_t *dma_man,
    void *addr,
    size_t size,
    dma_cache_op_t op)
{
    assert(dma_man);
    assert(dma_man->dma_cache_op_fn);
    dma_man->dma_cache_op_fn(dma_man->cookie, addr, size, op);
}

static inline void ps_dma_cache_clean(
    const ps_dma_man_t *dma_man,
    void *addr,
    size_t size)
{
    ps_dma_cache_op(dma_man, addr, size, DMA_CACHE_OP_CLEAN);
}

static inline void ps_dma_cache_invalidate(
    const ps_dma_man_t *dma_man,
    void *addr,
    size_t size)
{
    ps_dma_cache_op(dma_man, addr, size, DMA_CACHE_OP_INVALIDATE);
}

static inline void ps_dma_cache_clean_invalidate(
    const ps_dma_man_t *dma_man,
    void *addr,
    size_t size)
{
    ps_dma_cache_op(dma_man, addr, size, DMA_CACHE_OP_CLEAN_INVALIDATE);
}
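
/*
 * Example (sketch): a typical DMA buffer lifecycle using the interface above,
 * assuming an initialised ps_io_ops_t named 'io_ops'. The size, alignment and
 * cached mapping are illustrative; an uncached mapping would avoid the
 * explicit cache maintenance.
 *
 *     void *buf = ps_dma_alloc(&io_ops.dma_manager, 0x1000, 0x1000,
 *                              1, PS_MEM_NORMAL);
 *     if (buf != NULL) {
 *         uintptr_t phys = ps_dma_pin(&io_ops.dma_manager, buf, 0x1000);
 *         if (phys != 0) {
 *             ... fill the buffer ...
 *             ps_dma_cache_clean(&io_ops.dma_manager, buf, 0x1000);
 *             ... hand 'phys' to the device and wait for it to finish ...
 *             ps_dma_cache_invalidate(&io_ops.dma_manager, buf, 0x1000);
 *             ps_dma_unpin(&io_ops.dma_manager, buf, 0x1000);
 *         }
 *         ps_dma_free(&io_ops.dma_manager, buf, 0x1000);
 *     }
 */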

/*
 * Allocate some heap memory for the driver to use. Basically malloc.
 *
 * @param cookie     Cookie for the allocator.
 * @param size       Number of bytes to allocate.
 * @param[out] ptr   Pointer to store the result in.
 *
 * @return 0 on success, errno on error.
 */
typedef int (*ps_malloc_fn_t)(
    void *cookie,
    size_t size,
    void **ptr);

/*
 * Allocate and zero some heap memory for the driver to use. Basically calloc.
 *
 * @param cookie     Cookie for the allocator.
 * @param nmemb      Number of elements to allocate.
 * @param size       Size of each element in bytes.
 * @param[out] ptr   Pointer to store the result in.
 *
 * @return 0 on success, errno on error.
 */
typedef int (*ps_calloc_fn_t)(
    void *cookie,
    size_t nmemb,
    size_t size,
    void **ptr);

/*
 * Free allocated heap memory.
 *
 * @param cookie     Cookie for the allocator.
 * @param size       Number of bytes to free.
 * @param ptr        Pointer previously returned by malloc or calloc.
 *
 * @return 0 on success, errno on error.
 */
typedef int (*ps_free_fn_t)(
    void *cookie,
    size_t size,
    void *ptr);

typedef struct {
    ps_malloc_fn_t malloc;
    ps_calloc_fn_t calloc;
    ps_free_fn_t free;
    void *cookie;
} ps_malloc_ops_t;

static inline int ps_malloc(
    const ps_malloc_ops_t *ops,
    size_t size,
    void **ptr)
{
    if (ops == NULL) {
        ZF_LOGE("ops cannot be NULL");
        return EINVAL;
    }

    if (ops->malloc == NULL) {
        ZF_LOGE("not implemented");
        return ENOSYS;
    }

    if (size == 0) {
        /* nothing to do */
        ZF_LOGW("called with size 0");
        return 0;
    }

    if (ptr == NULL) {
        ZF_LOGE("ptr cannot be NULL");
        return EINVAL;
    }

    return ops->malloc(ops->cookie, size, ptr);
}

static inline int ps_calloc(
    const ps_malloc_ops_t *ops,
    size_t nmemb,
    size_t size,
    void **ptr)
{
    if (ops == NULL) {
        ZF_LOGE("ops cannot be NULL");
        return EINVAL;
    }

    if (ops->calloc == NULL) {
        ZF_LOGE("not implemented");
        return ENOSYS;
    }

    if (size == 0 || nmemb == 0) {
        /* nothing to do */
        ZF_LOGW("called with no bytes to allocate");
        return 0;
    }

    if (ptr == NULL) {
        ZF_LOGE("ptr cannot be NULL");
        return EINVAL;
    }

    return ops->calloc(ops->cookie, nmemb, size, ptr);
}

static inline int ps_free(
    const ps_malloc_ops_t *ops,
    size_t size, void *ptr)
{
    if (ops == NULL) {
        ZF_LOGE("ops cannot be NULL");
        return EINVAL;
    }

    if (ops->free == NULL) {
        ZF_LOGE("not implemented");
        return ENOSYS;
    }

    if (ptr == NULL) {
        ZF_LOGE("ptr cannot be NULL");
        return EINVAL;
    }

    return ops->free(ops->cookie, size, ptr);
}
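
/*
 * Example (sketch): allocating and releasing a zeroed structure through the
 * wrappers above. 'io_ops' and 'struct foo_dev' are placeholders.
 *
 *     struct foo_dev *dev = NULL;
 *     int error = ps_calloc(&io_ops.malloc_ops, 1, sizeof(*dev),
 *                           (void **) &dev);
 *     if (!error) {
 *         ... use dev ...
 *         ps_free(&io_ops.malloc_ops, sizeof(*dev), dev);
 *     }
 */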

/*
 * Retrieves a copy of the FDT.
 *
 * @param cookie     Cookie for the FDT interface.
 *
 * @return A pointer to an FDT object, or NULL on error.
 */
typedef char *(*ps_io_fdt_get_fn_t)(
    void *cookie);

typedef struct ps_fdt {
    void *cookie;
    ps_io_fdt_get_fn_t get_fn;
} ps_io_fdt_t;

static inline char *ps_io_fdt_get(
    const ps_io_fdt_t *io_fdt)
{
    if (io_fdt == NULL) {
        ZF_LOGE("fdt cannot be NULL");
        return NULL;
    }

    if (io_fdt->get_fn == NULL) {
        ZF_LOGE("not implemented");
        return NULL;
    }

    return io_fdt->get_fn(io_fdt->cookie);
}
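
/*
 * Example (sketch): fetching the FDT so it can be parsed, e.g. with libfdt.
 * 'io_ops' is a placeholder for an initialised ps_io_ops_t.
 *
 *     char *fdt_blob = ps_io_fdt_get(&io_ops.io_fdt);
 *     if (fdt_blob == NULL) {
 *         ZF_LOGE("No FDT available");
 *     }
 */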

/* Struct that collects all the different I/O operations together. It should contain
 * everything a driver needs to function. */
struct ps_io_ops {
    ps_io_mapper_t io_mapper;
    ps_io_port_ops_t io_port_ops;
    ps_dma_man_t dma_manager;
    ps_io_fdt_t io_fdt;
#ifdef CONFIG_ARCH_ARM
    clock_sys_t clock_sys;
    mux_sys_t mux_sys;
#endif
    ps_interface_registration_ops_t interface_registration_ops;
    ps_malloc_ops_t malloc_ops;
    ps_irq_ops_t irq_ops;
};
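
/*
 * Drivers built on this interface conventionally take a single ps_io_ops_t
 * and pull out the interfaces they need. A minimal sketch of such an entry
 * point; the 'foo' names, FOO_PADDR and FOO_SIZE are placeholders:
 *
 *     typedef struct foo_driver {
 *         ps_io_ops_t *io_ops;
 *         void *regs;
 *     } foo_driver_t;
 *
 *     int foo_driver_init(foo_driver_t *drv, ps_io_ops_t *io_ops)
 *     {
 *         drv->io_ops = io_ops;
 *         drv->regs = ps_io_map(&io_ops->io_mapper, FOO_PADDR, FOO_SIZE,
 *                               0, PS_MEM_NORMAL);
 *         return drv->regs == NULL ? -1 : 0;
 *     }
 */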

/**
 * In-place reads/writes of device register bitfields
 *
 * e.g., where var = 0x12345678
 *
 * read_masked(&var, 0x0000FFFF) ==> 0x00005678
 * write_masked(&var, 0x0000FFFF, 0x000000CC) ==> var = 0x123400CC
 * read_bits(&var, 8, 4) ==> 0x6
 * write_bits(&var, 8, 4, 0xC) ==> var = 0x12345C78
 */
static inline uint32_t read_masked(
    volatile uint32_t *addr,
    uint32_t mask)
{
    assert(addr);
    return *addr & mask;
}

static inline void write_masked(
    volatile uint32_t *addr,
    uint32_t mask,
    uint32_t value)
{
    assert(addr);
    assert((value & mask) == value);
    *addr = read_masked(addr, ~mask) | value;
}

static inline uint32_t read_bits(
    volatile uint32_t *addr,
    unsigned int first_bit,
    unsigned int nbits)
{
    assert(addr);
    assert(first_bit < 32);
    assert(nbits <= 32 - first_bit);
    return (*addr >> first_bit) & MASK(nbits);
}

static inline void write_bits(
    volatile uint32_t *addr,
    unsigned int first_bit,
    unsigned int nbits,
    uint32_t value)
{
    assert(addr);
    assert(first_bit < 32);
    assert(nbits <= 32 - first_bit);
    write_masked(addr, MASK(nbits) << first_bit, value << first_bit);
}
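
/*
 * Example (sketch): updating a 4-bit field starting at bit 8 of a
 * memory-mapped register, assuming 'clk_reg' points at a register mapped
 * with ps_io_map.
 *
 *     uint32_t divider = read_bits(clk_reg, 8, 4);
 *     if (divider != 0xC) {
 *         write_bits(clk_reg, 8, 4, 0xC);
 *     }
 */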

/*
 * Populate a ps_malloc_ops_t with wrappers around the C standard library
 * allocator.
 */
int ps_new_stdlib_malloc_ops(
    ps_malloc_ops_t *ops);
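
/*
 * Example (sketch): populating a ps_malloc_ops_t with the stdlib-backed
 * implementation and allocating through it, assuming the usual 0-on-success
 * convention.
 *
 *     ps_malloc_ops_t malloc_ops = {0};
 *     int error = ps_new_stdlib_malloc_ops(&malloc_ops);
 *     if (!error) {
 *         void *mem = NULL;
 *         error = ps_malloc(&malloc_ops, 64, &mem);
 *     }
 */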