1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright 2013-2015 Analog Devices Inc.
4 *  Author: Lars-Peter Clausen <lars@metafoo.de>
5 */
6
7#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
8#define __INDUSTRIALIO_DMA_BUFFER_H__
9
10#include <linux/list.h>
11#include <linux/kref.h>
12#include <linux/spinlock.h>
13#include <linux/mutex.h>
14#include <linux/iio/buffer_impl.h>
15
16struct iio_dma_buffer_queue;
17struct iio_dma_buffer_ops;
18struct device;
19
20/**
21 * enum iio_block_state - State of a struct iio_dma_buffer_block
22 * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
23 * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
24 * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
25 * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed
26 */
27enum iio_block_state {
28	IIO_BLOCK_STATE_QUEUED,
29	IIO_BLOCK_STATE_ACTIVE,
30	IIO_BLOCK_STATE_DONE,
31	IIO_BLOCK_STATE_DEAD,
32};
33
34/**
35 * struct iio_dma_buffer_block - IIO buffer block
36 * @head: List head
37 * @size: Total size of the block in bytes
38 * @bytes_used: Number of bytes that contain valid data
39 * @vaddr: Virutal address of the blocks memory
40 * @phys_addr: Physical address of the blocks memory
41 * @queue: Parent DMA buffer queue
42 * @kref: kref used to manage the lifetime of block
43 * @state: Current state of the block
44 */
45struct iio_dma_buffer_block {
46	/* May only be accessed by the owner of the block */
47	struct list_head head;
48	size_t bytes_used;
49
50	/*
51	 * Set during allocation, constant thereafter. May be accessed read-only
52	 * by anybody holding a reference to the block.
53	 */
54	void *vaddr;
55	dma_addr_t phys_addr;
56	size_t size;
57	struct iio_dma_buffer_queue *queue;
58
59	/* Must not be accessed outside the core. */
60	struct kref kref;
61	/*
62	 * Must not be accessed outside the core. Access needs to hold
63	 * queue->list_lock if the block is not owned by the core.
64	 */
65	enum iio_block_state state;
66};
67
68/**
69 * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
70 * @blocks: Buffer blocks used for fileio
71 * @active_block: Block being used in read()
72 * @pos: Read offset in the active block
73 * @block_size: Size of each block
74 * @next_dequeue: index of next block that will be dequeued
75 */
76struct iio_dma_buffer_queue_fileio {
77	struct iio_dma_buffer_block *blocks[2];
78	struct iio_dma_buffer_block *active_block;
79	size_t pos;
80	size_t block_size;
81
82	unsigned int next_dequeue;
83};
84
85/**
86 * struct iio_dma_buffer_queue - DMA buffer base structure
87 * @buffer: IIO buffer base structure
88 * @dev: Parent device
89 * @ops: DMA buffer callbacks
90 * @lock: Protects the incoming list, active and the fields in the fileio
91 *   substruct
92 * @list_lock: Protects lists that contain blocks which can be modified in
93 *   atomic context as well as blocks on those lists. This is the outgoing queue
94 *   list and typically also a list of active blocks in the part that handles
95 *   the DMA controller
96 * @incoming: List of buffers on the incoming queue
97 * @active: Whether the buffer is currently active
98 * @fileio: FileIO state
99 */
100struct iio_dma_buffer_queue {
101	struct iio_buffer buffer;
102	struct device *dev;
103	const struct iio_dma_buffer_ops *ops;
104
105	struct mutex lock;
106	spinlock_t list_lock;
107	struct list_head incoming;
108
109	bool active;
110
111	struct iio_dma_buffer_queue_fileio fileio;
112};
113
114/**
115 * struct iio_dma_buffer_ops - DMA buffer callback operations
116 * @submit: Called when a block is submitted to the DMA controller
117 * @abort: Should abort all pending transfers
118 */
119struct iio_dma_buffer_ops {
120	int (*submit)(struct iio_dma_buffer_queue *queue,
121		struct iio_dma_buffer_block *block);
122	void (*abort)(struct iio_dma_buffer_queue *queue);
123};
124
/* Called by drivers to signal completion / abort of in-flight blocks. */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list);

/*
 * Generic iio_buffer operations, intended to be wired into a driver's
 * struct iio_buffer_access_funcs.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer);
size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);

/* Queue lifetime management for drivers embedding iio_dma_buffer_queue. */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dma_dev, const struct iio_dma_buffer_ops *ops);
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
144
145#endif
146