1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2016-2019 Ruslan Bukin <br@bsdpad.com>
5 *
6 * This software was developed by SRI International and the University of
7 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
8 * ("CTSRD"), as part of the DARPA CRASH research programme.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $FreeBSD$
32 */
33
34#ifndef _DEV_XDMA_XDMA_H_
35#define _DEV_XDMA_XDMA_H_
36
37#include <sys/proc.h>
38#include <sys/vmem.h>
39
40#ifdef FDT
41#include <dev/fdt/fdt_common.h>
42#include <dev/ofw/openfirm.h>
43#endif
44
45#include <vm/vm.h>
46#include <vm/pmap.h>
47
/*
 * Direction of a DMA transfer, as seen from the consumer device.
 */
enum xdma_direction {
	XDMA_MEM_TO_MEM,	/* Memory-to-memory copy. */
	XDMA_MEM_TO_DEV,	/* Memory to peripheral (TX). */
	XDMA_DEV_TO_MEM,	/* Peripheral to memory (RX). */
	XDMA_DEV_TO_DEV,	/* Peripheral-to-peripheral transfer. */
};
54
/*
 * Kind of DMA operation a request or channel is configured for.
 */
enum xdma_operation_type {
	XDMA_MEMCPY,	/* One-shot memory copy. */
	XDMA_CYCLIC,	/* Cyclic (ring) transfer, e.g. for audio. */
	XDMA_FIFO,	/* Transfer to/from a fixed FIFO address. */
	XDMA_SG,	/* Scatter-gather transfer. */
};
61
/*
 * How the payload of a struct xdma_request is described.
 */
enum xdma_request_type {
	XR_TYPE_PHYS,	/* Raw physical addresses (src_addr/dst_addr). */
	XR_TYPE_VIRT,	/* Virtual addresses. */
	XR_TYPE_MBUF,	/* Network mbuf chain (xdma_request.m). */
	XR_TYPE_BIO,	/* Block I/O request (xdma_request.bp). */
};
68
/*
 * Channel control commands accepted by xdma_control().
 */
enum xdma_command {
	XDMA_CMD_BEGIN,		/* Start/resume channel operation. */
	XDMA_CMD_PAUSE,		/* Pause the channel. */
	XDMA_CMD_TERMINATE,	/* Stop and tear down outstanding work. */
};
74
/*
 * Completion status reported back to the consumer for a transfer.
 */
struct xdma_transfer_status {
	uint32_t	transferred;	/* Number of bytes transferred. */
	int		error;		/* 0 on success, non-zero on error. */
};

typedef struct xdma_transfer_status xdma_transfer_status_t;
81
/*
 * Per-consumer handle onto a hardware DMA engine.  Allocated by
 * xdma_get()/xdma_ofw_get() and released with xdma_put().
 */
struct xdma_controller {
	device_t dev;		/* DMA consumer device_t. */
	device_t dma_dev;	/* A real DMA device_t. */
	void *data;		/* OFW MD part. */
	vmem_t *vmem;		/* Bounce memory. */

	/* List of virtual channels allocated. */
	TAILQ_HEAD(xdma_channel_list, xdma_channel)	channels;
};

typedef struct xdma_controller xdma_controller_t;
93
/*
 * Per-request busdma mapping state used while a request is in flight.
 */
struct xchan_buf {
	bus_dmamap_t			map;		/* busdma map for the payload. */
	uint32_t			nsegs;		/* Total DMA segments mapped. */
	uint32_t			nsegs_left;	/* Segments not yet completed by hw. */
	vm_offset_t			vaddr;		/* Kernel VA of the (bounce) buffer. */
	/*
	 * NOTE(review): declared vm_offset_t, not vm_paddr_t/bus_addr_t;
	 * verify on platforms where physical addresses are wider than VAs.
	 */
	vm_offset_t			paddr;
	vm_size_t			size;		/* Buffer size in bytes. */
};
102
/*
 * A single DMA transfer request submitted on a channel.  Exactly one of
 * m / bp / (src_addr,dst_addr) describes the payload, selected by req_type.
 */
struct xdma_request {
	struct mbuf			*m;		/* Payload for XR_TYPE_MBUF. */
	struct bio			*bp;		/* Payload for XR_TYPE_BIO. */
	enum xdma_operation_type	operation;	/* MEMCPY/CYCLIC/FIFO/SG. */
	enum xdma_request_type		req_type;	/* How the payload is described. */
	enum xdma_direction		direction;	/* Transfer direction. */
	bus_addr_t			src_addr;	/* Source bus address (PHYS/VIRT/FIFO). */
	bus_addr_t			dst_addr;	/* Destination bus address. */
	uint8_t				src_width;	/* Source access width, bytes. */
	uint8_t				dst_width;	/* Destination access width, bytes. */
	bus_size_t			block_num;	/* Number of blocks to transfer. */
	bus_size_t			block_len;	/* Length of each block, bytes. */
	xdma_transfer_status_t		status;		/* Filled in on completion. */
	void				*user;		/* Opaque consumer cookie. */
	TAILQ_ENTRY(xdma_request)	xr_next;	/* Linkage on channel queues. */
	struct xchan_buf		buf;		/* busdma mapping state. */
};
120
/*
 * One entry of the flat scatter-gather list handed to the hardware
 * driver; built from a request by xdma_sglist_add().
 */
struct xdma_sglist {
	bus_addr_t			src_addr;	/* Segment source address. */
	bus_addr_t			dst_addr;	/* Segment destination address. */
	size_t				len;		/* Segment length, bytes. */
	uint8_t				src_width;	/* Source access width, bytes. */
	uint8_t				dst_width;	/* Destination access width, bytes. */
	enum xdma_direction		direction;	/* Transfer direction. */
	bool				first;		/* First segment of a request. */
	bool				last;		/* Last segment of a request. */
};
131
/*
 * Per-channel IOMMU state used when XCHAN_CAP_IOMMU is set.
 */
struct xdma_iommu {
	struct pmap p;		/* Page tables for device-visible mappings. */
	vmem_t *vmem;		/* VA space */
	device_t dev;		/* IOMMU device */
};
137
/*
 * A virtual DMA channel.  Requests flow: bank (free pool) -> queue_in
 * (submitted by consumer) -> processing (handed to hardware) ->
 * queue_out (completed, awaiting dequeue).  Each queue has its own mutex.
 */
struct xdma_channel {
	xdma_controller_t		*xdma;	/* Owning controller. */
	vmem_t				*vmem;	/* Bounce memory arena, if any. */

	uint32_t			flags;	/* Channel state, XCHAN_* below. */
#define	XCHAN_BUFS_ALLOCATED		(1 << 0)
#define	XCHAN_SGLIST_ALLOCATED		(1 << 1)
#define	XCHAN_CONFIGURED		(1 << 2)
#define	XCHAN_TYPE_CYCLIC		(1 << 3)
#define	XCHAN_TYPE_MEMCPY		(1 << 4)
#define	XCHAN_TYPE_FIFO			(1 << 5)
#define	XCHAN_TYPE_SG			(1 << 6)

	uint32_t			caps;	/* Capabilities requested at alloc. */
#define	XCHAN_CAP_BUSDMA		(1 << 0)
#define	XCHAN_CAP_NOSEG			(1 << 1)
#define	XCHAN_CAP_BOUNCE		(1 << 2)
#define	XCHAN_CAP_IOMMU			(1 << 3)

	/* A real hardware driver channel. */
	void				*chan;

	/* Interrupt handlers. */
	TAILQ_HEAD(, xdma_intr_handler)	ie_handlers;
	TAILQ_ENTRY(xdma_channel)	xchan_next;	/* Linkage on controller list. */

	struct mtx			mtx_lock;	/* Channel state lock. */
	struct mtx			mtx_qin_lock;	/* Protects queue_in. */
	struct mtx			mtx_qout_lock;	/* Protects queue_out. */
	struct mtx			mtx_bank_lock;	/* Protects bank. */
	struct mtx			mtx_proc_lock;	/* Protects processing. */

	/* Request queue. */
	bus_dma_tag_t			dma_tag_bufs;	/* Tag for request buffers. */
	struct xdma_request		*xr_mem;	/* Backing array of requests. */
	uint32_t			xr_num;		/* Number of entries in xr_mem. */

	/* Bus dma tag options. */
	bus_size_t			maxsegsize;
	bus_size_t			maxnsegs;
	bus_size_t			alignment;
	bus_addr_t			boundary;
	bus_addr_t			lowaddr;
	bus_addr_t			highaddr;

	struct xdma_sglist		*sg;	/* Scratch sglist for submission. */

	TAILQ_HEAD(, xdma_request)	bank;		/* Free request pool. */
	TAILQ_HEAD(, xdma_request)	queue_in;	/* Submitted, not yet processed. */
	TAILQ_HEAD(, xdma_request)	queue_out;	/* Completed, awaiting dequeue. */
	TAILQ_HEAD(, xdma_request)	processing;	/* Handed to hardware. */

	/* iommu */
	struct xdma_iommu		xio;
};

typedef struct xdma_channel xdma_channel_t;
195
/*
 * A consumer completion callback registered with xdma_setup_intr().
 */
struct xdma_intr_handler {
	int		(*cb)(void *cb_user, xdma_transfer_status_t *status);
	int		flags;
#define	XDMA_INTR_NET	(1 << 0)	/* Network consumer callback. */
	void		*cb_user;	/* Opaque argument passed to cb. */
	TAILQ_ENTRY(xdma_intr_handler)	ih_next;	/* Linkage on ie_handlers. */
};
203
/*
 * Malloc type for xDMA allocations.  NOTE(review): being static in a
 * header, each translation unit that includes this file gets its own
 * instance of the type — confirm this is intentional.
 */
static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
205
/* Channel state lock. */
#define	XCHAN_LOCK(xchan)		mtx_lock(&(xchan)->mtx_lock)
#define	XCHAN_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_lock)
#define	XCHAN_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_lock, MA_OWNED)

/* Submission queue (queue_in) lock. */
#define	QUEUE_IN_LOCK(xchan)		mtx_lock(&(xchan)->mtx_qin_lock)
#define	QUEUE_IN_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_qin_lock)
#define	QUEUE_IN_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_qin_lock, MA_OWNED)

/* Completion queue (queue_out) lock. */
#define	QUEUE_OUT_LOCK(xchan)		mtx_lock(&(xchan)->mtx_qout_lock)
#define	QUEUE_OUT_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_qout_lock)
#define	QUEUE_OUT_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_qout_lock, MA_OWNED)

/* Free request pool (bank) lock. */
#define	QUEUE_BANK_LOCK(xchan)		mtx_lock(&(xchan)->mtx_bank_lock)
#define	QUEUE_BANK_UNLOCK(xchan)	mtx_unlock(&(xchan)->mtx_bank_lock)
#define	QUEUE_BANK_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_bank_lock, MA_OWNED)

/* In-hardware (processing) queue lock. */
#define	QUEUE_PROC_LOCK(xchan)		mtx_lock(&(xchan)->mtx_proc_lock)
#define	QUEUE_PROC_UNLOCK(xchan)	mtx_unlock(&(xchan)->mtx_proc_lock)
#define	QUEUE_PROC_ASSERT_LOCKED(xchan)	\
    mtx_assert(&(xchan)->mtx_proc_lock, MA_OWNED)
230
/* Maximum entries in a channel's scatter-gather list. */
#define	XDMA_SGLIST_MAXLEN	2048
/* Maximum busdma segments for a single mapped request. */
#define	XDMA_MAX_SEG		128
233
/*
 * xDMA controller ops.
 * Attach to a DMA engine by FDT property (xdma_ofw_get) or explicit
 * device_t (xdma_get); release with xdma_put().
 */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
xdma_controller_t *xdma_get(device_t dev, device_t dma_dev);
int xdma_put(xdma_controller_t *xdma);
vmem_t * xdma_get_memory(device_t dev);
void xdma_put_memory(vmem_t *vmem);
#ifdef FDT
int xdma_handle_mem_node(vmem_t *vmem, phandle_t memory);
#endif

/* xDMA channel ops: allocate/free virtual channels, submit requests. */
xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
int xdma_channel_free(xdma_channel_t *);
int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
void xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem);

/*
 * SG interface.
 * xdma_prep_sg() configures a channel for scatter-gather operation
 * (request count, then busdma tag limits: maxsegsize, maxnsegs,
 * alignment, boundary, lowaddr, highaddr).
 */
int xdma_prep_sg(xdma_channel_t *, uint32_t,
    bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
void xdma_channel_free_sg(xdma_channel_t *xchan);
int xdma_queue_submit_sg(xdma_channel_t *xchan);
void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);

/*
 * Queue operations: enqueue work (mbuf/bio/raw addresses) and dequeue
 * completed requests.  Unnamed uint8_t pairs are src/dst access widths.
 */
int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
    xdma_transfer_status_t *);
int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
    uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
    xdma_transfer_status_t *status);
int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
    uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue(xdma_channel_t *xchan, void **user,
    xdma_transfer_status_t *status);
int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
    uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
int xdma_queue_submit(xdma_channel_t *xchan);

/* Mbuf operations. */
uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
uint32_t xdma_mbuf_chain_count(struct mbuf *m0);

/* Channel Control: see enum xdma_command. */
int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);

/*
 * Interrupt callback registration; xdma_callback() is invoked by the
 * hardware driver to notify registered handlers.
 */
int xdma_setup_intr(xdma_channel_t *xchan, int flags, int (*cb)(void *,
    xdma_transfer_status_t *), void *arg, void **);
int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
int xdma_teardown_all_intr(xdma_channel_t *xchan);
void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);

/* Sglist: allocate/free a channel's sg array and append mapped segments. */
int xchan_sglist_alloc(xdma_channel_t *xchan);
void xchan_sglist_free(xdma_channel_t *xchan);
int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
    uint32_t nsegs, struct xdma_request *xr);

/* Requests bank: free pool of pre-allocated xdma_request entries. */
void xchan_bank_init(xdma_channel_t *xchan);
int xchan_bank_free(xdma_channel_t *xchan);
struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);

/* IOMMU: manage device-visible mappings for XCHAN_CAP_IOMMU channels. */
void xdma_iommu_add_entry(xdma_channel_t *xchan, vm_offset_t *va,
    vm_paddr_t pa, vm_size_t size, vm_prot_t prot);
void xdma_iommu_remove_entry(xdma_channel_t *xchan, vm_offset_t va);
int xdma_iommu_init(struct xdma_iommu *xio);
int xdma_iommu_release(struct xdma_iommu *xio);
304
305#endif /* !_DEV_XDMA_XDMA_H_ */
306