/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Xilinx AXI DMA controller driver. */
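/*
 * This driver is a scatter-gather backend for the xDMA framework.  It exposes
 * one transmit and one receive channel (AXIDMA_TX_CHAN and AXIDMA_RX_CHAN),
 * each driven by a ring of hardware buffer descriptors: software fills
 * descriptors, kicks the engine by writing the tail descriptor pointer, and
 * reaps completions from the per-channel interrupt handlers.
 */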

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_platform.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/rman.h>

#include <machine/bus.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include <dev/xdma/xdma.h>
#include <dev/xilinx/axidma.h>

#include "xdma_if.h"

#define	READ4(_sc, _reg)	\
	bus_space_read_4(_sc->bst, _sc->bsh, _reg)
#define	WRITE4(_sc, _reg, _val)	\
	bus_space_write_4(_sc->bst, _sc->bsh, _reg, _val)
#define	READ8(_sc, _reg)	\
	bus_space_read_8(_sc->bst, _sc->bsh, _reg)
#define	WRITE8(_sc, _reg, _val)	\
	bus_space_write_8(_sc->bst, _sc->bsh, _reg, _val)

#define AXIDMA_DEBUG
#undef AXIDMA_DEBUG

#ifdef AXIDMA_DEBUG
#define dprintf(fmt, ...)  printf(fmt, ##__VA_ARGS__)
#else
#define dprintf(fmt, ...)
#endif

extern struct bus_space memmap_bus;

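/*
 * Per-channel software state.  The descriptor ring is addressed by two
 * indices: idx_head is where the next submitted segment goes, idx_tail is
 * the oldest descriptor not yet reaped by the interrupt handler.  descs[]
 * holds the kernel virtual addresses of the descriptors and descs_phys[]
 * the corresponding physical addresses programmed into the hardware.
 */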
struct axidma_channel {
	struct axidma_softc	*sc;
	xdma_channel_t		*xchan;
	bool			used;
	int			idx_head;
	int			idx_tail;

	struct axidma_desc	**descs;
	vm_paddr_t		*descs_phys;
	uint32_t		descs_num;

	vm_size_t		mem_size;
	vm_offset_t		mem_paddr;
	vm_offset_t		mem_vaddr;

	uint32_t		descs_used_count;
};

struct axidma_softc {
	device_t		dev;
	struct resource		*res[3];
	bus_space_tag_t		bst;
	bus_space_handle_t	bsh;
	void			*ih[2];
	struct axidma_desc	desc;
	struct axidma_channel	channels[AXIDMA_NCHANNELS];
};

static struct resource_spec axidma_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ -1, 0 }
};

#define	HWTYPE_NONE	0
#define	HWTYPE_STD	1

static struct ofw_compat_data compat_data[] = {
	{ "xlnx,eth-dma",	HWTYPE_STD },
	{ NULL,			HWTYPE_NONE },
};

static int axidma_probe(device_t dev);
static int axidma_attach(device_t dev);
static int axidma_detach(device_t dev);

static inline uint32_t
axidma_next_desc(struct axidma_channel *chan, uint32_t curidx)
{

	return ((curidx + 1) % chan->descs_num);
}

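/*
 * Common interrupt path for both channels: acknowledge the status register,
 * then walk the ring from idx_tail towards idx_head, reporting every
 * descriptor the hardware has marked complete (BD_STATUS_CMPLT) before
 * handing the accumulated byte count to the xDMA completion callback.
 */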
static void
axidma_intr(struct axidma_softc *sc,
    struct axidma_channel *chan)
{
	xdma_transfer_status_t status;
	xdma_transfer_status_t st;
	struct axidma_fdt_data *data;
	xdma_controller_t *xdma;
	struct axidma_desc *desc;
	struct xdma_channel *xchan;
	uint32_t tot_copied;
	int pending;
	int errors;

	xchan = chan->xchan;
	xdma = xchan->xdma;
	data = xdma->data;

	pending = READ4(sc, AXI_DMASR(data->id));
	WRITE4(sc, AXI_DMASR(data->id), pending);

	errors = (pending & (DMASR_DMAINTERR | DMASR_DMASLVERR
			| DMASR_DMADECERR | DMASR_SGINTERR
			| DMASR_SGSLVERR | DMASR_SGDECERR));

	dprintf("%s: AXI_DMASR %x\n", __func__,
	    READ4(sc, AXI_DMASR(data->id)));
	dprintf("%s: AXI_CURDESC %x\n", __func__,
	    READ4(sc, AXI_CURDESC(data->id)));
	dprintf("%s: AXI_TAILDESC %x\n", __func__,
	    READ4(sc, AXI_TAILDESC(data->id)));

	tot_copied = 0;

	while (chan->idx_tail != chan->idx_head) {
		desc = chan->descs[chan->idx_tail];
		if ((desc->status & BD_STATUS_CMPLT) == 0)
			break;

		st.error = errors;
		st.transferred = desc->status & BD_CONTROL_LEN_M;
		tot_copied += st.transferred;
		xchan_seg_done(xchan, &st);

		chan->idx_tail = axidma_next_desc(chan, chan->idx_tail);
		atomic_subtract_int(&chan->descs_used_count, 1);
	}

	/* Finish operation */
	status.error = errors;
	status.transferred = tot_copied;
	xdma_callback(chan->xchan, &status);
}

static void
axidma_intr_rx(void *arg)
{
	struct axidma_softc *sc;
	struct axidma_channel *chan;

	dprintf("%s\n", __func__);

	sc = arg;
	chan = &sc->channels[AXIDMA_RX_CHAN];

	axidma_intr(sc, chan);
}

static void
axidma_intr_tx(void *arg)
{
	struct axidma_softc *sc;
	struct axidma_channel *chan;

	dprintf("%s\n", __func__);

	sc = arg;
	chan = &sc->channels[AXIDMA_TX_CHAN];

	axidma_intr(sc, chan);
}

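/*
 * Soft-reset one DMA channel by setting DMACR_RESET and polling until the
 * hardware clears the bit again, with a bounded number of retries.
 */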
static int
axidma_reset(struct axidma_softc *sc, int chan_id)
{
	int timeout;

	WRITE4(sc, AXI_DMACR(chan_id), DMACR_RESET);

	timeout = 100;
	do {
		if ((READ4(sc, AXI_DMACR(chan_id)) & DMACR_RESET) == 0)
			break;
	} while (timeout--);

	dprintf("timeout %d\n", timeout);

	if (timeout < 0)
		return (-1);

	dprintf("%s: read control after reset: %x\n",
	    __func__, READ4(sc, AXI_DMACR(chan_id)));

	return (0);
}

static int
axidma_probe(device_t dev)
{
	int hwtype;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	if (hwtype == HWTYPE_NONE)
		return (ENXIO);

	device_set_desc(dev, "Xilinx AXI DMA");

	return (BUS_PROBE_DEFAULT);
}

static int
axidma_attach(device_t dev)
{
	struct axidma_softc *sc;
	phandle_t xref, node;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (bus_alloc_resources(dev, axidma_spec, sc->res)) {
		device_printf(dev, "could not allocate resources.\n");
		return (ENXIO);
	}

	/* CSR memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	/* Setup interrupt handler */
	err = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, axidma_intr_tx, sc, &sc->ih[0]);
	if (err) {
		device_printf(dev, "Unable to setup interrupt handler.\n");
		return (ENXIO);
	}

	/* Setup interrupt handler */
	err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, axidma_intr_rx, sc, &sc->ih[1]);
	if (err) {
		device_printf(dev, "Unable to setup interrupt handler.\n");
		return (ENXIO);
	}

	node = ofw_bus_get_node(dev);
	xref = OF_xref_from_node(node);
	OF_device_register_xref(xref, dev);

	return (0);
}

static int
axidma_detach(device_t dev)
{
	struct axidma_softc *sc;

	sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->res[1], sc->ih[0]);
	bus_teardown_intr(dev, sc->res[2], sc->ih[1]);
	bus_release_resources(dev, axidma_spec, sc->res);

	return (0);
}

static int
axidma_desc_free(struct axidma_softc *sc, struct axidma_channel *chan)
{
	struct xdma_channel *xchan;
	int nsegments;

	nsegments = chan->descs_num;
	xchan = chan->xchan;

	free(chan->descs, M_DEVBUF);
	free(chan->descs_phys, M_DEVBUF);

	pmap_kremove_device(chan->mem_vaddr, chan->mem_size);
	kva_free(chan->mem_vaddr, chan->mem_size);
	vmem_free(xchan->vmem, chan->mem_paddr, chan->mem_size);

	return (0);
}

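/*
 * Carve descriptor memory out of the channel's vmem arena (physical
 * addresses), map it into kernel virtual space with a device mapping, and
 * slice the chunk into desc_size-sized slots, recording both the virtual
 * and physical address of each slot.
 */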
static int
axidma_desc_alloc(struct axidma_softc *sc, struct xdma_channel *xchan,
    uint32_t desc_size)
{
	struct axidma_channel *chan;
	int nsegments;
	int i;

	chan = (struct axidma_channel *)xchan->chan;
	nsegments = chan->descs_num;

	chan->descs = malloc(nsegments * sizeof(struct axidma_desc *),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (chan->descs == NULL) {
		device_printf(sc->dev,
		    "%s: Can't allocate memory.\n", __func__);
		return (-1);
	}

	chan->descs_phys = malloc(nsegments * sizeof(vm_paddr_t),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (chan->descs_phys == NULL) {
		device_printf(sc->dev,
		    "%s: Can't allocate memory.\n", __func__);
		free(chan->descs, M_DEVBUF);
		return (-1);
	}
	chan->mem_size = desc_size * nsegments;
	if (vmem_alloc(xchan->vmem, chan->mem_size, M_FIRSTFIT | M_NOWAIT,
	    &chan->mem_paddr)) {
		device_printf(sc->dev, "Failed to allocate memory.\n");
		return (-1);
	}
	chan->mem_vaddr = kva_alloc(chan->mem_size);
	pmap_kenter_device(chan->mem_vaddr, chan->mem_size, chan->mem_paddr);

	device_printf(sc->dev, "Allocated chunk %lx %lu\n",
	    chan->mem_paddr, chan->mem_size);

	for (i = 0; i < nsegments; i++) {
		chan->descs[i] = (struct axidma_desc *)
		    ((uint64_t)chan->mem_vaddr + desc_size * i);
		chan->descs_phys[i] = chan->mem_paddr + desc_size * i;
	}

	return (0);
}

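/*
 * Bind an xDMA channel to the hardware channel selected by the FDT cell
 * (data->id).  Only non-busdma (bounce buffer) operation is supported, so
 * the XCHAN_CAP_BOUNCE capability is forced on the channel.
 */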
static int
axidma_channel_alloc(device_t dev, struct xdma_channel *xchan)
{
	xdma_controller_t *xdma;
	struct axidma_fdt_data *data;
	struct axidma_channel *chan;
	struct axidma_softc *sc;

	sc = device_get_softc(dev);

	if (xchan->caps & XCHAN_CAP_BUSDMA) {
		device_printf(sc->dev,
		    "Error: busdma operation is not implemented.\n");
		return (-1);
	}

	xdma = xchan->xdma;
	data = xdma->data;

	chan = &sc->channels[data->id];
	if (chan->used == false) {
		if (axidma_reset(sc, data->id) != 0)
			return (-1);
		chan->xchan = xchan;
		xchan->caps |= XCHAN_CAP_BOUNCE;
		xchan->chan = (void *)chan;
		chan->sc = sc;
		chan->used = true;
		chan->idx_head = 0;
		chan->idx_tail = 0;
		chan->descs_used_count = 0;
		chan->descs_num = AXIDMA_DESCS_NUM;

		return (0);
	}

	return (-1);
}

static int
axidma_channel_free(device_t dev, struct xdma_channel *xchan)
{
	struct axidma_channel *chan;
	struct axidma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct axidma_channel *)xchan->chan;

	axidma_desc_free(sc, chan);

	chan->used = false;

	return (0);
}

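/*
 * One descriptor is always kept unused so that a completely full ring can
 * be told apart from an empty one (head == tail means empty).
 */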
static int
axidma_channel_capacity(device_t dev, xdma_channel_t *xchan,
    uint32_t *capacity)
{
	struct axidma_channel *chan;
	uint32_t c;

	chan = (struct axidma_channel *)xchan->chan;

	/* At least one descriptor must be left empty. */
	c = (chan->descs_num - chan->descs_used_count - 1);

	*capacity = c;

	return (0);
}

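/*
 * Queue a scatter-gather list: each segment is copied into the descriptor
 * at idx_head (buffer address, length, and SOF/EOF markers for the first
 * and last segment of a frame), and the engine is started by writing the
 * physical address of the last filled descriptor to the TAILDESC register.
 */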
static int
axidma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
    struct xdma_sglist *sg, uint32_t sg_n)
{
	xdma_controller_t *xdma;
	struct axidma_fdt_data *data;
	struct axidma_channel *chan;
	struct axidma_desc *desc;
	struct axidma_softc *sc;
	uint32_t src_addr;
	uint32_t dst_addr;
	uint32_t addr;
	uint32_t len;
	uint32_t tmp;
	int i;
	int tail;

	dprintf("%s: sg_n %d\n", __func__, sg_n);

	sc = device_get_softc(dev);

	chan = (struct axidma_channel *)xchan->chan;
	xdma = xchan->xdma;
	data = xdma->data;

	if (sg_n == 0)
		return (0);

	tail = chan->idx_head;

	tmp = 0;

	for (i = 0; i < sg_n; i++) {
		src_addr = (uint32_t)sg[i].src_addr;
		dst_addr = (uint32_t)sg[i].dst_addr;
		len = (uint32_t)sg[i].len;

		dprintf("%s(%d): src %x dst %x len %d\n", __func__,
		    data->id, src_addr, dst_addr, len);

		desc = chan->descs[chan->idx_head];
		if (sg[i].direction == XDMA_MEM_TO_DEV)
			desc->phys = src_addr;
		else
			desc->phys = dst_addr;
		desc->status = 0;
		desc->control = len;
		if (sg[i].first == 1)
			desc->control |= BD_CONTROL_TXSOF;
		if (sg[i].last == 1)
			desc->control |= BD_CONTROL_TXEOF;

		tmp = chan->idx_head;

		atomic_add_int(&chan->descs_used_count, 1);
		chan->idx_head = axidma_next_desc(chan, chan->idx_head);
	}

	dprintf("%s(%d): _curdesc %lx\n", __func__, data->id,
	    READ8(sc, AXI_CURDESC(data->id)));
	dprintf("%s(%d): status %x\n", __func__, data->id,
	    READ4(sc, AXI_DMASR(data->id)));

	addr = chan->descs_phys[tmp];
	WRITE8(sc, AXI_TAILDESC(data->id), addr);

	return (0);
}

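/*
 * Prepare the channel for scatter-gather operation: allocate the descriptor
 * ring, link the descriptors into a circle through their next pointers,
 * point CURDESC at the first descriptor, and finally enable interrupts and
 * set the run/stop (RS) bit in the control register.
 */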
static int
axidma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
{
	xdma_controller_t *xdma;
	struct axidma_fdt_data *data;
	struct axidma_channel *chan;
	struct axidma_desc *desc;
	struct axidma_softc *sc;
	uint32_t addr;
	uint32_t reg;
	int ret;
	int i;

	sc = device_get_softc(dev);

	chan = (struct axidma_channel *)xchan->chan;
	xdma = xchan->xdma;
	data = xdma->data;

	dprintf("%s(%d)\n", __func__, data->id);

	ret = axidma_desc_alloc(sc, xchan, sizeof(struct axidma_desc));
	if (ret != 0) {
		device_printf(sc->dev,
		    "%s: Can't allocate descriptors.\n", __func__);
		return (-1);
	}

	for (i = 0; i < chan->descs_num; i++) {
		desc = chan->descs[i];
		bzero(desc, sizeof(struct axidma_desc));

		if (i == (chan->descs_num - 1))
			desc->next = chan->descs_phys[0];
		else
			desc->next = chan->descs_phys[i + 1];
		desc->status = 0;
		desc->control = 0;

		dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
		    data->id, i, (uint64_t)desc, le32toh(desc->next));
	}

	addr = chan->descs_phys[0];
	WRITE8(sc, AXI_CURDESC(data->id), addr);

	reg = READ4(sc, AXI_DMACR(data->id));
	reg |= DMACR_IOC_IRQEN | DMACR_DLY_IRQEN | DMACR_ERR_IRQEN;
	WRITE4(sc, AXI_DMACR(data->id), reg);
	reg |= DMACR_RS;
	WRITE4(sc, AXI_DMACR(data->id), reg);

	return (0);
}

static int
axidma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
{
	struct axidma_channel *chan;
	struct axidma_softc *sc;

	sc = device_get_softc(dev);

	chan = (struct axidma_channel *)xchan->chan;

	switch (cmd) {
	case XDMA_CMD_BEGIN:
	case XDMA_CMD_TERMINATE:
	case XDMA_CMD_PAUSE:
		/* TODO: implement me */
		return (-1);
	}

	return (0);
}

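/*
 * A consumer references a channel with a single FDT cell that selects the
 * hardware channel id.  A hypothetical binding could look like this (node
 * names, labels, addresses, interrupt numbers and the dma-names strings
 * below are illustrative, not taken from an existing device tree):
 *
 *	axidma0: eth-dma@10040000 {
 *		compatible = "xlnx,eth-dma";
 *		reg = <0x10040000 0x10000>;
 *		interrupts = <33 34>;
 *	};
 *
 *	ethernet@10020000 {
 *		dmas = <&axidma0 0>, <&axidma0 1>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * axidma_ofw_md_data() consumes that one cell and stores it as the channel
 * id used to index the DMACR/DMASR/CURDESC/TAILDESC register banks.
 */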
#ifdef FDT
static int
axidma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
{
	struct axidma_fdt_data *data;

	if (ncells != 1)
		return (-1);

	data = malloc(sizeof(struct axidma_fdt_data),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	data->id = cells[0];

	*ptr = data;

	return (0);
}
#endif

static device_method_t axidma_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			axidma_probe),
	DEVMETHOD(device_attach,		axidma_attach),
	DEVMETHOD(device_detach,		axidma_detach),

	/* xDMA Interface */
	DEVMETHOD(xdma_channel_alloc,		axidma_channel_alloc),
	DEVMETHOD(xdma_channel_free,		axidma_channel_free),
	DEVMETHOD(xdma_channel_control,		axidma_channel_control),

	/* xDMA SG Interface */
	DEVMETHOD(xdma_channel_capacity,	axidma_channel_capacity),
	DEVMETHOD(xdma_channel_prep_sg,		axidma_channel_prep_sg),
	DEVMETHOD(xdma_channel_submit_sg,	axidma_channel_submit_sg),

#ifdef FDT
	DEVMETHOD(xdma_ofw_md_data,		axidma_ofw_md_data),
#endif

	DEVMETHOD_END
};

static driver_t axidma_driver = {
	"axidma",
	axidma_methods,
	sizeof(struct axidma_softc),
};

static devclass_t axidma_devclass;

EARLY_DRIVER_MODULE(axidma, simplebus, axidma_driver, axidma_devclass, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);