/*-
 * Copyright (c) 2016 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Allwinner DMA controller
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/endian.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <arm/allwinner/a10_dmac.h>
#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>

#include "sunxi_dma_if.h"
#define	DMA_IRQ_EN_REG0		0x00
#define	DMA_IRQ_EN_REG1		0x04
#define	DMA_IRQ_EN_REG(ch)	(DMA_IRQ_EN_REG0 + ((ch) / 8) * 4)
#define	 DMA_PKG_IRQ_EN(ch)	(1 << (((ch) % 8) * 4 + 1))
#define	 DMA_PKG_IRQ_MASK	0x2222222222222222ULL
#define	DMA_IRQ_PEND_REG0	0x10
#define	DMA_IRQ_PEND_REG1	0x14
#define	DMA_IRQ_PEND_REG(ch)	(DMA_IRQ_PEND_REG0 + ((ch) / 8) * 4)
#define	DMA_STA_REG		0x30
#define	DMA_EN_REG(n)		(0x100 + (n) * 0x40 + 0x00)
#define	 DMA_EN			(1 << 0)
#define	DMA_PAU_REG(n)		(0x100 + (n) * 0x40 + 0x04)
#define	DMA_STAR_ADDR_REG(n)	(0x100 + (n) * 0x40 + 0x08)
#define	DMA_CFG_REG(n)		(0x100 + (n) * 0x40 + 0x0c)
#define	 DMA_DEST_DATA_WIDTH		(0x3 << 25)
#define	 DMA_DEST_DATA_WIDTH_SHIFT	25
#define	 DMA_DEST_BST_LEN		(0x3 << 22)
#define	 DMA_DEST_BST_LEN_SHIFT		22
#define	 DMA_DEST_ADDR_MODE		(0x1 << 21)
#define	 DMA_DEST_ADDR_MODE_SHIFT	21
#define	 DMA_DEST_DRQ_TYPE		(0x1f << 16)
#define	 DMA_DEST_DRQ_TYPE_SHIFT	16
#define	 DMA_SRC_DATA_WIDTH		(0x3 << 9)
#define	 DMA_SRC_DATA_WIDTH_SHIFT	9
#define	 DMA_SRC_BST_LEN		(0x3 << 6)
#define	 DMA_SRC_BST_LEN_SHIFT		6
#define	 DMA_SRC_ADDR_MODE		(0x1 << 5)
#define	 DMA_SRC_ADDR_MODE_SHIFT	5
#define	 DMA_SRC_DRQ_TYPE		(0x1f << 0)
#define	 DMA_SRC_DRQ_TYPE_SHIFT		0
#define	 DMA_DATA_WIDTH_8BIT		0
#define	 DMA_DATA_WIDTH_16BIT		1
#define	 DMA_DATA_WIDTH_32BIT		2
#define	 DMA_DATA_WIDTH_64BIT		3
#define	 DMA_ADDR_MODE_LINEAR		0
#define	 DMA_ADDR_MODE_IO		1
#define	 DMA_BST_LEN_1			0
#define	 DMA_BST_LEN_4			1
#define	 DMA_BST_LEN_8			2
#define	 DMA_BST_LEN_16			3
#define	DMA_CUR_SRC_REG(n)	(0x100 + (n) * 0x40 + 0x10)
#define	DMA_CUR_DEST_REG(n)	(0x100 + (n) * 0x40 + 0x14)
#define	DMA_BCNT_LEFT_REG(n)	(0x100 + (n) * 0x40 + 0x18)
#define	DMA_PARA_REG(n)		(0x100 + (n) * 0x40 + 0x1c)
#define	 WAIT_CYC			(0xff << 0)
#define	 WAIT_CYC_SHIFT			0

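/*
 * Per-channel registers live in a 0x40-byte window starting at 0x100, and
 * the IRQ enable/pending bits are split across two 32-bit registers with
 * four status bits per channel (the "package complete" bit at offset 1).
 *
 * The controller fetches transfer parameters from an in-memory descriptor.
 * All fields are 32-bit little-endian values as seen by the hardware; a
 * "next" pointer of DMA_NULL terminates the chain.  This driver uses a
 * single descriptor per channel.
 */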
struct a31dmac_desc {
	uint32_t		config;
	uint32_t		srcaddr;
	uint32_t		dstaddr;
	uint32_t		bcnt;
	uint32_t		para;
	uint32_t		next;
#define	DMA_NULL		0xfffff800
};
#define	DESC_ALIGN		4
#define	DESC_SIZE		sizeof(struct a31dmac_desc)

struct a31dmac_config {
	u_int			nchans;
};

static const struct a31dmac_config a31_config = { .nchans = 16 };
static const struct a31dmac_config h3_config = { .nchans = 12 };
static const struct a31dmac_config a83t_config = { .nchans = 8 };
static const struct a31dmac_config a64_config = { .nchans = 8 };

static struct ofw_compat_data compat_data[] = {
	{ "allwinner,sun6i-a31-dma",	(uintptr_t)&a31_config },
	{ "allwinner,sun8i-a83t-dma",	(uintptr_t)&a83t_config },
	{ "allwinner,sun8i-h3-dma",	(uintptr_t)&h3_config },
	{ "allwinner,sun50i-a64-dma",	(uintptr_t)&a64_config },
	{ NULL,				(uintptr_t)NULL }
};

struct a31dmac_softc;

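/*
 * Per-channel software state: the owner's completion callback and the
 * bus_dma mapping of this channel's hardware descriptor.  A channel is
 * considered free when its callback pointer is NULL.
 */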
struct a31dmac_channel {
	struct a31dmac_softc *		sc;
	uint8_t				index;
	void				(*callback)(void *);
	void *				callbackarg;

	bus_dmamap_t			dmamap;
	struct a31dmac_desc		*desc;
	bus_addr_t			physaddr;
};

struct a31dmac_softc {
	struct resource *		res[2];
	struct mtx			mtx;
	void *				ih;

	bus_dma_tag_t			dmat;

	u_int				nchans;
	struct a31dmac_channel *	chans;
};

static struct resource_spec a31dmac_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

#define	DMA_READ(sc, reg)	bus_read_4((sc)->res[0], (reg))
#define	DMA_WRITE(sc, reg, val)	bus_write_4((sc)->res[0], (reg), (val))

static void a31dmac_intr(void *);
static void a31dmac_dmamap_cb(void *, bus_dma_segment_t *, int, int);

static int
a31dmac_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);

	device_set_desc(dev, "Allwinner DMA controller");
	return (BUS_PROBE_DEFAULT);
}

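/*
 * Attach: enable the controller's clock and de-assert its reset, create a
 * DMA tag for descriptors, allocate and map one coherent descriptor per
 * channel, mask and clear all interrupts, install the interrupt handler,
 * and register this device's OF node so DMA clients can look it up by xref.
 */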
static int
a31dmac_attach(device_t dev)
{
	struct a31dmac_softc *sc;
	struct a31dmac_config *conf;
	u_int index;
	hwreset_t rst;
	clk_t clk;
	int error;

	sc = device_get_softc(dev);
	conf = (void *)ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	clk = NULL;
	rst = NULL;

	if (bus_alloc_resources(dev, a31dmac_spec, sc->res)) {
		device_printf(dev, "cannot allocate resources for device\n");
		return (ENXIO);
	}

	mtx_init(&sc->mtx, "a31 dmac", NULL, MTX_SPIN);

	/* Clock and reset setup */
	if (clk_get_by_ofw_index(dev, 0, 0, &clk) != 0) {
		device_printf(dev, "cannot get clock\n");
		goto fail;
	}
	if (clk_enable(clk) != 0) {
		device_printf(dev, "cannot enable clock\n");
		goto fail;
	}
	if (hwreset_get_by_ofw_idx(dev, 0, 0, &rst) != 0) {
		device_printf(dev, "cannot get hwreset\n");
		goto fail;
	}
	if (hwreset_deassert(rst) != 0) {
		device_printf(dev, "cannot de-assert reset\n");
		goto fail;
	}

	/* Descriptor DMA */
	error = bus_dma_tag_create(
		bus_get_dma_tag(dev),		/* Parent tag */
		DESC_ALIGN, 0,			/* alignment, boundary */
		BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
		BUS_SPACE_MAXADDR,		/* highaddr */
		NULL, NULL,			/* filter, filterarg */
		DESC_SIZE, 1,			/* maxsize, nsegs */
		DESC_SIZE,			/* maxsegsize */
		0,				/* flags */
		NULL, NULL,			/* lockfunc, lockarg */
		&sc->dmat);
	if (error != 0) {
		device_printf(dev, "cannot create dma tag\n");
		goto fail;
	}

	/* Disable all interrupts and clear pending status */
	DMA_WRITE(sc, DMA_IRQ_EN_REG0, 0);
	DMA_WRITE(sc, DMA_IRQ_EN_REG1, 0);
	DMA_WRITE(sc, DMA_IRQ_PEND_REG0, ~0);
	DMA_WRITE(sc, DMA_IRQ_PEND_REG1, ~0);

	/* Initialize channels */
	sc->nchans = conf->nchans;
	sc->chans = malloc(sizeof(*sc->chans) * sc->nchans, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	for (index = 0; index < sc->nchans; index++) {
		sc->chans[index].sc = sc;
		sc->chans[index].index = index;
		sc->chans[index].callback = NULL;
		sc->chans[index].callbackarg = NULL;

		error = bus_dmamem_alloc(sc->dmat,
		    (void **)&sc->chans[index].desc,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		    &sc->chans[index].dmamap);
		if (error != 0) {
			device_printf(dev, "cannot allocate dma mem\n");
			goto fail;
		}
		error = bus_dmamap_load(sc->dmat, sc->chans[index].dmamap,
		    sc->chans[index].desc, sizeof(*sc->chans[index].desc),
		    a31dmac_dmamap_cb, &sc->chans[index], BUS_DMA_WAITOK);
		if (error != 0) {
			device_printf(dev, "cannot load dma map\n");
			goto fail;
		}

		DMA_WRITE(sc, DMA_EN_REG(index), 0);
	}

	error = bus_setup_intr(dev, sc->res[1], INTR_MPSAFE | INTR_TYPE_MISC,
	    NULL, a31dmac_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler\n");
		bus_release_resources(dev, a31dmac_spec, sc->res);
		mtx_destroy(&sc->mtx);
		return (ENXIO);
	}

	OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
	return (0);

fail:
	for (index = 0; index < sc->nchans; index++)
		if (sc->chans[index].desc != NULL) {
			bus_dmamap_unload(sc->dmat, sc->chans[index].dmamap);
			bus_dmamem_free(sc->dmat, sc->chans[index].desc,
			    sc->chans[index].dmamap);
		}
	if (sc->chans != NULL)
		free(sc->chans, M_DEVBUF);
	if (sc->ih != NULL)
		bus_teardown_intr(dev, sc->res[1], sc->ih);
	if (rst != NULL)
		hwreset_release(rst);
	if (clk != NULL)
		clk_release(clk);
	bus_release_resources(dev, a31dmac_spec, sc->res);

	return (ENXIO);
}

static void
a31dmac_dmamap_cb(void *priv, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct a31dmac_channel *ch;

	if (error != 0)
		return;

	ch = priv;
	ch->physaddr = segs[0].ds_addr;
}

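/*
 * Interrupt handler.  Both 32-bit pending registers are read and folded
 * into a single 64-bit word (four status bits per channel), the pending
 * bits are acknowledged, and the completion callback of each channel with
 * a pending package-complete interrupt is invoked.
 */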
static void
a31dmac_intr(void *priv)
{
	struct a31dmac_softc *sc;
	uint32_t pend0, pend1, bit;
	uint64_t pend, mask;
	u_int index;

	sc = priv;
	pend0 = DMA_READ(sc, DMA_IRQ_PEND_REG0);
	pend1 = sc->nchans > 8 ? DMA_READ(sc, DMA_IRQ_PEND_REG1) : 0;
	if (pend0 == 0 && pend1 == 0)
		return;

	if (pend0 != 0)
		DMA_WRITE(sc, DMA_IRQ_PEND_REG0, pend0);
	if (pend1 != 0)
		DMA_WRITE(sc, DMA_IRQ_PEND_REG1, pend1);

	pend = pend0 | ((uint64_t)pend1 << 32);

	while ((bit = ffsll(pend & DMA_PKG_IRQ_MASK)) != 0) {
		mask = (1ULL << (bit - 1));
		pend &= ~mask;
		index = (bit - 1) / 4;

		if (index >= sc->nchans)
			continue;
		if (sc->chans[index].callback == NULL)
			continue;
		sc->chans[index].callback(sc->chans[index].callbackarg);
	}
}

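/*
 * Translate a generic sunxi_dma_config into this controller's descriptor
 * "config" and "para" words: data width, burst length, address mode
 * (linear or fixed I/O) and DRQ type for both endpoints, plus the wait
 * cycle count.  The hardware has a single wait-cycle field, so differing
 * source and destination wait cycles are rejected.
 */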
static int
a31dmac_set_config(device_t dev, void *priv, const struct sunxi_dma_config *cfg)
{
	struct a31dmac_channel *ch;
	uint32_t config, para;
	unsigned int dst_dw, dst_bl, dst_wc, dst_am;
	unsigned int src_dw, src_bl, src_wc, src_am;

	ch = priv;

	switch (cfg->dst_width) {
	case 8:
		dst_dw = DMA_DATA_WIDTH_8BIT;
		break;
	case 16:
		dst_dw = DMA_DATA_WIDTH_16BIT;
		break;
	case 32:
		dst_dw = DMA_DATA_WIDTH_32BIT;
		break;
	case 64:
		dst_dw = DMA_DATA_WIDTH_64BIT;
		break;
	default:
		return (EINVAL);
	}
	switch (cfg->dst_burst_len) {
	case 1:
		dst_bl = DMA_BST_LEN_1;
		break;
	case 4:
		dst_bl = DMA_BST_LEN_4;
		break;
	case 8:
		dst_bl = DMA_BST_LEN_8;
		break;
	case 16:
		dst_bl = DMA_BST_LEN_16;
		break;
	default:
		return (EINVAL);
	}
	switch (cfg->src_width) {
	case 8:
		src_dw = DMA_DATA_WIDTH_8BIT;
		break;
	case 16:
		src_dw = DMA_DATA_WIDTH_16BIT;
		break;
	case 32:
		src_dw = DMA_DATA_WIDTH_32BIT;
		break;
	case 64:
		src_dw = DMA_DATA_WIDTH_64BIT;
		break;
	default:
		return (EINVAL);
	}
	switch (cfg->src_burst_len) {
	case 1:
		src_bl = DMA_BST_LEN_1;
		break;
	case 4:
		src_bl = DMA_BST_LEN_4;
		break;
	case 8:
		src_bl = DMA_BST_LEN_8;
		break;
	case 16:
		src_bl = DMA_BST_LEN_16;
		break;
	default:
		return (EINVAL);
	}
	dst_am = cfg->dst_noincr ? DMA_ADDR_MODE_IO : DMA_ADDR_MODE_LINEAR;
	src_am = cfg->src_noincr ? DMA_ADDR_MODE_IO : DMA_ADDR_MODE_LINEAR;
	dst_wc = cfg->dst_wait_cyc;
	src_wc = cfg->src_wait_cyc;
	if (dst_wc != src_wc)
		return (EINVAL);

	config = (dst_dw << DMA_DEST_DATA_WIDTH_SHIFT) |
		 (dst_bl << DMA_DEST_BST_LEN_SHIFT) |
		 (dst_am << DMA_DEST_ADDR_MODE_SHIFT) |
		 (cfg->dst_drqtype << DMA_DEST_DRQ_TYPE_SHIFT) |
		 (src_dw << DMA_SRC_DATA_WIDTH_SHIFT) |
		 (src_bl << DMA_SRC_BST_LEN_SHIFT) |
		 (src_am << DMA_SRC_ADDR_MODE_SHIFT) |
		 (cfg->src_drqtype << DMA_SRC_DRQ_TYPE_SHIFT);
	para = (dst_wc << WAIT_CYC_SHIFT);

	ch->desc->config = htole32(config);
	ch->desc->para = htole32(para);

	return (0);
}

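/*
 * Allocate a channel on behalf of a client device.  The first channel
 * without a registered callback is claimed, the client's callback and
 * argument are recorded, and the channel's package-complete interrupt is
 * enabled.  Returns NULL if every channel is busy; a31dmac_free() is the
 * counterpart that releases the channel.
 */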
static void *
a31dmac_alloc(device_t dev, bool dedicated, void (*cb)(void *), void *cbarg)
{
	struct a31dmac_softc *sc;
	struct a31dmac_channel *ch;
	uint32_t irqen;
	u_int index;

	sc = device_get_softc(dev);
	ch = NULL;

	mtx_lock_spin(&sc->mtx);
	for (index = 0; index < sc->nchans; index++) {
		if (sc->chans[index].callback == NULL) {
			ch = &sc->chans[index];
			ch->callback = cb;
			ch->callbackarg = cbarg;

			irqen = DMA_READ(sc, DMA_IRQ_EN_REG(index));
			irqen |= DMA_PKG_IRQ_EN(index);
			DMA_WRITE(sc, DMA_IRQ_EN_REG(index), irqen);
			break;
		}
	}
	mtx_unlock_spin(&sc->mtx);

	return (ch);
}

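/*
 * Release a channel: disable and acknowledge its package-complete
 * interrupt and clear the callback so the channel can be reallocated.
 */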
static void
a31dmac_free(device_t dev, void *priv)
{
	struct a31dmac_channel *ch;
	struct a31dmac_softc *sc;
	uint32_t irqen;
	u_int index;

	ch = priv;
	sc = ch->sc;
	index = ch->index;

	mtx_lock_spin(&sc->mtx);

	irqen = DMA_READ(sc, DMA_IRQ_EN_REG(index));
	irqen &= ~DMA_PKG_IRQ_EN(index);
	DMA_WRITE(sc, DMA_IRQ_EN_REG(index), irqen);
	DMA_WRITE(sc, DMA_IRQ_PEND_REG(index), DMA_PKG_IRQ_EN(index));

	ch->callback = NULL;
	ch->callbackarg = NULL;

	mtx_unlock_spin(&sc->mtx);
}

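/*
 * Start a single-descriptor transfer: fill in the source and destination
 * bus addresses and the byte count, terminate the chain with DMA_NULL,
 * point the channel's start-address register at the descriptor's physical
 * address, and set the channel enable bit.  Completion is reported through
 * the callback registered at allocation time.
 */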
static int
a31dmac_transfer(device_t dev, void *priv, bus_addr_t src, bus_addr_t dst,
    size_t nbytes)
{
	struct a31dmac_channel *ch;
	struct a31dmac_softc *sc;

	ch = priv;
	sc = ch->sc;

	ch->desc->srcaddr = htole32((uint32_t)src);
	ch->desc->dstaddr = htole32((uint32_t)dst);
	ch->desc->bcnt = htole32(nbytes);
	ch->desc->next = htole32(DMA_NULL);

	DMA_WRITE(sc, DMA_STAR_ADDR_REG(ch->index), (uint32_t)ch->physaddr);
	DMA_WRITE(sc, DMA_EN_REG(ch->index), DMA_EN);

	return (0);
}

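/* Stop a channel by clearing its enable bit. */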
static void
a31dmac_halt(device_t dev, void *priv)
{
	struct a31dmac_channel *ch;
	struct a31dmac_softc *sc;

	ch = priv;
	sc = ch->sc;

	DMA_WRITE(sc, DMA_EN_REG(ch->index), 0);
}

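/*
 * The functions above are exported through the sunxi_dma kobj interface
 * declared in sunxi_dma_if.m.  As an illustrative sketch only (not taken
 * from a real consumer; the exact macro names and config fields depend on
 * the interface file), a client driver would typically do something like:
 *
 *	void *dmachan;
 *	struct sunxi_dma_config cfg = { ... };
 *
 *	dmachan = SUNXI_DMA_ALLOC(dma_dev, false, my_callback, my_arg);
 *	SUNXI_DMA_SET_CONFIG(dma_dev, dmachan, &cfg);
 *	SUNXI_DMA_TRANSFER(dma_dev, dmachan, src_paddr, dst_paddr, len);
 *	... my_callback() runs from a31dmac_intr() on completion ...
 *	SUNXI_DMA_FREE(dma_dev, dmachan);
 */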
static device_method_t a31dmac_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		a31dmac_probe),
	DEVMETHOD(device_attach,	a31dmac_attach),

	/* sunxi DMA interface */
	DEVMETHOD(sunxi_dma_alloc,	a31dmac_alloc),
	DEVMETHOD(sunxi_dma_free,	a31dmac_free),
	DEVMETHOD(sunxi_dma_set_config,	a31dmac_set_config),
	DEVMETHOD(sunxi_dma_transfer,	a31dmac_transfer),
	DEVMETHOD(sunxi_dma_halt,	a31dmac_halt),

	DEVMETHOD_END
};

static driver_t a31dmac_driver = {
	"a31dmac",
	a31dmac_methods,
	sizeof(struct a31dmac_softc)
};

static devclass_t a31dmac_devclass;

DRIVER_MODULE(a31dmac, simplebus, a31dmac_driver, a31dmac_devclass, 0, 0);