1/*-
2 * Copyright (c) 2014-2016 Jared D. McNeill <jmcneill@invisible.ca>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
19 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
21 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28/*
29 * Allwinner A10/A20 DMA controller
30 */
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: stable/11/sys/arm/allwinner/a10_dmac.c 309759 2016-12-09 20:21:48Z manu $");
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/bus.h>
38#include <sys/rman.h>
39#include <sys/condvar.h>
40#include <sys/kernel.h>
41#include <sys/module.h>
42
43#include <machine/bus.h>
44
45#include <dev/ofw/ofw_bus.h>
46#include <dev/ofw/ofw_bus_subr.h>
47
48#include <arm/allwinner/a10_dmac.h>
49#include <dev/extres/clk/clk.h>
50
51#include "sunxi_dma_if.h"
52
#define	NDMA_CHANNELS	8	/* number of "normal" DMA channels */
#define	DDMA_CHANNELS	8	/* number of "dedicated" DMA channels */

/* Hardware channel class; NDMA and DDMA have separate register layouts. */
enum a10dmac_type {
	CH_NDMA,
	CH_DDMA
};

struct a10dmac_softc;

/*
 * Per-channel state. A channel is considered free when ch_callback is
 * NULL (see a10dmac_alloc/a10dmac_free).
 */
struct a10dmac_channel {
	struct a10dmac_softc *	ch_sc;		/* back pointer to controller */
	uint8_t			ch_index;	/* channel number within its type */
	enum a10dmac_type	ch_type;	/* CH_NDMA or CH_DDMA */
	void			(*ch_callback)(void *);	/* end-of-transfer callback */
	void *			ch_callbackarg;	/* argument for ch_callback */
	uint32_t		ch_regoff;	/* register offset of this channel */
};

/* Per-device (controller) state. */
struct a10dmac_softc {
	struct resource *	sc_res[2];	/* [0] = memory, [1] = IRQ */
	struct mtx		sc_mtx;		/* spin lock protecting channel alloc */
	void *			sc_ih;		/* interrupt handler cookie */

	struct a10dmac_channel	sc_ndma_channels[NDMA_CHANNELS];
	struct a10dmac_channel	sc_ddma_channels[DDMA_CHANNELS];
};

static struct resource_spec a10dmac_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

/* Controller-level register accessors. */
#define	DMA_READ(sc, reg)	bus_read_4((sc)->sc_res[0], (reg))
#define	DMA_WRITE(sc, reg, val)	bus_write_4((sc)->sc_res[0], (reg), (val))
/* Channel-level accessors: apply the channel's register offset. */
#define	DMACH_READ(ch, reg)		\
    DMA_READ((ch)->ch_sc, (reg) + (ch)->ch_regoff)
#define	DMACH_WRITE(ch, reg, val)	\
    DMA_WRITE((ch)->ch_sc, (reg) + (ch)->ch_regoff, (val))

static void a10dmac_intr(void *);
95
96static int
97a10dmac_probe(device_t dev)
98{
99	if (!ofw_bus_status_okay(dev))
100		return (ENXIO);
101
102	if (!ofw_bus_is_compatible(dev, "allwinner,sun4i-a10-dma"))
103		return (ENXIO);
104
105	device_set_desc(dev, "Allwinner DMA controller");
106	return (BUS_PROBE_DEFAULT);
107}
108
109static int
110a10dmac_attach(device_t dev)
111{
112	struct a10dmac_softc *sc;
113	unsigned int index;
114	clk_t clk;
115	int error;
116
117	sc = device_get_softc(dev);
118
119	if (bus_alloc_resources(dev, a10dmac_spec, sc->sc_res)) {
120		device_printf(dev, "cannot allocate resources for device\n");
121		return (ENXIO);
122	}
123
124	mtx_init(&sc->sc_mtx, "a10 dmac", NULL, MTX_SPIN);
125
126	/* Activate DMA controller clock */
127	error = clk_get_by_ofw_index(dev, 0, 0, &clk);
128	if (error != 0) {
129		device_printf(dev, "cannot get clock\n");
130		return (error);
131	}
132	error = clk_enable(clk);
133	if (error != 0) {
134		device_printf(dev, "cannot enable clock\n");
135		return (error);
136	}
137
138	/* Disable all interrupts and clear pending status */
139	DMA_WRITE(sc, AWIN_DMA_IRQ_EN_REG, 0);
140	DMA_WRITE(sc, AWIN_DMA_IRQ_PEND_STA_REG, ~0);
141
142	/* Initialize channels */
143	for (index = 0; index < NDMA_CHANNELS; index++) {
144		sc->sc_ndma_channels[index].ch_sc = sc;
145		sc->sc_ndma_channels[index].ch_index = index;
146		sc->sc_ndma_channels[index].ch_type = CH_NDMA;
147		sc->sc_ndma_channels[index].ch_callback = NULL;
148		sc->sc_ndma_channels[index].ch_callbackarg = NULL;
149		sc->sc_ndma_channels[index].ch_regoff = AWIN_NDMA_REG(index);
150		DMACH_WRITE(&sc->sc_ndma_channels[index], AWIN_NDMA_CTL_REG, 0);
151	}
152	for (index = 0; index < DDMA_CHANNELS; index++) {
153		sc->sc_ddma_channels[index].ch_sc = sc;
154		sc->sc_ddma_channels[index].ch_index = index;
155		sc->sc_ddma_channels[index].ch_type = CH_DDMA;
156		sc->sc_ddma_channels[index].ch_callback = NULL;
157		sc->sc_ddma_channels[index].ch_callbackarg = NULL;
158		sc->sc_ddma_channels[index].ch_regoff = AWIN_DDMA_REG(index);
159		DMACH_WRITE(&sc->sc_ddma_channels[index], AWIN_DDMA_CTL_REG, 0);
160	}
161
162	error = bus_setup_intr(dev, sc->sc_res[1], INTR_MPSAFE | INTR_TYPE_MISC,
163	    NULL, a10dmac_intr, sc, &sc->sc_ih);
164	if (error != 0) {
165		device_printf(dev, "could not setup interrupt handler\n");
166		bus_release_resources(dev, a10dmac_spec, sc->sc_res);
167		mtx_destroy(&sc->sc_mtx);
168		return (ENXIO);
169	}
170
171	OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
172	return (0);
173}
174
/*
 * Interrupt handler: acknowledge all pending DMA interrupt status, then
 * invoke the end-of-transfer callback for each channel whose "end" bit
 * was set.
 */
static void
a10dmac_intr(void *priv)
{
	struct a10dmac_softc *sc = priv;
	uint32_t sta, bit, mask;
	uint8_t index;

	/*
	 * Read and acknowledge everything up front. Note this write also
	 * clears any half-transfer pending bits, which are never enabled
	 * or dispatched by this driver (a10dmac_alloc() only unmasks the
	 * *_END interrupts).
	 */
	sta = DMA_READ(sc, AWIN_DMA_IRQ_PEND_STA_REG);
	DMA_WRITE(sc, AWIN_DMA_IRQ_PEND_STA_REG, sta);

	/* Dispatch one callback per asserted end-of-transfer bit. */
	while ((bit = ffs(sta & AWIN_DMA_IRQ_END_MASK)) != 0) {
		mask = (1U << (bit - 1));
		sta &= ~mask;
		/*
		 * Map status bit to channel number. The status register is
		 * encoded with two bits of status per channel (lowest bit
		 * is half transfer pending, highest bit is end transfer
		 * pending). The 8 normal DMA channel status are in the lower
		 * 16 bits and the 8 dedicated DMA channel status are in
		 * the upper 16 bits. The output is a channel number from 0-7.
		 */
		index = ((bit - 1) / 2) & 7;
		if (mask & AWIN_DMA_IRQ_NDMA) {
			/* Channel freed (or never allocated): nothing to do. */
			if (sc->sc_ndma_channels[index].ch_callback == NULL)
				continue;
			sc->sc_ndma_channels[index].ch_callback(
			    sc->sc_ndma_channels[index].ch_callbackarg);
		} else {
			if (sc->sc_ddma_channels[index].ch_callback == NULL)
				continue;
			sc->sc_ddma_channels[index].ch_callback(
			    sc->sc_ddma_channels[index].ch_callbackarg);
		}
	}
}
210
211static uint32_t
212a10dmac_read_ctl(struct a10dmac_channel *ch)
213{
214	if (ch->ch_type == CH_NDMA) {
215		return (DMACH_READ(ch, AWIN_NDMA_CTL_REG));
216	} else {
217		return (DMACH_READ(ch, AWIN_DDMA_CTL_REG));
218	}
219}
220
221static void
222a10dmac_write_ctl(struct a10dmac_channel *ch, uint32_t val)
223{
224	if (ch->ch_type == CH_NDMA) {
225		DMACH_WRITE(ch, AWIN_NDMA_CTL_REG, val);
226	} else {
227		DMACH_WRITE(ch, AWIN_DDMA_CTL_REG, val);
228	}
229}
230
231static int
232a10dmac_set_config(device_t dev, void *priv, const struct sunxi_dma_config *cfg)
233{
234	struct a10dmac_channel *ch = priv;
235	uint32_t val;
236	unsigned int dst_dw, dst_bl, dst_bs, dst_wc, dst_am;
237	unsigned int src_dw, src_bl, src_bs, src_wc, src_am;
238
239	switch (cfg->dst_width) {
240	case 8:
241		dst_dw = AWIN_DMA_CTL_DATA_WIDTH_8;
242		break;
243	case 16:
244		dst_dw = AWIN_DMA_CTL_DATA_WIDTH_16;
245		break;
246	case 32:
247		dst_dw = AWIN_DMA_CTL_DATA_WIDTH_32;
248		break;
249	default:
250		return (EINVAL);
251	}
252	switch (cfg->dst_burst_len) {
253	case 1:
254		dst_bl = AWIN_DMA_CTL_BURST_LEN_1;
255		break;
256	case 4:
257		dst_bl = AWIN_DMA_CTL_BURST_LEN_4;
258		break;
259	case 8:
260		dst_bl = AWIN_DMA_CTL_BURST_LEN_8;
261		break;
262	default:
263		return (EINVAL);
264	}
265	switch (cfg->src_width) {
266	case 8:
267		src_dw = AWIN_DMA_CTL_DATA_WIDTH_8;
268		break;
269	case 16:
270		src_dw = AWIN_DMA_CTL_DATA_WIDTH_16;
271		break;
272	case 32:
273		src_dw = AWIN_DMA_CTL_DATA_WIDTH_32;
274		break;
275	default:
276		return (EINVAL);
277	}
278	switch (cfg->src_burst_len) {
279	case 1:
280		src_bl = AWIN_DMA_CTL_BURST_LEN_1;
281		break;
282	case 4:
283		src_bl = AWIN_DMA_CTL_BURST_LEN_4;
284		break;
285	case 8:
286		src_bl = AWIN_DMA_CTL_BURST_LEN_8;
287		break;
288	default:
289		return (EINVAL);
290	}
291
292	val = (dst_dw << AWIN_DMA_CTL_DST_DATA_WIDTH_SHIFT) |
293	      (dst_bl << AWIN_DMA_CTL_DST_BURST_LEN_SHIFT) |
294	      (cfg->dst_drqtype << AWIN_DMA_CTL_DST_DRQ_TYPE_SHIFT) |
295	      (src_dw << AWIN_DMA_CTL_SRC_DATA_WIDTH_SHIFT) |
296	      (src_bl << AWIN_DMA_CTL_SRC_BURST_LEN_SHIFT) |
297	      (cfg->src_drqtype << AWIN_DMA_CTL_SRC_DRQ_TYPE_SHIFT);
298
299	if (ch->ch_type == CH_NDMA) {
300		if (cfg->dst_noincr)
301			val |= AWIN_NDMA_CTL_DST_ADDR_NOINCR;
302		if (cfg->src_noincr)
303			val |= AWIN_NDMA_CTL_SRC_ADDR_NOINCR;
304
305		DMACH_WRITE(ch, AWIN_NDMA_CTL_REG, val);
306	} else {
307		dst_am = cfg->dst_noincr ? AWIN_DDMA_CTL_DMA_ADDR_IO :
308		    AWIN_DDMA_CTL_DMA_ADDR_LINEAR;
309		src_am = cfg->src_noincr ? AWIN_DDMA_CTL_DMA_ADDR_IO :
310		    AWIN_DDMA_CTL_DMA_ADDR_LINEAR;
311
312		val |= (dst_am << AWIN_DDMA_CTL_DST_ADDR_MODE_SHIFT);
313		val |= (src_am << AWIN_DDMA_CTL_SRC_ADDR_MODE_SHIFT);
314
315		DMACH_WRITE(ch, AWIN_DDMA_CTL_REG, val);
316
317		dst_bs = cfg->dst_blksize - 1;
318		dst_wc = cfg->dst_wait_cyc - 1;
319		src_bs = cfg->src_blksize - 1;
320		src_wc = cfg->src_wait_cyc - 1;
321
322		DMACH_WRITE(ch, AWIN_DDMA_PARA_REG,
323		    (dst_bs << AWIN_DDMA_PARA_DST_DATA_BLK_SIZ_SHIFT) |
324		    (dst_wc << AWIN_DDMA_PARA_DST_WAIT_CYC_SHIFT) |
325		    (src_bs << AWIN_DDMA_PARA_SRC_DATA_BLK_SIZ_SHIFT) |
326		    (src_wc << AWIN_DDMA_PARA_SRC_WAIT_CYC_SHIFT));
327	}
328
329	return (0);
330}
331
332static void *
333a10dmac_alloc(device_t dev, bool dedicated, void (*cb)(void *), void *cbarg)
334{
335	struct a10dmac_softc *sc = device_get_softc(dev);
336	struct a10dmac_channel *ch_list;
337	struct a10dmac_channel *ch = NULL;
338	uint32_t irqen;
339	uint8_t ch_count, index;
340
341	if (dedicated) {
342		ch_list = sc->sc_ddma_channels;
343		ch_count = DDMA_CHANNELS;
344	} else {
345		ch_list = sc->sc_ndma_channels;
346		ch_count = NDMA_CHANNELS;
347	}
348
349	mtx_lock_spin(&sc->sc_mtx);
350	for (index = 0; index < ch_count; index++) {
351		if (ch_list[index].ch_callback == NULL) {
352			ch = &ch_list[index];
353			ch->ch_callback = cb;
354			ch->ch_callbackarg = cbarg;
355
356			irqen = DMA_READ(sc, AWIN_DMA_IRQ_EN_REG);
357			if (ch->ch_type == CH_NDMA)
358				irqen |= AWIN_DMA_IRQ_NDMA_END(index);
359			else
360				irqen |= AWIN_DMA_IRQ_DDMA_END(index);
361			DMA_WRITE(sc, AWIN_DMA_IRQ_EN_REG, irqen);
362
363			break;
364		}
365	}
366	mtx_unlock_spin(&sc->sc_mtx);
367
368	return (ch);
369}
370
371static void
372a10dmac_free(device_t dev, void *priv)
373{
374	struct a10dmac_channel *ch = priv;
375	struct a10dmac_softc *sc = ch->ch_sc;
376	uint32_t irqen, sta, cfg;
377
378	mtx_lock_spin(&sc->sc_mtx);
379
380	irqen = DMA_READ(sc, AWIN_DMA_IRQ_EN_REG);
381	cfg = a10dmac_read_ctl(ch);
382	if (ch->ch_type == CH_NDMA) {
383		sta = AWIN_DMA_IRQ_NDMA_END(ch->ch_index);
384		cfg &= ~AWIN_NDMA_CTL_DMA_LOADING;
385	} else {
386		sta = AWIN_DMA_IRQ_DDMA_END(ch->ch_index);
387		cfg &= ~AWIN_DDMA_CTL_DMA_LOADING;
388	}
389	irqen &= ~sta;
390	a10dmac_write_ctl(ch, cfg);
391	DMA_WRITE(sc, AWIN_DMA_IRQ_EN_REG, irqen);
392	DMA_WRITE(sc, AWIN_DMA_IRQ_PEND_STA_REG, sta);
393
394	ch->ch_callback = NULL;
395	ch->ch_callbackarg = NULL;
396
397	mtx_unlock_spin(&sc->sc_mtx);
398}
399
400static int
401a10dmac_transfer(device_t dev, void *priv, bus_addr_t src, bus_addr_t dst,
402    size_t nbytes)
403{
404	struct a10dmac_channel *ch = priv;
405	uint32_t cfg;
406
407	cfg = a10dmac_read_ctl(ch);
408	if (ch->ch_type == CH_NDMA) {
409		if (cfg & AWIN_NDMA_CTL_DMA_LOADING)
410			return (EBUSY);
411
412		DMACH_WRITE(ch, AWIN_NDMA_SRC_ADDR_REG, src);
413		DMACH_WRITE(ch, AWIN_NDMA_DEST_ADDR_REG, dst);
414		DMACH_WRITE(ch, AWIN_NDMA_BC_REG, nbytes);
415
416		cfg |= AWIN_NDMA_CTL_DMA_LOADING;
417		a10dmac_write_ctl(ch, cfg);
418	} else {
419		if (cfg & AWIN_DDMA_CTL_DMA_LOADING)
420			return (EBUSY);
421
422		DMACH_WRITE(ch, AWIN_DDMA_SRC_START_ADDR_REG, src);
423		DMACH_WRITE(ch, AWIN_DDMA_DEST_START_ADDR_REG, dst);
424		DMACH_WRITE(ch, AWIN_DDMA_BC_REG, nbytes);
425
426		cfg |= AWIN_DDMA_CTL_DMA_LOADING;
427		a10dmac_write_ctl(ch, cfg);
428	}
429
430	return (0);
431}
432
433static void
434a10dmac_halt(device_t dev, void *priv)
435{
436	struct a10dmac_channel *ch = priv;
437	uint32_t cfg;
438
439	cfg = a10dmac_read_ctl(ch);
440	if (ch->ch_type == CH_NDMA) {
441		cfg &= ~AWIN_NDMA_CTL_DMA_LOADING;
442	} else {
443		cfg &= ~AWIN_DDMA_CTL_DMA_LOADING;
444	}
445	a10dmac_write_ctl(ch, cfg);
446}
447
/* Newbus method table: device lifecycle plus the sunxi_dma_if methods. */
static device_method_t a10dmac_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		a10dmac_probe),
	DEVMETHOD(device_attach,	a10dmac_attach),

	/* sunxi DMA interface */
	DEVMETHOD(sunxi_dma_alloc,	a10dmac_alloc),
	DEVMETHOD(sunxi_dma_free,	a10dmac_free),
	DEVMETHOD(sunxi_dma_set_config,	a10dmac_set_config),
	DEVMETHOD(sunxi_dma_transfer,	a10dmac_transfer),
	DEVMETHOD(sunxi_dma_halt,	a10dmac_halt),

	DEVMETHOD_END
};

static driver_t a10dmac_driver = {
	"a10dmac",
	a10dmac_methods,
	sizeof(struct a10dmac_softc)
};

static devclass_t a10dmac_devclass;

/* Attach under simplebus (the FDT-enumerated bus). */
DRIVER_MODULE(a10dmac, simplebus, a10dmac_driver, a10dmac_devclass, 0, 0);
472