/* $NetBSD: tegra_apbdma.c,v 1.9 2021/01/27 03:10:19 thorpej Exp $ */

/*-
 * Copyright (c) 2017 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: tegra_apbdma.c,v 1.9 2021/01/27 03:10:19 thorpej Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/systm.h>
#include <sys/kernel.h>

#include <arm/nvidia/tegra_reg.h>
#include <arm/nvidia/tegra_apbdmareg.h>
#include <arm/nvidia/tegra_var.h>

#include <dev/fdt/fdtvar.h>

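/* Number of DMA channels implemented by the APB DMA controller. */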
#define	TEGRA_APBDMA_NCHAN	32

static void *	tegra_apbdma_acquire(device_t, const void *, size_t,
				     void (*)(void *), void *);
static void	tegra_apbdma_release(device_t, void *);
static int	tegra_apbdma_transfer(device_t, void *,
				      struct fdtbus_dma_req *);
static void	tegra_apbdma_halt(device_t, void *);

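/*
 * DMA controller operations exported to fdtbus.  A consumer is
 * expected to reach them through the generic helpers, roughly
 * (a sketch, assuming the standard fdtbus_dma_get() and
 * fdtbus_dma_transfer() wrappers):
 *
 *	struct fdtbus_dma *dma;
 *
 *	dma = fdtbus_dma_get(phandle, "rx", callback, softc);
 *	if (dma != NULL)
 *		error = fdtbus_dma_transfer(dma, &req);
 */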
static const struct fdtbus_dma_controller_func tegra_apbdma_funcs = {
	.acquire = tegra_apbdma_acquire,
	.release = tegra_apbdma_release,
	.transfer = tegra_apbdma_transfer,
	.halt = tegra_apbdma_halt
};

static int	tegra_apbdma_match(device_t, cfdata_t, void *);
static void	tegra_apbdma_attach(device_t, device_t, void *);

static int	tegra_apbdma_intr(void *);

struct tegra_apbdma_softc;

struct tegra_apbdma_chan {
	struct tegra_apbdma_softc *ch_sc;	/* back-pointer to softc */
	u_int			ch_n;		/* channel index */
	void			*ch_ih;		/* interrupt handle; NULL if free */
	void			(*ch_cb)(void *);	/* completion callback */
	void			*ch_cbarg;	/* callback argument */
	u_int			ch_req;		/* APB requester (CSR REQ_SEL) */
};

struct tegra_apbdma_softc {
	device_t		sc_dev;
	bus_space_tag_t		sc_bst;
	bus_space_handle_t	sc_bsh;
	int			sc_phandle;

	struct tegra_apbdma_chan sc_chan[TEGRA_APBDMA_NCHAN];
};

CFATTACH_DECL_NEW(tegra_apbdma, sizeof(struct tegra_apbdma_softc),
	tegra_apbdma_match, tegra_apbdma_attach, NULL, NULL);

#define	APBDMA_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
#define	APBDMA_WRITE(sc, reg, val)					\
	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "nvidia,tegra210-apbdma" },
	{ .compat = "nvidia,tegra124-apbdma" },
	DEVICE_COMPAT_EOL
};

static int
tegra_apbdma_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

static void
tegra_apbdma_attach(device_t parent, device_t self, void *aux)
{
	struct tegra_apbdma_softc *sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	struct fdtbus_reset *rst;
	struct clk *clk;
	bus_addr_t addr;
	bus_size_t size;
	int error;
	u_int n;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	clk = fdtbus_clock_get_index(phandle, 0);
	if (clk == NULL) {
		aprint_error(": couldn't get clock\n");
		return;
	}
	rst = fdtbus_reset_get(phandle, "dma");
	if (rst == NULL) {
		aprint_error(": couldn't get 'dma' reset\n");
		return;
	}

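	/*
	 * Cycle the controller through reset with its clock enabled:
	 * assert reset, enable the module clock, then release reset.
	 */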
	fdtbus_reset_assert(rst);
	error = clk_enable(clk);
	if (error) {
		aprint_error(": couldn't enable clock: %d\n", error);
		return;
	}
	fdtbus_reset_deassert(rst);

	sc->sc_dev = self;
	sc->sc_bst = faa->faa_bst;
	sc->sc_phandle = phandle;
	error = bus_space_map(sc->sc_bst, addr, size, 0, &sc->sc_bsh);
	if (error) {
		aprint_error(": couldn't map %#" PRIxBUSADDR ": %d\n", addr,
		    error);
		return;
	}
	for (n = 0; n < TEGRA_APBDMA_NCHAN; n++) {
		sc->sc_chan[n].ch_sc = sc;
		sc->sc_chan[n].ch_n = n;
	}

	aprint_naive("\n");
	aprint_normal(": APBDMA\n");

	/* Stop all channels */
	for (n = 0; n < TEGRA_APBDMA_NCHAN; n++)
		APBDMA_WRITE(sc, APBDMACHAN_CSR_REG(n), 0);

	/* Mask interrupts */
	APBDMA_WRITE(sc, APBDMA_IRQ_MASK_REG, 0);

	/* Global enable */
	APBDMA_WRITE(sc, APBDMA_COMMAND_REG, APBDMA_COMMAND_GEN);

	fdtbus_register_dma_controller(self, phandle, &tegra_apbdma_funcs);
}

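/*
 * Per-channel interrupt handler.  Writing the just-read status value
 * back to the status register acknowledges (clears) the EOC flag
 * before the client's completion callback runs.
 */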
static int
tegra_apbdma_intr(void *priv)
{
	struct tegra_apbdma_chan *ch = priv;
	struct tegra_apbdma_softc *sc = ch->ch_sc;
	const u_int n = ch->ch_n;
	uint32_t sta;

	sta = APBDMA_READ(sc, APBDMACHAN_STA_REG(n));
	APBDMA_WRITE(sc, APBDMACHAN_STA_REG(n), sta);	/* clear EOC */

	ch->ch_cb(ch->ch_cbarg);

	return 1;
}

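/*
 * Claim a free channel.  The FDT specifier is a single 32-bit
 * big-endian cell selecting the APB requester (CSR REQ_SEL) to route
 * to the channel.  A channel with no interrupt handler established
 * is free.
 */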
static void *
tegra_apbdma_acquire(device_t dev, const void *data, size_t len,
    void (*cb)(void *), void *cbarg)
{
	struct tegra_apbdma_softc *sc = device_private(dev);
	struct tegra_apbdma_chan *ch;
	u_int n;
	char intrstr[128];

	if (len != 4)
		return NULL;

	const u_int req = be32dec(data);
	if (req > __SHIFTOUT_MASK(APBDMACHAN_CSR_REQ_SEL))
		return NULL;

	for (n = 0; n < TEGRA_APBDMA_NCHAN; n++) {
		ch = &sc->sc_chan[n];
		if (ch->ch_ih == NULL)
			break;
	}
	if (n >= TEGRA_APBDMA_NCHAN) {
		aprint_error_dev(dev, "no free DMA channel\n");
		return NULL;
	}

	if (!fdtbus_intr_str(sc->sc_phandle, n, intrstr, sizeof(intrstr))) {
		aprint_error_dev(dev, "failed to decode interrupt %u\n", n);
		return NULL;
	}

	ch->ch_ih = fdtbus_intr_establish_xname(sc->sc_phandle, n, IPL_VM,
	    FDT_INTR_MPSAFE, tegra_apbdma_intr, ch, device_xname(dev));
	if (ch->ch_ih == NULL) {
		aprint_error_dev(dev, "failed to establish interrupt on %s\n",
		    intrstr);
		return NULL;
	}
	aprint_normal_dev(dev, "interrupting on %s (channel %u)\n", intrstr, n);

	ch->ch_cb = cb;
	ch->ch_cbarg = cbarg;
	ch->ch_req = req;

	/* Unmask interrupts for this channel */
	APBDMA_WRITE(sc, APBDMA_IRQ_MASK_SET_REG, __BIT(n));

	return ch;
}

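/*
 * Release a channel: halt it, mask and disestablish its interrupt,
 * and mark it free for reuse.
 */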
static void
tegra_apbdma_release(device_t dev, void *priv)
{
	struct tegra_apbdma_softc *sc = device_private(dev);
	struct tegra_apbdma_chan *ch = priv;
	const u_int n = ch->ch_n;

	KASSERT(ch->ch_ih != NULL);

	/* Halt the channel */
	APBDMA_WRITE(sc, APBDMACHAN_CSR_REG(n), 0);

	/* Mask interrupts for this channel */
	APBDMA_WRITE(sc, APBDMA_IRQ_MASK_CLR_REG, __BIT(n));

	fdtbus_intr_disestablish(sc->sc_phandle, ch->ch_ih);

	/* Mark the channel free so tegra_apbdma_acquire() can reuse it. */
	ch->ch_ih = NULL;
	ch->ch_cb = NULL;
	ch->ch_cbarg = NULL;
}

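/*
 * Program and start a single transfer.  Only one segment is accepted;
 * the memory (AHB) address, device (APB) address, and byte count must
 * all be 32-bit aligned.
 *
 * A hypothetical caller might build its request roughly like this
 * (illustrative sketch; "seg" and "fifo_addr" are placeholders):
 *
 *	struct fdtbus_dma_req req = {
 *		.dreq_segs = &seg,
 *		.dreq_nsegs = 1,
 *		.dreq_dev_phys = fifo_addr,
 *		.dreq_dir = FDT_DMA_WRITE,
 *		.dreq_block_irq = true,
 *	};
 */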
static int
tegra_apbdma_transfer(device_t dev, void *priv, struct fdtbus_dma_req *req)
{
	struct tegra_apbdma_softc *sc = device_private(dev);
	struct tegra_apbdma_chan *ch = priv;
	const u_int n = ch->ch_n;
	uint32_t csr = 0;
	uint32_t csre = 0;
	uint32_t ahb_seq = 0;
	uint32_t apb_seq = 0;

	/* Scatter-gather not supported */
	if (req->dreq_nsegs != 1)
		return EINVAL;

	/* Addresses must be 32-bit aligned */
	if ((req->dreq_segs[0].ds_addr & 3) != 0 ||
	    (req->dreq_dev_phys & 3) != 0)
		return EINVAL;

	/* Length must be a multiple of 32 bits */
	if ((req->dreq_segs[0].ds_len & 3) != 0)
		return EINVAL;

	csr |= __SHIFTIN(ch->ch_req, APBDMACHAN_CSR_REQ_SEL);

	/*
	 * Set DMA transfer direction.
	 * APBDMACHAN_CSR_DIR=0 means "APB read to AHB write", and
	 * APBDMACHAN_CSR_DIR=1 means "AHB read to APB write".
	 */
	if (req->dreq_dir == FDT_DMA_WRITE)
		csr |= APBDMACHAN_CSR_DIR;

	/*
	 * Generate interrupt when DMA block transfer completes.
	 */
	if (req->dreq_block_irq)
		csr |= APBDMACHAN_CSR_IE_EOC;

	/*
	 * Single or multiple block transfer
	 */
	if (!req->dreq_block_multi)
		csr |= APBDMACHAN_CSR_ONCE;

	/*
	 * Flow control enable
	 */
	if (req->dreq_flow)
		csr |= APBDMACHAN_CSR_FLOW;

	/*
	 * Route interrupt to CPU. 1 = CPU, 0 = COP
	 */
	ahb_seq |= APBDMACHAN_AHB_SEQ_INTR_ENB;

	/*
	 * AHB is a 32-bit bus.
	 */
	if (req->dreq_mem_opt.opt_bus_width != 32)
		return EINVAL;
	ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_BUS_WIDTH_32,
			     APBDMACHAN_AHB_SEQ_BUS_WIDTH);

	/*
	 * AHB data swap.
	 */
	if (req->dreq_mem_opt.opt_swap)
		ahb_seq |= APBDMACHAN_AHB_SEQ_DATA_SWAP;

	/*
	 * AHB burst size.  opt_burst_len is in bits: bursts of one,
	 * four, or eight 32-bit words.
	 */
	switch (req->dreq_mem_opt.opt_burst_len) {
	case 32:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_BURST_1,
				     APBDMACHAN_AHB_SEQ_BURST);
		break;
	case 128:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_BURST_4,
				     APBDMACHAN_AHB_SEQ_BURST);
		break;
	case 256:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_BURST_8,
				     APBDMACHAN_AHB_SEQ_BURST);
		break;
	default:
		return EINVAL;
	}

	/*
	 * 2X double buffering mode.  Only supported in run-multiple
	 * mode with no-wrap operations.
	 */
	if (req->dreq_mem_opt.opt_dblbuf) {
		if (req->dreq_mem_opt.opt_wrap_len != 0)
			return EINVAL;
		if (!req->dreq_block_multi)
			return EINVAL;
		ahb_seq |= APBDMACHAN_AHB_SEQ_DBL_BUF;
	}

	/*
	 * AHB address wrap.  opt_wrap_len is in bytes; the hardware
	 * wraps on 32- to 2048-word boundaries.
	 */
	switch (req->dreq_mem_opt.opt_wrap_len) {
	case 0:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_NO_WRAP,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 128:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_32,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 256:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_64,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 512:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_128,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 1024:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_256,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 2048:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_512,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 4096:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_1024,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	case 8192:
		ahb_seq |= __SHIFTIN(APBDMACHAN_AHB_SEQ_WRAP_2048,
				     APBDMACHAN_AHB_SEQ_WRAP);
		break;
	default:
		return EINVAL;
	}

	/*
	 * APB bus width.
	 */
	switch (req->dreq_dev_opt.opt_bus_width) {
	case 8:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_BUS_WIDTH_8,
				     APBDMACHAN_APB_SEQ_BUS_WIDTH);
		break;
	case 16:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_BUS_WIDTH_16,
				     APBDMACHAN_APB_SEQ_BUS_WIDTH);
		break;
	case 32:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_BUS_WIDTH_32,
				     APBDMACHAN_APB_SEQ_BUS_WIDTH);
		break;
	default:
		return EINVAL;
	}

	/*
	 * APB data swap.
	 */
	if (req->dreq_dev_opt.opt_swap)
		apb_seq |= APBDMACHAN_APB_SEQ_DATA_SWAP;

	/*
	 * APB address wrap-around window.  opt_wrap_len is in bytes;
	 * the hardware wraps on 1- to 64-word windows.
	 */
	switch (req->dreq_dev_opt.opt_wrap_len) {
	case 0:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_NO_WRAP,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 4:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_1,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 8:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_2,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 16:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_4,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 32:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_8,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 64:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_16,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 128:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_32,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	case 256:
		apb_seq |= __SHIFTIN(APBDMACHAN_APB_SEQ_WRAP_64,
				     APBDMACHAN_APB_SEQ_WRAP);
		break;
	default:
		return EINVAL;
	}

	/*
	 * Program all channel registers before setting the channel enable bit.
	 */
	APBDMA_WRITE(sc, APBDMACHAN_AHB_PTR_REG(n), req->dreq_segs[0].ds_addr);
	APBDMA_WRITE(sc, APBDMACHAN_APB_PTR_REG(n), req->dreq_dev_phys);
	APBDMA_WRITE(sc, APBDMACHAN_AHB_SEQ_REG(n), ahb_seq);
	APBDMA_WRITE(sc, APBDMACHAN_APB_SEQ_REG(n), apb_seq);
	APBDMA_WRITE(sc, APBDMACHAN_WCOUNT_REG(n), req->dreq_segs[0].ds_len);
	APBDMA_WRITE(sc, APBDMACHAN_CSRE_REG(n), csre);
	APBDMA_WRITE(sc, APBDMACHAN_CSR_REG(n), csr | APBDMACHAN_CSR_ENB);

	return 0;
}

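/*
 * Stop an in-progress transfer by clearing the channel enable bit.
 */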
static void
tegra_apbdma_halt(device_t dev, void *priv)
{
	struct tegra_apbdma_softc *sc = device_private(dev);
	struct tegra_apbdma_chan *ch = priv;
	const u_int n = ch->ch_n;
	uint32_t v;

	v = APBDMA_READ(sc, APBDMACHAN_CSR_REG(n));
	v &= ~APBDMACHAN_CSR_ENB;
	APBDMA_WRITE(sc, APBDMACHAN_CSR_REG(n), v);
}