/* $NetBSD: ti_edma.c,v 1.5 2022/05/21 19:07:23 andvar Exp $ */

/*-
 * Copyright (c) 2014 Jared D. McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ti_edma.c,v 1.5 2022/05/21 19:07:23 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/intr.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/bitops.h>

#include <dev/fdt/fdtvar.h>

#include <arm/ti/ti_prcm.h>
#include <arm/ti/ti_edma.h>

#define NUM_DMA_CHANNELS	64
#define NUM_PARAM_SETS		256
#define MAX_PARAM_PER_CHANNEL	32

#ifdef EDMA_DEBUG
int edmadebug = 1;
#define DPRINTF(n,s)    do { if ((n) <= edmadebug) device_printf s; } while (0)
#else
#define DPRINTF(n,s)    do {} while (0)
#endif

struct edma_softc;

struct edma_channel {
	struct edma_softc *ch_sc;
	enum edma_type ch_type;
	uint8_t ch_index;
	void (*ch_callback)(void *);
	void *ch_callbackarg;
	unsigned int ch_nparams;
};

struct edma_softc {
	device_t sc_dev;
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	kmutex_t sc_lock;
	struct edma_channel sc_dma[NUM_DMA_CHANNELS];

	void *sc_ih;

	uint32_t sc_dmamask[NUM_DMA_CHANNELS / 32];
	uint32_t sc_parammask[NUM_PARAM_SETS / 32];
};

static int edma_match(device_t, cfdata_t, void *);
static void edma_attach(device_t, device_t, void *);

static void edma_init(struct edma_softc *);
static int edma_intr(void *);
static void edma_write_param(struct edma_softc *,
				  unsigned int, const struct edma_param *);
static bool edma_bit_isset(uint32_t *, unsigned int);
static void edma_bit_set(uint32_t *, unsigned int);
static void edma_bit_clr(uint32_t *, unsigned int);

CFATTACH_DECL_NEW(ti_edma, sizeof(struct edma_softc),
    edma_match, edma_attach, NULL, NULL);

#define EDMA_READ(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
#define EDMA_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

static const struct device_compatible_entry compat_data[] = {
	{ .compat = "ti,edma3-tpcc" },
	DEVICE_COMPAT_EOL
};

static int
edma_match(device_t parent, cfdata_t match, void *aux)
{
	struct fdt_attach_args * const faa = aux;

	return of_compatible_match(faa->faa_phandle, compat_data);
}

static void
edma_attach(device_t parent, device_t self, void *aux)
{
	struct edma_softc *sc = device_private(self);
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	char intrstr[128];
	bus_addr_t addr;
	bus_size_t size;
	int idx;

	if (fdtbus_get_reg(phandle, 0, &addr, &size) != 0) {
		aprint_error(": couldn't get registers\n");
		return;
	}

	if (!fdtbus_intr_str(phandle, 0, intrstr, sizeof(intrstr))) {
		aprint_error(": failed to decode interrupt\n");
		return;
	}

	sc->sc_dev = self;
	sc->sc_iot = faa->faa_bst;
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_VM);
	if (bus_space_map(sc->sc_iot, addr, size, 0, &sc->sc_ioh) != 0) {
		aprint_error(": couldn't map registers\n");
		return;
	}

	aprint_naive("\n");
	aprint_normal(": EDMA Channel Controller\n");

	for (idx = 0; idx < NUM_DMA_CHANNELS; idx++) {
		struct edma_channel *ch = &sc->sc_dma[idx];
		ch->ch_sc = sc;
		ch->ch_type = EDMA_TYPE_DMA;
		ch->ch_index = idx;
		ch->ch_callback = NULL;
		ch->ch_callbackarg = NULL;
		ch->ch_nparams = 0;
	}

	if (ti_prcm_enable_hwmod(phandle, 0) != 0) {
		aprint_error_dev(self, "couldn't enable module\n");
		return;
	}

	edma_init(sc);

	sc->sc_ih = fdtbus_intr_establish_byname(phandle, "edma3_ccint",
	    IPL_VM, FDT_INTR_MPSAFE, edma_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt\n");
		return;
	}
	aprint_normal_dev(self, "interrupting on %s\n", intrstr);
}

/*
 * Hardware initialization
 */
static void
edma_init(struct edma_softc *sc)
{
	struct edma_param param;
	uint32_t val;
	int idx;

	val = EDMA_READ(sc, EDMA_CCCFG_REG);
	if (val & EDMA_CCCFG_CHMAP_EXIST) {
		for (idx = 0; idx < NUM_DMA_CHANNELS; idx++) {
			EDMA_WRITE(sc, EDMA_DCHMAP_REG(idx),
			    __SHIFTIN(0, EDMA_DCHMAP_PAENTRY));
		}
	}

	memset(&param, 0, sizeof(param));
	param.ep_bcnt = 1;
	for (idx = 0; idx < NUM_PARAM_SETS; idx++) {
		edma_write_param(sc, idx, &param);
	}

	/* reserve PaRAM entry 0 (dummy slot), plus entries 1-32 */
	edma_bit_set(sc->sc_parammask, 0);
	for (idx = 1; idx <= 32; idx++) {
		edma_bit_set(sc->sc_parammask, idx);
	}
}

/*
 * Write a PaRAM entry
 */
static void
edma_write_param(struct edma_softc *sc,
    unsigned int idx, const struct edma_param *ep)
{
	EDMA_WRITE(sc, EDMA_PARAM_OPT_REG(idx), ep->ep_opt);
	EDMA_WRITE(sc, EDMA_PARAM_SRC_REG(idx), ep->ep_src);
	EDMA_WRITE(sc, EDMA_PARAM_CNT_REG(idx),
	    __SHIFTIN(ep->ep_bcnt, EDMA_PARAM_CNT_BCNT) |
	    __SHIFTIN(ep->ep_acnt, EDMA_PARAM_CNT_ACNT));
	EDMA_WRITE(sc, EDMA_PARAM_DST_REG(idx), ep->ep_dst);
	EDMA_WRITE(sc, EDMA_PARAM_BIDX_REG(idx),
	    __SHIFTIN(ep->ep_dstbidx, EDMA_PARAM_BIDX_DSTBIDX) |
	    __SHIFTIN(ep->ep_srcbidx, EDMA_PARAM_BIDX_SRCBIDX));
	EDMA_WRITE(sc, EDMA_PARAM_LNK_REG(idx),
	    __SHIFTIN(ep->ep_bcntrld, EDMA_PARAM_LNK_BCNTRLD) |
	    __SHIFTIN(ep->ep_link, EDMA_PARAM_LNK_LINK));
	EDMA_WRITE(sc, EDMA_PARAM_CIDX_REG(idx),
	    __SHIFTIN(ep->ep_dstcidx, EDMA_PARAM_CIDX_DSTCIDX) |
	    __SHIFTIN(ep->ep_srccidx, EDMA_PARAM_CIDX_SRCCIDX));
	EDMA_WRITE(sc, EDMA_PARAM_CCNT_REG(idx),
	    __SHIFTIN(ep->ep_ccnt, EDMA_PARAM_CCNT_CCNT));
}

static bool
edma_bit_isset(uint32_t *bits, unsigned int bit)
{
	return !!(bits[bit >> 5] & (1U << (bit & 0x1f)));
}

static void
edma_bit_set(uint32_t *bits, unsigned int bit)
{
	bits[bit >> 5] |= (1U << (bit & 0x1f));
}

static void
edma_bit_clr(uint32_t *bits, unsigned int bit)
{
	bits[bit >> 5] &= ~(1U << (bit & 0x1f));
}

static int
edma_intr(void *priv)
{
	struct edma_softc *sc = priv;
	uint64_t ipr, ier;
	int bit, idx;

	ipr = EDMA_READ(sc, EDMA_IPR_REG);
	ipr |= (uint64_t)EDMA_READ(sc, EDMA_IPRH_REG) << 32;
	if (ipr == 0)
		return 0;

	ier = EDMA_READ(sc, EDMA_IER_REG);
	ier |= (uint64_t)EDMA_READ(sc, EDMA_IERH_REG) << 32;

	DPRINTF(2, (sc->sc_dev, "ipr = 0x%016llx ier 0x%016llx\n", ipr, ier));

	EDMA_WRITE(sc, EDMA_ICR_REG, ipr & 0xffffffff);
	EDMA_WRITE(sc, EDMA_ICRH_REG, ipr >> 32);

	while ((bit = ffs64(ipr)) != 0) {
		idx = bit - 1;
		ipr &= ~__BIT(idx);
		if (!(ier & __BIT(idx)))
			continue;
		if (!edma_bit_isset(sc->sc_dmamask, idx))
			continue;

		sc->sc_dma[idx].ch_callback(sc->sc_dma[idx].ch_callbackarg);
	}

	EDMA_WRITE(sc, EDMA_IEVAL_REG, EDMA_IEVAL_EVAL);

	return 1;
}

/*
 * Allocate a DMA channel. Currently only DMA types are supported, not QDMA.
 * Returns NULL on failure.
 */
struct edma_channel *
edma_channel_alloc(enum edma_type type, unsigned int drq,
    void (*cb)(void *), void *cbarg)
{
	struct edma_softc *sc;
	device_t dev;
	struct edma_channel *ch = NULL;

	KASSERT(drq < __arraycount(sc->sc_dma));
	KASSERT(type == EDMA_TYPE_DMA);	/* QDMA not implemented */
	KASSERT(cb != NULL);
	KASSERT(cbarg != NULL);

	dev = device_find_by_driver_unit("tiedma", 0);
	if (dev == NULL)
		return NULL;
	sc = device_private(dev);

	mutex_enter(&sc->sc_lock);
	if (!edma_bit_isset(sc->sc_dmamask, drq)) {
		ch = &sc->sc_dma[drq];
		KASSERT(ch->ch_callback == NULL);
		KASSERT(ch->ch_index == drq);
		ch->ch_callback = cb;
		ch->ch_callbackarg = cbarg;
		edma_bit_set(sc->sc_dmamask, drq);
	}

	if (ch == NULL)
		goto done;

	EDMA_WRITE(sc, EDMA_DRAE_REG(0), sc->sc_dmamask[0]);
	EDMA_WRITE(sc, EDMA_DRAEH_REG(0), sc->sc_dmamask[1]);

	if (ch->ch_index < 32) {
		EDMA_WRITE(sc, EDMA_ICR_REG, __BIT(ch->ch_index));
		EDMA_WRITE(sc, EDMA_IESR_REG, __BIT(ch->ch_index));
	} else {
		EDMA_WRITE(sc, EDMA_ICRH_REG, __BIT(ch->ch_index - 32));
		EDMA_WRITE(sc, EDMA_IESRH_REG, __BIT(ch->ch_index - 32));
	}

done:
	mutex_exit(&sc->sc_lock);

	return ch;
}
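
/*
 * Usage sketch (illustrative only, not part of the driver): a client driver
 * passes its device's DMA event number as "drq" and a completion callback
 * that will be invoked from edma_intr().  The names xxx_softc, xxx_dma_done
 * and the event number 24 below are hypothetical.
 *
 *	static void
 *	xxx_dma_done(void *arg)
 *	{
 *		struct xxx_softc *xsc = arg;
 *		...
 *	}
 *
 *	xsc->sc_edma = edma_channel_alloc(EDMA_TYPE_DMA, 24,
 *	    xxx_dma_done, xsc);
 *	if (xsc->sc_edma == NULL)
 *		return ENXIO;
 */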

/*
 * Free a DMA channel allocated with edma_channel_alloc
 */
void
edma_channel_free(struct edma_channel *ch)
{
	struct edma_softc *sc = ch->ch_sc;

	KASSERT(ch->ch_nparams == 0);

	mutex_enter(&sc->sc_lock);
	if (ch->ch_index < 32) {
		EDMA_WRITE(sc, EDMA_IECR_REG, __BIT(ch->ch_index));
	} else {
		EDMA_WRITE(sc, EDMA_IECRH_REG, __BIT(ch->ch_index - 32));
	}
	ch->ch_callback = NULL;
	ch->ch_callbackarg = NULL;
	edma_bit_clr(sc->sc_dmamask, ch->ch_index);
	mutex_exit(&sc->sc_lock);
}
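
/*
 * Teardown sketch (illustrative): all PaRAM entries must be released with
 * edma_param_free() before the channel itself is freed, since
 * edma_channel_free() asserts that ch_nparams has dropped to zero.
 *
 *	edma_param_free(ch, pe);
 *	edma_channel_free(ch);
 */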

/*
 * Allocate a PaRAM entry. The driver artificially restricts the number
 * of PaRAM entries available for each channel to MAX_PARAM_PER_CHANNEL.
 * If the per-channel limit has already been reached, or there are no
 * entries available, 0xffff is returned.
 */
uint16_t
edma_param_alloc(struct edma_channel *ch)
{
	struct edma_softc *sc = ch->ch_sc;
	uint16_t param_entry = 0xffff;
	int idx;

	if (ch->ch_nparams == MAX_PARAM_PER_CHANNEL)
		return param_entry;

	mutex_enter(&sc->sc_lock);
	for (idx = 0; idx < NUM_PARAM_SETS; idx++) {
		if (!edma_bit_isset(sc->sc_parammask, idx)) {
			param_entry = idx;
			edma_bit_set(sc->sc_parammask, idx);
			ch->ch_nparams++;
			break;
		}
	}
	mutex_exit(&sc->sc_lock);

	return param_entry;
}
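
/*
 * Usage sketch (illustrative): 0xffff signals exhaustion rather than a
 * valid slot, so callers must check for it before programming the entry.
 *
 *	uint16_t pe = edma_param_alloc(ch);
 *	if (pe == 0xffff)
 *		return ENOMEM;
 */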

/*
 * Free a PaRAM entry allocated with edma_param_alloc
 */
void
edma_param_free(struct edma_channel *ch, uint16_t param_entry)
{
	struct edma_softc *sc = ch->ch_sc;

	KASSERT(param_entry < NUM_PARAM_SETS);
	KASSERT(ch->ch_nparams > 0);
	KASSERT(edma_bit_isset(sc->sc_parammask, param_entry));

	mutex_enter(&sc->sc_lock);
	edma_bit_clr(sc->sc_parammask, param_entry);
	ch->ch_nparams--;
	mutex_exit(&sc->sc_lock);
}

/*
 * Update a PaRAM entry register set with caller-provided values
 */
void
edma_set_param(struct edma_channel *ch, uint16_t param_entry,
    struct edma_param *ep)
{
	struct edma_softc *sc = ch->ch_sc;

	KASSERT(param_entry < NUM_PARAM_SETS);
	KASSERT(ch->ch_nparams > 0);
	KASSERT(edma_bit_isset(sc->sc_parammask, param_entry));

	DPRINTF(1, (sc->sc_dev,
	    "write param entry ch# %d pe %d: 0x%08x -> 0x%08x (%u, %u, %u)\n",
	    ch->ch_index, param_entry, ep->ep_src, ep->ep_dst,
	    ep->ep_acnt, ep->ep_bcnt, ep->ep_ccnt));
	edma_write_param(sc, param_entry, ep);
}
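
/*
 * Usage sketch (illustrative, hypothetical values): filling a PaRAM entry
 * for a transfer of 64 four-byte arrays from a buffer to a device FIFO.
 * Only the fields consumed by edma_write_param() above are shown; the OPT
 * bits and the bus addresses depend entirely on the consumer and are taken
 * from ti_edma.h and its bus_dma mappings.
 *
 *	struct edma_param ep;
 *
 *	memset(&ep, 0, sizeof(ep));
 *	ep.ep_opt = opt_bits;		consumer-specific OPT bits
 *	ep.ep_src = buf_pa;		source bus address
 *	ep.ep_dst = fifo_pa;		destination bus address
 *	ep.ep_acnt = 4;			bytes per array (ACNT)
 *	ep.ep_bcnt = 64;		arrays per frame (BCNT)
 *	ep.ep_ccnt = 1;			frames (CCNT)
 *	ep.ep_srcbidx = 4;		source advances per array
 *	ep.ep_dstbidx = 0;		FIFO destination does not advance
 *	ep.ep_link = 0xffff;		no linked PaRAM entry
 *	edma_set_param(ch, pe, &ep);
 */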

/*
 * Enable a DMA channel: point the channel at its PaRAM entry,
 * clear any pending error, and set the Event Enable bit only.
 * The event itself is either generated by hardware or raised in
 * software with edma_transfer_start().
 */
int
edma_transfer_enable(struct edma_channel *ch, uint16_t param_entry)
{
	struct edma_softc *sc = ch->ch_sc;
	bus_size_t off = (ch->ch_index < 32 ? 0 : 4);
	uint32_t bit = __BIT(ch->ch_index < 32 ?
			     ch->ch_index : ch->ch_index - 32);

	DPRINTF(1, (sc->sc_dev, "enable transfer ch# %d off %d bit %x pe %d\n",
	    ch->ch_index, (int)off, bit, param_entry));

	EDMA_WRITE(sc, EDMA_DCHMAP_REG(ch->ch_index),
	    __SHIFTIN(param_entry, EDMA_DCHMAP_PAENTRY));

	uint32_t ccerr = EDMA_READ(sc, EDMA_CCERR_REG);
	if (ccerr) {
		device_printf(sc->sc_dev, " !!! CCERR %08x\n", ccerr);
		EDMA_WRITE(sc, EDMA_CCERRCLR_REG, ccerr);
	}

	EDMA_WRITE(sc, EDMA_EESR_REG + off, bit);
	return 0;
}

/*
 * Software-start a DMA channel: Set the Event bit.
 */
int
edma_transfer_start(struct edma_channel *ch)
{
	struct edma_softc *sc = ch->ch_sc;
	bus_size_t off = (ch->ch_index < 32 ? 0 : 4);
	uint32_t bit = __BIT(ch->ch_index < 32 ?
			     ch->ch_index : ch->ch_index - 32);

	DPRINTF(1, (sc->sc_dev, "start transfer ch# %d off %d bit %x\n",
	    ch->ch_index, (int)off, bit));

	EDMA_WRITE(sc, EDMA_ESR_REG + off, bit);
	return 0;
}
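
/*
 * Usage sketch (illustrative): a manually triggered transfer programs the
 * PaRAM entry, enables the channel and then raises the event in software.
 * For a peripheral-synchronized transfer the edma_transfer_start() call is
 * omitted and the hardware event triggers the transfer instead.
 *
 *	edma_set_param(ch, pe, &ep);
 *	edma_transfer_enable(ch, pe);
 *	edma_transfer_start(ch);
 *	(wait for the channel callback)
 */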

/*
 * Halt a DMA transfer. Called after a successful transfer, or to abort
 * an in-progress one.
 */
void
edma_halt(struct edma_channel *ch)
{
	struct edma_softc *sc = ch->ch_sc;
	bus_size_t off = (ch->ch_index < 32 ? 0 : 4);
	uint32_t bit = __BIT(ch->ch_index < 32 ?
			     ch->ch_index : ch->ch_index - 32);

	EDMA_WRITE(sc, EDMA_EECR_REG + off, bit);
	EDMA_WRITE(sc, EDMA_ECR_REG + off, bit);
	EDMA_WRITE(sc, EDMA_SECR_REG + off, bit);
	EDMA_WRITE(sc, EDMA_EMCR_REG + off, bit);

	EDMA_WRITE(sc, EDMA_DCHMAP_REG(ch->ch_index),
	    __SHIFTIN(0, EDMA_DCHMAP_PAENTRY));
}
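
/*
 * Usage sketch (illustrative, hypothetical names): the channel callback runs
 * from edma_intr() at the IPL of the controller interrupt (IPL_VM), so a
 * typical consumer halts the channel there and wakes a waiting thread.
 *
 *	static void
 *	xxx_dma_done(void *arg)
 *	{
 *		struct xxx_softc *xsc = arg;
 *
 *		edma_halt(xsc->sc_edma);
 *		mutex_enter(&xsc->sc_intr_lock);
 *		xsc->sc_dma_busy = false;
 *		cv_broadcast(&xsc->sc_intr_cv);
 *		mutex_exit(&xsc->sc_intr_lock);
 *	}
 */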

uint8_t
edma_channel_index(struct edma_channel *ch)
{
	return ch->ch_index;
}

void
edma_dump(struct edma_channel *ch)
{
	static const struct {
		const char *name;
		uint16_t off;
	} regs[] = {
		{ "ER", EDMA_ER_REG },
		{ "ERH", EDMA_ERH_REG },
		{ "EER", EDMA_EER_REG },
		{ "EERH", EDMA_EERH_REG },
		{ "SER", EDMA_SER_REG },
		{ "SERH", EDMA_SERH_REG },
		{ "IER", EDMA_IER_REG },
		{ "IERH", EDMA_IERH_REG },
		{ "IPR", EDMA_IPR_REG },
		{ "IPRH", EDMA_IPRH_REG },
		{ "CCERR", EDMA_CCERR_REG },
		{ "CCSTAT", EDMA_CCSTAT_REG },
		{ "DRAE0", EDMA_DRAE_REG(0) },
		{ "DRAEH0", EDMA_DRAEH_REG(0) },
		{ NULL, 0 }
	};
	struct edma_softc *sc = ch->ch_sc;
	int i;

	for (i = 0; regs[i].name; i++) {
		device_printf(sc->sc_dev, "%s: %08x\n",
		    regs[i].name, EDMA_READ(sc, regs[i].off));
	}
	device_printf(sc->sc_dev, "DCHMAP%d: %08x\n", ch->ch_index,
	    EDMA_READ(sc, EDMA_DCHMAP_REG(ch->ch_index)));
}

void
edma_dump_param(struct edma_channel *ch, uint16_t param_entry)
{
	struct {
		const char *name;
		uint16_t off;
	} regs[] = {
		{ "OPT", EDMA_PARAM_OPT_REG(param_entry) },
		{ "CNT", EDMA_PARAM_CNT_REG(param_entry) },
		{ "DST", EDMA_PARAM_DST_REG(param_entry) },
		{ "BIDX", EDMA_PARAM_BIDX_REG(param_entry) },
		{ "LNK", EDMA_PARAM_LNK_REG(param_entry) },
		{ "CIDX", EDMA_PARAM_CIDX_REG(param_entry) },
		{ "CCNT", EDMA_PARAM_CCNT_REG(param_entry) },
		{ NULL, 0 }
	};
	struct edma_softc *sc = ch->ch_sc;
	int i;

	for (i = 0; regs[i].name; i++) {
		device_printf(sc->sc_dev, "%s%d: %08x\n",
		    regs[i].name, param_entry, EDMA_READ(sc, regs[i].off));
	}
}
