// SPDX-License-Identifier: GPL-2.0+
/*
 * Enhanced Direct Memory Access (EDMA3) Controller
 *
 * (C) Copyright 2014
 *     Texas Instruments Incorporated, <www.ti.com>
 *
 * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
 */

#include <asm/cache.h>
#include <asm/io.h>
#include <common.h>
#include <dm.h>
#include <dma-uclass.h>
#include <linux/dma-mapping.h>
#include <asm/omap_common.h>
#include <asm/ti-common/ti-edma3.h>
#include <linux/printk.h>

#define EDMA3_SL_BASE(slot)			(0x4000 + ((slot) << 5))
#define EDMA3_SL_MAX_NUM			512
#define EDMA3_SLOPT_FIFO_WIDTH_MASK		(0x7 << 8)

#define EDMA3_QCHMAP(ch)			(0x0200 + ((ch) << 2))
#define EDMA3_CHMAP_PARSET_MASK			0x1ff
#define EDMA3_CHMAP_PARSET_SHIFT		0x5
#define EDMA3_CHMAP_TRIGWORD_SHIFT		0x2

#define EDMA3_QEMCR				0x314
#define EDMA3_IPR				0x1068
#define EDMA3_IPRH				0x106c
#define EDMA3_ICR				0x1070
#define EDMA3_ICRH				0x1074
#define EDMA3_QEECR				0x1088
#define EDMA3_QEESR				0x108c
#define EDMA3_QSECR				0x1094

#define EDMA_FILL_BUFFER_SIZE			512

struct ti_edma3_priv {
	u32 base;
};

static u8 edma_fill_buffer[EDMA_FILL_BUFFER_SIZE] __aligned(ARCH_DMA_MINALIGN);

/**
 * qedma3_start - start qdma on a channel
 * @base: base address of edma
 * @cfg: pointer to struct edma3_channel_config holding the PaRAM slot
 * number to associate with, the chnum (your qdma channel number, 0-7),
 * the complete_code (transfer completion code) and the trigger_slot_word,
 * which must correspond to the word number in struct edma3_slot_layout
 * whose write generates the trigger event.
 */
void qedma3_start(u32 base, struct edma3_channel_config *cfg)
{
	u32 qchmap;

	/* Clear the pending int bit */
	if (cfg->complete_code < 32)
		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICR);
	else
		__raw_writel(1 << (cfg->complete_code - 32), base + EDMA3_ICRH);

	/* Map the parameter set and trigger word to the quick channel */
	qchmap = ((EDMA3_CHMAP_PARSET_MASK & cfg->slot)
		  << EDMA3_CHMAP_PARSET_SHIFT) |
		  (cfg->trigger_slot_word << EDMA3_CHMAP_TRIGWORD_SHIFT);

	__raw_writel(qchmap, base + EDMA3_QCHMAP(cfg->chnum));

	/* Clear missed event if set */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QSECR);
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEMCR);

	/* Enable qdma channel event */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEESR);
}

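/*
 * Example (a minimal sketch, mirroring what __edma3_transfer() below does):
 * once a PaRAM slot has been programmed, bind it to QDMA channel 0, use
 * completion code 0, and trigger on the destination-address word so that
 * writing the slot's dst field starts the transfer:
 *
 *	struct edma3_channel_config cfg = {
 *		.slot			= slot,
 *		.chnum			= 0,
 *		.complete_code		= 0,
 *		.trigger_slot_word	= EDMA3_TWORD(dst),
 *	};
 *
 *	qedma3_start(base, &cfg);
 */
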
/**
 * edma3_set_dest - set initial DMA destination address in parameter RAM slot
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @dst: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, in which case it specifies the
 *	width to use when addressing the FIFO (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma3_set_dest_index().
 */
void edma3_set_dest(u32 base, int slot, u32 dst, enum edma3_address_mode mode,
		    enum edma3_fifo_width width)
{
	u32 opt;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	opt = __raw_readl(&rg->opt);
	if (mode == FIFO)
		/* clear the old FIFO width field before setting the new one */
		opt = (opt & ~EDMA3_SLOPT_FIFO_WIDTH_MASK) |
		       (EDMA3_SLOPT_DST_ADDR_CONST_MODE |
			EDMA3_SLOPT_FIFO_WIDTH_SET(width));
	else
		opt &= ~EDMA3_SLOPT_DST_ADDR_CONST_MODE;

	__raw_writel(opt, &rg->opt);
	__raw_writel(dst, &rg->dst);
}

/**
 * edma3_set_dest_index - configure DMA destination address indexing
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @bidx: byte offset between destination arrays in a frame
 * @cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma3_set_dest_index(u32 base, unsigned slot, int bidx, int cidx)
{
	u32 src_dst_bidx;
	u32 src_dst_cidx;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	src_dst_bidx = __raw_readl(&rg->src_dst_bidx);
	src_dst_cidx = __raw_readl(&rg->src_dst_cidx);

	__raw_writel((src_dst_bidx & 0x0000ffff) | (bidx << 16),
		     &rg->src_dst_bidx);
	__raw_writel((src_dst_cidx & 0x0000ffff) | (cidx << 16),
		     &rg->src_dst_cidx);
}

/**
 * edma3_set_dest_addr - set destination address for slot only
 */
void edma3_set_dest_addr(u32 base, int slot, u32 dst)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
	__raw_writel(dst, &rg->dst);
}

/**
 * edma3_set_src - set initial DMA source address in parameter RAM slot
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @src: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, in which case it specifies the
 *	width to use when addressing the FIFO (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma3_set_src_index().
 */
void edma3_set_src(u32 base, int slot, u32 src, enum edma3_address_mode mode,
		   enum edma3_fifo_width width)
{
	u32 opt;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	opt = __raw_readl(&rg->opt);
	if (mode == FIFO)
		/* clear the old FIFO width field before setting the new one */
		opt = (opt & ~EDMA3_SLOPT_FIFO_WIDTH_MASK) |
		       (EDMA3_SLOPT_DST_ADDR_CONST_MODE |
			EDMA3_SLOPT_FIFO_WIDTH_SET(width));
	else
		opt &= ~EDMA3_SLOPT_DST_ADDR_CONST_MODE;

	__raw_writel(opt, &rg->opt);
	__raw_writel(src, &rg->src);
}

/**
 * edma3_set_src_index - configure DMA source address indexing
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @bidx: byte offset between source arrays in a frame
 * @cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma3_set_src_index(u32 base, unsigned slot, int bidx, int cidx)
{
	u32 src_dst_bidx;
	u32 src_dst_cidx;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	src_dst_bidx = __raw_readl(&rg->src_dst_bidx);
	src_dst_cidx = __raw_readl(&rg->src_dst_cidx);

	__raw_writel((src_dst_bidx & 0xffff0000) | bidx,
		     &rg->src_dst_bidx);
	__raw_writel((src_dst_cidx & 0xffff0000) | cidx,
		     &rg->src_dst_cidx);
}

/**
 * edma3_set_src_addr - set source address for slot only
 */
void edma3_set_src_addr(u32 base, int slot, u32 src)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
	__raw_writel(src, &rg->src);
}

/**
 * edma3_set_transfer_params - configure DMA transfer parameters
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *	the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma3_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma3_set_transfer_params(u32 base, int slot, int acnt,
			       int bcnt, int ccnt, u16 bcnt_rld,
			       enum edma3_sync_dimension sync_mode)
{
	u32 opt;
	u32 link_bcntrld;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	link_bcntrld = __raw_readl(&rg->link_bcntrld);

	__raw_writel((bcnt_rld << 16) | (0x0000ffff & link_bcntrld),
		     &rg->link_bcntrld);

	opt = __raw_readl(&rg->opt);
	if (sync_mode == ASYNC)
		__raw_writel(opt & ~EDMA3_SLOPT_AB_SYNC, &rg->opt);
	else
		__raw_writel(opt | EDMA3_SLOPT_AB_SYNC, &rg->opt);

	/* Set the acnt, bcnt, ccnt registers */
	__raw_writel((bcnt << 16) | (acnt & 0xffff), &rg->a_b_cnt);
	__raw_writel(0xffff & ccnt, &rg->ccnt);
}

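/*
 * Example (illustrative sketch only): programming slot @slot piecewise for
 * an AB-synchronized copy of 3 contiguous frames, each made of 8 arrays of
 * 4 bytes, where src_phys/dst_phys are assumed to hold the buffers'
 * physical addresses:
 *
 *	edma3_set_src(base, slot, src_phys, INCR, W8BIT);
 *	edma3_set_dest(base, slot, dst_phys, INCR, W8BIT);
 *	edma3_set_src_index(base, slot, 4, 32);
 *	edma3_set_dest_index(base, slot, 4, 32);
 *	edma3_set_transfer_params(base, slot, 4, 8, 3, 0, ABSYNC);
 *
 * Each sync event then moves one 32-byte frame: bidx = 4 steps from array
 * to array and, because the transfer is AB-synchronized, cidx = 32 steps
 * from the start of one frame to the start of the next.
 */
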
/**
 * edma3_write_slot - write parameter RAM data for slot
 * @base: base address of edma
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma3_write_slot(u32 base, int slot, struct edma3_slot_layout *param)
{
	int i;
	u32 *p = (u32 *)param;
	u32 *addr = (u32 *)(base + EDMA3_SL_BASE(slot));

	/* copy the whole slot, one 32-bit word at a time */
	for (i = 0; i < sizeof(struct edma3_slot_layout) / 4; i++)
		__raw_writel(*p++, addr++);
}

/**
 * edma3_read_slot - read parameter RAM data from slot
 * @base: base address of edma
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save them as a template for later reuse.
 */
void edma3_read_slot(u32 base, int slot, struct edma3_slot_layout *param)
{
	int i;
	u32 *p = (u32 *)param;
	u32 *addr = (u32 *)(base + EDMA3_SL_BASE(slot));

	/* copy the whole slot, one 32-bit word at a time */
	for (i = 0; i < sizeof(struct edma3_slot_layout) / 4; i++)
		*p++ = __raw_readl(addr++);
}

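/*
 * Example (sketch): edma3_read_slot() and edma3_write_slot() can be paired
 * to capture a fully-programmed slot as a template and clone it into
 * another free slot:
 *
 *	struct edma3_slot_layout tmpl;
 *
 *	edma3_read_slot(base, slot, &tmpl);
 *	edma3_write_slot(base, other_slot, &tmpl);
 */
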
void edma3_slot_configure(u32 base, int slot, struct edma3_slot_config *cfg)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	__raw_writel(cfg->opt, &rg->opt);
	__raw_writel(cfg->src, &rg->src);
	__raw_writel((cfg->bcnt << 16) | (cfg->acnt & 0xffff), &rg->a_b_cnt);
	__raw_writel(cfg->dst, &rg->dst);
	__raw_writel((cfg->dst_bidx << 16) |
		     (cfg->src_bidx & 0xffff), &rg->src_dst_bidx);
	__raw_writel((cfg->bcntrld << 16) |
		     (cfg->link & 0xffff), &rg->link_bcntrld);
	__raw_writel((cfg->dst_cidx << 16) |
		     (cfg->src_cidx & 0xffff), &rg->src_dst_cidx);
	__raw_writel(0xffff & cfg->ccnt, &rg->ccnt);
}

/**
 * edma3_check_for_transfer - check if a transfer has completed by testing
 * the interrupt pending bit, and clear that bit if it has.
 * @base: base address of edma
 * @cfg: pointer to the struct edma3_channel_config that was passed
 * to qedma3_start() when the qdma channel was started
 *
 * Return 0 if complete, 1 if not.
 */
int edma3_check_for_transfer(u32 base, struct edma3_channel_config *cfg)
{
	u32 inum;
	u32 ipr_base;
	u32 icr_base;

	if (cfg->complete_code < 32) {
		ipr_base = base + EDMA3_IPR;
		icr_base = base + EDMA3_ICR;
		inum = 1 << cfg->complete_code;
	} else {
		ipr_base = base + EDMA3_IPRH;
		icr_base = base + EDMA3_ICRH;
		inum = 1 << (cfg->complete_code - 32);
	}

	/* check complete interrupt */
	if (!(__raw_readl(ipr_base) & inum))
		return 1;

	/* clean up the pending int bit */
	__raw_writel(inum, icr_base);

	return 0;
}

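/*
 * Example (sketch): __edma3_transfer() below simply busy-waits on this
 * function; a caller that prefers a bounded wait could poll it against
 * U-Boot's millisecond timer instead, e.g. giving up after one second:
 *
 *	ulong start = get_timer(0);
 *
 *	while (edma3_check_for_transfer(base, &cfg)) {
 *		if (get_timer(start) > 1000)
 *			return -ETIMEDOUT;
 *	}
 */
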
/**
 * qedma3_stop - stop dma on the channel passed
 * @base: base address of edma
 * @cfg: pointer to the struct edma3_channel_config that was passed
 * to qedma3_start() when the qdma channel was started
 */
void qedma3_stop(u32 base, struct edma3_channel_config *cfg)
{
	/* Disable qdma channel event */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEECR);

	/* clean up the interrupt indication */
	if (cfg->complete_code < 32)
		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICR);
	else
		__raw_writel(1 << (cfg->complete_code - 32), base + EDMA3_ICRH);

	/* Clear missed event if set */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QSECR);
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEMCR);

	/* Clear the channel map */
	__raw_writel(0, base + EDMA3_QCHMAP(cfg->chnum));
}

void __edma3_transfer(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		      dma_addr_t dst, dma_addr_t src, size_t len, size_t s_len)
{
	struct edma3_slot_config        slot;
	struct edma3_channel_config     edma_channel;
	int                             b_cnt_value = 1;
	int                             rem_bytes  = 0;
	int                             a_cnt_value = len;
	unsigned int                    addr = (unsigned int) (dst);
	unsigned int                    max_acnt  = 0x7FFFU;

	/*
	 * A fill (len > s_len) re-reads the s_len-byte source buffer
	 * len / s_len times; a plain copy (len == s_len) is split into
	 * arrays of at most max_acnt bytes.  Any remainder is moved by
	 * a second, smaller transfer at the end.
	 */
	if (len > s_len) {
		b_cnt_value = (len / s_len);
		rem_bytes = (len % s_len);
		a_cnt_value = s_len;
	} else if (len > max_acnt) {
		b_cnt_value = (len / max_acnt);
		rem_bytes  = (len % max_acnt);
		a_cnt_value = max_acnt;
	}

	slot.opt        = 0;
	slot.src        = ((unsigned int) src);
	slot.acnt       = a_cnt_value;
	slot.bcnt       = b_cnt_value;
	slot.ccnt       = 1;
	/* a copy advances the source per array; a fill re-reads the buffer */
	if (len == s_len)
		slot.src_bidx = a_cnt_value;
	else
		slot.src_bidx = 0;
	slot.dst_bidx   = a_cnt_value;
	slot.src_cidx   = 0;
	slot.dst_cidx   = 0;
	slot.link       = EDMA3_PARSET_NULL_LINK;
	slot.bcntrld    = 0;
	slot.opt        = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
			  EDMA3_SLOPT_COMP_CODE(0) |
			  EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;

	edma3_slot_configure(edma3_base_addr, edma_slot_num, &slot);
	edma_channel.slot = edma_slot_num;
	edma_channel.chnum = 0;
	edma_channel.complete_code = 0;
	/* set event trigger to dst update */
	edma_channel.trigger_slot_word = EDMA3_TWORD(dst);

	qedma3_start(edma3_base_addr, &edma_channel);
	edma3_set_dest_addr(edma3_base_addr, edma_channel.slot, addr);

	while (edma3_check_for_transfer(edma3_base_addr, &edma_channel))
		;
	qedma3_stop(edma3_base_addr, &edma_channel);

	if (rem_bytes != 0) {
		slot.opt        = 0;
		if (len == s_len)
			slot.src =
				(b_cnt_value * max_acnt) + ((unsigned int) src);
		else
			slot.src = (unsigned int) src;
		slot.acnt       = rem_bytes;
		slot.bcnt       = 1;
		slot.ccnt       = 1;
		slot.src_bidx   = rem_bytes;
		slot.dst_bidx   = rem_bytes;
		slot.src_cidx   = 0;
		slot.dst_cidx   = 0;
		slot.link       = EDMA3_PARSET_NULL_LINK;
		slot.bcntrld    = 0;
		slot.opt        = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
				  EDMA3_SLOPT_COMP_CODE(0) |
				  EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;
		edma3_slot_configure(edma3_base_addr, edma_slot_num, &slot);
		edma_channel.slot = edma_slot_num;
		edma_channel.chnum = 0;
		edma_channel.complete_code = 0;
		/* set event trigger to dst update */
		edma_channel.trigger_slot_word = EDMA3_TWORD(dst);

		qedma3_start(edma3_base_addr, &edma_channel);
		/* the first pass moved a_cnt_value * b_cnt_value bytes */
		edma3_set_dest_addr(edma3_base_addr, edma_channel.slot, addr +
				    (a_cnt_value * b_cnt_value));
		while (edma3_check_for_transfer(edma3_base_addr, &edma_channel))
			;
		qedma3_stop(edma3_base_addr, &edma_channel);
	}
}

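/*
 * Worked example: a plain copy (len == s_len) of 100000 bytes exceeds
 * max_acnt, so the first pass runs with acnt = 0x7fff (32767), bcnt = 3
 * and ccnt = 1, moving 3 * 32767 = 98301 bytes; the rem_bytes pass then
 * moves the remaining 1699 bytes starting at offset 98301 in both the
 * source and the destination.
 */
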
void __edma3_fill(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		  dma_addr_t dst, u8 val, size_t len)
{
	int xfer_len;
	int max_xfer = EDMA_FILL_BUFFER_SIZE * 65535;
	dma_addr_t source;

	memset((void *)edma_fill_buffer, val, sizeof(edma_fill_buffer));
	source = dma_map_single(edma_fill_buffer, sizeof(edma_fill_buffer),
				DMA_TO_DEVICE);

	while (len) {
		xfer_len = len;
		if (xfer_len > max_xfer)
			xfer_len = max_xfer;

		__edma3_transfer(edma3_base_addr, edma_slot_num, dst,
				 source, xfer_len,
				 EDMA_FILL_BUFFER_SIZE);
		len -= xfer_len;
		dst += xfer_len;
	}

	dma_unmap_single(source, sizeof(edma_fill_buffer), DMA_TO_DEVICE);
}

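/*
 * Worked example: filling 1000 bytes re-reads the 512-byte edma_fill_buffer,
 * so __edma3_transfer() is called with s_len = 512: the first pass writes
 * one 512-byte array from the fill buffer (src_bidx = 0, so the buffer
 * would be re-read for every array), and the rem_bytes pass writes the
 * remaining 488 bytes immediately after it, at dst + 512.
 */
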
#ifndef CONFIG_DMA

void edma3_transfer(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		    void *dst, void *src, size_t len)
{
	/* Clean the areas, so no writeback into the RAM races with DMA */
	dma_addr_t destination = dma_map_single(dst, len, DMA_FROM_DEVICE);
	dma_addr_t source = dma_map_single(src, len, DMA_TO_DEVICE);

	__edma3_transfer(edma3_base_addr, edma_slot_num, destination, source,
			 len, len);

	/* Clean+Invalidate the areas after, so we can see DMA'd data */
	dma_unmap_single(destination, len, DMA_FROM_DEVICE);
	dma_unmap_single(source, len, DMA_TO_DEVICE);
}

void edma3_fill(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		void *dst, u8 val, size_t len)
{
	/* Clean the area, so no writeback into the RAM races with DMA */
	dma_addr_t destination = dma_map_single(dst, len, DMA_FROM_DEVICE);

	__edma3_fill(edma3_base_addr, edma_slot_num, destination, val, len);

	/* Clean+Invalidate the area after, so we can see DMA'd data */
	dma_unmap_single(destination, len, DMA_FROM_DEVICE);
}

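/*
 * Example (sketch): board or SPL code can call these wrappers directly.
 * EDMA3_CC_BASE is assumed here to stand for the SoC-specific channel
 * controller base address and slot 1 to be a free PaRAM slot:
 *
 *	edma3_fill(EDMA3_CC_BASE, 1, dst_buf, 0, len);
 *	edma3_transfer(EDMA3_CC_BASE, 1, dst_buf, src_buf, len);
 */
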
#else

static int ti_edma3_transfer(struct udevice *dev, int direction,
			     dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct ti_edma3_priv *priv = dev_get_priv(dev);

	/* enable edma3 clocks */
	enable_edma3_clocks();

	switch (direction) {
	case DMA_MEM_TO_MEM:
		__edma3_transfer(priv->base, 1, dst, src, len, len);
		break;
	default:
		pr_err("Transfer type not implemented in DMA driver\n");
		break;
	}

	/* disable edma3 clocks */
	disable_edma3_clocks();

	return 0;
}

static int ti_edma3_of_to_plat(struct udevice *dev)
{
	struct ti_edma3_priv *priv = dev_get_priv(dev);

	priv->base = dev_read_addr(dev);

	return 0;
}

static int ti_edma3_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM;

	return 0;
}

static const struct dma_ops ti_edma3_ops = {
	.transfer	= ti_edma3_transfer,
};

static const struct udevice_id ti_edma3_ids[] = {
	{ .compatible = "ti,edma3" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti_edma3",
	.id	= UCLASS_DMA,
	.of_match = ti_edma3_ids,
	.ops	= &ti_edma3_ops,
	.of_to_plat = ti_edma3_of_to_plat,
	.probe	= ti_edma3_probe,
	.priv_auto	= sizeof(struct ti_edma3_priv),
};
#endif /* CONFIG_DMA */
602