1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2023 Cai Huoqing
4 * Synopsys DesignWare HDMA v0 core
5 */
6
7#include <linux/bitfield.h>
8#include <linux/irqreturn.h>
9#include <linux/io-64-nonatomic-lo-hi.h>
10
11#include "dw-edma-core.h"
12#include "dw-hdma-v0-core.h"
13#include "dw-hdma-v0-regs.h"
14#include "dw-hdma-v0-debugfs.h"
15
/*
 * Control bits of an HDMA linked-list element (names follow the DW HDMA
 * databook abbreviations).
 */
enum dw_hdma_control {
	DW_HDMA_V0_CB					= BIT(0),	/* Cycle Bit */
	DW_HDMA_V0_TCB					= BIT(1),	/* Toggle Cycle Bit - set on the trailing link element */
	DW_HDMA_V0_LLP					= BIT(2),	/* Load Link Pointer - marks the link element */
	DW_HDMA_V0_LIE					= BIT(3),	/* Local Interrupt Enable on element completion */
	DW_HDMA_V0_RIE					= BIT(4),	/* Remote Interrupt Enable on element completion */
	DW_HDMA_V0_CCS					= BIT(8),	/* Consumer Cycle State (unused in this file) */
	DW_HDMA_V0_LLE					= BIT(9),	/* Linked List Enable (unused in this file) */
};
25
/* Base of the memory-mapped HDMA v0 register file for this controller. */
static inline struct dw_hdma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
	return dw->chip->reg_base;
}
30
31static inline struct dw_hdma_v0_ch_regs __iomem *
32__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
33{
34	if (dir == EDMA_DIR_WRITE)
35		return &(__dw_regs(dw)->ch[ch].wr);
36	else
37		return &(__dw_regs(dw)->ch[ch].rd);
38}
39
/* 32-bit MMIO write to a per-channel register selected by direction */
#define SET_CH_32(dw, dir, ch, name, value) \
	writel(value, &(__dw_ch_regs(dw, dir, ch)->name))

/* 32-bit MMIO read from a per-channel register selected by direction */
#define GET_CH_32(dw, dir, ch, name) \
	readl(&(__dw_ch_regs(dw, dir, ch)->name))

/* Write the same value to both the write- and read-direction instance */
#define SET_BOTH_CH_32(dw, ch, name, value) \
	do {					\
		writel(value, &(__dw_ch_regs(dw, EDMA_DIR_WRITE, ch)->name));	\
		writel(value, &(__dw_ch_regs(dw, EDMA_DIR_READ, ch)->name));	\
	} while (0)
51
52/* HDMA management callbacks */
53static void dw_hdma_v0_core_off(struct dw_edma *dw)
54{
55	int id;
56
57	for (id = 0; id < HDMA_V0_MAX_NR_CH; id++) {
58		SET_BOTH_CH_32(dw, id, int_setup,
59			       HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
60		SET_BOTH_CH_32(dw, id, int_clear,
61			       HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK);
62		SET_BOTH_CH_32(dw, id, ch_en, 0);
63	}
64}
65
static u16 dw_hdma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
	/*
	 * The HDMA IP has no way to report the number of hardware channels
	 * available, so report the maximum and let the platform code set
	 * the right number of channels.
	 */
	return HDMA_V0_MAX_NR_CH;
}
75
76static enum dma_status dw_hdma_v0_core_ch_status(struct dw_edma_chan *chan)
77{
78	struct dw_edma *dw = chan->dw;
79	u32 tmp;
80
81	tmp = FIELD_GET(HDMA_V0_CH_STATUS_MASK,
82			GET_CH_32(dw, chan->id, chan->dir, ch_stat));
83
84	if (tmp == 1)
85		return DMA_IN_PROGRESS;
86	else if (tmp == 3)
87		return DMA_COMPLETE;
88	else
89		return DMA_ERROR;
90}
91
/* Acknowledge the "stop" (transfer done) interrupt for @chan. */
static void dw_hdma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_STOP_INT_MASK);
}
98
/* Acknowledge the "abort" interrupt for @chan. */
static void dw_hdma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	SET_CH_32(dw, chan->dir, chan->id, int_clear, HDMA_V0_ABORT_INT_MASK);
}
105
/* Read the raw interrupt status register of @chan. */
static u32 dw_hdma_v0_core_status_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	return GET_CH_32(dw, chan->dir, chan->id, int_stat);
}
112
113static irqreturn_t
114dw_hdma_v0_core_handle_int(struct dw_edma_irq *dw_irq, enum dw_edma_dir dir,
115			   dw_edma_handler_t done, dw_edma_handler_t abort)
116{
117	struct dw_edma *dw = dw_irq->dw;
118	unsigned long total, pos, val;
119	irqreturn_t ret = IRQ_NONE;
120	struct dw_edma_chan *chan;
121	unsigned long off, mask;
122
123	if (dir == EDMA_DIR_WRITE) {
124		total = dw->wr_ch_cnt;
125		off = 0;
126		mask = dw_irq->wr_mask;
127	} else {
128		total = dw->rd_ch_cnt;
129		off = dw->wr_ch_cnt;
130		mask = dw_irq->rd_mask;
131	}
132
133	for_each_set_bit(pos, &mask, total) {
134		chan = &dw->chan[pos + off];
135
136		val = dw_hdma_v0_core_status_int(chan);
137		if (FIELD_GET(HDMA_V0_STOP_INT_MASK, val)) {
138			dw_hdma_v0_core_clear_done_int(chan);
139			done(chan);
140
141			ret = IRQ_HANDLED;
142		}
143
144		if (FIELD_GET(HDMA_V0_ABORT_INT_MASK, val)) {
145			dw_hdma_v0_core_clear_abort_int(chan);
146			abort(chan);
147
148			ret = IRQ_HANDLED;
149		}
150	}
151
152	return ret;
153}
154
155static void dw_hdma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
156				     u32 control, u32 size, u64 sar, u64 dar)
157{
158	ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli);
159
160	if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
161		struct dw_hdma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;
162
163		lli->control = control;
164		lli->transfer_size = size;
165		lli->sar.reg = sar;
166		lli->dar.reg = dar;
167	} else {
168		struct dw_hdma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;
169
170		writel(control, &lli->control);
171		writel(size, &lli->transfer_size);
172		writeq(sar, &lli->sar.reg);
173		writeq(dar, &lli->dar.reg);
174	}
175}
176
177static void dw_hdma_v0_write_ll_link(struct dw_edma_chunk *chunk,
178				     int i, u32 control, u64 pointer)
179{
180	ptrdiff_t ofs = i * sizeof(struct dw_hdma_v0_lli);
181
182	if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
183		struct dw_hdma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;
184
185		llp->control = control;
186		llp->llp.reg = pointer;
187	} else {
188		struct dw_hdma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;
189
190		writel(control, &llp->control);
191		writeq(pointer, &llp->llp.reg);
192	}
193}
194
195static void dw_hdma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
196{
197	struct dw_edma_burst *child;
198	struct dw_edma_chan *chan = chunk->chan;
199	u32 control = 0, i = 0;
200	int j;
201
202	if (chunk->cb)
203		control = DW_HDMA_V0_CB;
204
205	j = chunk->bursts_alloc;
206	list_for_each_entry(child, &chunk->burst->list, list) {
207		j--;
208		if (!j) {
209			control |= DW_HDMA_V0_LIE;
210			if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
211				control |= DW_HDMA_V0_RIE;
212		}
213
214		dw_hdma_v0_write_ll_data(chunk, i++, control, child->sz,
215					 child->sar, child->dar);
216	}
217
218	control = DW_HDMA_V0_LLP | DW_HDMA_V0_TCB;
219	if (!chunk->cb)
220		control |= DW_HDMA_V0_CB;
221
222	dw_hdma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
223}
224
/* Ensure the LL writes have landed before the doorbell is rung. */
static void dw_hdma_v0_sync_ll_data(struct dw_edma_chunk *chunk)
{
	/*
	 * In case of remote HDMA engine setup, the DW PCIe RP/EP internal
	 * configuration registers and application memory are normally accessed
	 * over different buses. Ensure LL-data reaches the memory before the
	 * doorbell register is toggled by issuing the dummy-read from the remote
	 * LL memory in a hope that the MRd TLP will return only after the
	 * last MWr TLP is completed
	 */
	if (!(chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
		readl(chunk->ll_region.vaddr.io);
}
238
/*
 * Write out the chunk's linked list and kick off the transfer.
 * @first: true for the first chunk of a transfer - only then are the
 * channel enable, interrupt setup and LL base address programmed before
 * the doorbell is rung. The register write order below is deliberate.
 */
static void dw_hdma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma *dw = chan->dw;
	u32 tmp;

	dw_hdma_v0_core_write_chunk(chunk);

	if (first) {
		/* Enable engine */
		SET_CH_32(dw, chan->dir, chan->id, ch_en, BIT(0));
		/* Interrupt enable&unmask - done, abort */
		tmp = GET_CH_32(dw, chan->dir, chan->id, int_setup) |
		      HDMA_V0_STOP_INT_MASK | HDMA_V0_ABORT_INT_MASK |
		      HDMA_V0_LOCAL_STOP_INT_EN | HDMA_V0_LOCAL_ABORT_INT_EN;
		/* Remote setup additionally needs the remote-side interrupts */
		if (!(dw->chip->flags & DW_EDMA_CHIP_LOCAL))
			tmp |= HDMA_V0_REMOTE_STOP_INT_EN | HDMA_V0_REMOTE_ABORT_INT_EN;
		SET_CH_32(dw, chan->dir, chan->id, int_setup, tmp);
		/* Channel control */
		SET_CH_32(dw, chan->dir, chan->id, control1, HDMA_V0_LINKLIST_EN);
		/* Linked list */
		/* llp is not aligned on 64bit -> keep 32bit accesses */
		SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
			  lower_32_bits(chunk->ll_region.paddr));
		SET_CH_32(dw, chan->dir, chan->id, llp.msb,
			  upper_32_bits(chunk->ll_region.paddr));
	}
	/* Set consumer cycle */
	SET_CH_32(dw, chan->dir, chan->id, cycle_sync,
		  HDMA_V0_CONSUMER_CYCLE_STAT | HDMA_V0_CONSUMER_CYCLE_BIT);

	/* Flush LL data (remote setup) before ringing the doorbell */
	dw_hdma_v0_sync_ll_data(chunk);

	/* Doorbell */
	SET_CH_32(dw, chan->dir, chan->id, doorbell, HDMA_V0_DOORBELL_START);
}
275
/*
 * Program the channel's MSI target. Note the same MSI address is used
 * for both the "stop" (done) and "abort" events; only the message data
 * distinguishes them on the interrupt controller side.
 */
static void dw_hdma_v0_core_ch_config(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;

	/* MSI done addr - low, high */
	SET_CH_32(dw, chan->dir, chan->id, msi_stop.lsb, chan->msi.address_lo);
	SET_CH_32(dw, chan->dir, chan->id, msi_stop.msb, chan->msi.address_hi);
	/* MSI abort addr - low, high */
	SET_CH_32(dw, chan->dir, chan->id, msi_abort.lsb, chan->msi.address_lo);
	SET_CH_32(dw, chan->dir, chan->id, msi_abort.msb, chan->msi.address_hi);
	/* config MSI data */
	SET_CH_32(dw, chan->dir, chan->id, msi_msgdata, chan->msi.data);
}
289
290/* HDMA debugfs callbacks */
/* HDMA debugfs callbacks */
/* Forwarder so the generic core ops table can enable the v0 debugfs. */
static void dw_hdma_v0_core_debugfs_on(struct dw_edma *dw)
{
	dw_hdma_v0_debugfs_on(dw);
}
295
/* HDMA v0 implementation of the generic dw-edma core operations. */
static const struct dw_edma_core_ops dw_hdma_v0_core = {
	.off = dw_hdma_v0_core_off,
	.ch_count = dw_hdma_v0_core_ch_count,
	.ch_status = dw_hdma_v0_core_ch_status,
	.handle_int = dw_hdma_v0_core_handle_int,
	.start = dw_hdma_v0_core_start,
	.ch_config = dw_hdma_v0_core_ch_config,
	.debugfs_on = dw_hdma_v0_core_debugfs_on,
};
305
/* Attach the HDMA v0 operation table to the controller descriptor. */
void dw_hdma_v0_core_register(struct dw_edma *dw)
{
	dw->core = &dw_hdma_v0_core;
}
310