// SPDX-License-Identifier: GPL-2.0
/*
 * MediaTek UART APDMA driver.
 *
 * Copyright (c) 2019 MediaTek Inc.
 * Author: Long Cheng <long.cheng@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../virt-dma.h"

/* The default number of virtual channels */
#define MTK_UART_APDMA_NR_VCHANS	8

#define VFF_EN_B		BIT(0)
#define VFF_STOP_B		BIT(0)
#define VFF_FLUSH_B		BIT(0)
#define VFF_4G_EN_B		BIT(0)
/* interrupt when rx valid size >= vff threshold */
#define VFF_RX_INT_EN_B		(BIT(0) | BIT(1))
/* interrupt when tx left size >= vff threshold */
#define VFF_TX_INT_EN_B		BIT(0)
#define VFF_WARM_RST_B		BIT(0)
#define VFF_RX_INT_CLR_B	(BIT(0) | BIT(1))
#define VFF_TX_INT_CLR_B	0
#define VFF_STOP_CLR_B		0
#define VFF_EN_CLR_B		0
#define VFF_INT_EN_CLR_B	0
#define VFF_4G_SUPPORT_CLR_B	0

/*
 * Interrupt trigger level for TX.
 * If the threshold is n, no polling is required to start TX;
 * otherwise VFF_FLUSH must be polled.
 */
#define VFF_TX_THRE(n)		(n)
/* Interrupt trigger level for RX */
#define VFF_RX_THRE(n)		((n) * 3 / 4)

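/*
 * The VFF read/write pointers hold a 16-bit offset into the ring in bits
 * [15:0] plus a wrap flag in bit 16 that is toggled on every lap, so a
 * full ring can be told apart from an empty one.
 */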
#define VFF_RING_SIZE	0xffff
/* this bit is inverted each time the ring pointer wraps past the end */
#define VFF_RING_WRAP	0x10000

#define VFF_INT_FLAG		0x00
#define VFF_INT_EN		0x04
#define VFF_EN			0x08
#define VFF_RST			0x0c
#define VFF_STOP		0x10
#define VFF_FLUSH		0x14
#define VFF_ADDR		0x1c
#define VFF_LEN			0x24
#define VFF_THRE		0x28
#define VFF_WPT			0x2c
#define VFF_RPT			0x30
/* TX: the buffer size HW can read. RX: the buffer size SW can read. */
#define VFF_VALID_SIZE		0x3c
/* TX: the buffer size SW can write. RX: the buffer size HW can write. */
#define VFF_LEFT_SIZE		0x40
#define VFF_DEBUG_STATUS	0x50
#define VFF_4G_SUPPORT		0x54

struct mtk_uart_apdmadev {
	struct dma_device ddev;
	struct clk *clk;
	bool support_33bits;
	unsigned int dma_requests;
};

struct mtk_uart_apdma_desc {
	struct virt_dma_desc vd;

	dma_addr_t addr;
	unsigned int avail_len;
};

struct mtk_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config	cfg;
	struct mtk_uart_apdma_desc *desc;
	enum dma_transfer_direction dir;

	void __iomem *base;
	unsigned int irq;

	unsigned int rx_status;
};

static inline struct mtk_uart_apdmadev *
to_mtk_uart_apdma_dev(struct dma_device *d)
{
	return container_of(d, struct mtk_uart_apdmadev, ddev);
}

static inline struct mtk_chan *to_mtk_uart_apdma_chan(struct dma_chan *c)
{
	return container_of(c, struct mtk_chan, vc.chan);
}

static inline struct mtk_uart_apdma_desc *to_mtk_uart_apdma_desc
	(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct mtk_uart_apdma_desc, vd.tx);
}

static void mtk_uart_apdma_write(struct mtk_chan *c,
			       unsigned int reg, unsigned int val)
{
	writel(val, c->base + reg);
}

static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)
{
	return readl(c->base + reg);
}

static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
}

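/*
 * Program the TX virtual FIFO on first use (base address, ring length,
 * threshold), enable it, then advance the write pointer by the descriptor
 * length (toggling the wrap bit at the ring end) and flush so the HW
 * starts draining data towards the UART. If the FIFO has no room left,
 * only enable the TX interrupt and wait.
 */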
static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
				to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int wpt, vff_sz;

	vff_sz = c->cfg.dst_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_TX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_WPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable TX fail\n");

	if (!mtk_uart_apdma_read(c, VFF_LEFT_SIZE)) {
		mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
		return;
	}

	wpt = mtk_uart_apdma_read(c, VFF_WPT);

	wpt += c->desc->avail_len;
	if ((wpt & VFF_RING_SIZE) == vff_sz)
		wpt = (wpt & VFF_RING_WRAP) ^ VFF_RING_WRAP;

	/* Let DMA start moving data */
	mtk_uart_apdma_write(c, VFF_WPT, wpt);

	/* HW automatically clears it to 0 when left size >= threshold */
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_TX_INT_EN_B);
	if (!mtk_uart_apdma_read(c, VFF_FLUSH))
		mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);
}

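/*
 * Program the RX virtual FIFO on first use (base address, ring length,
 * threshold), then enable the RX interrupt and the VFF so the HW can start
 * filling the ring with incoming UART data.
 */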
static void mtk_uart_apdma_start_rx(struct mtk_chan *c)
{
	struct mtk_uart_apdmadev *mtkd =
				to_mtk_uart_apdma_dev(c->vc.chan.device);
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int vff_sz;

	vff_sz = c->cfg.src_port_window_size;
	if (!mtk_uart_apdma_read(c, VFF_LEN)) {
		mtk_uart_apdma_write(c, VFF_ADDR, d->addr);
		mtk_uart_apdma_write(c, VFF_LEN, vff_sz);
		mtk_uart_apdma_write(c, VFF_THRE, VFF_RX_THRE(vff_sz));
		mtk_uart_apdma_write(c, VFF_RPT, 0);
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

		if (mtkd->support_33bits)
			mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_EN_B);
	}

	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_RX_INT_EN_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_B);
	if (mtk_uart_apdma_read(c, VFF_EN) != VFF_EN_B)
		dev_err(c->vc.chan.device->dev, "Enable RX fail\n");
}

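/* TX interrupt: acknowledge it, mask further interrupts and disable the VFF. */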
static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
}

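/*
 * RX interrupt: compute the number of bytes the HW wrote from the read and
 * write pointers (accounting for the wrap bit), store the remaining residue
 * in rx_status and advance RPT to consume the data.
 */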
static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;
	unsigned int len, wg, rg;
	int cnt;

	mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);

	if (!mtk_uart_apdma_read(c, VFF_VALID_SIZE))
		return;

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	len = c->cfg.src_port_window_size;
	rg = mtk_uart_apdma_read(c, VFF_RPT);
	wg = mtk_uart_apdma_read(c, VFF_WPT);
	cnt = (wg & VFF_RING_SIZE) - (rg & VFF_RING_SIZE);

	/*
	 * The buffer is a ring buffer. If the wrap bits differ,
	 * WPT has started the next lap around the ring.
	 */
	if ((rg ^ wg) & VFF_RING_WRAP)
		cnt += len;

	c->rx_status = d->avail_len - cnt;
	mtk_uart_apdma_write(c, VFF_RPT, wg);
}

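/*
 * Complete the in-flight descriptor, if any, and clear c->desc so that
 * issue_pending can start the next one.
 */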
static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
{
	struct mtk_uart_apdma_desc *d = c->desc;

	if (d) {
		list_del(&d->vd.node);
		vchan_cookie_complete(&d->vd);
		c->desc = NULL;
	}
}

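/*
 * Each channel has its own interrupt line: dispatch to the RX or TX handler
 * based on the channel direction, then complete the current descriptor.
 */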
static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
{
	struct dma_chan *chan = (struct dma_chan *)dev_id;
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_rx_handler(c);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_tx_handler(c);
	mtk_uart_apdma_chan_complete_handler(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return IRQ_HANDLED;
}

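/*
 * Runtime-resume the controller, warm-reset the channel and wait for the VFF
 * to report disabled, then request the per-channel interrupt.
 */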
static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned int status;
	int ret;

	ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
	if (ret < 0) {
		pm_runtime_put_noidle(chan->device->dev);
		return ret;
	}

	mtk_uart_apdma_write(c, VFF_ADDR, 0);
	mtk_uart_apdma_write(c, VFF_THRE, 0);
	mtk_uart_apdma_write(c, VFF_LEN, 0);
	mtk_uart_apdma_write(c, VFF_RST, VFF_WARM_RST_B);

	ret = readx_poll_timeout(readl, c->base + VFF_EN,
			  status, !status, 10, 100);
	if (ret)
		goto err_pm;

	ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
			  IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
	if (ret < 0) {
		dev_err(chan->device->dev, "Can't request dma IRQ\n");
		ret = -EINVAL;
		goto err_pm;
	}

	if (mtkd->support_33bits)
		mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);

err_pm:
	pm_runtime_put_noidle(mtkd->ddev.dev);
	return ret;
}

static void mtk_uart_apdma_free_chan_resources(struct dma_chan *chan)
{
	struct mtk_uart_apdmadev *mtkd = to_mtk_uart_apdma_dev(chan->device);
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	free_irq(c->irq, chan);

	tasklet_kill(&c->vc.task);

	vchan_free_chan_resources(&c->vc);

	pm_runtime_put_sync(mtkd->ddev.dev);
}

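/*
 * The reported residue comes from c->rx_status, which is only updated by the
 * RX interrupt handler.
 */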
static enum dma_status mtk_uart_apdma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (!txstate)
		return ret;

	dma_set_residue(txstate, c->rx_status);

	return ret;
}

/*
 * Called via dmaengine_prep_slave_single(), so sglen is 1.
 * The 8250 UART driver uses one ring buffer and deals with one sg entry.
 */
static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
	(struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sglen, enum dma_transfer_direction dir,
	unsigned long tx_flags, void *context)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct mtk_uart_apdma_desc *d;

	if (!is_slave_direction(dir) || sglen != 1)
		return NULL;

	/* Now allocate and set up the descriptor */
	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		return NULL;

	d->avail_len = sg_dma_len(sgl);
	d->addr = sg_dma_address(sgl);
	c->dir = dir;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

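/*
 * Hypothetical client-side sketch (not part of this driver); rx_fifo_phys,
 * ring_size and buf_dma are assumed names. A UART driver would typically
 * prepare an RX transfer roughly like:
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr		= rx_fifo_phys,
 *		.src_port_window_size	= ring_size,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_dma, ring_size,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */

/*
 * Take the next issued descriptor and kick off the transfer in the direction
 * recorded by prep_slave_sg().
 */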
static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		vd = vchan_next_desc(&c->vc);
		c->desc = to_mtk_uart_apdma_desc(&vd->tx);

		if (c->dir == DMA_DEV_TO_MEM)
			mtk_uart_apdma_start_rx(c);
		else if (c->dir == DMA_MEM_TO_DEV)
			mtk_uart_apdma_start_tx(c);
	}

	spin_unlock_irqrestore(&c->vc.lock, flags);
}

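/*
 * The client passes the VFF ring size via dst/src_port_window_size;
 * start_tx()/start_rx() program it into VFF_LEN.
 */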
static int mtk_uart_apdma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);

	memcpy(&c->cfg, config, sizeof(*config));

	return 0;
}

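/*
 * Flush whatever the HW is still moving, stop and disable the VFF, clear and
 * mask its interrupts, then free every queued descriptor.
 */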
static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;
	unsigned int status;
	LIST_HEAD(head);
	int ret;

	mtk_uart_apdma_write(c, VFF_FLUSH, VFF_FLUSH_B);

	ret = readx_poll_timeout(readl, c->base + VFF_FLUSH,
			  status, status != VFF_FLUSH_B, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "flush: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	/*
	 * Stopping requires three steps:
	 * 1. set STOP to 1
	 * 2. wait for EN to become 0
	 * 3. clear STOP back to 0
	 */
	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_B);
	ret = readx_poll_timeout(readl, c->base + VFF_EN,
			  status, !status, 10, 100);
	if (ret)
		dev_err(c->vc.chan.device->dev, "stop: fail, status=0x%x\n",
			mtk_uart_apdma_read(c, VFF_DEBUG_STATUS));

	mtk_uart_apdma_write(c, VFF_STOP, VFF_STOP_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	if (c->dir == DMA_DEV_TO_MEM)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_RX_INT_CLR_B);
	else if (c->dir == DMA_MEM_TO_DEV)
		mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);

	synchronize_irq(c->irq);

	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);

	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

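/*
 * Pause by disabling the VFF and masking its interrupt, then wait for a
 * possibly in-flight interrupt handler to finish.
 */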
static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
{
	struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);

	mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
	mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);

	spin_unlock_irqrestore(&c->vc.lock, flags);
	synchronize_irq(c->irq);

	return 0;
}

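/* Remove all channels added by vchan_init() and kill their tasklets. */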
static void mtk_uart_apdma_free(struct mtk_uart_apdmadev *mtkd)
{
	while (!list_empty(&mtkd->ddev.channels)) {
		struct mtk_chan *c = list_first_entry(&mtkd->ddev.channels,
			struct mtk_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static const struct of_device_id mtk_uart_apdma_match[] = {
	{ .compatible = "mediatek,mt6577-uart-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mtk_uart_apdma_match);

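/*
 * Each channel has its own register window and interrupt line; the number of
 * channels comes from the "dma-requests" property (default
 * MTK_UART_APDMA_NR_VCHANS).
 */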
static int mtk_uart_apdma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct mtk_uart_apdmadev *mtkd;
	int bit_mask = 32, rc;
	struct mtk_chan *c;
	unsigned int i;

	mtkd = devm_kzalloc(&pdev->dev, sizeof(*mtkd), GFP_KERNEL);
	if (!mtkd)
		return -ENOMEM;

	mtkd->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(mtkd->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		rc = PTR_ERR(mtkd->clk);
		return rc;
	}

	if (of_property_read_bool(np, "mediatek,dma-33bits"))
		mtkd->support_33bits = true;

	if (mtkd->support_33bits)
		bit_mask = 33;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bit_mask));
	if (rc)
		return rc;

	dma_cap_set(DMA_SLAVE, mtkd->ddev.cap_mask);
	mtkd->ddev.device_alloc_chan_resources =
				mtk_uart_apdma_alloc_chan_resources;
	mtkd->ddev.device_free_chan_resources =
				mtk_uart_apdma_free_chan_resources;
	mtkd->ddev.device_tx_status = mtk_uart_apdma_tx_status;
	mtkd->ddev.device_issue_pending = mtk_uart_apdma_issue_pending;
	mtkd->ddev.device_prep_slave_sg = mtk_uart_apdma_prep_slave_sg;
	mtkd->ddev.device_config = mtk_uart_apdma_slave_config;
	mtkd->ddev.device_pause = mtk_uart_apdma_device_pause;
	mtkd->ddev.device_terminate_all = mtk_uart_apdma_terminate_all;
	mtkd->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE);
	mtkd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	mtkd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	mtkd->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&mtkd->ddev.channels);

	mtkd->dma_requests = MTK_UART_APDMA_NR_VCHANS;
	if (of_property_read_u32(np, "dma-requests", &mtkd->dma_requests)) {
		dev_info(&pdev->dev,
			 "Using %u as missing dma-requests property\n",
			 MTK_UART_APDMA_NR_VCHANS);
	}

	for (i = 0; i < mtkd->dma_requests; i++) {
		c = devm_kzalloc(mtkd->ddev.dev, sizeof(*c), GFP_KERNEL);
		if (!c) {
			rc = -ENODEV;
			goto err_no_dma;
		}

		c->base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(c->base)) {
			rc = PTR_ERR(c->base);
			goto err_no_dma;
		}
		c->vc.desc_free = mtk_uart_apdma_desc_free;
		vchan_init(&c->vc, &mtkd->ddev);

		rc = platform_get_irq(pdev, i);
		if (rc < 0)
			goto err_no_dma;
		c->irq = rc;
	}

	pm_runtime_enable(&pdev->dev);

	rc = dma_async_device_register(&mtkd->ddev);
	if (rc)
		goto rpm_disable;

	platform_set_drvdata(pdev, mtkd);

	/* Device-tree DMA controller registration */
	rc = of_dma_controller_register(np, of_dma_xlate_by_chan_id, mtkd);
	if (rc)
		goto dma_remove;

	return rc;

dma_remove:
	dma_async_device_unregister(&mtkd->ddev);
rpm_disable:
	pm_runtime_disable(&pdev->dev);
err_no_dma:
	mtk_uart_apdma_free(mtkd);
	return rc;
}

static void mtk_uart_apdma_remove(struct platform_device *pdev)
{
	struct mtk_uart_apdmadev *mtkd = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);

	mtk_uart_apdma_free(mtkd);

	dma_async_device_unregister(&mtkd->ddev);

	pm_runtime_disable(&pdev->dev);
}

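/*
 * System sleep only gates the bus clock when the device is not already
 * runtime-suspended; runtime PM simply gates and ungates the clock.
 */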
#ifdef CONFIG_PM_SLEEP
static int mtk_uart_apdma_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_resume(struct device *dev)
{
	int ret;
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	if (!pm_runtime_suspended(dev)) {
		ret = clk_prepare_enable(mtkd->clk);
		if (ret)
			return ret;
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
static int mtk_uart_apdma_runtime_suspend(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	clk_disable_unprepare(mtkd->clk);

	return 0;
}

static int mtk_uart_apdma_runtime_resume(struct device *dev)
{
	struct mtk_uart_apdmadev *mtkd = dev_get_drvdata(dev);

	return clk_prepare_enable(mtkd->clk);
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops mtk_uart_apdma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_uart_apdma_suspend, mtk_uart_apdma_resume)
	SET_RUNTIME_PM_OPS(mtk_uart_apdma_runtime_suspend,
			   mtk_uart_apdma_runtime_resume, NULL)
};

static struct platform_driver mtk_uart_apdma_driver = {
	.probe	= mtk_uart_apdma_probe,
	.remove_new = mtk_uart_apdma_remove,
	.driver = {
		.name		= KBUILD_MODNAME,
		.pm		= &mtk_uart_apdma_pm_ops,
		.of_match_table = of_match_ptr(mtk_uart_apdma_match),
	},
};

module_platform_driver(mtk_uart_apdma_driver);

MODULE_DESCRIPTION("MediaTek UART APDMA Controller Driver");
MODULE_AUTHOR("Long Cheng <long.cheng@mediatek.com>");
MODULE_LICENSE("GPL v2");