// SPDX-License-Identifier: GPL-2.0-only
/*
 * The driver for Freescale MPC512x LocalPlus Bus FIFO
 * (called SCLPC in the Reference Manual).
 *
 * Copyright (C) 2013-2015 Alexander Popov <alex.popov@linux.com>.
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <asm/mpc5121.h>
#include <asm/io.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>

#define DRV_NAME "mpc512x_lpbfifo"

struct cs_range {
	u32 csnum;
	u32 base; /* must be zero */
	u32 addr;
	u32 size;
};

static struct lpbfifo_data {
	spinlock_t lock; /* for protecting lpbfifo_data */
	phys_addr_t regs_phys;
	resource_size_t regs_size;
	struct mpc512x_lpbfifo __iomem *regs;
	int irq;
	struct cs_range *cs_ranges;
	size_t cs_n;
	struct dma_chan *chan;
	struct mpc512x_lpbfifo_request *req;
	dma_addr_t ram_bus_addr;
	bool wait_lpbfifo_irq;
	bool wait_lpbfifo_callback;
} lpbfifo;

/*
 * A data transfer from RAM to a device on the LPB is finished when both
 * mpc512x_lpbfifo_irq() and mpc512x_lpbfifo_callback() have been called.
 * The callback registered in mpc512x_lpbfifo_request is executed just
 * after that.
 * For a data transfer from a device on the LPB to RAM, however, the LPBFIFO
 * interrupt is not enabled, because clearing the MPC512X_SCLPC_SUCCESS
 * interrupt flag automatically disables the LPBFIFO reading request to the
 * DMA controller and the data transfer hangs. So the callback registered in
 * mpc512x_lpbfifo_request is executed at the end of mpc512x_lpbfifo_callback().
 */
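
/*
 * Summary of the flags armed in mpc512x_lpbfifo_submit_locked() and
 * mpc512x_lpbfifo_kick():
 * - write (RAM -> LPB device): wait_lpbfifo_irq and wait_lpbfifo_callback
 *   are both set, and req->callback runs after whichever of the IRQ handler
 *   and the DMA callback fires last;
 * - read (LPB device -> RAM): only wait_lpbfifo_callback is set, so
 *   req->callback always runs from mpc512x_lpbfifo_callback().
 */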

/*
 * mpc512x_lpbfifo_irq - IRQ handler for LPB FIFO
 */
static irqreturn_t mpc512x_lpbfifo_irq(int irq, void *param)
{
	struct device *dev = (struct device *)param;
	struct mpc512x_lpbfifo_request *req = NULL;
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	if (!lpbfifo.regs)
		goto end;

	req = lpbfifo.req;
	if (!req || req->dir == MPC512X_LPBFIFO_REQ_DIR_READ) {
		dev_err(dev, "bogus LPBFIFO IRQ\n");
		goto end;
	}

	status = in_be32(&lpbfifo.regs->status);
	if (status != MPC512X_SCLPC_SUCCESS) {
		dev_err(dev, "DMA transfer from RAM to peripheral failed\n");
		out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
		goto end;
	}
	/* Clear the interrupt flag */
	out_be32(&lpbfifo.regs->status, MPC512X_SCLPC_SUCCESS);

	lpbfifo.wait_lpbfifo_irq = false;

	if (lpbfifo.wait_lpbfifo_callback)
		goto end;

	/* Transfer is finished, set the FIFO as idle */
	lpbfifo.req = NULL;

	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	if (req->callback)
		req->callback(req);

	return IRQ_HANDLED;

 end:
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return IRQ_HANDLED;
}

/*
 * mpc512x_lpbfifo_callback is called by the DMA driver when
 * the DMA transaction is finished.
 */
static void mpc512x_lpbfifo_callback(void *param)
{
	unsigned long flags;
	struct mpc512x_lpbfifo_request *req = NULL;
	enum dma_data_direction dir;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	if (!lpbfifo.regs) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return;
	}

	req = lpbfifo.req;
	if (!req) {
		pr_err("bogus LPBFIFO callback\n");
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return;
	}

	/* Release the mapping */
	if (req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;
	dma_unmap_single(lpbfifo.chan->device->dev,
			lpbfifo.ram_bus_addr, req->size, dir);

	lpbfifo.wait_lpbfifo_callback = false;

	if (!lpbfifo.wait_lpbfifo_irq) {
		/* Transfer is finished, set the FIFO as idle */
		lpbfifo.req = NULL;

		spin_unlock_irqrestore(&lpbfifo.lock, flags);

		if (req->callback)
			req->callback(req);
	} else {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
	}
}

static int mpc512x_lpbfifo_kick(void)
{
	u32 bits;
	bool no_incr = false;
	u32 bpt = 32; /* max bytes per LPBFIFO transaction involving DMA */
	u32 cs = 0;
	size_t i;
	struct dma_device *dma_dev = NULL;
	struct scatterlist sg;
	enum dma_data_direction dir;
	struct dma_slave_config dma_conf = {};
	struct dma_async_tx_descriptor *dma_tx = NULL;
	dma_cookie_t cookie;
	int ret;

	/*
	 * 1. Fit the requirements:
	 * - the packet size must be a multiple of 4 since the FIFO Data Word
	 *    Register allows only full-word access according to the Reference
	 *    Manual;
	 * - the physical address of the device on the LPB and the packet size
	 *    must be aligned on a BPT (bytes per transaction) or 8-byte
	 *    boundary according to the Reference Manual;
	 * - but we choose a DMA maxburst equal (or very close) to BPT to
	 *    prevent the DMA controller from overtaking the FIFO and causing a
	 *    FIFO underflow error. So we force the packet size to be aligned
	 *    on a BPT boundary so as not to confuse the DMA driver, which
	 *    requires the packet size to be aligned on a maxburst boundary;
	 * - BPT should be set to the LPB device port size for operation with
	 *    disabled auto-incrementing according to the Reference Manual.
	 */
	if (lpbfifo.req->size == 0 || !IS_ALIGNED(lpbfifo.req->size, 4))
		return -EINVAL;

	if (lpbfifo.req->portsize != LPB_DEV_PORTSIZE_UNDEFINED) {
		bpt = lpbfifo.req->portsize;
		no_incr = true;
	}

	while (bpt > 1) {
		if (IS_ALIGNED(lpbfifo.req->dev_phys_addr, min(bpt, 0x8u)) &&
					IS_ALIGNED(lpbfifo.req->size, bpt)) {
			break;
		}

		if (no_incr)
			return -EINVAL;

		bpt >>= 1;
	}
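	/*
	 * Example (illustrative values): with an undefined port size bpt
	 * starts at 32; if dev_phys_addr is only 4-byte aligned and the size
	 * is a multiple of 4, the loop above shrinks bpt to 4 and the
	 * maxburst below becomes one word. With a defined port size (no_incr)
	 * a misaligned request fails with -EINVAL instead of shrinking bpt.
	 */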
	dma_conf.dst_maxburst = max(bpt, 0x4u) / 4;
	dma_conf.src_maxburst = max(bpt, 0x4u) / 4;

	for (i = 0; i < lpbfifo.cs_n; i++) {
		phys_addr_t cs_start = lpbfifo.cs_ranges[i].addr;
		phys_addr_t cs_end = cs_start + lpbfifo.cs_ranges[i].size;
		phys_addr_t access_start = lpbfifo.req->dev_phys_addr;
		phys_addr_t access_end = access_start + lpbfifo.req->size;

		if (access_start >= cs_start && access_end <= cs_end) {
			cs = lpbfifo.cs_ranges[i].csnum;
			break;
		}
	}
	if (i == lpbfifo.cs_n)
		return -EFAULT;

	/* 2. Prepare DMA */
	dma_dev = lpbfifo.chan->device;

	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE) {
		dir = DMA_TO_DEVICE;
		dma_conf.direction = DMA_MEM_TO_DEV;
		dma_conf.dst_addr = lpbfifo.regs_phys +
				offsetof(struct mpc512x_lpbfifo, data_word);
	} else {
		dir = DMA_FROM_DEVICE;
		dma_conf.direction = DMA_DEV_TO_MEM;
		dma_conf.src_addr = lpbfifo.regs_phys +
				offsetof(struct mpc512x_lpbfifo, data_word);
	}
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	/* Make DMA channel work with LPB FIFO data register */
	if (dma_dev->device_config(lpbfifo.chan, &dma_conf)) {
		/* Nothing is DMA-mapped yet, so don't jump to the unmap path */
		return -EINVAL;
	}

	sg_init_table(&sg, 1);

	sg_dma_address(&sg) = dma_map_single(dma_dev->dev,
			lpbfifo.req->ram_virt_addr, lpbfifo.req->size, dir);
	if (dma_mapping_error(dma_dev->dev, sg_dma_address(&sg)))
		return -EFAULT;

	lpbfifo.ram_bus_addr = sg_dma_address(&sg); /* For freeing later */

	sg_dma_len(&sg) = lpbfifo.req->size;

	dma_tx = dmaengine_prep_slave_sg(lpbfifo.chan, &sg,
						1, dma_conf.direction, 0);
	if (!dma_tx) {
		ret = -ENOSPC;
		goto err_dma_prep;
	}
	dma_tx->callback = mpc512x_lpbfifo_callback;
	dma_tx->callback_param = NULL;

	/* 3. Prepare FIFO */
	out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
	out_be32(&lpbfifo.regs->enable, 0x0);

	/*
	 * Configure the watermarks for a write operation (RAM->DMA->FIFO->dev):
	 * - high watermark 7 words according to the Reference Manual,
	 * - low watermark 512 bytes (half of the FIFO).
	 * These watermarks don't apply to a read operation since the
	 * MPC512X_SCLPC_FLUSH bit is set (according to the Reference Manual).
	 */
	out_be32(&lpbfifo.regs->fifo_ctrl, MPC512X_SCLPC_FIFO_CTRL(0x7));
	out_be32(&lpbfifo.regs->fifo_alarm, MPC512X_SCLPC_FIFO_ALARM(0x200));

	/*
	 * Start address is a physical address of the region which belongs
	 * to the device on the LocalPlus Bus
	 */
	out_be32(&lpbfifo.regs->start_addr, lpbfifo.req->dev_phys_addr);

	/*
	 * Configure chip select, transfer direction, address increment option
	 * and bytes per transaction option
	 */
	bits = MPC512X_SCLPC_CS(cs);
	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_READ)
		bits |= MPC512X_SCLPC_READ | MPC512X_SCLPC_FLUSH;
	if (no_incr)
		bits |= MPC512X_SCLPC_DAI;
	bits |= MPC512X_SCLPC_BPT(bpt);
	out_be32(&lpbfifo.regs->ctrl, bits);

	/* Unmask irqs */
	bits = MPC512X_SCLPC_ENABLE | MPC512X_SCLPC_ABORT_INT_ENABLE;
	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
		bits |= MPC512X_SCLPC_NORM_INT_ENABLE;
	else
		lpbfifo.wait_lpbfifo_irq = false;

	out_be32(&lpbfifo.regs->enable, bits);

	/* 4. Set packet size and kick FIFO off */
	bits = lpbfifo.req->size | MPC512X_SCLPC_START;
	out_be32(&lpbfifo.regs->pkt_size, bits);

	/* 5. Finally kick DMA off */
	cookie = dma_tx->tx_submit(dma_tx);
	if (dma_submit_error(cookie)) {
		ret = -ENOSPC;
		goto err_dma_submit;
	}

	return 0;

 err_dma_submit:
	out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
 err_dma_prep:
	dma_unmap_single(dma_dev->dev, sg_dma_address(&sg),
						lpbfifo.req->size, dir);
	return ret;
}

static int mpc512x_lpbfifo_submit_locked(struct mpc512x_lpbfifo_request *req)
{
	int ret = 0;

	if (!lpbfifo.regs)
		return -ENODEV;

	/* Check whether a transfer is in progress */
	if (lpbfifo.req)
		return -EBUSY;

	lpbfifo.wait_lpbfifo_irq = true;
	lpbfifo.wait_lpbfifo_callback = true;
	lpbfifo.req = req;

	ret = mpc512x_lpbfifo_kick();
	if (ret != 0)
		lpbfifo.req = NULL; /* Set the FIFO as idle */

	return ret;
}

int mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request *req)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	ret = mpc512x_lpbfifo_submit_locked(req);
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	return ret;
}
EXPORT_SYMBOL(mpc512x_lpbfifo_submit);
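
/*
 * Minimal usage sketch for a client driver (illustrative, not taken from an
 * in-tree user; dev_phys, buf, len and lpb_xfer_done are placeholder names,
 * while the request fields are the ones referenced in this file):
 *
 *	static void lpb_xfer_done(struct mpc512x_lpbfifo_request *req)
 *	{
 *		// runs once the whole transfer has completed
 *	}
 *
 *	struct mpc512x_lpbfifo_request req = {
 *		.dir = MPC512X_LPBFIFO_REQ_DIR_WRITE,
 *		.dev_phys_addr = dev_phys,  // inside a localbus chip select range
 *		.ram_virt_addr = buf,       // DMA-able buffer
 *		.size = len,                // multiple of 4 bytes
 *		.portsize = LPB_DEV_PORTSIZE_UNDEFINED,
 *		.callback = lpb_xfer_done,
 *	};
 *
 *	int err = mpc512x_lpbfifo_submit(&req);
 *	// -EBUSY means another transfer is still in flight
 */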

/*
 * The LPBFIFO driver uses the "ranges" property of the "localbus" device
 * tree node to determine the chip select number of the client device
 * ordering a DMA transfer.
 */
static int get_cs_ranges(struct device *dev)
{
	int ret = -ENODEV;
	struct device_node *lb_node;
	size_t i = 0;
	struct of_range_parser parser;
	struct of_range range;

	lb_node = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-localbus");
	if (!lb_node)
		return ret;

	of_range_parser_init(&parser, lb_node);
	lpbfifo.cs_n = of_range_count(&parser);

	lpbfifo.cs_ranges = devm_kcalloc(dev, lpbfifo.cs_n,
					sizeof(struct cs_range), GFP_KERNEL);
	if (!lpbfifo.cs_ranges)
		goto end;

	for_each_of_range(&parser, &range) {
		u32 base = lower_32_bits(range.bus_addr);

		if (base)
			goto end;

		lpbfifo.cs_ranges[i].csnum = upper_32_bits(range.bus_addr);
		lpbfifo.cs_ranges[i].base = base;
		lpbfifo.cs_ranges[i].addr = range.cpu_addr;
		lpbfifo.cs_ranges[i].size = range.size;
		i++;
	}

	ret = 0;

 end:
	of_node_put(lb_node);
	return ret;
}
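
/*
 * Illustrative "ranges" entry under the localbus node and how get_cs_ranges()
 * interprets it (the addresses and size below are assumptions, only the cell
 * layout <csnum base cpu_addr size> matters here):
 *
 *	ranges = <0x0 0x0 0xfc000000 0x04000000>;
 *
 * becomes one struct cs_range with csnum = 0, base = 0 (must be zero),
 * addr = 0xfc000000 and size = 0x04000000, so a request whose
 * dev_phys_addr/size window lies inside that CPU address range is issued
 * on chip select 0 by mpc512x_lpbfifo_kick().
 */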

static int mpc512x_lpbfifo_probe(struct platform_device *pdev)
{
	struct resource r;
	int ret = 0;

	memset(&lpbfifo, 0, sizeof(struct lpbfifo_data));
	spin_lock_init(&lpbfifo.lock);

	lpbfifo.chan = dma_request_chan(&pdev->dev, "rx-tx");
	if (IS_ERR(lpbfifo.chan))
		return PTR_ERR(lpbfifo.chan);

	if (of_address_to_resource(pdev->dev.of_node, 0, &r) != 0) {
		dev_err(&pdev->dev, "bad 'reg' in 'sclpc' device tree node\n");
		ret = -ENODEV;
		goto err0;
	}

	lpbfifo.regs_phys = r.start;
	lpbfifo.regs_size = resource_size(&r);

	if (!devm_request_mem_region(&pdev->dev, lpbfifo.regs_phys,
					lpbfifo.regs_size, DRV_NAME)) {
		dev_err(&pdev->dev, "unable to request region\n");
		ret = -EBUSY;
		goto err0;
	}

	lpbfifo.regs = devm_ioremap(&pdev->dev,
					lpbfifo.regs_phys, lpbfifo.regs_size);
	if (!lpbfifo.regs) {
		dev_err(&pdev->dev, "mapping registers failed\n");
		ret = -ENOMEM;
		goto err0;
	}

	out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);

	if (get_cs_ranges(&pdev->dev) != 0) {
		dev_err(&pdev->dev, "bad '/localbus' device tree node\n");
		ret = -ENODEV;
		goto err0;
	}

	lpbfifo.irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!lpbfifo.irq) {
		dev_err(&pdev->dev, "mapping irq failed\n");
		ret = -ENODEV;
		goto err0;
	}

	if (request_irq(lpbfifo.irq, mpc512x_lpbfifo_irq, 0,
						DRV_NAME, &pdev->dev) != 0) {
		dev_err(&pdev->dev, "requesting irq failed\n");
		ret = -ENODEV;
		goto err1;
	}

	dev_info(&pdev->dev, "probe succeeded\n");
	return 0;

 err1:
	irq_dispose_mapping(lpbfifo.irq);
 err0:
	dma_release_channel(lpbfifo.chan);
	return ret;
}

static void mpc512x_lpbfifo_remove(struct platform_device *pdev)
{
	unsigned long flags;
	struct dma_device *dma_dev = lpbfifo.chan->device;
	struct mpc512x_lpbfifo __iomem *regs = NULL;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	regs = lpbfifo.regs;
	lpbfifo.regs = NULL;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	dma_dev->device_terminate_all(lpbfifo.chan);
	out_be32(&regs->enable, MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);

	free_irq(lpbfifo.irq, &pdev->dev);
	irq_dispose_mapping(lpbfifo.irq);
	dma_release_channel(lpbfifo.chan);
}

static const struct of_device_id mpc512x_lpbfifo_match[] = {
	{ .compatible = "fsl,mpc512x-lpbfifo", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match);

static struct platform_driver mpc512x_lpbfifo_driver = {
	.probe = mpc512x_lpbfifo_probe,
	.remove_new = mpc512x_lpbfifo_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = mpc512x_lpbfifo_match,
	},
};

module_platform_driver(mpc512x_lpbfifo_driver);

MODULE_AUTHOR("Alexander Popov <alex.popov@linux.com>");
MODULE_DESCRIPTION("MPC512x LocalPlus Bus FIFO device driver");
MODULE_LICENSE("GPL v2");