// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx DMA/Bridge Subsystem
 *
 * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
 * Copyright (C) 2022, Advanced Micro Devices, Inc.
 */

/*
 * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
 * between Host memory and the DMA subsystem. It does this by operating on
 * 'descriptors' that contain information about the source, destination and
 * amount of data to transfer. These direct memory transfers can be in both
 * the Host to Card (H2C) and Card to Host (C2H) directions. The DMA can be
 * configured to have a single AXI4 Master interface shared by all channels
 * or one AXI4-Stream interface for each channel enabled. Memory transfers are
 * specified on a per-channel basis in descriptor linked lists, which the DMA
 * fetches from host memory and processes. Events such as descriptor completion
 * and errors are signaled using interrupts. The core also provides up to 16
 * user interrupt wires that generate interrupts to the host.
 */

#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/regmap.h>
#include <linux/dmaengine.h>
#include <linux/dma/amd_xdma.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_xdma.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include "../virt-dma.h"
#include "xdma-regs.h"

/* mmio regmap config for all XDMA registers */
static const struct regmap_config xdma_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = XDMA_REG_SPACE_LEN,
};

/**
 * struct xdma_desc_block - Descriptor block
 * @virt_addr: Virtual address of block start
 * @dma_addr: DMA address of block start
 */
struct xdma_desc_block {
	void		*virt_addr;
	dma_addr_t	dma_addr;
};

/**
 * struct xdma_chan - Driver specific DMA channel structure
 * @vchan: Virtual channel
 * @xdev_hdl: Pointer to DMA device structure
 * @base: Offset of channel registers
 * @desc_pool: Descriptor pool
 * @busy: Busy flag of the channel
 * @dir: Transfer direction of the channel
 * @cfg: Transfer configuration of the channel
 * @irq: IRQ assigned to the channel
 * @last_interrupt: Completion signalled by the final interrupt after a stop request
 * @stop_requested: Set when the channel has been asked to stop
 */
struct xdma_chan {
	struct virt_dma_chan		vchan;
	void				*xdev_hdl;
	u32				base;
	struct dma_pool			*desc_pool;
	bool				busy;
	enum dma_transfer_direction	dir;
	struct dma_slave_config		cfg;
	u32				irq;
	struct completion		last_interrupt;
	bool				stop_requested;
};

/**
 * struct xdma_desc - DMA desc structure
 * @vdesc: Virtual DMA descriptor
 * @chan: DMA channel pointer
 * @dir: Transfer direction of the request
 * @desc_blocks: Hardware descriptor blocks
 * @dblk_num: Number of hardware descriptor blocks
 * @desc_num: Number of hardware descriptors
 * @completed_desc_num: Completed hardware descriptors
 * @cyclic: Cyclic transfer vs. scatter-gather
 * @interleaved_dma: Interleaved DMA transfer
 * @periods: Number of periods in the cyclic transfer
 * @period_size: Size of a period in bytes in cyclic transfers
 * @frames_left: Number of frames left in interleaved DMA transfer
 * @error: TX error flag
 */
struct xdma_desc {
	struct virt_dma_desc		vdesc;
	struct xdma_chan		*chan;
	enum dma_transfer_direction	dir;
	struct xdma_desc_block		*desc_blocks;
	u32				dblk_num;
	u32				desc_num;
	u32				completed_desc_num;
	bool				cyclic;
	bool				interleaved_dma;
	u32				periods;
	u32				period_size;
	u32				frames_left;
	bool				error;
};

#define XDMA_DEV_STATUS_REG_DMA		BIT(0)
#define XDMA_DEV_STATUS_INIT_MSIX	BIT(1)

/**
 * struct xdma_device - DMA device structure
 * @pdev: Platform device pointer
 * @dma_dev: DMA device structure
 * @rmap: MMIO regmap for DMA registers
 * @h2c_chans: Host to Card channels
 * @c2h_chans: Card to Host channels
 * @h2c_chan_num: Number of H2C channels
 * @c2h_chan_num: Number of C2H channels
 * @irq_start: Start IRQ assigned to device
 * @irq_num: Number of IRQs assigned to device
 * @status: Initialization status
 */
struct xdma_device {
	struct platform_device	*pdev;
	struct dma_device	dma_dev;
	struct regmap		*rmap;
	struct xdma_chan	*h2c_chans;
	struct xdma_chan	*c2h_chans;
	u32			h2c_chan_num;
	u32			c2h_chan_num;
	u32			irq_start;
	u32			irq_num;
	u32			status;
};

#define xdma_err(xdev, fmt, args...)					\
	dev_err(&(xdev)->pdev->dev, fmt, ##args)
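/* Total number of DMA channels (H2C + C2H) detected on the device */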
#define XDMA_CHAN_NUM(_xd) ({						\
	typeof(_xd) (xd) = (_xd);					\
	((xd)->h2c_chan_num + (xd)->c2h_chan_num); })

/* Get the last desc in a desc block */
static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
{
	return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
}

/**
 * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	u32 last_blk_desc, desc_control;
	struct xdma_hw_desc *desc;
	int i;

	desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
	for (i = 1; i < sw_desc->dblk_num; i++) {
		block = &sw_desc->desc_blocks[i - 1];
		desc = xdma_blk_last_desc(block);

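		/*
		 * Chain the blocks together. Every XDMA_DESC_BLOCK_NUM blocks
		 * end with a LAST descriptor so the engine stops and raises
		 * an interrupt; the handler resumes from the next block.
		 */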
		if (!(i & XDMA_DESC_BLOCK_MASK)) {
			desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
			continue;
		}
		desc->control = cpu_to_le32(desc_control);
		desc->next_desc = cpu_to_le64(block[1].dma_addr);
	}

	/* update the last block */
	last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
		block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
		desc = xdma_blk_last_desc(block);
		desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
		desc->control = cpu_to_le32(desc_control);
	}

	block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
	desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
	desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}

/**
 * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	struct xdma_hw_desc *desc;
	int i;

	block = sw_desc->desc_blocks;
	for (i = 0; i < sw_desc->desc_num - 1; i++) {
		desc = block->virt_addr + i * XDMA_DESC_SIZE;
		desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
	}
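	/* Close the ring: the last descriptor points back to the first one */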
	desc = block->virt_addr + i * XDMA_DESC_SIZE;
	desc->next_desc = cpu_to_le64(block->dma_addr);
}

static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct xdma_chan, vchan.chan);
}

static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct xdma_desc, vdesc);
}

/**
 * xdma_channel_init - Initialize DMA channel registers
 * @chan: DMA channel pointer
 */
static int xdma_channel_init(struct xdma_chan *chan)
{
	struct xdma_device *xdev = chan->xdev_hdl;
	int ret;

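	/* Clear the non-incremental address mode bit (write-1-to-clear) */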
	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_NON_INCR_ADDR);
	if (ret)
		return ret;

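	/* Enable all interrupt sources for this channel */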
	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
			   CHAN_IM_ALL);
	if (ret)
		return ret;

	return 0;
}

/**
 * xdma_free_desc - Free descriptor
 * @vdesc: Virtual DMA descriptor
 */
static void xdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct xdma_desc *sw_desc;
	int i;

	sw_desc = to_xdma_desc(vdesc);
	for (i = 0; i < sw_desc->dblk_num; i++) {
		if (!sw_desc->desc_blocks[i].virt_addr)
			break;
		dma_pool_free(sw_desc->chan->desc_pool,
			      sw_desc->desc_blocks[i].virt_addr,
			      sw_desc->desc_blocks[i].dma_addr);
	}
	kfree(sw_desc->desc_blocks);
	kfree(sw_desc);
}

/**
 * xdma_alloc_desc - Allocate descriptor
 * @chan: DMA channel pointer
 * @desc_num: Number of hardware descriptors
 * @cyclic: Whether this is a cyclic transfer
 */
static struct xdma_desc *
xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
{
	struct xdma_desc *sw_desc;
	struct xdma_hw_desc *desc;
	dma_addr_t dma_addr;
	u32 dblk_num;
	u32 control;
	void *addr;
	int i, j;

	sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
	if (!sw_desc)
		return NULL;

	sw_desc->chan = chan;
	sw_desc->desc_num = desc_num;
	sw_desc->cyclic = cyclic;
	sw_desc->error = false;
	dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
	sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
				       GFP_NOWAIT);
	if (!sw_desc->desc_blocks)
		goto failed;

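	/*
	 * Default control word written into every descriptor; the linking
	 * helpers below overwrite it where blocks are chained or terminated.
	 */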
	if (cyclic)
		control = XDMA_DESC_CONTROL_CYCLIC;
	else
		control = XDMA_DESC_CONTROL(1, 0);

	sw_desc->dblk_num = dblk_num;
	for (i = 0; i < sw_desc->dblk_num; i++) {
		addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
		if (!addr)
			goto failed;

		sw_desc->desc_blocks[i].virt_addr = addr;
		sw_desc->desc_blocks[i].dma_addr = dma_addr;
		for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
			desc[j].control = cpu_to_le32(control);
	}

	if (cyclic)
		xdma_link_cyclic_desc_blocks(sw_desc);
	else
		xdma_link_sg_desc_blocks(sw_desc);

	return sw_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}

/**
 * xdma_xfer_start - Start DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_start(struct xdma_chan *xchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct xdma_desc_block *block;
	u32 val, completed_blocks;
	struct xdma_desc *desc;
	int ret;

	/*
	 * Return if no descriptor has been submitted or the channel is busy.
	 * The vchan lock must be held by the caller.
	 */
	if (!vd || xchan->busy)
		return -EINVAL;

	/* clear run stop bit to get ready for transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_RUN_STOP);
	if (ret)
		return ret;

	desc = to_xdma_desc(vd);
	if (desc->dir != xchan->dir) {
		xdma_err(xdev, "incorrect request direction");
		return -EINVAL;
	}

	/* set DMA engine to the first descriptor block */
	completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
	block = &desc->desc_blocks[completed_blocks];
	val = lower_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
	if (ret)
		return ret;

	val = upper_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
	if (ret)
		return ret;

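	/*
	 * Program the number of extra adjacent descriptors the engine may
	 * fetch together with the first descriptor of this block.
	 */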
	if (completed_blocks + 1 == desc->dblk_num)
		val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	else
		val = XDMA_DESC_ADJACENT - 1;
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
	if (ret)
		return ret;

	/* kick off DMA transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
			   CHAN_CTRL_START);
	if (ret)
		return ret;

	xchan->busy = true;
	xchan->stop_requested = false;
	reinit_completion(&xchan->last_interrupt);

	return 0;
}

/**
 * xdma_xfer_stop - Stop DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_stop(struct xdma_chan *xchan)
{
	struct xdma_device *xdev = xchan->xdev_hdl;

	/* clear run stop bit to prevent any further auto-triggering */
	return regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
			    CHAN_CTRL_RUN_STOP);
}

/**
 * xdma_alloc_channels - Detect and allocate DMA channels
 * @xdev: DMA device pointer
 * @dir: Channel direction
 */
static int xdma_alloc_channels(struct xdma_device *xdev,
			       enum dma_transfer_direction dir)
{
	struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
	struct xdma_chan **chans, *xchan;
	u32 base, identifier, target;
	u32 *chan_num;
	int i, j, ret;

	if (dir == DMA_MEM_TO_DEV) {
		base = XDMA_CHAN_H2C_OFFSET;
		target = XDMA_CHAN_H2C_TARGET;
		chans = &xdev->h2c_chans;
		chan_num = &xdev->h2c_chan_num;
	} else if (dir == DMA_DEV_TO_MEM) {
		base = XDMA_CHAN_C2H_OFFSET;
		target = XDMA_CHAN_C2H_TARGET;
		chans = &xdev->c2h_chans;
		chan_num = &xdev->c2h_chan_num;
	} else {
		xdma_err(xdev, "invalid direction specified");
		return -EINVAL;
	}

	/* detect number of available DMA channels */
	for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		/* check if this DMA channel is available */
		if (XDMA_CHAN_CHECK_TARGET(identifier, target))
			(*chan_num)++;
	}

	if (!*chan_num) {
		xdma_err(xdev, "failed to detect any channel");
		return -EINVAL;
	}

	*chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
			      GFP_KERNEL);
	if (!*chans)
		return -ENOMEM;

	for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
			continue;

		if (j == *chan_num) {
			xdma_err(xdev, "invalid channel number");
			return -EIO;
		}

		/* init channel structure and hardware */
		xchan = &(*chans)[j];
		xchan->xdev_hdl = xdev;
		xchan->base = base + i * XDMA_CHAN_STRIDE;
		xchan->dir = dir;
		xchan->stop_requested = false;
		init_completion(&xchan->last_interrupt);

		ret = xdma_channel_init(xchan);
		if (ret)
			return ret;
		xchan->vchan.desc_free = xdma_free_desc;
		vchan_init(&xchan->vchan, &xdev->dma_dev);

		j++;
	}

	dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
		 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");

	return 0;
}

/**
 * xdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void xdma_issue_pending(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
	if (vchan_issue_pending(&xdma_chan->vchan))
		xdma_xfer_start(xdma_chan);
	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
}

/**
 * xdma_terminate_all - Terminate all transactions
 * @chan: DMA channel pointer
 */
static int xdma_terminate_all(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	xdma_xfer_stop(xdma_chan);

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);

	xdma_chan->busy = false;
	xdma_chan->stop_requested = true;
	vd = vchan_next_desc(&xdma_chan->vchan);
	if (vd) {
		list_del(&vd->node);
		dma_cookie_complete(&vd->tx);
		vchan_terminate_vdesc(vd);
	}
	vchan_get_all_descriptors(&xdma_chan->vchan, &head);
	list_splice_tail(&head, &xdma_chan->vchan.desc_terminated);

	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);

	return 0;
}

/**
 * xdma_synchronize - Synchronize terminated transactions
 * @chan: DMA channel pointer
 */
static void xdma_synchronize(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	int st = 0;

	/* If the engine continues running, wait for the last interrupt */
	regmap_read(xdev->rmap, xdma_chan->base + XDMA_CHAN_STATUS, &st);
	if (st & XDMA_CHAN_STATUS_BUSY)
		wait_for_completion_timeout(&xdma_chan->last_interrupt, msecs_to_jiffies(1000));

	vchan_synchronize(&xdma_chan->vchan);
}

/**
 * xdma_fill_descs() - Fill hardware descriptors for one contiguous memory chunk.
 *		       More than one descriptor will be used if the size is bigger
 *		       than XDMA_DESC_BLEN_MAX.
 * @sw_desc: Descriptor container
 * @src_addr: First value for the ->src_addr field
 * @dst_addr: First value for the ->dst_addr field
 * @size: Size of the contiguous memory block
 * @filled_descs_num: Index of the first descriptor to take care of in @sw_desc
 */
static inline u32 xdma_fill_descs(struct xdma_desc *sw_desc, u64 src_addr,
				  u64 dst_addr, u32 size, u32 filled_descs_num)
{
	u32 left = size, len, desc_num = filled_descs_num;
	struct xdma_desc_block *dblk;
	struct xdma_hw_desc *desc;

	dblk = sw_desc->desc_blocks + (desc_num / XDMA_DESC_ADJACENT);
	desc = dblk->virt_addr;
	desc += desc_num & XDMA_DESC_ADJACENT_MASK;
	do {
		len = min_t(u32, left, XDMA_DESC_BLEN_MAX);
		/* set hardware descriptor */
		desc->bytes = cpu_to_le32(len);
		desc->src_addr = cpu_to_le64(src_addr);
		desc->dst_addr = cpu_to_le64(dst_addr);
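		/*
		 * Move to the next descriptor, hopping to the next block
		 * when the current one is full.
		 */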
		if (!(++desc_num & XDMA_DESC_ADJACENT_MASK))
			desc = (++dblk)->virt_addr;
		else
			desc++;

		src_addr += len;
		dst_addr += len;
		left -= len;
	} while (left);

	return desc_num - filled_descs_num;
}

/**
 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
 * @chan: DMA channel pointer
 * @sgl: Transfer scatter gather list
 * @sg_len: Length of scatter gather list
 * @dir: Transfer direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 */
static struct dma_async_tx_descriptor *
xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction dir,
		    unsigned long flags, void *context)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct dma_async_tx_descriptor *tx_desc;
	struct xdma_desc *sw_desc;
	u32 desc_num = 0, i;
	u64 addr, dev_addr, *src, *dst;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, sg_len, i)
		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);

	sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = dir;
	sw_desc->cyclic = false;
	sw_desc->interleaved_dma = false;

	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

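	/* Fill one or more hardware descriptors for every scatterlist entry */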
	desc_num = 0;
	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		desc_num += xdma_fill_descs(sw_desc, *src, *dst, sg_dma_len(sg), desc_num);
		dev_addr += sg_dma_len(sg);
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);

	return NULL;
}

/**
 * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
 * @chan: DMA channel pointer
 * @address: Device DMA address to access
 * @size: Total length to transfer
 * @period_size: Period size to use for each transfer
 * @dir: Transfer direction
 * @flags: Transfer ack flags
 */
static struct dma_async_tx_descriptor *
xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
		     size_t size, size_t period_size,
		     enum dma_transfer_direction dir,
		     unsigned long flags)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	unsigned int periods = size / period_size;
	struct dma_async_tx_descriptor *tx_desc;
	struct xdma_desc *sw_desc;
	u64 addr, dev_addr, *src, *dst;
	u32 desc_num;
	unsigned int i;

	/*
	 * Simplify the whole logic by preventing an abnormally high number of
	 * periods and too large a period size.
	 */
	if (period_size > XDMA_DESC_BLEN_MAX) {
		xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
		return NULL;
	}

	if (periods > XDMA_DESC_ADJACENT) {
		xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
		return NULL;
	}

	sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
	if (!sw_desc)
		return NULL;

	sw_desc->periods = periods;
	sw_desc->period_size = period_size;
	sw_desc->dir = dir;
	sw_desc->interleaved_dma = false;

	addr = address;
	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

	desc_num = 0;
	for (i = 0; i < periods; i++) {
		desc_num += xdma_fill_descs(sw_desc, *src, *dst, period_size, desc_num);
		addr += period_size;
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);

	return NULL;
}

/**
 * xdma_prep_interleaved_dma - Prepare virtual descriptor for interleaved DMA transfers
 * @chan: DMA channel
 * @xt: DMA transfer template
 * @flags: tx flags
 */
static struct dma_async_tx_descriptor *
xdma_prep_interleaved_dma(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	int i;
	u32 desc_num = 0, period_size = 0;
	struct dma_async_tx_descriptor *tx_desc;
	struct xdma_chan *xchan = to_xdma_chan(chan);
	struct xdma_desc *sw_desc;
	u64 src_addr, dst_addr;

	for (i = 0; i < xt->frame_size; ++i)
		desc_num += DIV_ROUND_UP(xt->sgl[i].size, XDMA_DESC_BLEN_MAX);

	sw_desc = xdma_alloc_desc(xchan, desc_num, false);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = xt->dir;
	sw_desc->interleaved_dma = true;
	sw_desc->cyclic = flags & DMA_PREP_REPEAT;
	sw_desc->frames_left = xt->numf;
	sw_desc->periods = xt->numf;

	desc_num = 0;
	src_addr = xt->src_start;
	dst_addr = xt->dst_start;
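	/*
	 * Walk the frame template once, advancing the addresses by the chunk
	 * size plus the inter-chunk gap as configured in the template.
	 */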
	for (i = 0; i < xt->frame_size; ++i) {
		desc_num += xdma_fill_descs(sw_desc, src_addr, dst_addr, xt->sgl[i].size, desc_num);
		src_addr += dmaengine_get_src_icg(xt, &xt->sgl[i]) + (xt->src_inc ?
							      xt->sgl[i].size : 0);
		dst_addr += dmaengine_get_dst_icg(xt, &xt->sgl[i]) + (xt->dst_inc ?
							      xt->sgl[i].size : 0);
		period_size += xt->sgl[i].size;
	}
	sw_desc->period_size = period_size;

	tx_desc = vchan_tx_prep(&xchan->vchan, &sw_desc->vdesc, flags);
	if (tx_desc)
		return tx_desc;

	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}

/**
 * xdma_device_config - Configure the DMA channel
 * @chan: DMA channel
 * @cfg: channel configuration
 */
static int xdma_device_config(struct dma_chan *chan,
			      struct dma_slave_config *cfg)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));

	return 0;
}

/**
 * xdma_free_chan_resources - Free channel resources
 * @chan: DMA channel
 */
static void xdma_free_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	vchan_free_chan_resources(&xdma_chan->vchan);
	dma_pool_destroy(xdma_chan->desc_pool);
	xdma_chan->desc_pool = NULL;
}

/**
 * xdma_alloc_chan_resources - Allocate channel resources
 * @chan: DMA channel
 */
static int xdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	struct device *dev = xdev->dma_dev.dev;

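	/*
	 * The descriptor pool is DMA mapped, so walk up to the parent PCI
	 * device and create the pool against it.
	 */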
	while (dev && !dev_is_pci(dev))
		dev = dev->parent;
	if (!dev) {
		xdma_err(xdev, "unable to find pci device");
		return -EINVAL;
	}

	xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev, XDMA_DESC_BLOCK_SIZE,
					       XDMA_DESC_BLOCK_ALIGN, XDMA_DESC_BLOCK_BOUNDARY);
	if (!xdma_chan->desc_pool) {
		xdma_err(xdev, "unable to allocate descriptor pool");
		return -ENOMEM;
	}

	return 0;
}

static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				      struct dma_tx_state *state)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_desc *desc = NULL;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	unsigned int period_idx;
	u32 residue = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);

	vd = vchan_find_desc(&xdma_chan->vchan, cookie);
	if (!vd)
		goto out;

	desc = to_xdma_desc(vd);
	if (desc->error) {
		ret = DMA_ERROR;
	} else if (desc->cyclic) {
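		/* Report the bytes remaining in the not yet completed periods */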
		period_idx = desc->completed_desc_num % desc->periods;
		residue = (desc->periods - period_idx) * desc->period_size;
		dma_set_residue(state, residue);
	}
out:
	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);

	return ret;
}

/**
 * xdma_channel_isr - XDMA channel interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to the DMA channel structure
 */
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
	struct xdma_chan *xchan = dev_id;
	u32 complete_desc_num = 0;
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct virt_dma_desc *vd, *next_vd;
	struct xdma_desc *desc;
	int ret;
	u32 st;
	bool repeat_tx;

	if (xchan->stop_requested)
		complete(&xchan->last_interrupt);

	spin_lock(&xchan->vchan.lock);

	/* get submitted request */
	vd = vchan_next_desc(&xchan->vchan);
	if (!vd)
		goto out;

	/* Clear-on-read the status register */
	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS_RC, &st);
	if (ret)
		goto out;

	desc = to_xdma_desc(vd);

	st &= XDMA_CHAN_STATUS_MASK;
	if ((st & XDMA_CHAN_ERROR_MASK) ||
	    !(st & (CHAN_CTRL_IE_DESC_COMPLETED | CHAN_CTRL_IE_DESC_STOPPED))) {
		desc->error = true;
		xdma_err(xdev, "channel error, status register value: 0x%x", st);
		goto out;
	}

	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
			  &complete_desc_num);
	if (ret)
		goto out;

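	/*
	 * Completion handling differs per transfer type: interleaved
	 * transfers complete frame by frame, scatter-gather transfers in
	 * groups of descriptor blocks, and cyclic transfers period by period.
	 */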
	if (desc->interleaved_dma) {
		xchan->busy = false;
		desc->completed_desc_num += complete_desc_num;
		if (complete_desc_num == XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT) {
			xdma_xfer_start(xchan);
			goto out;
		}

		/* last desc of any frame */
		desc->frames_left--;
		if (desc->frames_left)
			goto out;

		/* last desc of the last frame */
		repeat_tx = vd->tx.flags & DMA_PREP_REPEAT;
		next_vd = list_first_entry_or_null(&vd->node, struct virt_dma_desc, node);
		if (next_vd)
			repeat_tx = repeat_tx && !(next_vd->tx.flags & DMA_PREP_LOAD_EOT);
		if (repeat_tx) {
			desc->frames_left = desc->periods;
			desc->completed_desc_num = 0;
			vchan_cyclic_callback(vd);
		} else {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		}
		/* start (or continue) the tx of a first desc on the vc.desc_issued list, if any */
		xdma_xfer_start(xchan);
	} else if (!desc->cyclic) {
		xchan->busy = false;
		desc->completed_desc_num += complete_desc_num;

		/* if all data blocks are transferred, remove and complete the request */
		if (desc->completed_desc_num == desc->desc_num) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			goto out;
		}

		if (desc->completed_desc_num > desc->desc_num ||
		    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
			goto out;

		/* transfer the rest of data */
		xdma_xfer_start(xchan);
	} else {
		desc->completed_desc_num = complete_desc_num;
		vchan_cyclic_callback(vd);
	}

out:
	spin_unlock(&xchan->vchan.lock);
	return IRQ_HANDLED;
}

/**
 * xdma_irq_fini - Uninitialize IRQ
 * @xdev: DMA device pointer
 */
static void xdma_irq_fini(struct xdma_device *xdev)
{
	int i;

	/* disable interrupt */
	regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);

	/* free irq handler */
	for (i = 0; i < xdev->h2c_chan_num; i++)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	for (i = 0; i < xdev->c2h_chan_num; i++)
		free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
}

/**
 * xdma_set_vector_reg - configure hardware IRQ registers
 * @xdev: DMA device pointer
 * @vec_tbl_start: Start of IRQ registers
 * @irq_start: Start of IRQ
 * @irq_num: Number of IRQs
 */
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
			       u32 irq_start, u32 irq_num)
{
	u32 shift, i, val = 0;
	int ret;

	/* Each IRQ register is 32 bit and contains 4 IRQs */
	while (irq_num > 0) {
		for (i = 0; i < 4; i++) {
			shift = XDMA_IRQ_VEC_SHIFT * i;
			val |= irq_start << shift;
			irq_start++;
			irq_num--;
			if (!irq_num)
				break;
		}

		/* write IRQ register */
		ret = regmap_write(xdev->rmap, vec_tbl_start, val);
		if (ret)
			return ret;
		vec_tbl_start += sizeof(u32);
		val = 0;
	}

	return 0;
}

/**
 * xdma_irq_init - initialize IRQs
 * @xdev: DMA device pointer
 */
static int xdma_irq_init(struct xdma_device *xdev)
{
	u32 irq = xdev->irq_start;
	u32 user_irq_start;
	int i, j, ret;

	/* return failure if there are not enough IRQs */
	if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
		xdma_err(xdev, "not enough irq");
		return -EINVAL;
	}

	/* setup H2C interrupt handler */
	for (i = 0; i < xdev->h2c_chan_num; i++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-h2c-channel", &xdev->h2c_chans[i]);
		if (ret) {
			xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
				 i, irq, ret);
			goto failed_init_h2c;
		}
		xdev->h2c_chans[i].irq = irq;
		irq++;
	}

	/* setup C2H interrupt handler */
	for (j = 0; j < xdev->c2h_chan_num; j++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-c2h-channel", &xdev->c2h_chans[j]);
		if (ret) {
			xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
				 j, irq, ret);
			goto failed_init_c2h;
		}
		xdev->c2h_chans[j].irq = irq;
		irq++;
	}

	/* config hardware IRQ registers */
	ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
				  XDMA_CHAN_NUM(xdev));
	if (ret) {
		xdma_err(xdev, "failed to set channel vectors: %d", ret);
		goto failed_init_c2h;
	}

	/* config user IRQ registers if needed */
	user_irq_start = XDMA_CHAN_NUM(xdev);
	if (xdev->irq_num > user_irq_start) {
		ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
					  user_irq_start,
					  xdev->irq_num - user_irq_start);
		if (ret) {
			xdma_err(xdev, "failed to set user vectors: %d", ret);
			goto failed_init_c2h;
		}
	}

	/* enable interrupt */
	ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
	if (ret)
		goto failed_init_c2h;

	return 0;

failed_init_c2h:
	while (j--)
		free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
	while (i--)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	return ret;
}

static bool xdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_chan_info *chan_info = param;

	return chan_info->dir == xdma_chan->dir;
}

/**
 * xdma_disable_user_irq - Disable user interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;

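	/* User IRQ vectors are allocated right after the channel IRQ vectors */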
	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return;
	}
	index -= XDMA_CHAN_NUM(xdev);

	regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
}
EXPORT_SYMBOL(xdma_disable_user_irq);

/**
 * xdma_enable_user_irq - Enable user logic interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;
	int ret;

	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return -EINVAL;
	}
	index -= XDMA_CHAN_NUM(xdev);

	ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(xdma_enable_user_irq);

/**
 * xdma_get_user_irq - Get system IRQ number
 * @pdev: Pointer to the platform_device structure
 * @user_irq_index: User logic IRQ wire index
 *
 * Return: The system IRQ number allocated for the given wire index.
 */
int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq index");
		return -EINVAL;
	}

	return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
}
EXPORT_SYMBOL(xdma_get_user_irq);

/**
 * xdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static void xdma_remove(struct platform_device *pdev)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
		xdma_irq_fini(xdev);

	if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
		dma_async_device_unregister(&xdev->dma_dev);
}

/**
 * xdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 */
static int xdma_probe(struct platform_device *pdev)
{
	struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct xdma_device *xdev;
	void __iomem *reg_base;
	struct resource *res;
	int ret = -ENODEV;

	if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
		dev_err(&pdev->dev, "invalid max dma channels %d",
			pdata->max_dma_channels);
		return -EINVAL;
	}

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, xdev);
	xdev->pdev = pdev;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		xdma_err(xdev, "failed to get irq resource");
		goto failed;
	}
	xdev->irq_start = res->start;
	xdev->irq_num = resource_size(res);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		xdma_err(xdev, "failed to get io resource");
		goto failed;
	}

	reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg_base)) {
		xdma_err(xdev, "ioremap failed");
		goto failed;
	}

	xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
					   &xdma_regmap_config);
	if (IS_ERR(xdev->rmap)) {
		ret = PTR_ERR(xdev->rmap);
		xdma_err(xdev, "config regmap failed: %d", ret);
		goto failed;
	}
	INIT_LIST_HEAD(&xdev->dma_dev.channels);

	ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
	if (ret) {
		xdma_err(xdev, "config H2C channels failed: %d", ret);
		goto failed;
	}

	ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
	if (ret) {
		xdma_err(xdev, "config C2H channels failed: %d", ret);
		goto failed;
	}

	dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_REPEAT, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_LOAD_EOT, xdev->dma_dev.cap_mask);

	xdev->dma_dev.dev = &pdev->dev;
	xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
	xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
	xdev->dma_dev.device_tx_status = xdma_tx_status;
	xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
	xdev->dma_dev.device_config = xdma_device_config;
	xdev->dma_dev.device_issue_pending = xdma_issue_pending;
	xdev->dma_dev.device_terminate_all = xdma_terminate_all;
	xdev->dma_dev.device_synchronize = xdma_synchronize;
	xdev->dma_dev.filter.map = pdata->device_map;
	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
	xdev->dma_dev.filter.fn = xdma_filter_fn;
	xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
	xdev->dma_dev.device_prep_interleaved_dma = xdma_prep_interleaved_dma;

	ret = dma_async_device_register(&xdev->dma_dev);
	if (ret) {
		xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_REG_DMA;

	ret = xdma_irq_init(xdev);
	if (ret) {
		xdma_err(xdev, "failed to init msix: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;

	return 0;

failed:
	xdma_remove(pdev);

	return ret;
}

static const struct platform_device_id xdma_id_table[] = {
	{ "xdma", 0},
	{ },
};

static struct platform_driver xdma_driver = {
	.driver		= {
		.name = "xdma",
	},
	.id_table	= xdma_id_table,
	.probe		= xdma_probe,
	.remove_new	= xdma_remove,
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("AMD XDMA driver");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_LICENSE("GPL");