// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/ata/sata_dwc_460ex.c
 *
 * Synopsys DesignWare Cores (DWC) SATA host driver
 *
 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
 *
 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
 * Copyright 2008 DENX Software Engineering
 *
 * Based on versions provided by AMCC and Synopsys which are:
 *          Copyright 2006 Applied Micro Circuits Corporation
 *          COPYRIGHT (C) 2005  SYNOPSYS, INC.  ALL RIGHTS RESERVED
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/libata.h>
#include <linux/slab.h>
#include <trace/events/libata.h>

#include "libata.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

/* These two are defined in "libata.h" */
#undef	DRV_NAME
#undef	DRV_VERSION

#define DRV_NAME        "sata-dwc"
#define DRV_VERSION     "1.3"

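/*
 * MMIO accessors. Note the (address, value) argument order, the reverse
 * of writel_relaxed(), so the register pointer always comes first at
 * call sites.
 */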
#define sata_dwc_writel(a, v)	writel_relaxed(v, a)
#define sata_dwc_readl(a)	readl_relaxed(a)

#define AHB_DMA_BRST_DFLT	64	/* 16 data items burst length */

enum {
	SATA_DWC_MAX_PORTS = 1,

	SATA_DWC_SCR_OFFSET = 0x24,
	SATA_DWC_REG_OFFSET = 0x64,
};

/* DWC SATA Registers */
struct sata_dwc_regs {
	u32 fptagr;		/* 1st party DMA tag */
	u32 fpbor;		/* 1st party DMA buffer offset */
	u32 fptcr;		/* 1st party DMA Xfr count */
	u32 dmacr;		/* DMA Control */
	u32 dbtsr;		/* DMA Burst Transaction size */
	u32 intpr;		/* Interrupt Pending */
	u32 intmr;		/* Interrupt Mask */
	u32 errmr;		/* Error Mask */
	u32 llcr;		/* Link Layer Control */
	u32 phycr;		/* PHY Control */
	u32 physr;		/* PHY Status */
	u32 rxbistpd;		/* Received BIST pattern def register */
	u32 rxbistpd1;		/* Received BIST data dword1 */
	u32 rxbistpd2;		/* Received BIST pattern data dword2 */
	u32 txbistpd;		/* Transmit BIST pattern def register */
	u32 txbistpd1;		/* Transmit BIST data dword1 */
	u32 txbistpd2;		/* Transmit BIST data dword2 */
	u32 bistcr;		/* BIST Control Register */
	u32 bistfctr;		/* BIST FIS Count Register */
	u32 bistsr;		/* BIST Status Register */
	u32 bistdecr;		/* BIST Dword Error count register */
	u32 res[15];		/* Reserved locations */
	u32 testr;		/* Test Register */
	u32 versionr;		/* Version Register */
	u32 idr;		/* ID Register */
	u32 unimpl[192];	/* Unimplemented */
	u32 dmadr[256];		/* FIFO Locations in DMA Mode */
};

enum {
	SCR_SCONTROL_DET_ENABLE	=	0x00000001,
	SCR_SSTATUS_DET_PRESENT	=	0x00000001,
	SCR_SERROR_DIAG_X	=	0x04000000,
/* DWC SATA Register Operations */
	SATA_DWC_TXFIFO_DEPTH	=	0x01FF,
	SATA_DWC_RXFIFO_DEPTH	=	0x01FF,
	SATA_DWC_DMACR_TMOD_TXCHEN =	0x00000004,
	SATA_DWC_DMACR_TXCHEN	= (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_RXCHEN	= (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_TXRXCH_CLEAR =	SATA_DWC_DMACR_TMOD_TXCHEN,
	SATA_DWC_INTPR_DMAT	=	0x00000001,
	SATA_DWC_INTPR_NEWFP	=	0x00000002,
	SATA_DWC_INTPR_PMABRT	=	0x00000004,
	SATA_DWC_INTPR_ERR	=	0x00000008,
	SATA_DWC_INTPR_NEWBIST	=	0x00000010,
	SATA_DWC_INTPR_IPF	=	0x10000000,
	SATA_DWC_INTMR_DMATM	=	0x00000001,
	SATA_DWC_INTMR_NEWFPM	=	0x00000002,
	SATA_DWC_INTMR_PMABRTM	=	0x00000004,
	SATA_DWC_INTMR_ERRM	=	0x00000008,
	SATA_DWC_INTMR_NEWBISTM	=	0x00000010,
	SATA_DWC_LLCR_SCRAMEN	=	0x00000001,
	SATA_DWC_LLCR_DESCRAMEN	=	0x00000002,
	SATA_DWC_LLCR_RPDEN	=	0x00000004,
/* These are all error bits; zeros are reserved fields. */
	SATA_DWC_SERROR_ERR_BITS =	0x0FFF0F03
};

#define SATA_DWC_SCR0_SPD_GET(v)	(((v) >> 4) & 0x0000000F)
#define SATA_DWC_DMACR_TX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_TXCHEN) |\
						 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_RXCHEN) |\
						 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DBTSR_MWR(size)	(((size)/4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
						 << 16)
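/*
 * DBTSR encodes burst sizes in DWORDs: MWR (write burst) lives in the
 * low half and MRD (read burst) in bits 31:16, hence the /4 scaling and
 * the 16-bit shift in the macros above.
 */
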
struct sata_dwc_device {
	struct device		*dev;		/* generic device struct */
	struct ata_probe_ent	*pe;		/* ptr to probe-ent */
	struct ata_host		*host;
	struct sata_dwc_regs __iomem *sata_dwc_regs;	/* DW SATA specific */
	u32			sactive_issued;
	u32			sactive_queued;
	struct phy		*phy;
	phys_addr_t		dmadr;
#ifdef CONFIG_SATA_DWC_OLD_DMA
	struct dw_dma_chip	*dma;
#endif
};

/*
 * Allow one extra special slot for commands and DMA management
 * to account for libata internal commands.
 */
#define SATA_DWC_QCMD_MAX	(ATA_MAX_QUEUE + 1)

struct sata_dwc_device_port {
	struct sata_dwc_device	*hsdev;
	int			cmd_issued[SATA_DWC_QCMD_MAX];
	int			dma_pending[SATA_DWC_QCMD_MAX];

	/* DMA info */
	struct dma_chan			*chan;
	struct dma_async_tx_descriptor	*desc[SATA_DWC_QCMD_MAX];
	u32				dma_interrupt_count;
};

/*
 * Commonly used DWC SATA driver macros
 */
#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *)(host)->private_data)
#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *)(ap)->host->private_data)
#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *)(ap)->private_data)
#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)(qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)

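/*
 * Per-tag software state: cmd_issued[] tracks where each queued command
 * is in its lifecycle, and dma_pending[] records the direction of a DMA
 * transfer that has been started but not yet completed.
 */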
enum {
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PEND	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};

/*
 * Prototypes
 */
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);

#ifdef CONFIG_SATA_DWC_OLD_DMA

#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>

static struct dw_dma_slave sata_dwc_dma_dws = {
	.src_id = 0,
	.dst_id = 0,
	.m_master = 1,
	.p_master = 0,
};

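/*
 * dmaengine filter callback: accept only a channel that belongs to the
 * DW DMA controller registered by this driver, and hand it the slave
 * parameters via chan->private (the legacy dw_dmac interface).
 */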
static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;

	if (dws->dma_dev != chan->device->dev)
		return false;

	chan->private = dws;
	return true;
}

static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;
	struct device *dev = hsdev->dev;
	dma_cap_mask_t mask;

	dws->dma_dev = dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Acquire DMA channel */
	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
	if (!hsdevp->chan) {
		dev_err(dev, "%s: dma channel unavailable\n", __func__);
		return -EAGAIN;
	}

	return 0;
}

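/*
 * Legacy path for devicetrees without a "dmas" property: probe the AHB
 * DMA controller embedded in the SATA IP ourselves, using the second
 * MMIO resource and the second interrupt of the platform device.
 */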
static int sata_dwc_dma_init_old(struct platform_device *pdev,
				 struct sata_dwc_device *hsdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	hsdev->dma = devm_kzalloc(dev, sizeof(*hsdev->dma), GFP_KERNEL);
	if (!hsdev->dma)
		return -ENOMEM;

	hsdev->dma->dev = dev;
	hsdev->dma->id = pdev->id;

	/* Get SATA DMA interrupt number */
	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
	if (!hsdev->dma->irq) {
		dev_err(dev, "no SATA DMA irq\n");
		return -ENODEV;
	}

	/* Get physical SATA DMA register base address */
	hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(hsdev->dma->regs))
		return PTR_ERR(hsdev->dma->regs);

	/* Initialize AHB DMAC */
	return dw_dma_probe(hsdev->dma);
}

static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
{
	if (!hsdev->dma)
		return;

	dw_dma_remove(hsdev->dma);
}

#endif

static const char *get_prot_descript(u8 protocol)
{
	switch (protocol) {
	case ATA_PROT_NODATA:
		return "ATA no data";
	case ATA_PROT_PIO:
		return "ATA PIO";
	case ATA_PROT_DMA:
		return "ATA DMA";
	case ATA_PROT_NCQ:
		return "ATA NCQ";
	case ATA_PROT_NCQ_NODATA:
		return "ATA NCQ no data";
	case ATAPI_PROT_NODATA:
		return "ATAPI no data";
	case ATAPI_PROT_PIO:
		return "ATAPI PIO";
	case ATAPI_PROT_DMA:
		return "ATAPI DMA";
	default:
		return "unknown";
	}
}

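/*
 * Completion callback invoked by the dmaengine driver when the AHB DMA
 * transfer finishes. Runs the same two-interrupt bookkeeping as
 * sata_dwc_isr(), under the host lock.
 */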
static void dma_dwc_xfer_done(void *hsdev_instance)
{
	unsigned long flags;
	struct sata_dwc_device *hsdev = hsdev_instance;
	struct ata_host *host = hsdev->host;
	struct ata_port *ap;
	struct sata_dwc_device_port *hsdevp;
	u8 tag = 0;
	unsigned int port = 0;

	spin_lock_irqsave(&host->lock, flags);
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);
	tag = ap->link.active_tag;

	/*
	 * Each DMA command produces 2 interrupts.  Only
	 * complete the command after both interrupts have been
	 * seen. (See sata_dwc_isr())
	 */
	hsdevp->dma_interrupt_count++;
	sata_dwc_clear_dmacr(hsdevp, tag);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
		dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
			tag, hsdevp->dma_pending[tag]);
	}

	if ((hsdevp->dma_interrupt_count % 2) == 0)
		sata_dwc_dma_xfer_complete(ap);

	spin_unlock_irqrestore(&host->lock, flags);
}

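/*
 * Build the dmaengine slave configuration and descriptor for a queued
 * command: both directions target the controller's DMA FIFO (dmadr),
 * use 32-bit items with the default burst, and convert the qc's
 * scatterlist into the controller's linked-list items.
 */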
static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct dma_slave_config sconf = {};
	struct dma_async_tx_descriptor *desc;

	/*
	 * qc->dma_dir is an enum dma_data_direction, which happens to match
	 * enum dma_transfer_direction numerically for the two cases used
	 * here (DMA_FROM_DEVICE == DMA_DEV_TO_MEM, DMA_TO_DEVICE ==
	 * DMA_MEM_TO_DEV).
	 */
	if (qc->dma_dir == DMA_DEV_TO_MEM) {
		sconf.src_addr = hsdev->dmadr;
		sconf.device_fc = false;
	} else {	/* DMA_MEM_TO_DEV */
		sconf.dst_addr = hsdev->dmadr;
		sconf.device_fc = false;
	}

	sconf.direction = qc->dma_dir;
	sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dmaengine_slave_config(hsdevp->chan, &sconf);

	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
	desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
				       qc->dma_dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc)
		return NULL;

	desc->callback = dma_dwc_xfer_done;
	desc->callback_param = hsdev;

	dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
		qc->sg, qc->n_elem, &hsdev->dmadr);

	return desc;
}

static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}

	*val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, *val);

	return 0;
}

static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, val);
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}
	sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);

	return 0;
}

/* SError is write-1-to-clear: write back what we read to clear it all */
static void clear_serror(struct ata_port *ap)
{
	u32 val;

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
	sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
}

/*
 * INTPR is also write-1-to-clear.  Writing back the full pending value
 * clears every pending bit, so the @bit argument is currently unused.
 */
static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
	sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
			sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
}

/* Turn a queued-command tag into its bit in the sactive-style masks */
static u32 qcmd_tag_to_mask(u8 tag)
{
	return 0x00000001 << (tag & 0x1f);
}

/* See ahci.c */
static void sata_dwc_error_intr(struct ata_port *ap,
				struct sata_dwc_device *hsdev, uint intpr)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;
	u8 status, tag;

	ata_ehi_clear_desc(ehi);

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
	status = ap->ops->sff_check_status(ap);

	tag = ap->link.active_tag;

	dev_err(ap->dev,
		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);

	/* Clear error register and interrupt bit */
	clear_serror(ap);
	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

	/* This is the only error happening now.  TODO check for exact error */

	err_mask |= AC_ERR_HOST_BUS;
	action |= ATA_EH_RESET;

	/* Pass this on to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	qc = ata_qc_from_tag(ap, tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	ata_port_abort(ap);
}

/*
 * Function : sata_dwc_isr
 * arguments : int irq, void *dev_instance
 * Return value : irqreturn_t - status of IRQ
 * This interrupt handler is registered for the host via
 * ata_host_activate() in sata_dwc_probe().
 */
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
	struct ata_host *host = (struct ata_host *)dev_instance;
	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status, tag;
	int handled, port = 0;
	uint intpr, sactive, sactive2, tag_mask;
	struct sata_dwc_device_port *hsdevp;

	hsdev->sactive_issued = 0;

	spin_lock_irqsave(&host->lock, flags);

	/* Read the interrupt register */
	intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);

	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
		ap->link.active_tag);

	/* Check for error interrupt */
	if (intpr & SATA_DWC_INTPR_ERR) {
		sata_dwc_error_intr(ap, hsdev, intpr);
		handled = 1;
		goto DONE;
	}

	/* Check for DMA SETUP FIS (FP DMA) interrupt */
	if (intpr & SATA_DWC_INTPR_NEWFP) {
		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);

		tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);

		hsdev->sactive_issued |= qcmd_tag_to_mask(tag);

		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}
		/*
		 * Start FP DMA for NCQ command.  At this point the tag is the
		 * active tag.  It is the tag that matches the command about to
		 * be completed.
		 */
		trace_ata_bmdma_start(ap, &qc->tf, tag);
		qc->ap->link.active_tag = tag;
		sata_dwc_bmdma_start_by_tag(qc, tag);

		handled = 1;
		goto DONE;
	}
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	/* If no sactive issued and tag_mask is zero then this is not NCQ */
	if (hsdev->sactive_issued == 0 && tag_mask == 0) {
		if (ap->link.active_tag == ATA_TAG_POISON)
			tag = 0;
		else
			tag = ap->link.active_tag;
		qc = ata_qc_from_tag(ap, tag);

		/* DEV interrupt w/ no active qc? */
		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			dev_err(ap->dev,
				"%s interrupt with no active qc qc=%p\n",
				__func__, qc);
			ap->ops->sff_check_status(ap);
			handled = 1;
			goto DONE;
		}
		status = ap->ops->sff_check_status(ap);

		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
			__func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
		if (ata_is_dma(qc->tf.protocol)) {
			/*
			 * Each DMA transaction produces 2 interrupts. The DMAC
			 * transfer complete interrupt and the SATA controller
			 * operation done interrupt. The command should be
			 * completed only after both interrupts are seen.
			 */
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] ==
					SATA_DWC_DMA_PENDING_NONE) {
				dev_err(ap->dev,
					"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
					__func__, intpr, status,
					hsdevp->dma_pending[tag]);
			}

			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else if (ata_is_pio(qc->tf.protocol)) {
			ata_sff_hsm_move(ap, qc, status, 0);
			handled = 1;
			goto DONE;
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto DRVSTILLBUSY;
		}

		handled = 1;
		goto DONE;
	}

	/*
	 * This is a NCQ command. At this point we need to figure out for which
	 * tags we have gotten a completion interrupt.  One interrupt may serve
	 * as completion for more than one operation when commands are queued
	 * (NCQ).  We need to process each completed command.
	 */

	/* process completed commands */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
		dev_dbg(ap->dev,
			"%s NCQ:sactive=0x%08x  sactive_issued=0x%08x tag_mask=0x%08x\n",
			__func__, sactive, hsdev->sactive_issued, tag_mask);
	}

	if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
		dev_warn(ap->dev,
			 "Bad tag mask?  sactive=0x%08x sactive_issued=0x%08x  tag_mask=0x%08x\n",
			 sactive, hsdev->sactive_issued, tag_mask);
	}

	/* read just to clear ... not bad if currently still busy */
	status = ap->ops->sff_check_status(ap);
	dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);

	tag = 0;
	while (tag_mask) {
		while (!(tag_mask & 0x00000001)) {
			tag++;
			tag_mask <<= 1;
		}

		tag_mask &= (~0x00000001);
		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc");
			handled = 1;
			goto DONE;
		}

		/* To be picked up by completion functions */
		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		/* Let libata/scsi layers handle error */
		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
				status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		/* Process completed command */
		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
			get_prot_descript(qc->tf.protocol));
		if (ata_is_dma(qc->tf.protocol)) {
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] ==
					SATA_DWC_DMA_PENDING_NONE)
				dev_warn(ap->dev, "%s: DMA not pending?\n",
					__func__);
			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto STILLBUSY;
		}
		continue;

STILLBUSY:
		ap->stats.idle_irq++;
		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
			ap->print_id);
	} /* while tag_mask */

	/*
	 * Check whether any commands completed while we were processing the
	 * initial set. Reading the status register clears interrupts, and we
	 * read status as part of processing each completed command, so a
	 * completion that arrived in the meantime could otherwise be missed.
	 */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
	if (sactive2 != sactive) {
		dev_dbg(ap->dev,
			"More completed - sactive=0x%x sactive2=0x%x\n",
			sactive, sactive2);
	}
	handled = 1;

DONE:
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}

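/*
 * Clear the per-direction channel-enable bit in DMACR once a transfer
 * has finished, preserving the TMOD bit that the TX/RX clear macros
 * keep set.
 */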
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
	u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
		dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
		dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else {
		/*
		 * This should not happen, it indicates the driver is out of
		 * sync.  If it does happen, clear dmacr anyway.
		 */
		dev_err(hsdev->dev,
			"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
			__func__, tag, hsdevp->dma_pending[tag], dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);
	}
}

static void sata_dwc_dma_xfer_complete(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	u8 tag = 0;

	tag = ap->link.active_tag;
	qc = ata_qc_from_tag(ap, tag);
	if (!qc) {
		dev_err(ap->dev, "failed to get qc");
		return;
	}

	if (ata_is_dma(qc->tf.protocol)) {
		if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
			dev_err(ap->dev,
				"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
				__func__,
				sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
		}

		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
		sata_dwc_qc_complete(ap, qc);
		ap->link.active_tag = ATA_TAG_POISON;
	} else {
		sata_dwc_qc_complete(ap, qc);
	}
}

static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	u8 status = 0;
	u32 mask = 0x0;
	u8 tag = qc->hw_tag;
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdev->sactive_queued = 0;

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
		dev_err(ap->dev, "TX DMA PENDING\n");
	else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
		dev_err(ap->dev, "RX DMA PENDING\n");
	dev_dbg(ap->dev,
		"QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
		qc->tf.command, status, ap->print_id, qc->tf.protocol);

	/* clear active bit */
	mask = (~(qcmd_tag_to_mask(tag)));
	hsdev->sactive_queued = hsdev->sactive_queued & mask;
	hsdev->sactive_issued = hsdev->sactive_issued & mask;
	ata_qc_complete(qc);
	return 0;
}

static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
{
	/* Enable selective interrupts by setting the interrupt mask register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
			SATA_DWC_INTMR_ERRM |
			SATA_DWC_INTMR_NEWFPM |
			SATA_DWC_INTMR_PMABRTM |
			SATA_DWC_INTMR_DMATM);
	/*
	 * Unmask the error bits that should trigger an error interrupt by
	 * setting the error mask register.
	 */
	sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);

	dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
		__func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
		sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
}

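/*
 * The DWC taskfile registers are 32-bit aligned, so successive shadow
 * registers sit 4 bytes apart (0x00, 0x04, 0x08, ...) rather than at
 * the byte-spaced offsets of a legacy IDE port.
 */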
static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base + 0x00;
	port->data_addr		= base + 0x00;

	port->error_addr	= base + 0x04;
	port->feature_addr	= base + 0x04;

	port->nsect_addr	= base + 0x08;

	port->lbal_addr		= base + 0x0c;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;

	port->device_addr	= base + 0x18;
	port->command_addr	= base + 0x1c;
	port->status_addr	= base + 0x1c;

	port->altstatus_addr	= base + 0x20;
	port->ctl_addr		= base + 0x20;
}

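/*
 * Prefer a dmaengine channel described by the "dmas" property; fall
 * back to the legacy embedded-DMAC path when the devicetree predates
 * it.
 */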
static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct device *dev = hsdev->dev;

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_property_present(dev->of_node, "dmas"))
		return sata_dwc_dma_get_channel_old(hsdevp);
#endif

	hsdevp->chan = dma_request_chan(dev, "sata-dma");
	if (IS_ERR(hsdevp->chan)) {
		dev_err(dev, "failed to allocate dma channel: %ld\n",
			PTR_ERR(hsdevp->chan));
		return PTR_ERR(hsdevp->chan);
	}

	return 0;
}

/*
 * Function : sata_dwc_port_start
 * arguments : struct ata_port *ap
 * Return value : returns 0 on success, error code otherwise
 * This function allocates the per-port state, acquires the DMA channel
 * and powers on the PHY.
 */
static int sata_dwc_port_start(struct ata_port *ap)
{
	int err = 0;
	struct sata_dwc_device *hsdev;
	struct sata_dwc_device_port *hsdevp = NULL;
	struct device *pdev;
	int i;

	hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);

	hsdev->host = ap->host;
	pdev = ap->host->dev;
	if (!pdev) {
		dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
		err = -ENODEV;
		goto CLEANUP;
	}

	/* Allocate Port Struct */
	hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
	if (!hsdevp) {
		err = -ENOMEM;
		goto CLEANUP;
	}
	hsdevp->hsdev = hsdev;

	err = sata_dwc_dma_get_channel(hsdevp);
	if (err)
		goto CLEANUP_ALLOC;

	err = phy_power_on(hsdev->phy);
	if (err)
		goto CLEANUP_ALLOC;

	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;

	ap->bmdma_prd = NULL;	/* set these so libata doesn't use them */
	ap->bmdma_prd_dma = 0;

	if (ap->port_no == 0) {
		dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);

		dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
				(SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
				 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
	}

	/* Clear any error bits before libata starts issuing commands */
	clear_serror(ap);
	ap->private_data = hsdevp;
	dev_dbg(ap->dev, "%s: done\n", __func__);
	return 0;

CLEANUP_ALLOC:
	kfree(hsdevp);
CLEANUP:
	dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
	return err;
}

static void sata_dwc_port_stop(struct ata_port *ap)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);

	dmaengine_terminate_sync(hsdevp->chan);
	dma_release_channel(hsdevp->chan);
	phy_power_off(hsdev->phy);

	kfree(hsdevp);
	ap->private_data = NULL;
}

/*
 * Function : sata_dwc_exec_command_by_tag
 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
 * Return value : None
 * This function keeps track of individual command tag ids and calls
 * ata_sff_exec_command() in libata
 */
static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
					 struct ata_taskfile *tf,
					 u8 tag, u32 cmd_issued)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdevp->cmd_issued[tag] = cmd_issued;

	/*
	 * Clear SError before executing a new command.
	 * sata_dwc_scr_write() and read() cannot be used here. Clearing the
	 * PM managed SError register for the disk needs to be done before
	 * the task file is loaded.
	 */
	clear_serror(ap);
	ata_sff_exec_command(ap, tf);
}

static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
				     SATA_DWC_CMD_ISSUED_PEND);
}

static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	sata_dwc_bmdma_setup_by_tag(qc, tag);
}

static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	int start_dma;
	u32 reg;
	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
	int dir = qc->dma_dir;

	if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
		start_dma = 1;
		if (dir == DMA_TO_DEVICE)
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
		else
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
	} else {
		dev_err(ap->dev,
			"%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
			__func__, hsdevp->cmd_issued[tag], tag);
		start_dma = 0;
	}

	if (start_dma) {
		sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
		if (reg & SATA_DWC_SERROR_ERR_BITS) {
			dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
				__func__, reg);
		}

		if (dir == DMA_TO_DEVICE)
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_TXCHEN);
		else
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_RXCHEN);

		/* Enable AHB DMA transfer on the specified channel */
		dmaengine_submit(desc);
		dma_async_issue_pending(hsdevp->chan);
	}
}

static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	sata_dwc_bmdma_start_by_tag(qc, tag);
}

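/*
 * NCQ commands are issued by hand here: the tag's SActive bit is set,
 * the taskfile is loaded and the command dispatched, while the DMA
 * descriptor prepared above waits for the DMA SETUP FIS interrupt
 * (see sata_dwc_isr()). Everything else goes through the stock BMDMA
 * issue path.
 */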
static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
{
	u32 sactive;
	u8 tag = qc->hw_tag;
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	if (ata_is_dma(qc->tf.protocol)) {
		hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
		if (!hsdevp->desc[tag])
			return AC_ERR_SYSTEM;
	} else {
		hsdevp->desc[tag] = NULL;
	}

	if (ata_is_ncq(qc->tf.protocol)) {
		sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
		sactive |= (0x00000001 << tag);
		sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);
		trace_ata_exec_command(ap, &qc->tf, tag);
		sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
					     SATA_DWC_CMD_ISSUED_PEND);
	} else {
		return ata_bmdma_qc_issue(qc);
	}
	return 0;
}

static void sata_dwc_error_handler(struct ata_port *ap)
{
	ata_sff_error_handler(ap);
}

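/*
 * A hardreset wipes the controller state, so the interrupt masks, the
 * DMA control register and the burst-size register are reprogrammed
 * right after the SFF hardreset completes.
 */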
static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
	int ret;

	ret = sata_sff_hardreset(link, class, deadline);

	sata_dwc_enable_interrupts(hsdev);

	/* Reconfigure the DMA control register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
			SATA_DWC_DMACR_TXRXCH_CLEAR);

	/* Reconfigure the DMA Burst Transaction Size register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
			SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));

	return ret;
}

static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
{
	/* SATA DWC is master only */
}

/*
 * scsi mid-layer and libata interface structures
 */
static const struct scsi_host_template sata_dwc_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	/*
	 * test-only: Currently this driver doesn't handle NCQ
	 * correctly. We enable NCQ but set the queue depth to a
	 * max of 1. This will get fixed in a future release.
	 */
	.sg_tablesize		= LIBATA_MAX_PRD,
	/* .can_queue		= ATA_MAX_QUEUE, */
	/*
	 * Make sure a LLI block is not created that will span an 8K max FIS
	 * boundary. If the block spans such a FIS boundary, there is a chance
	 * that a DMA burst will cross that boundary -- this results in an
	 * error in the host controller.
	 */
	.dma_boundary		= 0x1fff /* ATA_DMA_BOUNDARY */,
};

static struct ata_port_operations sata_dwc_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= sata_dwc_error_handler,
	.hardreset		= sata_dwc_hardreset,

	.qc_issue		= sata_dwc_qc_issue,

	.scr_read		= sata_dwc_scr_read,
	.scr_write		= sata_dwc_scr_write,

	.port_start		= sata_dwc_port_start,
	.port_stop		= sata_dwc_port_stop,

	.sff_dev_select		= sata_dwc_dev_select,

	.bmdma_setup		= sata_dwc_bmdma_setup,
	.bmdma_start		= sata_dwc_bmdma_start,
};

static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &sata_dwc_ops,
	},
};

static int sata_dwc_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = dev->of_node;
	struct sata_dwc_device *hsdev;
	u32 idr, versionr;
	char *ver = (char *)&versionr;
	void __iomem *base;
	int err = 0;
	int irq;
	struct ata_host *host;
	struct ata_port_info pi = sata_dwc_port_info[0];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct resource *res;

	/* Allocate DWC SATA device */
	host = ata_host_alloc_pinfo(dev, ppi, SATA_DWC_MAX_PORTS);
	hsdev = devm_kzalloc(dev, sizeof(*hsdev), GFP_KERNEL);
	if (!host || !hsdev)
		return -ENOMEM;

	host->private_data = hsdev;

	/* Ioremap SATA registers */
	base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	dev_dbg(dev, "ioremap done for SATA register address\n");

	/*
	 * Synopsys DWC SATA specific registers; dmadr is the physical
	 * address of the DMA FIFO, later handed to the dmaengine slave
	 * config in dma_dwc_xfer_setup().
	 */
	hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
	hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);

	/* Setup port */
	host->ports[0]->ioaddr.cmd_addr = base;
	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
	sata_dwc_setup_port(&host->ports[0]->ioaddr, base);

	/* Read the ID and Version Registers */
	idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
	versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
	dev_notice(dev, "id %d, controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]);

	/* Save dev for later use in dev_xxx() routines */
	hsdev->dev = dev;

	/* Enable SATA Interrupts */
	sata_dwc_enable_interrupts(hsdev);

	/* Get SATA interrupt number */
	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		dev_err(dev, "no SATA irq\n");
		return -ENODEV;
	}

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_property_present(np, "dmas")) {
		err = sata_dwc_dma_init_old(ofdev, hsdev);
		if (err)
			return err;
	}
#endif

	hsdev->phy = devm_phy_optional_get(dev, "sata-phy");
	if (IS_ERR(hsdev->phy))
		return PTR_ERR(hsdev->phy);

	err = phy_init(hsdev->phy);
	if (err)
		goto error_out;

	/*
	 * Now, register with libATA core, this will also initiate the
	 * device discovery process, invoking our port_start() handler &
	 * error_handler() to execute a dummy Softreset EH session
	 */
	err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
	if (err) {
		dev_err(dev, "failed to activate host\n");
		goto error_out;
	}

	return 0;

error_out:
	phy_exit(hsdev->phy);
	return err;
}

static void sata_dwc_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
	struct sata_dwc_device *hsdev = host->private_data;

	ata_host_detach(host);

	phy_exit(hsdev->phy);

#ifdef CONFIG_SATA_DWC_OLD_DMA
	/* Free SATA DMA resources */
	sata_dwc_dma_exit_old(hsdev);
#endif

	dev_dbg(dev, "done\n");
}

static const struct of_device_id sata_dwc_match[] = {
	{ .compatible = "amcc,sata-460ex", },
	{}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);

static struct platform_driver sata_dwc_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sata_dwc_match,
	},
	.probe = sata_dwc_probe,
	.remove_new = sata_dwc_remove,
};

module_platform_driver(sata_dwc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
MODULE_VERSION(DRV_VERSION);