/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.4"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, /* 0 = INT, 1 = SMI */

	/* PCI config register 20 */
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
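	/* With the sizes above, each command tag owns one 1 KB slot: a
	   128-byte CPB followed by (1024 - 128) / 16 = 56 external APRDs of
	   16 bytes each, so the per-port DMA area is 32 * 1024 bytes and an
	   s/g table holds 5 inline + 56 external = 61 entries. */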

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;
	__le32			len;
	u8			flags;
	u8			packet_len;
	__le16			reserved;
};

enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			len;           /* 3 */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	dma_addr_t		cpb_dma;
	struct nv_adma_prd	*aprd;
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;
	u8			flags;
	int			last_issue_ncq;
};

struct nv_host_priv {
	unsigned long		type;
};

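/* Port interrupt status lives in the ADMA general control register:
   bit 19 flags port 0 and bit 19 + 12 = 31 flags port 1. */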
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= NV_ADMA_MAX_CPBS,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.error_handler		= nv_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= ata_bmdma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= ata_port_start,
	.host_stop		= nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= nv_adma_tf_read,
	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.exec_command		= ata_exec_command,
	.check_status		= ata_check_status,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,
	.data_xfer		= ata_data_xfer,
	.irq_clear		= nv_adma_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.irq_handler	= nv_generic_interrupt,
	},
	/* nforce2/3 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.irq_handler	= nv_nf2_interrupt,
	},
	/* ck804 */
	{
		.sht		= &nv_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.irq_handler	= nv_ck804_interrupt,
	},
	/* ADMA */
	{
		.sht		= &nv_adma_sht,
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_HRST_TO_RESUME |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.irq_handler	= nv_adma_interrupt,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

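/* Leave ADMA mode and return the port to legacy register mode: wait for
   the engine to go idle, clear the GO bit, then wait for the LEGACY
   status flag to assert.  Each wait polls the status register up to 20
   times at 50 ns intervals. */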
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			 status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

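/* Enter ADMA mode from register mode: set the GO bit, then wait for the
   LEGACY flag to clear and the IDLE flag to assert. */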
static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if (count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Since commands where a result TF is requested are not
	   executed in ADMA mode, the only time this function will be called
	   in ADMA mode will be if a command fails. In this case we
	   don't care about going into register mode with ADMA commands
	   pending, as the commands will all shortly be aborted anyway. */
	nv_adma_register_mode(ap);

	ata_tf_read(ap, tf);
}

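/* Each CPB taskfile entry is a little-endian word holding the ATA register
   index in bits 15:8 and the register value in bits 7:0, OR'd with the
   nv_adma_regbits control flags: WNB on the first entry, CMDEND on the
   command entry, and IGN to pad unused slots. */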
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD   << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}

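/* Examine the response flags of one CPB, completing the corresponding
   queued command on success and kicking off EH on error.  Returns 1 if
   error handling was invoked. */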
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise a host state machine violation on
			   this condition. */
			ata_port_printk(ap, KERN_ERR, "notifier for tag %d with no command?\n",
				cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_SOFTRESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->active_tag))
					/* NV_INT_DEV indication seems unreliable
					   at times, at least in ADMA mode. Force
					   it on whenever a command is active, to
					   prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, ": SError");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				u32 check_commands;
				int pos, error = 0;

				if (ata_tag_valid(ap->active_tag))
					check_commands = 1 << ap->active_tag;
				else
					check_commands = ap->sactive;

				/* Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

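	/* The high half is written as (mem_dma >> 16) >> 16 rather than a
	   single >> 32 so the shift stays well-defined when dma_addr_t is
	   a 32-bit type. */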
	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
#endif

static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

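	/* In ADMA space the legacy taskfile registers appear at 4-byte
	   strides from the port base, with the control/altstatus register
	   at fixed offset 0x20. */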
	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}

static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;

	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx - 5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma +
					     NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}

static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* The ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands, where a result taskfile
	   is not required. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (qc->tf.flags & ATA_TFLAG_POLLING) ||
	    (qc->flags & ATA_QCFLAG_RESULT_TF))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}

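/* Build the CPB for a queued command.  resp_flags is primed with DONE and
   ctl_flags cleared up front so the controller cannot mistake a half-built
   CPB for a valid one; CPB_VALID is only set (and resp_flags cleared) once
   every other field is in place. */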
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* The hardware seems to need some delay when switching between
		   NCQ and non-NCQ commands, else we get command timeouts and
		   such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	/* write the append register: command tag in the lower 8 bits and
	   (number of CPBs to append - 1) in the top 8 bits */
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				/* No request pending?  Clear interrupt status
				   anyway, in case there's one pending. */
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

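/* Dispatch a shared interrupt status byte to both ports, giving each its
   own 4-bit slice of irq_stat. */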
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read(struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy, deadline);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->active_tag) || ap->sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];

				if ((ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
				    ap->sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	/* Make sure this is a SATA controller by checking that all six BARs
	   are populated (NVIDIA SATA controllers always have six BARs).
	   Otherwise it's an IDE controller and we ignore it. */
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	ppi[0] = &nv_port_info[type];
	rc = ata_pci_prepare_native_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* set 64bit dma masks, may fail */
	if (type == ADMA) {
		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
			pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	}

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
				 IRQF_SHARED, ppi[0]->sht);
}

#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;

			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
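
/* ADMA can be disabled at load time, e.g. "modprobe sata_nv adma=0" keeps
   CK804/MCP04 parts on the legacy interface. */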
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");