/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION	"2.21"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
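
/*
 * Example (illustrative only): a READ DMA EXT command routed to no port
 * multiplier (pmp == 0) starts its FIS as
 *
 *	fis[0] == 0x27	Register - Host to Device FIS type
 *	fis[1] == 0x80	bit 7 set (Command FIS), PMP field 0
 *	fis[2] == 0x25	ATA_CMD_READ_EXT
 *
 * with the LBA bytes in fis[4]-fis[6] and fis[8]-fis[10], and the
 * sector count in fis[12]-fis[13], exactly as filled in above.
 */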

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
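
/*
 * Example (illustrative only): a DMA FUA write on an LBA48 request
 * indexes ata_rw_cmds[] at 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write)
 * == 23, i.e. ATA_CMD_WRITE_FUA_EXT.  Zero entries mark invalid
 * combinations (e.g. FUA without LBA48) and make ata_rwcmd_protocol()
 * return -1.
 */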

/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		/* sector numbers are 1-based; this inverts the
		   conversion done in ata_build_rw_tf() */
		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
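
/*
 * Worked example (illustrative only): block 4104 on a drive with
 * 16 heads and 63 sectors per track converts as
 *
 *	track = 4104 / 63     = 65
 *	cyl   = 65 / 16       = 4
 *	head  = 65 % 16       = 1
 *	sect  = 4104 % 63 + 1 = 10
 *
 * and ata_tf_read_block() inverts it: (4 * 16 + 1) * 63 + 10 - 1 == 4104.
 */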

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
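
#if 0	/* Illustrative only, not built: the two helpers above invert
	 * each other.  0x1f is PIO0-4, 0x07 is MWDMA0-2, 0x3f is UDMA0-5.
	 */
static void example_xfermask_roundtrip(void)
{
	unsigned int pio, mwdma, udma;
	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);

	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07 and udma == 0x3f again */
}
#endif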

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
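
/*
 * Example (illustrative only): for an xfer_mask whose highest set bit
 * is UDMA5, ata_xfer_mask2mode() returns XFER_UDMA_5;
 * ata_xfer_mode2mask(XFER_UDMA_5) returns just the single UDMA5 bit of
 * the mask, and ata_xfer_mode2shift(XFER_UDMA_5) returns ATA_SHIFT_UDMA.
 */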

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
		dev->class++;
	}
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we check only those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
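
/*
 * Example (illustrative only): IDENTIFY strings are big-endian within
 * each 16-bit word, so id[ofs] == 0x4142 emits 'A' (0x41) followed by
 * 'B' (0x42) regardless of host byte order.
 */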

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
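
#if 0	/* Illustrative only, not built: typical use for the product
	 * string.  The buffer is the even string length plus one byte
	 * for the terminating NUL, satisfying the WARN_ON() above.
	 */
static void example_read_model(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	/* model now holds the product string, trailing blanks trimmed */
}
#endif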

static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

/**
 *	ata_read_native_max_address_ext	-	LBA48 native max query
 *	@dev: Device to query
 *
 *	Perform an LBA48 size query upon the device in question. Return the
 *	actual LBA48 size or zero if the command fails.
 */

static u64 ata_read_native_max_address_ext(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba48(&tf);
}

/**
 *	ata_read_native_max_address	-	LBA28 native max query
 *	@dev: Device to query
 *
 *	Perform an LBA28 size query upon the device in question. Return the
 *	actual LBA28 size or zero if the command fails.
 */

static u64 ata_read_native_max_address(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba(&tf);
}

/**
 *	ata_set_native_max_address_ext	-	LBA48 native max set
 *	@dev: Device to modify
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Perform an LBA48 size set max upon the device in question. Return the
 *	actual LBA48 size or zero if the command fails.
 */

static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	tf.hob_lbal = (new_sectors >> 24) & 0xff;
	tf.hob_lbam = (new_sectors >> 32) & 0xff;
	tf.hob_lbah = (new_sectors >> 40) & 0xff;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba48(&tf);
}

/**
 *	ata_set_native_max_address	-	LBA28 native max set
 *	@dev: Device to modify
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Perform an LBA28 size set max upon the device in question. Return the
 *	actual LBA28 size or zero if the command fails.
 */

static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;
	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba(&tf);
}

/**
 *	ata_hpa_resize		-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 */

static u64 ata_hpa_resize(struct ata_device *dev)
{
	u64 sectors = dev->n_sectors;
	u64 hpa_sectors;

	if (ata_id_has_lba48(dev->id))
		hpa_sectors = ata_read_native_max_address_ext(dev);
	else
		hpa_sectors = ata_read_native_max_address(dev);

	if (hpa_sectors > sectors) {
		ata_dev_printk(dev, KERN_INFO,
			"Host Protected Area detected:\n"
			"\tcurrent size: %lld sectors\n"
			"\tnative size: %lld sectors\n",
			(long long)sectors, (long long)hpa_sectors);

		if (ata_ignore_hpa) {
			if (ata_id_has_lba48(dev->id))
				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
			else
				hpa_sectors = ata_set_native_max_address(dev,
								hpa_sectors);

			if (hpa_sectors) {
				ata_dev_printk(dev, KERN_INFO, "native size "
					"increased to %lld sectors\n",
					(long long)hpa_sectors);
				return hpa_sectors;
			}
		}
	} else if (hpa_sectors < sectors)
		ata_dev_printk(dev, KERN_WARNING, "%s: hpa sectors (%lld) "
			       "is smaller than sectors (%lld)\n", __FUNCTION__,
			       (long long)hpa_sectors, (long long)sectors);

	return sectors;
}

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
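
/*
 * Summary (illustrative only): capacity comes from different IDENTIFY
 * words depending on the addressing scheme - the 64-bit value at words
 * 100-103 for LBA48, the 32-bit value at words 60-61 for LBA28, words
 * 57-58 for current CHS, or default cyls * heads * sectors (words
 * 1, 3 and 6) as a last resort.
 */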

/**
 *	ata_id_to_dma_mode	-	Identify DMA mode from id block
 *	@dev: device to identify
 *	@unknown: mode to assume if we cannot tell
 *
 *	Set up the timing values for the device based upon the identify
 *	reported values for the DMA mode. This function is used by drivers
 *	which rely upon firmware configured modes, but wish to report the
 *	mode correctly when possible.
 *
 *	In addition we emit similarly formatted messages to the default
 *	ata_dev_set_mode handler, in order to provide consistency of
 *	presentation.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}

/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no action.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}


/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);		/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds, not the modes, that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
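
/*
 * Example (illustrative only): with word 53 bit 1 set and
 * id[ATA_ID_PIO_MODES] == 0x0003, pio_mask == (0x03 << 3) | 0x7 == 0x1f
 * (PIO0-4).  id[ATA_ID_MWDMA_MODES] == 0x0007 adds MWDMA0-2 and, with
 * word 53 bit 2 set, id[ATA_ID_UDMA_MODES] == 0x003f adds UDMA0-5.
 */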

/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it is the
 *	user's (i.e. the low level driver's) responsibility to make
 *	sure that only one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		cancel_work_sync(&ap->port_task.work);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It is the caller's duty
 *	to clean up after a timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command that consists only of the opcode
 *	@cmd itself, without filling any other registers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* For PIO3 and higher, IORDY is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the mask of PIO modes that can be used without IORDY.
 *	If the identify data carries no drive-specific limit, modes up
 *	to PIO2 are assumed to be safe without IORDY.
 */

static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* These are cycle times, not frequencies - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
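
/*
 * Example (illustrative only): a drive advertising queue depth 32 via
 * ata_id_queue_depth() on a host whose scsi_host can_queue is 31 gets
 * "NCQ (depth 31/32)" - the smaller of the two is the depth actually
 * usable.
 */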

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set _SDD */
	rc = ata_acpi_push_id(dev);
	if (rc) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			rc);
	}

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		} else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			if (ata_id_hpa_enabled(dev->id))
				dev->n_sectors = ata_hpa_resize(dev);

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf,	modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			__FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
2032
2033/**
2034 *	ata_cable_40wire	-	return 40 wire cable type
2035 *	@ap: port
2036 *
2037 *	Helper method for drivers which want to hardwire 40 wire cable
2038 *	detection.
2039 */
2040
2041int ata_cable_40wire(struct ata_port *ap)
2042{
2043	return ATA_CBL_PATA40;
2044}
2045
2046/**
2047 *	ata_cable_80wire	-	return 80 wire cable type
2048 *	@ap: port
2049 *
2050 *	Helper method for drivers which want to hardwire 80 wire cable
2051 *	detection.
2052 */
2053
2054int ata_cable_80wire(struct ata_port *ap)
2055{
2056	return ATA_CBL_PATA80;
2057}
2058
2059/**
2060 *	ata_cable_unknown	-	return unknown PATA cable.
2061 *	@ap: port
2062 *
2063 *	Helper method for drivers which have no PATA cable detection.
2064 */
2065
2066int ata_cable_unknown(struct ata_port *ap)
2067{
2068	return ATA_CBL_PATA_UNK;
2069}
2070
2071/**
2072 *	ata_cable_sata	-	return SATA cable type
2073 *	@ap: port
2074 *
2075 *	Helper method for drivers which have SATA cables
2076 */
2077
2078int ata_cable_sata(struct ata_port *ap)
2079{
2080	return ATA_CBL_SATA;
2081}
2082
2083/**
2084 *	ata_bus_probe - Reset and probe ATA bus
2085 *	@ap: Bus to probe
2086 *
2087 *	Master ATA bus probing function.  Initiates a hardware-dependent
2088 *	bus reset, then attempts to identify any devices found on
2089 *	the bus.
2090 *
2091 *	LOCKING:
2092 *	PCI/etc. bus probe sem.
2093 *
2094 *	RETURNS:
2095 *	Zero on success, negative errno otherwise.
2096 */
2097
2098int ata_bus_probe(struct ata_port *ap)
2099{
2100	unsigned int classes[ATA_MAX_DEVICES];
2101	int tries[ATA_MAX_DEVICES];
2102	int i, rc;
2103	struct ata_device *dev;
2104
2105	ata_port_probe(ap);
2106
2107	for (i = 0; i < ATA_MAX_DEVICES; i++)
2108		tries[i] = ATA_PROBE_MAX_TRIES;
2109
2110 retry:
2111	/* reset and determine device classes */
2112	ap->ops->phy_reset(ap);
2113
2114	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2115		dev = &ap->device[i];
2116
2117		if (!(ap->flags & ATA_FLAG_DISABLED) &&
2118		    dev->class != ATA_DEV_UNKNOWN)
2119			classes[dev->devno] = dev->class;
2120		else
2121			classes[dev->devno] = ATA_DEV_NONE;
2122
2123		dev->class = ATA_DEV_UNKNOWN;
2124	}
2125
2126	ata_port_probe(ap);
2127
	/* After the reset the device is in PIO 0 and the controller
	   state is undefined.  Record the mode. */
2130
2131	for (i = 0; i < ATA_MAX_DEVICES; i++)
2132		ap->device[i].pio_mode = XFER_PIO_0;
2133
	/* Read IDENTIFY page and configure devices.  We have to do the
	   identify-specific sequence in reverse order (slave first) so
	   that PDIAG- is released by the slave device. */
2137
2138	for (i = ATA_MAX_DEVICES - 1; i >=  0; i--) {
2139		dev = &ap->device[i];
2140
2141		if (tries[i])
2142			dev->class = classes[i];
2143
2144		if (!ata_dev_enabled(dev))
2145			continue;
2146
2147		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2148				     dev->id);
2149		if (rc)
2150			goto fail;
2151	}
2152
2153	/* Now ask for the cable type as PDIAG- should have been released */
2154	if (ap->ops->cable_detect)
2155		ap->cbl = ap->ops->cable_detect(ap);
2156
2157	/* After the identify sequence we can now set up the devices. We do
2158	   this in the normal order so that the user doesn't get confused */
2159
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2161		dev = &ap->device[i];
2162		if (!ata_dev_enabled(dev))
2163			continue;
2164
2165		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
2166		rc = ata_dev_configure(dev);
2167		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2168		if (rc)
2169			goto fail;
2170	}
2171
2172	/* configure transfer mode */
2173	rc = ata_set_mode(ap, &dev);
2174	if (rc)
2175		goto fail;
2176
2177	for (i = 0; i < ATA_MAX_DEVICES; i++)
2178		if (ata_dev_enabled(&ap->device[i]))
2179			return 0;
2180
2181	/* no device present, disable port */
2182	ata_port_disable(ap);
2183	ap->ops->port_disable(ap);
2184	return -ENODEV;
2185
2186 fail:
2187	tries[dev->devno]--;
2188
2189	switch (rc) {
2190	case -EINVAL:
2191		/* eeek, something went very wrong, give up */
2192		tries[dev->devno] = 0;
2193		break;
2194
2195	case -ENODEV:
2196		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
2198	case -EIO:
2199		if (tries[dev->devno] == 1) {
2200			/* This is the last chance, better to slow
2201			 * down than lose it.
2202			 */
2203			sata_down_spd_limit(ap);
2204			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2205		}
2206	}
2207
2208	if (!tries[dev->devno])
2209		ata_dev_disable(dev);
2210
2211	goto retry;
2212}
2213
2214/**
2215 *	ata_port_probe - Mark port as enabled
2216 *	@ap: Port for which we indicate enablement
2217 *
2218 *	Modify @ap data structure such that the system
2219 *	thinks that the entire port is enabled.
2220 *
2221 *	LOCKING: host lock, or some other form of
2222 *	serialization.
2223 */
2224
2225void ata_port_probe(struct ata_port *ap)
2226{
2227	ap->flags &= ~ATA_FLAG_DISABLED;
2228}
2229
2230/**
2231 *	sata_print_link_status - Print SATA link status
2232 *	@ap: SATA port to printk link status about
2233 *
2234 *	This function prints link speed and status of a SATA link.
2235 *
2236 *	LOCKING:
2237 *	None.
2238 */
2239void sata_print_link_status(struct ata_port *ap)
2240{
2241	u32 sstatus, scontrol, tmp;
2242
2243	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
2244		return;
2245	sata_scr_read(ap, SCR_CONTROL, &scontrol);
2246
2247	if (ata_port_online(ap)) {
2248		tmp = (sstatus >> 4) & 0xf;
2249		ata_port_printk(ap, KERN_INFO,
2250				"SATA link up %s (SStatus %X SControl %X)\n",
2251				sata_spd_string(tmp), sstatus, scontrol);
2252	} else {
2253		ata_port_printk(ap, KERN_INFO,
2254				"SATA link down (SStatus %X SControl %X)\n",
2255				sstatus, scontrol);
2256	}
2257}
2258
2259/**
2260 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
2261 *	@ap: SATA port associated with target SATA PHY.
2262 *
2263 *	This function issues commands to standard SATA Sxxx
2264 *	PHY registers, to wake up the phy (and device), and
2265 *	clear any reset condition.
2266 *
2267 *	LOCKING:
2268 *	PCI/etc. bus probe sem.
2269 *
2270 */
2271void __sata_phy_reset(struct ata_port *ap)
2272{
2273	u32 sstatus;
2274	unsigned long timeout = jiffies + (HZ * 5);
2275
2276	if (ap->flags & ATA_FLAG_SATA_RESET) {
2277		/* issue phy wake/reset */
2278		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
2279		/* Couldn't find anything in SATA I/II specs, but
2280		 * AHCI-1.1 10.4.2 says at least 1 ms. */
2281		mdelay(1);
2282	}
2283	/* phy wake/clear reset */
2284	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
2285
2286	/* wait for phy to become ready, if necessary */
2287	do {
2288		msleep(200);
2289		sata_scr_read(ap, SCR_STATUS, &sstatus);
2290		if ((sstatus & 0xf) != 1)
2291			break;
2292	} while (time_before(jiffies, timeout));
2293
2294	/* print link status */
2295	sata_print_link_status(ap);
2296
2297	/* TODO: phy layer with polling, timeouts, etc. */
2298	if (!ata_port_offline(ap))
2299		ata_port_probe(ap);
2300	else
2301		ata_port_disable(ap);
2302
2303	if (ap->flags & ATA_FLAG_DISABLED)
2304		return;
2305
2306	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2307		ata_port_disable(ap);
2308		return;
2309	}
2310
2311	ap->cbl = ATA_CBL_SATA;
2312}
2313
2314/**
2315 *	sata_phy_reset - Reset SATA bus.
2316 *	@ap: SATA port associated with target SATA PHY.
2317 *
2318 *	This function resets the SATA bus, and then probes
2319 *	the bus for devices.
2320 *
2321 *	LOCKING:
2322 *	PCI/etc. bus probe sem.
2323 *
2324 */
2325void sata_phy_reset(struct ata_port *ap)
2326{
2327	__sata_phy_reset(ap);
2328	if (ap->flags & ATA_FLAG_DISABLED)
2329		return;
2330	ata_bus_reset(ap);
2331}
2332
2333/**
2334 *	ata_dev_pair		-	return other device on cable
2335 *	@adev: device
2336 *
 *	Obtain the other device on the same cable, or NULL if no
 *	other device is present.
2339 */
2340
2341struct ata_device *ata_dev_pair(struct ata_device *adev)
2342{
2343	struct ata_port *ap = adev->ap;
2344	struct ata_device *pair = &ap->device[1 - adev->devno];
2345	if (!ata_dev_enabled(pair))
2346		return NULL;
2347	return pair;
2348}
2349
2350/**
2351 *	ata_port_disable - Disable port.
2352 *	@ap: Port to be disabled.
2353 *
2354 *	Modify @ap data structure such that the system
2355 *	thinks that the entire port is disabled, and should
2356 *	never attempt to probe or communicate with devices
2357 *	on this port.
2358 *
2359 *	LOCKING: host lock, or some other form of
2360 *	serialization.
2361 */
2362
2363void ata_port_disable(struct ata_port *ap)
2364{
2365	ap->device[0].class = ATA_DEV_NONE;
2366	ap->device[1].class = ATA_DEV_NONE;
2367	ap->flags |= ATA_FLAG_DISABLED;
2368}
2369
2370/**
2371 *	sata_down_spd_limit - adjust SATA spd limit downward
2372 *	@ap: Port to adjust SATA spd limit for
2373 *
2374 *	Adjust SATA spd limit of @ap downward.  Note that this
2375 *	function only adjusts the limit.  The change must be applied
2376 *	using sata_set_spd().
2377 *
2378 *	LOCKING:
2379 *	Inherited from caller.
2380 *
2381 *	RETURNS:
2382 *	0 on success, negative errno on failure
2383 */
2384int sata_down_spd_limit(struct ata_port *ap)
2385{
2386	u32 sstatus, spd, mask;
2387	int rc, highbit;
2388
2389	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2390	if (rc)
2391		return rc;
2392
2393	mask = ap->sata_spd_limit;
2394	if (mask <= 1)
2395		return -EINVAL;
2396	highbit = fls(mask) - 1;
2397	mask &= ~(1 << highbit);
2398
2399	spd = (sstatus >> 4) & 0xf;
2400	if (spd <= 1)
2401		return -EINVAL;
2402	spd--;
2403	mask &= (1 << spd) - 1;
2404	if (!mask)
2405		return -EINVAL;
2406
2407	ap->sata_spd_limit = mask;
2408
2409	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2410			sata_spd_string(fls(mask)));
2411
2412	return 0;
2413}
2414
2415static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
2416{
2417	u32 spd, limit;
2418
2419	if (ap->sata_spd_limit == UINT_MAX)
2420		limit = 0;
2421	else
2422		limit = fls(ap->sata_spd_limit);
2423
2424	spd = (*scontrol >> 4) & 0xf;
2425	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2426
2427	return spd != limit;
2428}
2429
2430/**
2431 *	sata_set_spd_needed - is SATA spd configuration needed
2432 *	@ap: Port in question
2433 *
2434 *	Test whether the spd limit in SControl matches
2435 *	@ap->sata_spd_limit.  This function is used to determine
2436 *	whether hardreset is necessary to apply SATA spd
2437 *	configuration.
2438 *
2439 *	LOCKING:
2440 *	Inherited from caller.
2441 *
2442 *	RETURNS:
2443 *	1 if SATA spd configuration is needed, 0 otherwise.
2444 */
2445int sata_set_spd_needed(struct ata_port *ap)
2446{
2447	u32 scontrol;
2448
2449	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
2450		return 0;
2451
2452	return __sata_set_spd_needed(ap, &scontrol);
2453}
2454
2455/**
2456 *	sata_set_spd - set SATA spd according to spd limit
2457 *	@ap: Port to set SATA spd for
2458 *
2459 *	Set SATA spd of @ap according to sata_spd_limit.
2460 *
2461 *	LOCKING:
2462 *	Inherited from caller.
2463 *
2464 *	RETURNS:
2465 *	0 if spd doesn't need to be changed, 1 if spd has been
2466 *	changed.  Negative errno if SCR registers are inaccessible.
2467 */
2468int sata_set_spd(struct ata_port *ap)
2469{
2470	u32 scontrol;
2471	int rc;
2472
2473	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2474		return rc;
2475
2476	if (!__sata_set_spd_needed(ap, &scontrol))
2477		return 0;
2478
2479	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2480		return rc;
2481
2482	return 1;
2483}
2484
2485/*
2486 * This mode timing computation functionality is ported over from
2487 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2488 */
2489/*
2490 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2491 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2492 * for UDMA6, which is currently supported only by Maxtor drives.
2493 *
 * For PIO 5/6 and MWDMA 3/4, see the CFA specification 3.0.
2495 */
2496
2497static const struct ata_timing ata_timing[] = {
2498
2499	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
2500	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
2501	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
2502	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
2503
2504	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
2505	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
2506	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
2507	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
2508	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
2509
2510/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
2511
2512	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
2513	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
2514	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
2515
2516	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
2517	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
2518	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
2519
2520	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
2521	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
2522	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
2523	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
2524
2525	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
2526	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
2527	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
2528
2529/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
2530
2531	{ 0xFF }
2532};
2533
2534#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
2535#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
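
/*
 * Worked example: ata_timing_quantize() below converts the nanosecond
 * table values into bus clock counts, rounding up.  With a 33 MHz bus
 * clock, T is the clock period in picoseconds (about 30303, since the
 * nanosecond values are multiplied by 1000 first), so a 70 ns setup
 * time becomes EZ(70 * 1000, 30303) == (70000 - 1) / 30303 + 1 == 3
 * clocks, while a zero field stays zero instead of rounding up to 1.
 */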
2536
2537static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2538{
2539	q->setup   = EZ(t->setup   * 1000,  T);
2540	q->act8b   = EZ(t->act8b   * 1000,  T);
2541	q->rec8b   = EZ(t->rec8b   * 1000,  T);
2542	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
2543	q->active  = EZ(t->active  * 1000,  T);
2544	q->recover = EZ(t->recover * 1000,  T);
2545	q->cycle   = EZ(t->cycle   * 1000,  T);
2546	q->udma    = EZ(t->udma    * 1000, UT);
2547}
2548
2549void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2550		      struct ata_timing *m, unsigned int what)
2551{
2552	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
2553	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
2554	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
2555	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
2556	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
2557	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2558	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
2559	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
2560}
2561
2562static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2563{
2564	const struct ata_timing *t;
2565
2566	for (t = ata_timing; t->mode != speed; t++)
2567		if (t->mode == 0xFF)
2568			return NULL;
2569	return t;
2570}
2571
2572int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2573		       struct ata_timing *t, int T, int UT)
2574{
2575	const struct ata_timing *s;
2576	struct ata_timing p;
2577
2578	/*
2579	 * Find the mode.
2580	 */
2581
2582	if (!(s = ata_timing_find_mode(speed)))
2583		return -EINVAL;
2584
2585	memcpy(t, s, sizeof(*s));
2586
2587	/*
2588	 * If the drive is an EIDE drive, it can tell us it needs extended
2589	 * PIO/MW_DMA cycle timing.
2590	 */
2591
2592	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
2593		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2598			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2599		}
2600		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2601	}
2602
2603	/*
2604	 * Convert the timing to bus clock counts.
2605	 */
2606
2607	ata_timing_quantize(t, t, T, UT);
2608
2609	/*
2610	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that the
	 * DMA cycle timing is no faster than the fastest PIO timing.
2613	 */
2614
2615	if (speed > XFER_PIO_6) {
2616		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2617		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2618	}
2619
2620	/*
2621	 * Lengthen active & recovery time so that cycle time is correct.
2622	 */
2623
2624	if (t->act8b + t->rec8b < t->cyc8b) {
2625		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2626		t->rec8b = t->cyc8b - t->act8b;
2627	}
2628
2629	if (t->active + t->recover < t->cycle) {
2630		t->active += (t->cycle - (t->active + t->recover)) / 2;
2631		t->recover = t->cycle - t->active;
2632	}
2633
	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery;
	   if so, we must correct this. */
2637	if (t->active + t->recover > t->cycle)
2638		t->cycle = t->active + t->recover;
2639
2640	return 0;
2641}
2642
2643/**
2644 *	ata_down_xfermask_limit - adjust dev xfer masks downward
2645 *	@dev: Device to adjust xfer masks
2646 *	@sel: ATA_DNXFER_* selector
2647 *
2648 *	Adjust xfer masks of @dev downward.  Note that this function
2649 *	does not apply the change.  Invoking ata_set_mode() afterwards
2650 *	will apply the limit.
2651 *
2652 *	LOCKING:
2653 *	Inherited from caller.
2654 *
2655 *	RETURNS:
2656 *	0 on success, negative errno on failure
2657 */
2658int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2659{
2660	char buf[32];
2661	unsigned int orig_mask, xfer_mask;
2662	unsigned int pio_mask, mwdma_mask, udma_mask;
2663	int quiet, highbit;
2664
2665	quiet = !!(sel & ATA_DNXFER_QUIET);
2666	sel &= ~ATA_DNXFER_QUIET;
2667
2668	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2669						  dev->mwdma_mask,
2670						  dev->udma_mask);
2671	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2672
2673	switch (sel) {
2674	case ATA_DNXFER_PIO:
2675		highbit = fls(pio_mask) - 1;
2676		pio_mask &= ~(1 << highbit);
2677		break;
2678
2679	case ATA_DNXFER_DMA:
2680		if (udma_mask) {
2681			highbit = fls(udma_mask) - 1;
2682			udma_mask &= ~(1 << highbit);
2683			if (!udma_mask)
2684				return -ENOENT;
2685		} else if (mwdma_mask) {
2686			highbit = fls(mwdma_mask) - 1;
2687			mwdma_mask &= ~(1 << highbit);
2688			if (!mwdma_mask)
2689				return -ENOENT;
2690		}
2691		break;
2692
2693	case ATA_DNXFER_40C:
2694		udma_mask &= ATA_UDMA_MASK_40C;
2695		break;
2696
2697	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
2699	case ATA_DNXFER_FORCE_PIO:
2700		mwdma_mask = 0;
2701		udma_mask = 0;
2702		break;
2703
2704	default:
2705		BUG();
2706	}
2707
2708	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2709
2710	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2711		return -ENOENT;
2712
2713	if (!quiet) {
2714		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2715			snprintf(buf, sizeof(buf), "%s:%s",
2716				 ata_mode_string(xfer_mask),
2717				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2718		else
2719			snprintf(buf, sizeof(buf), "%s",
2720				 ata_mode_string(xfer_mask));
2721
2722		ata_dev_printk(dev, KERN_WARNING,
2723			       "limiting speed to %s\n", buf);
2724	}
2725
2726	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2727			    &dev->udma_mask);
2728
2729	return 0;
2730}
2731
2732static int ata_dev_set_mode(struct ata_device *dev)
2733{
2734	struct ata_eh_context *ehc = &dev->ap->eh_context;
2735	unsigned int err_mask;
2736	int rc;
2737
2738	dev->flags &= ~ATA_DFLAG_PIO;
2739	if (dev->xfer_shift == ATA_SHIFT_PIO)
2740		dev->flags |= ATA_DFLAG_PIO;
2741
2742	err_mask = ata_dev_set_xfermode(dev);
2743	/* Old CFA may refuse this command, which is just fine */
2744	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;
2746
2747	if (err_mask) {
2748		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2749			       "(err_mask=0x%x)\n", err_mask);
2750		return -EIO;
2751	}
2752
2753	ehc->i.flags |= ATA_EHI_POST_SETMODE;
2754	rc = ata_dev_revalidate(dev, 0);
2755	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2756	if (rc)
2757		return rc;
2758
2759	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2760		dev->xfer_shift, (int)dev->xfer_mode);
2761
2762	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2763		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2764	return 0;
2765}
2766
2767/**
2768 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2769 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
2771 *
2772 *	Standard implementation of the function used to tune and set
2773 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
2774 *	ata_dev_set_mode() fails, pointer to the failing device is
2775 *	returned in @r_failed_dev.
2776 *
2777 *	LOCKING:
2778 *	PCI/etc. bus probe sem.
2779 *
2780 *	RETURNS:
2781 *	0 on success, negative errno otherwise
2782 */
2783
2784int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2785{
2786	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

2790	/* step 1: calculate xfer_mask */
2791	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2792		unsigned int pio_mask, dma_mask;
2793
2794		dev = &ap->device[i];
2795
2796		if (!ata_dev_enabled(dev))
2797			continue;
2798
2799		ata_dev_xfermask(dev);
2800
2801		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2802		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2803		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2804		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2805
2806		found = 1;
2807		if (dev->dma_mode)
2808			used_dma = 1;
2809	}
2810	if (!found)
2811		goto out;
2812
2813	/* step 2: always set host PIO timings */
2814	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2815		dev = &ap->device[i];
2816		if (!ata_dev_enabled(dev))
2817			continue;
2818
2819		if (!dev->pio_mode) {
2820			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2821			rc = -EINVAL;
2822			goto out;
2823		}
2824
2825		dev->xfer_mode = dev->pio_mode;
2826		dev->xfer_shift = ATA_SHIFT_PIO;
2827		if (ap->ops->set_piomode)
2828			ap->ops->set_piomode(ap, dev);
2829	}
2830
2831	/* step 3: set host DMA timings */
2832	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2833		dev = &ap->device[i];
2834
2835		if (!ata_dev_enabled(dev) || !dev->dma_mode)
2836			continue;
2837
2838		dev->xfer_mode = dev->dma_mode;
2839		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2840		if (ap->ops->set_dmamode)
2841			ap->ops->set_dmamode(ap, dev);
2842	}
2843
2844	/* step 4: update devices' xfer mode */
2845	for (i = 0; i < ATA_MAX_DEVICES; i++) {
2846		dev = &ap->device[i];
2847
		/* skip devices that aren't enabled */
2849		if (!ata_dev_enabled(dev))
2850			continue;
2851
2852		rc = ata_dev_set_mode(dev);
2853		if (rc)
2854			goto out;
2855	}
2856
2857	/* Record simplex status. If we selected DMA then the other
2858	 * host channels are not permitted to do so.
2859	 */
2860	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2861		ap->host->simplex_claimed = ap;
2862
2863 out:
2864	if (rc)
2865		*r_failed_dev = dev;
2866	return rc;
2867}
2868
2869/**
2870 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
2871 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
2873 *
2874 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
2875 *	ata_set_mode() fails, pointer to the failing device is
2876 *	returned in @r_failed_dev.
2877 *
2878 *	LOCKING:
2879 *	PCI/etc. bus probe sem.
2880 *
2881 *	RETURNS:
2882 *	0 on success, negative errno otherwise
2883 */
2884int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2885{
2886	/* has private set_mode? */
2887	if (ap->ops->set_mode)
2888		return ap->ops->set_mode(ap, r_failed_dev);
2889	return ata_do_set_mode(ap, r_failed_dev);
2890}
2891
2892/**
2893 *	ata_tf_to_host - issue ATA taskfile to host controller
2894 *	@ap: port to which command is being issued
2895 *	@tf: ATA taskfile register set
2896 *
2897 *	Issues ATA taskfile register set to ATA host controller,
2898 *	with proper synchronization with interrupt handler and
2899 *	other threads.
2900 *
2901 *	LOCKING:
2902 *	spin_lock_irqsave(host lock)
2903 */
2904
2905static inline void ata_tf_to_host(struct ata_port *ap,
2906				  const struct ata_taskfile *tf)
2907{
2908	ap->ops->tf_load(ap, tf);
2909	ap->ops->exec_command(ap, tf);
2910}
2911
2912/**
2913 *	ata_busy_sleep - sleep until BSY clears, or timeout
2914 *	@ap: port containing status register to be polled
2915 *	@tmout_pat: impatience timeout
2916 *	@tmout: overall timeout
2917 *
2918 *	Sleep until ATA Status register bit BSY clears,
2919 *	or a timeout occurs.
2920 *
2921 *	LOCKING:
2922 *	Kernel thread context (may sleep).
2923 *
2924 *	RETURNS:
2925 *	0 on success, -errno otherwise.
2926 */
2927int ata_busy_sleep(struct ata_port *ap,
2928		   unsigned long tmout_pat, unsigned long tmout)
2929{
2930	unsigned long timer_start, timeout;
2931	u8 status;
2932
2933	status = ata_busy_wait(ap, ATA_BUSY, 300);
2934	timer_start = jiffies;
2935	timeout = timer_start + tmout_pat;
2936	while (status != 0xff && (status & ATA_BUSY) &&
2937	       time_before(jiffies, timeout)) {
2938		msleep(50);
2939		status = ata_busy_wait(ap, ATA_BUSY, 3);
2940	}
2941
2942	if (status != 0xff && (status & ATA_BUSY))
2943		ata_port_printk(ap, KERN_WARNING,
2944				"port is slow to respond, please be patient "
2945				"(Status 0x%x)\n", status);
2946
2947	timeout = timer_start + tmout;
2948	while (status != 0xff && (status & ATA_BUSY) &&
2949	       time_before(jiffies, timeout)) {
2950		msleep(50);
2951		status = ata_chk_status(ap);
2952	}
2953
2954	if (status == 0xff)
2955		return -ENODEV;
2956
2957	if (status & ATA_BUSY) {
2958		ata_port_printk(ap, KERN_ERR, "port failed to respond "
2959				"(%lu secs, Status 0x%x)\n",
2960				tmout / HZ, status);
2961		return -EBUSY;
2962	}
2963
2964	return 0;
2965}
2966
2967/**
2968 *	ata_wait_ready - sleep until BSY clears, or timeout
2969 *	@ap: port containing status register to be polled
2970 *	@deadline: deadline jiffies for the operation
2971 *
2972 *	Sleep until ATA Status register bit BSY clears, or timeout
2973 *	occurs.
2974 *
2975 *	LOCKING:
2976 *	Kernel thread context (may sleep).
2977 *
2978 *	RETURNS:
2979 *	0 on success, -errno otherwise.
2980 */
2981int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
2982{
2983	unsigned long start = jiffies;
2984	int warned = 0;
2985
2986	while (1) {
2987		u8 status = ata_chk_status(ap);
2988		unsigned long now = jiffies;
2989
2990		if (!(status & ATA_BUSY))
2991			return 0;
2992		if (!ata_port_online(ap) && status == 0xff)
2993			return -ENODEV;
2994		if (time_after(now, deadline))
2995			return -EBUSY;
2996
2997		if (!warned && time_after(now, start + 5 * HZ) &&
2998		    (deadline - now > 3 * HZ)) {
2999			ata_port_printk(ap, KERN_WARNING,
3000				"port is slow to respond, please be patient "
3001				"(Status 0x%x)\n", status);
3002			warned = 1;
3003		}
3004
3005		msleep(50);
3006	}
3007}
3008
3009static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3010			      unsigned long deadline)
3011{
3012	struct ata_ioports *ioaddr = &ap->ioaddr;
3013	unsigned int dev0 = devmask & (1 << 0);
3014	unsigned int dev1 = devmask & (1 << 1);
3015	int rc, ret = 0;
3016
3017	/* if device 0 was found in ata_devchk, wait for its
3018	 * BSY bit to clear
3019	 */
3020	if (dev0) {
3021		rc = ata_wait_ready(ap, deadline);
3022		if (rc) {
3023			if (rc != -ENODEV)
3024				return rc;
3025			ret = rc;
3026		}
3027	}
3028
3029	/* if device 1 was found in ata_devchk, wait for register
3030	 * access briefly, then wait for BSY to clear.
3031	 */
3032	if (dev1) {
3033		int i;
3034
3035		ap->ops->dev_select(ap, 1);
3036
3037		/* Wait for register access.  Some ATAPI devices fail
3038		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're going to wait for !BSY anyway.
3040		 */
3041		for (i = 0; i < 2; i++) {
3042			u8 nsect, lbal;
3043
3044			nsect = ioread8(ioaddr->nsect_addr);
3045			lbal = ioread8(ioaddr->lbal_addr);
3046			if ((nsect == 1) && (lbal == 1))
3047				break;
3048			msleep(50);	/* give drive a breather */
3049		}
3050
3051		rc = ata_wait_ready(ap, deadline);
3052		if (rc) {
3053			if (rc != -ENODEV)
3054				return rc;
3055			ret = rc;
3056		}
3057	}
3058
3059	/* is all this really necessary? */
3060	ap->ops->dev_select(ap, 0);
3061	if (dev1)
3062		ap->ops->dev_select(ap, 1);
3063	if (dev0)
3064		ap->ops->dev_select(ap, 0);
3065
3066	return ret;
3067}
3068
3069static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3070			     unsigned long deadline)
3071{
3072	struct ata_ioports *ioaddr = &ap->ioaddr;
3073
3074	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3075
3076	/* software reset.  causes dev0 to be selected */
3077	iowrite8(ap->ctl, ioaddr->ctl_addr);
3078	udelay(20);
3079	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3080	udelay(20);
3081	iowrite8(ap->ctl, ioaddr->ctl_addr);
3082
3083	/* spec mandates ">= 2ms" before checking status.
3084	 * We wait 150ms, because that was the magic delay used for
3085	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3086	 * between when the ATA command register is written, and then
3087	 * status is checked.  Because waiting for "a while" before
3088	 * checking status is fine, post SRST, we perform this magic
3089	 * delay here as well.
3090	 *
	 * The old drivers/ide code uses the 2 ms rule and then waits for ready.
3092	 */
3093	msleep(150);
3094
3095	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because some odd hardware omits the D7
	 * pulldown resistor.
3098	 */
3099	if (ata_check_status(ap) == 0xFF)
3100		return -ENODEV;
3101
3102	return ata_bus_post_reset(ap, devmask, deadline);
3103}
3104
3105/**
3106 *	ata_bus_reset - reset host port and associated ATA channel
3107 *	@ap: port to reset
3108 *
3109 *	This is typically the first time we actually start issuing
3110 *	commands to the ATA channel.  We wait for BSY to clear, then
3111 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3112 *	result.  Determine what devices, if any, are on the channel
3113 *	by looking at the device 0/1 error register.  Look at the signature
3114 *	stored in each device's taskfile registers, to determine if
3115 *	the device is ATA or ATAPI.
3116 *
3117 *	LOCKING:
3118 *	PCI/etc. bus probe sem.
3119 *	Obtains host lock.
3120 *
3121 *	SIDE EFFECTS:
3122 *	Sets ATA_FLAG_DISABLED if bus reset fails.
3123 */
3124
3125void ata_bus_reset(struct ata_port *ap)
3126{
3127	struct ata_ioports *ioaddr = &ap->ioaddr;
3128	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3129	u8 err;
3130	unsigned int dev0, dev1 = 0, devmask = 0;
3131	int rc;
3132
3133	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3134
3135	/* determine if device 0/1 are present */
3136	if (ap->flags & ATA_FLAG_SATA_RESET)
3137		dev0 = 1;
3138	else {
3139		dev0 = ata_devchk(ap, 0);
3140		if (slave_possible)
3141			dev1 = ata_devchk(ap, 1);
3142	}
3143
3144	if (dev0)
3145		devmask |= (1 << 0);
3146	if (dev1)
3147		devmask |= (1 << 1);
3148
3149	/* select device 0 again */
3150	ap->ops->dev_select(ap, 0);
3151
3152	/* issue bus reset */
3153	if (ap->flags & ATA_FLAG_SRST) {
3154		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3155		if (rc && rc != -ENODEV)
3156			goto err_out;
3157	}
3158
3159	/*
3160	 * determine by signature whether we have ATA or ATAPI devices
3161	 */
3162	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
3164		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
3165
3166	/* re-enable interrupts */
3167	ap->ops->irq_on(ap);
3168
3169	/* is double-select really necessary? */
3170	if (ap->device[1].class != ATA_DEV_NONE)
3171		ap->ops->dev_select(ap, 1);
3172	if (ap->device[0].class != ATA_DEV_NONE)
3173		ap->ops->dev_select(ap, 0);
3174
3175	/* if no devices were detected, disable this port */
3176	if ((ap->device[0].class == ATA_DEV_NONE) &&
3177	    (ap->device[1].class == ATA_DEV_NONE))
3178		goto err_out;
3179
3180	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3181		/* set up device control for ATA_FLAG_SATA_RESET */
3182		iowrite8(ap->ctl, ioaddr->ctl_addr);
3183	}
3184
3185	DPRINTK("EXIT\n");
3186	return;
3187
3188err_out:
3189	ata_port_printk(ap, KERN_ERR, "disabling port\n");
3190	ap->ops->port_disable(ap);
3191
3192	DPRINTK("EXIT\n");
3193}
3194
3195/**
3196 *	sata_phy_debounce - debounce SATA phy status
3197 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
3199 *	@deadline: deadline jiffies for the operation
3200 *
3201 *	Make sure SStatus of @ap reaches stable state, determined by
3202 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  The timeout constrains the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until the timeout expires and then returns 0 if DET is stable at 1.
3207 *
3208 *	@timeout is further limited by @deadline.  The sooner of the
3209 *	two is used.
3210 *
3211 *	LOCKING:
3212 *	Kernel thread context (may sleep)
3213 *
3214 *	RETURNS:
3215 *	0 on success, -errno on failure.
3216 */
3217int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3218		      unsigned long deadline)
3219{
3220	unsigned long interval_msec = params[0];
3221	unsigned long duration = msecs_to_jiffies(params[1]);
3222	unsigned long last_jiffies, t;
3223	u32 last, cur;
3224	int rc;
3225
3226	t = jiffies + msecs_to_jiffies(params[2]);
3227	if (time_before(t, deadline))
3228		deadline = t;
3229
3230	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3231		return rc;
3232	cur &= 0xf;
3233
3234	last = cur;
3235	last_jiffies = jiffies;
3236
3237	while (1) {
3238		msleep(interval_msec);
3239		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3240			return rc;
3241		cur &= 0xf;
3242
3243		/* DET stable? */
3244		if (cur == last) {
3245			if (cur == 1 && time_before(jiffies, deadline))
3246				continue;
3247			if (time_after(jiffies, last_jiffies + duration))
3248				return 0;
3249			continue;
3250		}
3251
3252		/* unstable, start over */
3253		last = cur;
3254		last_jiffies = jiffies;
3255
3256		/* check deadline */
3257		if (time_after(jiffies, deadline))
3258			return -EBUSY;
3259	}
3260}
3261
3262/**
3263 *	sata_phy_resume - resume SATA phy
3264 *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duration, timeout } in msec
3266 *	@deadline: deadline jiffies for the operation
3267 *
3268 *	Resume SATA phy of @ap and debounce it.
3269 *
3270 *	LOCKING:
3271 *	Kernel thread context (may sleep)
3272 *
3273 *	RETURNS:
3274 *	0 on success, -errno on failure.
3275 */
3276int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3277		    unsigned long deadline)
3278{
3279	u32 scontrol;
3280	int rc;
3281
3282	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3283		return rc;
3284
3285	scontrol = (scontrol & 0x0f0) | 0x300;
3286
3287	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3288		return rc;
3289
3290	/* Some PHYs react badly if SStatus is pounded immediately
3291	 * after resuming.  Delay 200ms before debouncing.
3292	 */
3293	msleep(200);
3294
3295	return sata_phy_debounce(ap, params, deadline);
3296}
3297
3298/**
3299 *	ata_std_prereset - prepare for reset
3300 *	@ap: ATA port to be reset
3301 *	@deadline: deadline jiffies for the operation
3302 *
3303 *	@ap is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort the whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for the reset sequence, but if things go wrong,
 *	it should just whine, not fail.
3308 *
3309 *	LOCKING:
3310 *	Kernel thread context (may sleep)
3311 *
3312 *	RETURNS:
3313 *	0 on success, -errno otherwise.
3314 */
3315int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
3316{
3317	struct ata_eh_context *ehc = &ap->eh_context;
3318	const unsigned long *timing = sata_ehc_deb_timing(ehc);
3319	int rc;
3320
3321	/* handle link resume */
3322	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3323	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3324		ehc->i.action |= ATA_EH_HARDRESET;
3325
3326	/* if we're about to do hardreset, nothing more to do */
3327	if (ehc->i.action & ATA_EH_HARDRESET)
3328		return 0;
3329
3330	/* if SATA, resume phy */
3331	if (ap->cbl == ATA_CBL_SATA) {
3332		rc = sata_phy_resume(ap, timing, deadline);
3333		/* whine about phy resume failure but proceed */
3334		if (rc && rc != -EOPNOTSUPP)
3335			ata_port_printk(ap, KERN_WARNING, "failed to resume "
3336					"link for reset (errno=%d)\n", rc);
3337	}
3338
3339	/* Wait for !BSY if the controller can wait for the first D2H
3340	 * Reg FIS and we don't know that no device is attached.
3341	 */
3342	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
3343		rc = ata_wait_ready(ap, deadline);
3344		if (rc && rc != -ENODEV) {
3345			ata_port_printk(ap, KERN_WARNING, "device not ready "
3346					"(errno=%d), forcing hardreset\n", rc);
3347			ehc->i.action |= ATA_EH_HARDRESET;
3348		}
3349	}
3350
3351	return 0;
3352}
3353
3354/**
3355 *	ata_std_softreset - reset host port via ATA SRST
3356 *	@ap: port to reset
3357 *	@classes: resulting classes of attached devices
3358 *	@deadline: deadline jiffies for the operation
3359 *
3360 *	Reset host port using ATA SRST.
3361 *
3362 *	LOCKING:
3363 *	Kernel thread context (may sleep)
3364 *
3365 *	RETURNS:
3366 *	0 on success, -errno otherwise.
3367 */
3368int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3369		      unsigned long deadline)
3370{
3371	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3372	unsigned int devmask = 0;
3373	int rc;
3374	u8 err;
3375
3376	DPRINTK("ENTER\n");
3377
3378	if (ata_port_offline(ap)) {
3379		classes[0] = ATA_DEV_NONE;
3380		goto out;
3381	}
3382
3383	/* determine if device 0/1 are present */
3384	if (ata_devchk(ap, 0))
3385		devmask |= (1 << 0);
3386	if (slave_possible && ata_devchk(ap, 1))
3387		devmask |= (1 << 1);
3388
3389	/* select device 0 again */
3390	ap->ops->dev_select(ap, 0);
3391
3392	/* issue bus reset */
3393	DPRINTK("about to softreset, devmask=%x\n", devmask);
3394	rc = ata_bus_softreset(ap, devmask, deadline);
3395	/* if link is occupied, -ENODEV too is an error */
3396	if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
3397		ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3398		return rc;
3399	}
3400
3401	/* determine by signature whether we have ATA or ATAPI devices */
3402	classes[0] = ata_dev_try_classify(ap, 0, &err);
3403	if (slave_possible && err != 0x81)
3404		classes[1] = ata_dev_try_classify(ap, 1, &err);
3405
3406 out:
3407	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3408	return 0;
3409}
3410
3411/**
3412 *	sata_port_hardreset - reset port via SATA phy reset
3413 *	@ap: port to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
3415 *	@deadline: deadline jiffies for the operation
3416 *
3417 *	SATA phy-reset host port using DET bits of SControl register.
3418 *
3419 *	LOCKING:
3420 *	Kernel thread context (may sleep)
3421 *
3422 *	RETURNS:
3423 *	0 on success, -errno otherwise.
3424 */
3425int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3426			unsigned long deadline)
3427{
3428	u32 scontrol;
3429	int rc;
3430
3431	DPRINTK("ENTER\n");
3432
3433	if (sata_set_spd_needed(ap)) {
3434		/* SATA spec says nothing about how to reconfigure
3435		 * spd.  To be on the safe side, turn off phy during
3436		 * reconfiguration.  This works for at least ICH7 AHCI
3437		 * and Sil3124.
3438		 */
3439		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3440			goto out;
3441
3442		scontrol = (scontrol & 0x0f0) | 0x304;
3443
3444		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3445			goto out;
3446
3447		sata_set_spd(ap);
3448	}
3449
3450	/* issue phy wake/reset */
3451	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3452		goto out;
3453
3454	scontrol = (scontrol & 0x0f0) | 0x301;
3455
3456	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
3457		goto out;
3458
3459	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3460	 * 10.4.2 says at least 1 ms.
3461	 */
3462	msleep(1);
3463
3464	/* bring phy back */
3465	rc = sata_phy_resume(ap, timing, deadline);
3466 out:
3467	DPRINTK("EXIT, rc=%d\n", rc);
3468	return rc;
3469}
3470
3471/**
3472 *	sata_std_hardreset - reset host port via SATA phy reset
3473 *	@ap: port to reset
3474 *	@class: resulting class of attached device
3475 *	@deadline: deadline jiffies for the operation
3476 *
3477 *	SATA phy-reset host port using DET bits of SControl register,
3478 *	wait for !BSY and classify the attached device.
3479 *
3480 *	LOCKING:
3481 *	Kernel thread context (may sleep)
3482 *
3483 *	RETURNS:
3484 *	0 on success, -errno otherwise.
3485 */
3486int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3487		       unsigned long deadline)
3488{
3489	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3490	int rc;
3491
3492	DPRINTK("ENTER\n");
3493
3494	/* do hardreset */
3495	rc = sata_port_hardreset(ap, timing, deadline);
3496	if (rc) {
3497		ata_port_printk(ap, KERN_ERR,
3498				"COMRESET failed (errno=%d)\n", rc);
3499		return rc;
3500	}
3501
3502	/* TODO: phy layer with polling, timeouts, etc. */
3503	if (ata_port_offline(ap)) {
3504		*class = ATA_DEV_NONE;
3505		DPRINTK("EXIT, link offline\n");
3506		return 0;
3507	}
3508
3509	/* wait a while before checking status, see SRST for more info */
3510	msleep(150);
3511
3512	rc = ata_wait_ready(ap, deadline);
3513	/* link occupied, -ENODEV too is an error */
3514	if (rc) {
3515		ata_port_printk(ap, KERN_ERR,
3516				"COMRESET failed (errno=%d)\n", rc);
3517		return rc;
3518	}
3519
3520	ap->ops->dev_select(ap, 0);	/* probably unnecessary */
3521
3522	*class = ata_dev_try_classify(ap, 0, NULL);
3523
3524	DPRINTK("EXIT, class=%u\n", *class);
3525	return 0;
3526}
3527
3528/**
3529 *	ata_std_postreset - standard postreset callback
3530 *	@ap: the target ata_port
3531 *	@classes: classes of attached devices
3532 *
3533 *	This function is invoked after a successful reset.  Note that
3534 *	the device might have been reset more than once using
3535 *	different reset methods before postreset is invoked.
3536 *
3537 *	LOCKING:
3538 *	Kernel thread context (may sleep)
3539 */
3540void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3541{
3542	u32 serror;
3543
3544	DPRINTK("ENTER\n");
3545
3546	/* print link status */
3547	sata_print_link_status(ap);
3548
3549	/* clear SError */
3550	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3551		sata_scr_write(ap, SCR_ERROR, serror);
3552
3553	/* re-enable interrupts */
3554	if (!ap->ops->error_handler)
3555		ap->ops->irq_on(ap);
3556
3557	/* is double-select really necessary? */
3558	if (classes[0] != ATA_DEV_NONE)
3559		ap->ops->dev_select(ap, 1);
3560	if (classes[1] != ATA_DEV_NONE)
3561		ap->ops->dev_select(ap, 0);
3562
3563	/* bail out if no device is present */
3564	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3565		DPRINTK("EXIT, no device\n");
3566		return;
3567	}
3568
3569	/* set up device control */
3570	if (ap->ioaddr.ctl_addr)
3571		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3572
3573	DPRINTK("EXIT\n");
3574}
3575
3576/**
3577 *	ata_dev_same_device - Determine whether new ID matches configured device
3578 *	@dev: device to compare against
3579 *	@new_class: class of the new device
3580 *	@new_id: IDENTIFY page of the new device
3581 *
3582 *	Compare @new_class and @new_id against @dev and determine
3583 *	whether @dev is the device indicated by @new_class and
3584 *	@new_id.
3585 *
3586 *	LOCKING:
3587 *	None.
3588 *
3589 *	RETURNS:
3590 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3591 */
3592static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3593			       const u16 *new_id)
3594{
3595	const u16 *old_id = dev->id;
3596	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3597	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3598
3599	if (dev->class != new_class) {
3600		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3601			       dev->class, new_class);
3602		return 0;
3603	}
3604
3605	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3606	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3607	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3608	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3609
3610	if (strcmp(model[0], model[1])) {
3611		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3612			       "'%s' != '%s'\n", model[0], model[1]);
3613		return 0;
3614	}
3615
3616	if (strcmp(serial[0], serial[1])) {
3617		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3618			       "'%s' != '%s'\n", serial[0], serial[1]);
3619		return 0;
3620	}
3621
3622	return 1;
3623}
3624
3625/**
3626 *	ata_dev_reread_id - Re-read IDENTIFY data
3627 *	@dev: target ATA device
3628 *	@readid_flags: read ID flags
3629 *
3630 *	Re-read IDENTIFY page and make sure @dev is still attached to
3631 *	the port.
3632 *
3633 *	LOCKING:
3634 *	Kernel thread context (may sleep)
3635 *
3636 *	RETURNS:
3637 *	0 on success, negative errno otherwise
3638 */
3639int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3640{
3641	unsigned int class = dev->class;
3642	u16 *id = (void *)dev->ap->sector_buf;
3643	int rc;
3644
3645	/* read ID data */
3646	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3647	if (rc)
3648		return rc;
3649
3650	/* is the device still there? */
3651	if (!ata_dev_same_device(dev, class, id))
3652		return -ENODEV;
3653
3654	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3655	return 0;
3656}
3657
3658/**
3659 *	ata_dev_revalidate - Revalidate ATA device
3660 *	@dev: device to revalidate
3661 *	@readid_flags: read ID flags
3662 *
3663 *	Re-read IDENTIFY page, make sure @dev is still attached to the
3664 *	port and reconfigure it according to the new IDENTIFY page.
3665 *
3666 *	LOCKING:
3667 *	Kernel thread context (may sleep)
3668 *
3669 *	RETURNS:
3670 *	0 on success, negative errno otherwise
3671 */
3672int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3673{
3674	u64 n_sectors = dev->n_sectors;
3675	int rc;
3676
3677	if (!ata_dev_enabled(dev))
3678		return -ENODEV;
3679
3680	/* re-read ID */
3681	rc = ata_dev_reread_id(dev, readid_flags);
3682	if (rc)
3683		goto fail;
3684
3685	/* configure device according to the new ID */
3686	rc = ata_dev_configure(dev);
3687	if (rc)
3688		goto fail;
3689
3690	/* verify n_sectors hasn't changed */
3691	if (dev->class == ATA_DEV_ATA && dev->n_sectors != n_sectors) {
3692		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3693			       "%llu != %llu\n",
3694			       (unsigned long long)n_sectors,
3695			       (unsigned long long)dev->n_sectors);
3696		rc = -ENODEV;
3697		goto fail;
3698	}
3699
3700	return 0;
3701
3702 fail:
3703	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3704	return rc;
3705}
3706
3707struct ata_blacklist_entry {
3708	const char *model_num;
3709	const char *model_rev;
3710	unsigned long horkage;
3711};
3712
static const struct ata_blacklist_entry ata_device_blacklist[] = {
3714	/* Devices with DMA related problems under Linux */
3715	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
3716	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
3717	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
3718	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
3719	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
3720	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
3721	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
3722	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
3723	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
3724	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
3725	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
3726	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
3727	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
3728	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
3729	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
3730	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
3731	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
3732	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
3733	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
3734	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
3735	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
3736	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
3737	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
3738	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
3739	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
3740	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
3741	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3742	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
3743	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
3744	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
3745	{ "IOMEGA  ZIP 250       ATAPI", NULL,	ATA_HORKAGE_NODMA }, /* temporary fix */
3746
3747	/* Weird ATAPI devices */
3748	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
3749
3750	/* Devices we expect to fail diagnostics */
3751
3752	/* Devices where NCQ should be avoided */
3753	/* NCQ is slow */
3754        { "WDC WD740ADFD-00",   NULL,		ATA_HORKAGE_NONCQ },
3755	/* http://thread.gmane.org/gmane.linux.ide/14907 */
3756	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
3757	/* NCQ is broken */
3758	{ "Maxtor 6L250S0",     "BANC1G10",     ATA_HORKAGE_NONCQ },
3759	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
3760	/* NCQ hard hangs device under heavier load, needs hard power cycle */
3761	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
3762	/* Blacklist entries taken from Silicon Image 3124/3132
3763	   Windows driver .inf file - also several Linux problem reports */
3764	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
3765	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
3766	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
3767	/* Drives which do spurious command completion */
3768	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
3769	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
3770	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
3771	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
3772
3773	/* Devices with NCQ limits */
3774
3775	/* End Marker */
3776	{ }
3777};
3778
3779unsigned long ata_device_blacklisted(const struct ata_device *dev)
3780{
3781	unsigned char model_num[ATA_ID_PROD_LEN + 1];
3782	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3783	const struct ata_blacklist_entry *ad = ata_device_blacklist;
3784
3785	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3786	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3787
3788	while (ad->model_num) {
3789		if (!strcmp(ad->model_num, model_num)) {
3790			if (ad->model_rev == NULL)
3791				return ad->horkage;
3792			if (!strcmp(ad->model_rev, model_rev))
3793				return ad->horkage;
3794		}
3795		ad++;
3796	}
3797	return 0;
3798}
3799
3800static int ata_dma_blacklisted(const struct ata_device *dev)
3801{
	/* We don't support polling DMA, so blacklist DMA (and fall back
	 * to PIO) for those ATAPI devices with CDB-intr if the LLDD
	 * handles interrupts only in the HSM_ST_LAST state.
3805	 */
3806	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3807	    (dev->flags & ATA_DFLAG_CDB_INTR))
3808		return 1;
3809	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3810}
3811
3812/**
3813 *	ata_dev_xfermask - Compute supported xfermask of the given device
3814 *	@dev: Device to compute xfermask for
3815 *
3816 *	Compute supported xfermask of @dev and store it in
3817 *	dev->*_mask.  This function is responsible for applying all
3818 *	known limits including host controller limits, device
3819 *	blacklist, etc...
3820 *
3821 *	LOCKING:
3822 *	None.
3823 */
3824static void ata_dev_xfermask(struct ata_device *dev)
3825{
3826	struct ata_port *ap = dev->ap;
3827	struct ata_host *host = ap->host;
3828	unsigned long xfer_mask;
3829
3830	/* controller modes available */
3831	xfer_mask = ata_pack_xfermask(ap->pio_mask,
3832				      ap->mwdma_mask, ap->udma_mask);
3833
3834	/* drive modes available */
3835	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3836				       dev->mwdma_mask, dev->udma_mask);
3837	xfer_mask &= ata_id_xfermask(dev->id);
3838
3839	/*
	 * CFA Advanced TrueIDE timings are not allowed on a shared
	 * cable.
3842	 */
3843	if (ata_dev_pair(dev)) {
3844		/* No PIO5 or PIO6 */
3845		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA4 */
3847		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3848	}
3849
3850	if (ata_dma_blacklisted(dev)) {
3851		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3852		ata_dev_printk(dev, KERN_WARNING,
3853			       "device is on DMA blacklist, disabling DMA\n");
3854	}
3855
3856	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
3858		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3859		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3860			       "other device, disabling DMA\n");
3861	}
3862
3863	if (ap->flags & ATA_FLAG_NO_IORDY)
3864		xfer_mask &= ata_pio_mask_no_iordy(dev);
3865
3866	if (ap->ops->mode_filter)
3867		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
3868
3869	/* Apply cable rule here.  Don't apply it early because when
3870	 * we handle hot plug the cable type can itself change.
3871	 * Check this last so that we know if the transfer rate was
3872	 * solely limited by the cable.
3873	 * Unknown or 80 wire cables reported host side are checked
3874	 * drive side as well. Cases where we know a 40wire cable
3875	 * is used safely for 80 are not checked here.
3876	 */
	/* UDMA/44 or higher would be available */
	if ((xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) &&
	    ((ap->cbl == ATA_CBL_PATA40) ||
	     (ata_drive_40wire(dev->id) &&
	      (ap->cbl == ATA_CBL_PATA_UNK ||
	       ap->cbl == ATA_CBL_PATA80)))) {
		ata_dev_printk(dev, KERN_WARNING,
			       "limited to UDMA/33 due to 40-wire cable\n");
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
	}
3887
3888	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3889			    &dev->mwdma_mask, &dev->udma_mask);
3890}
3891
3892/**
3893 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3894 *	@dev: Device to which command will be sent
3895 *
3896 *	Issue SET FEATURES - XFER MODE command to device @dev
3897 *	on port @ap.
3898 *
3899 *	LOCKING:
3900 *	PCI/etc. bus probe sem.
3901 *
3902 *	RETURNS:
3903 *	0 on success, AC_ERR_* mask otherwise.
3904 */
3905
3906static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3907{
3908	struct ata_taskfile tf;
3909	unsigned int err_mask;
3910
3911	/* set up set-features taskfile */
3912	DPRINTK("set features - xfer mode\n");
3913
3914	/* Some controllers and ATAPI devices show flaky interrupt
3915	 * behavior after setting xfer mode.  Use polling instead.
3916	 */
3917	ata_tf_init(dev, &tf);
3918	tf.command = ATA_CMD_SET_FEATURES;
3919	tf.feature = SETFEATURES_XFER;
3920	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
3921	tf.protocol = ATA_PROT_NODATA;
3922	tf.nsect = dev->xfer_mode;
3923
3924	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3925
3926	DPRINTK("EXIT, err_mask=%x\n", err_mask);
3927	return err_mask;
3928}
3929
3930/**
3931 *	ata_dev_init_params - Issue INIT DEV PARAMS command
3932 *	@dev: Device to which command will be sent
3933 *	@heads: Number of heads (taskfile parameter)
3934 *	@sectors: Number of sectors (taskfile parameter)
3935 *
3936 *	LOCKING:
3937 *	Kernel thread context (may sleep)
3938 *
3939 *	RETURNS:
3940 *	0 on success, AC_ERR_* mask otherwise.
3941 */
3942static unsigned int ata_dev_init_params(struct ata_device *dev,
3943					u16 heads, u16 sectors)
3944{
3945	struct ata_taskfile tf;
3946	unsigned int err_mask;
3947
3948	/* Number of sectors per track 1-255. Number of heads 1-16 */
3949	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3950		return AC_ERR_INVALID;
3951
3952	/* set up init dev params taskfile */
	DPRINTK("init dev params\n");
3954
3955	ata_tf_init(dev, &tf);
3956	tf.command = ATA_CMD_INIT_DEV_PARAMS;
3957	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3958	tf.protocol = ATA_PROT_NODATA;
3959	tf.nsect = sectors;
3960	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3961
3962	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3963
3964	DPRINTK("EXIT, err_mask=%x\n", err_mask);
3965	return err_mask;
3966}
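
/*
 * Example: for the classic 16-head, 63-sector CHS translation, the
 * call ata_dev_init_params(dev, 16, 63) yields tf.nsect = 63 and
 * tf.device |= 0x0f (max head index = 15).
 */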
3967
3968/**
3969 *	ata_sg_clean - Unmap DMA memory associated with command
3970 *	@qc: Command containing DMA memory to be released
3971 *
3972 *	Unmap all mapped DMA memory associated with this command.
3973 *
3974 *	LOCKING:
3975 *	spin_lock_irqsave(host lock)
3976 */
3977void ata_sg_clean(struct ata_queued_cmd *qc)
3978{
3979	struct ata_port *ap = qc->ap;
3980	struct scatterlist *sg = qc->__sg;
3981	int dir = qc->dma_dir;
3982	void *pad_buf = NULL;
3983
3984	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3985	WARN_ON(sg == NULL);
3986
3987	if (qc->flags & ATA_QCFLAG_SINGLE)
3988		WARN_ON(qc->n_elem > 1);
3989
3990	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3991
3992	/* if we padded the buffer out to 32-bit bound, and data
3993	 * xfer direction is from-device, we must copy from the
3994	 * pad buffer back into the supplied buffer
3995	 */
3996	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3997		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3998
3999	if (qc->flags & ATA_QCFLAG_SG) {
4000		if (qc->n_elem)
4001			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4002		/* restore last sg */
4003		sg[qc->orig_n_elem - 1].length += qc->pad_len;
4004		if (pad_buf) {
4005			struct scatterlist *psg = &qc->pad_sgent;
4006			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4007			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4008			kunmap_atomic(addr, KM_IRQ0);
4009		}
4010	} else {
4011		if (qc->n_elem)
4012			dma_unmap_single(ap->dev,
4013				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4014				dir);
4015		/* restore sg */
4016		sg->length += qc->pad_len;
4017		if (pad_buf)
4018			memcpy(qc->buf_virt + sg->length - qc->pad_len,
4019			       pad_buf, qc->pad_len);
4020	}
4021
4022	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4023	qc->__sg = NULL;
4024}
4025
4026/**
4027 *	ata_fill_sg - Fill PCI IDE PRD table
4028 *	@qc: Metadata associated with taskfile to be transferred
4029 *
4030 *	Fill PCI IDE PRD (scatter-gather) table with segments
4031 *	associated with the current disk command.
4032 *
4033 *	LOCKING:
4034 *	spin_lock_irqsave(host lock)
4035 *
4036 */
4037static void ata_fill_sg(struct ata_queued_cmd *qc)
4038{
4039	struct ata_port *ap = qc->ap;
4040	struct scatterlist *sg;
4041	unsigned int idx;
4042
4043	WARN_ON(qc->__sg == NULL);
4044	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4045
4046	idx = 0;
4047	ata_for_each_sg(sg, qc) {
4048		u32 addr, offset;
4049		u32 sg_len, len;
4050
4051		/* determine if physical DMA addr spans 64K boundary.
4052		 * Note h/w doesn't support 64-bit, so we unconditionally
4053		 * truncate dma_addr_t to u32.
4054		 */
4055		addr = (u32) sg_dma_address(sg);
4056		sg_len = sg_dma_len(sg);
4057
4058		while (sg_len) {
4059			offset = addr & 0xffff;
4060			len = sg_len;
4061			if ((offset + sg_len) > 0x10000)
4062				len = 0x10000 - offset;
4063
4064			ap->prd[idx].addr = cpu_to_le32(addr);
4065			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4066			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4067
4068			idx++;
4069			sg_len -= len;
4070			addr += len;
4071		}
4072	}
4073
4074	if (idx)
4075		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4076}
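
/*
 * Worked example for the 64K-boundary split above: a segment with
 * addr 0x0000ff00 and sg_len 0x300 crosses the boundary at 0x10000
 * and is emitted as two PRD entries, (0x0000ff00, 0x100) and
 * (0x00010000, 0x200).
 */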
4077
4078/**
4079 *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4080 *	@qc: Metadata associated with taskfile to check
4081 *
4082 *	Allow low-level driver to filter ATA PACKET commands, returning
4083 *	a status indicating whether or not it is OK to use DMA for the
4084 *	supplied PACKET command.
4085 *
4086 *	LOCKING:
4087 *	spin_lock_irqsave(host lock)
4088 *
 *	RETURNS:
 *	0 when ATAPI DMA can be used, nonzero otherwise.
4091 */
4092int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4093{
4094	struct ata_port *ap = qc->ap;
4095
4096	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4097	 * few ATAPI devices choke on such DMA requests.
4098	 */
4099	if (unlikely(qc->nbytes & 15))
4100		return 1;
4101
4102	if (ap->ops->check_atapi_dma)
4103		return ap->ops->check_atapi_dma(qc);
4104
4105	return 0;
4106}
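
/*
 * Example (sketch): an LLD whose DMA engine chokes on short ATAPI
 * transfers could supply a ->check_atapi_dma hook like the following;
 * the 512-byte limit is hypothetical:
 *
 *	static int foo_check_atapi_dma(struct ata_queued_cmd *qc)
 *	{
 *		return qc->nbytes < 512;
 *	}
 *
 * A nonzero return makes the core fall back to PIO for that command.
 */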
4107
4108/**
4109 *	ata_qc_prep - Prepare taskfile for submission
4110 *	@qc: Metadata associated with taskfile to be prepared
4111 *
4112 *	Prepare ATA taskfile for submission.
4113 *
4114 *	LOCKING:
4115 *	spin_lock_irqsave(host lock)
4116 */
4117void ata_qc_prep(struct ata_queued_cmd *qc)
4118{
4119	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4120		return;
4121
4122	ata_fill_sg(qc);
4123}
4124
4125void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4126
4127/**
4128 *	ata_sg_init_one - Associate command with memory buffer
4129 *	@qc: Command to be associated
4130 *	@buf: Memory buffer
4131 *	@buflen: Length of memory buffer, in bytes.
4132 *
4133 *	Initialize the data-related elements of queued_cmd @qc
4134 *	to point to a single memory buffer, @buf of byte length @buflen.
4135 *
4136 *	LOCKING:
4137 *	spin_lock_irqsave(host lock)
4138 */
4139
4140void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4141{
4142	qc->flags |= ATA_QCFLAG_SINGLE;
4143
4144	qc->__sg = &qc->sgent;
4145	qc->n_elem = 1;
4146	qc->orig_n_elem = 1;
4147	qc->buf_virt = buf;
4148	qc->nbytes = buflen;
4149
4150	sg_init_one(&qc->sgent, buf, buflen);
4151}
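
/*
 * Example: a command that moves one contiguous kernel buffer is set up
 * with a single call before issue (buf and buflen are caller-provided):
 *
 *	ata_sg_init_one(qc, buf, buflen);
 *
 * ata_sg_setup_one() later DMA-maps the resulting one-entry table.
 */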
4152
4153/**
4154 *	ata_sg_init - Associate command with scatter-gather table.
4155 *	@qc: Command to be associated
4156 *	@sg: Scatter-gather table.
4157 *	@n_elem: Number of elements in s/g table.
4158 *
4159 *	Initialize the data-related elements of queued_cmd @qc
4160 *	to point to a scatter-gather table @sg, containing @n_elem
4161 *	elements.
4162 *
4163 *	LOCKING:
4164 *	spin_lock_irqsave(host lock)
4165 */
4166
4167void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4168		 unsigned int n_elem)
4169{
4170	qc->flags |= ATA_QCFLAG_SG;
4171	qc->__sg = sg;
4172	qc->n_elem = n_elem;
4173	qc->orig_n_elem = n_elem;
4174}
4175
4176/**
4177 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4178 *	@qc: Command with memory buffer to be mapped.
4179 *
4180 *	DMA-map the memory buffer associated with queued_cmd @qc.
4181 *
4182 *	LOCKING:
4183 *	spin_lock_irqsave(host lock)
4184 *
4185 *	RETURNS:
4186 *	Zero on success, negative on error.
4187 */
4188
4189static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4190{
4191	struct ata_port *ap = qc->ap;
4192	int dir = qc->dma_dir;
4193	struct scatterlist *sg = qc->__sg;
4194	dma_addr_t dma_address;
4195	int trim_sg = 0;
4196
4197	/* we must lengthen transfers to end on a 32-bit boundary */
4198	qc->pad_len = sg->length & 3;
4199	if (qc->pad_len) {
4200		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4201		struct scatterlist *psg = &qc->pad_sgent;
4202
4203		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4204
4205		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4206
4207		if (qc->tf.flags & ATA_TFLAG_WRITE)
4208			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4209			       qc->pad_len);
4210
4211		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4212		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4213		/* trim sg */
4214		sg->length -= qc->pad_len;
4215		if (sg->length == 0)
4216			trim_sg = 1;
4217
4218		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4219			sg->length, qc->pad_len);
4220	}
4221
4222	if (trim_sg) {
4223		qc->n_elem--;
4224		goto skip_map;
4225	}
4226
4227	dma_address = dma_map_single(ap->dev, qc->buf_virt,
4228				     sg->length, dir);
4229	if (dma_mapping_error(dma_address)) {
4230		/* restore sg */
4231		sg->length += qc->pad_len;
4232		return -1;
4233	}
4234
4235	sg_dma_address(sg) = dma_address;
4236	sg_dma_len(sg) = sg->length;
4237
4238skip_map:
4239	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4240		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4241
4242	return 0;
4243}
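
/*
 * Worked example for the padding above: a 510-byte buffer gives
 * pad_len = 510 & 3 = 2.  The main sg entry is trimmed to 508 bytes
 * and the 4-byte pad element carries the two trailing data bytes plus
 * two zero bytes, lengthening the transfer to 512 bytes.
 */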
4244
4245/**
4246 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4247 *	@qc: Command with scatter-gather table to be mapped.
4248 *
4249 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4250 *
4251 *	LOCKING:
4252 *	spin_lock_irqsave(host lock)
4253 *
4254 *	RETURNS:
4255 *	Zero on success, negative on error.
4256 *
4257 */
4258
4259static int ata_sg_setup(struct ata_queued_cmd *qc)
4260{
4261	struct ata_port *ap = qc->ap;
4262	struct scatterlist *sg = qc->__sg;
4263	struct scatterlist *lsg = &sg[qc->n_elem - 1];
4264	int n_elem, pre_n_elem, dir, trim_sg = 0;
4265
4266	VPRINTK("ENTER, ata%u\n", ap->print_id);
4267	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4268
4269	/* we must lengthen transfers to end on a 32-bit boundary */
4270	qc->pad_len = lsg->length & 3;
4271	if (qc->pad_len) {
4272		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4273		struct scatterlist *psg = &qc->pad_sgent;
4274		unsigned int offset;
4275
4276		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4277
4278		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4279
4280		/*
4281		 * psg->page/offset are used to copy to-be-written
4282		 * data in this function or read data in ata_sg_clean.
4283		 */
4284		offset = lsg->offset + lsg->length - qc->pad_len;
4285		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4286		psg->offset = offset_in_page(offset);
4287
4288		if (qc->tf.flags & ATA_TFLAG_WRITE) {
4289			void *addr = kmap_atomic(psg->page, KM_IRQ0);
4290			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4291			kunmap_atomic(addr, KM_IRQ0);
4292		}
4293
4294		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4295		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4296		/* trim last sg */
4297		lsg->length -= qc->pad_len;
4298		if (lsg->length == 0)
4299			trim_sg = 1;
4300
4301		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4302			qc->n_elem - 1, lsg->length, qc->pad_len);
4303	}
4304
4305	pre_n_elem = qc->n_elem;
4306	if (trim_sg && pre_n_elem)
4307		pre_n_elem--;
4308
4309	if (!pre_n_elem) {
4310		n_elem = 0;
4311		goto skip_map;
4312	}
4313
4314	dir = qc->dma_dir;
4315	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4316	if (n_elem < 1) {
4317		/* restore last sg */
4318		lsg->length += qc->pad_len;
4319		return -1;
4320	}
4321
4322	DPRINTK("%d sg elements mapped\n", n_elem);
4323
4324skip_map:
4325	qc->n_elem = n_elem;
4326
4327	return 0;
4328}
4329
4330/**
4331 *	swap_buf_le16 - swap halves of 16-bit words in place
4332 *	@buf:  Buffer to swap
4333 *	@buf_words:  Number of 16-bit words in buffer.
4334 *
4335 *	Swap halves of 16-bit words if needed to convert from
4336 *	little-endian byte order to native cpu byte order, or
4337 *	vice-versa.
4338 *
4339 *	LOCKING:
4340 *	Inherited from caller.
4341 */
4342void swap_buf_le16(u16 *buf, unsigned int buf_words)
4343{
4344#ifdef __BIG_ENDIAN
4345	unsigned int i;
4346
4347	for (i = 0; i < buf_words; i++)
4348		buf[i] = le16_to_cpu(buf[i]);
4349#endif /* __BIG_ENDIAN */
4350}
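
/*
 * Example: IDENTIFY DEVICE data arrives as 256 little-endian words,
 * so readers convert it in place before parsing:
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * On little-endian hosts the loop compiles away entirely.
 */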
4351
4352/**
4353 *	ata_data_xfer - Transfer data by PIO
4354 *	@adev: device to target
4355 *	@buf: data buffer
4356 *	@buflen: buffer length
4357 *	@write_data: read/write
4358 *
4359 *	Transfer data from/to the device data register by PIO.
4360 *
4361 *	LOCKING:
4362 *	Inherited from caller.
4363 */
4364void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4365		   unsigned int buflen, int write_data)
4366{
4367	struct ata_port *ap = adev->ap;
4368	unsigned int words = buflen >> 1;
4369
4370	/* Transfer multiple of 2 bytes */
4371	if (write_data)
4372		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4373	else
4374		ioread16_rep(ap->ioaddr.data_addr, buf, words);
4375
4376	/* Transfer trailing 1 byte, if any. */
4377	if (unlikely(buflen & 0x01)) {
4378		u16 align_buf[1] = { 0 };
4379		unsigned char *trailing_buf = buf + buflen - 1;
4380
4381		if (write_data) {
4382			memcpy(align_buf, trailing_buf, 1);
4383			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4384		} else {
4385			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4386			memcpy(trailing_buf, align_buf, 1);
4387		}
4388	}
4389}
4390
4391/**
4392 *	ata_data_xfer_noirq - Transfer data by PIO
4393 *	@adev: device to target
4394 *	@buf: data buffer
4395 *	@buflen: buffer length
4396 *	@write_data: read/write
4397 *
4398 *	Transfer data from/to the device data register by PIO. Do the
4399 *	transfer with interrupts disabled.
4400 *
4401 *	LOCKING:
4402 *	Inherited from caller.
4403 */
4404void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4405			 unsigned int buflen, int write_data)
4406{
4407	unsigned long flags;
4408	local_irq_save(flags);
4409	ata_data_xfer(adev, buf, buflen, write_data);
4410	local_irq_restore(flags);
4411}
4412
4413
4414/**
4415 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command in progress
4417 *
4418 *	Transfer qc->sect_size bytes of data from/to the ATA device.
4419 *
4420 *	LOCKING:
4421 *	Inherited from caller.
4422 */
4423
4424static void ata_pio_sector(struct ata_queued_cmd *qc)
4425{
4426	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4427	struct scatterlist *sg = qc->__sg;
4428	struct ata_port *ap = qc->ap;
4429	struct page *page;
4430	unsigned int offset;
4431	unsigned char *buf;
4432
4433	if (qc->curbytes == qc->nbytes - qc->sect_size)
4434		ap->hsm_task_state = HSM_ST_LAST;
4435
4436	page = sg[qc->cursg].page;
4437	offset = sg[qc->cursg].offset + qc->cursg_ofs;
4438
4439	/* get the current page and offset */
4440	page = nth_page(page, (offset >> PAGE_SHIFT));
4441	offset %= PAGE_SIZE;
4442
4443	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4444
4445	if (PageHighMem(page)) {
4446		unsigned long flags;
4447
4448		local_irq_save(flags);
4449		buf = kmap_atomic(page, KM_IRQ0);
4450
4451		/* do the actual data transfer */
4452		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4453
4454		kunmap_atomic(buf, KM_IRQ0);
4455		local_irq_restore(flags);
4456	} else {
4457		buf = page_address(page);
4458		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4459	}
4460
4461	qc->curbytes += qc->sect_size;
4462	qc->cursg_ofs += qc->sect_size;
4463
4464	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4465		qc->cursg++;
4466		qc->cursg_ofs = 0;
4467	}
4468}
4469
4470/**
4471 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command in progress
4473 *
4474 *	Transfer one or many sectors of data from/to the
4475 *	ATA device for the DRQ request.
4476 *
4477 *	LOCKING:
4478 *	Inherited from caller.
4479 */
4480
4481static void ata_pio_sectors(struct ata_queued_cmd *qc)
4482{
4483	if (is_multi_taskfile(&qc->tf)) {
4484		/* READ/WRITE MULTIPLE */
4485		unsigned int nsect;
4486
4487		WARN_ON(qc->dev->multi_count == 0);
4488
4489		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4490			    qc->dev->multi_count);
4491		while (nsect--)
4492			ata_pio_sector(qc);
4493	} else
4494		ata_pio_sector(qc);
4495}
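
/*
 * Example: with multi_count 8, sect_size 512 and 2048 bytes left to
 * move, nsect = min(2048 / 512, 8) = 4, so four sectors are
 * transferred for this single DRQ assertion.
 */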
4496
4497/**
4498 *	atapi_send_cdb - Write CDB bytes to hardware
4499 *	@ap: Port to which ATAPI device is attached.
4500 *	@qc: Taskfile currently active
4501 *
 *	When the device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
4504 *
 *	LOCKING:
 *	Inherited from caller.
4507 */
4508
4509static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4510{
4511	/* send SCSI cdb */
4512	DPRINTK("send cdb\n");
4513	WARN_ON(qc->dev->cdb_len < 12);
4514
4515	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4516	ata_altstatus(ap); /* flush */
4517
4518	switch (qc->tf.protocol) {
4519	case ATA_PROT_ATAPI:
4520		ap->hsm_task_state = HSM_ST;
4521		break;
4522	case ATA_PROT_ATAPI_NODATA:
4523		ap->hsm_task_state = HSM_ST_LAST;
4524		break;
4525	case ATA_PROT_ATAPI_DMA:
4526		ap->hsm_task_state = HSM_ST_LAST;
4527		/* initiate bmdma */
4528		ap->ops->bmdma_start(qc);
4529		break;
4530	}
4531}
4532
4533/**
4534 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command in progress
4536 *	@bytes: number of bytes
4537 *
 *	Transfer data from/to the ATAPI device.
4539 *
4540 *	LOCKING:
4541 *	Inherited from caller.
4542 *
4543 */
4544
4545static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4546{
4547	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4548	struct scatterlist *sg = qc->__sg;
4549	struct ata_port *ap = qc->ap;
4550	struct page *page;
4551	unsigned char *buf;
4552	unsigned int offset, count;
4553
4554	if (qc->curbytes + bytes >= qc->nbytes)
4555		ap->hsm_task_state = HSM_ST_LAST;
4556
4557next_sg:
4558	if (unlikely(qc->cursg >= qc->n_elem)) {
4559		/*
4560		 * The end of qc->sg is reached and the device expects
4561		 * more data to transfer. In order not to overrun qc->sg
4562		 * and fulfill length specified in the byte count register,
4563		 *    - for read case, discard trailing data from the device
4564		 *    - for write case, padding zero data to the device
4565		 */
4566		u16 pad_buf[1] = { 0 };
4567		unsigned int words = bytes >> 1;
4568		unsigned int i;
4569
4570		if (words) /* warning if bytes > 1 */
4571			ata_dev_printk(qc->dev, KERN_WARNING,
4572				       "%u bytes trailing data\n", bytes);
4573
4574		for (i = 0; i < words; i++)
4575			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4576
4577		ap->hsm_task_state = HSM_ST_LAST;
4578		return;
4579	}
4580
4581	sg = &qc->__sg[qc->cursg];
4582
4583	page = sg->page;
4584	offset = sg->offset + qc->cursg_ofs;
4585
4586	/* get the current page and offset */
4587	page = nth_page(page, (offset >> PAGE_SHIFT));
4588	offset %= PAGE_SIZE;
4589
4590	/* don't overrun current sg */
4591	count = min(sg->length - qc->cursg_ofs, bytes);
4592
4593	/* don't cross page boundaries */
4594	count = min(count, (unsigned int)PAGE_SIZE - offset);
4595
4596	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4597
4598	if (PageHighMem(page)) {
4599		unsigned long flags;
4600
4601		local_irq_save(flags);
4602		buf = kmap_atomic(page, KM_IRQ0);
4603
4604		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4606
4607		kunmap_atomic(buf, KM_IRQ0);
4608		local_irq_restore(flags);
4609	} else {
4610		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4612	}
4613
4614	bytes -= count;
4615	qc->curbytes += count;
4616	qc->cursg_ofs += count;
4617
4618	if (qc->cursg_ofs == sg->length) {
4619		qc->cursg++;
4620		qc->cursg_ofs = 0;
4621	}
4622
4623	if (bytes)
4624		goto next_sg;
4625}
4626
4627/**
4628 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command in progress
4630 *
 *	Transfer data from/to the ATAPI device.
4632 *
4633 *	LOCKING:
4634 *	Inherited from caller.
4635 */
4636
4637static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4638{
4639	struct ata_port *ap = qc->ap;
4640	struct ata_device *dev = qc->dev;
4641	unsigned int ireason, bc_lo, bc_hi, bytes;
4642	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4643
4644	/* Abuse qc->result_tf for temp storage of intermediate TF
4645	 * here to save some kernel stack usage.
4646	 * For normal completion, qc->result_tf is not relevant. For
4647	 * error, qc->result_tf is later overwritten by ata_qc_complete().
4648	 * So, the correctness of qc->result_tf is not affected.
4649	 */
4650	ap->ops->tf_read(ap, &qc->result_tf);
4651	ireason = qc->result_tf.nsect;
4652	bc_lo = qc->result_tf.lbam;
4653	bc_hi = qc->result_tf.lbah;
4654	bytes = (bc_hi << 8) | bc_lo;
4655
4656	/* shall be cleared to zero, indicating xfer of data */
4657	if (ireason & (1 << 0))
4658		goto err_out;
4659
4660	/* make sure transfer direction matches expected */
4661	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4662	if (do_write != i_write)
4663		goto err_out;
4664
4665	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4666
4667	__atapi_pio_bytes(qc, bytes);
4668
4669	return;
4670
4671err_out:
4672	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4673	qc->err_mask |= AC_ERR_HSM;
4674	ap->hsm_task_state = HSM_ST_ERR;
4675}
4676
4677/**
4678 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4679 *	@ap: the target ata_port
 *	@qc: qc in progress
4681 *
4682 *	RETURNS:
4683 *	1 if ok in workqueue, 0 otherwise.
4684 */
4685
4686static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4687{
4688	if (qc->tf.flags & ATA_TFLAG_POLLING)
4689		return 1;
4690
4691	if (ap->hsm_task_state == HSM_ST_FIRST) {
4692		if (qc->tf.protocol == ATA_PROT_PIO &&
4693		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;
4695
4696		if (is_atapi_taskfile(&qc->tf) &&
4697		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4698			return 1;
4699	}
4700
4701	return 0;
4702}
4703
4704/**
4705 *	ata_hsm_qc_complete - finish a qc running on standard HSM
4706 *	@qc: Command to complete
4707 *	@in_wq: 1 if called from workqueue, 0 otherwise
4708 *
4709 *	Finish @qc which is running on standard HSM.
4710 *
4711 *	LOCKING:
4712 *	If @in_wq is zero, spin_lock_irqsave(host lock).
4713 *	Otherwise, none on entry and grabs host lock.
4714 */
4715static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4716{
4717	struct ata_port *ap = qc->ap;
4718	unsigned long flags;
4719
4720	if (ap->ops->error_handler) {
4721		if (in_wq) {
4722			spin_lock_irqsave(ap->lock, flags);
4723
4724			/* EH might have kicked in while host lock is
4725			 * released.
4726			 */
4727			qc = ata_qc_from_tag(ap, qc->tag);
4728			if (qc) {
4729				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4730					ap->ops->irq_on(ap);
4731					ata_qc_complete(qc);
4732				} else
4733					ata_port_freeze(ap);
4734			}
4735
4736			spin_unlock_irqrestore(ap->lock, flags);
4737		} else {
4738			if (likely(!(qc->err_mask & AC_ERR_HSM)))
4739				ata_qc_complete(qc);
4740			else
4741				ata_port_freeze(ap);
4742		}
4743	} else {
4744		if (in_wq) {
4745			spin_lock_irqsave(ap->lock, flags);
4746			ap->ops->irq_on(ap);
4747			ata_qc_complete(qc);
4748			spin_unlock_irqrestore(ap->lock, flags);
4749		} else
4750			ata_qc_complete(qc);
4751	}
4752}
4753
4754/**
4755 *	ata_hsm_move - move the HSM to the next state.
4756 *	@ap: the target ata_port
 *	@qc: qc in progress
4758 *	@status: current device status
4759 *	@in_wq: 1 if called from workqueue, 0 otherwise
4760 *
4761 *	RETURNS:
4762 *	1 when poll next status needed, 0 otherwise.
4763 */
4764int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4765		 u8 status, int in_wq)
4766{
4767	unsigned long flags = 0;
4768	int poll_next;
4769
4770	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4771
4772	/* Make sure ata_qc_issue_prot() does not throw things
4773	 * like DMA polling into the workqueue. Notice that
4774	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4775	 */
4776	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4777
4778fsm_start:
4779	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4780		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4781
4782	switch (ap->hsm_task_state) {
4783	case HSM_ST_FIRST:
4784		/* Send first data block or PACKET CDB */
4785
4786		/* If polling, we will stay in the work queue after
4787		 * sending the data. Otherwise, interrupt handler
4788		 * takes over after sending the data.
4789		 */
4790		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4791
4792		/* check device status */
4793		if (unlikely((status & ATA_DRQ) == 0)) {
4794			/* handle BSY=0, DRQ=0 as error */
4795			if (likely(status & (ATA_ERR | ATA_DF)))
4796				/* device stops HSM for abort/error */
4797				qc->err_mask |= AC_ERR_DEV;
4798			else
4799				/* HSM violation. Let EH handle this */
4800				qc->err_mask |= AC_ERR_HSM;
4801
4802			ap->hsm_task_state = HSM_ST_ERR;
4803			goto fsm_start;
4804		}
4805
4806		/* Device should not ask for data transfer (DRQ=1)
4807		 * when it finds something wrong.
4808		 * We ignore DRQ here and stop the HSM by
4809		 * changing hsm_task_state to HSM_ST_ERR and
4810		 * let the EH abort the command or reset the device.
4811		 */
4812		if (unlikely(status & (ATA_ERR | ATA_DF))) {
4813			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4814					"error, dev_stat 0x%X\n", status);
4815			qc->err_mask |= AC_ERR_HSM;
4816			ap->hsm_task_state = HSM_ST_ERR;
4817			goto fsm_start;
4818		}
4819
4820		/* Send the CDB (atapi) or the first data block (ata pio out).
4821		 * During the state transition, interrupt handler shouldn't
4822		 * be invoked before the data transfer is complete and
4823		 * hsm_task_state is changed. Hence, the following locking.
4824		 */
4825		if (in_wq)
4826			spin_lock_irqsave(ap->lock, flags);
4827
4828		if (qc->tf.protocol == ATA_PROT_PIO) {
4829			/* PIO data out protocol.
4830			 * send first data block.
4831			 */
4832
4833			/* ata_pio_sectors() might change the state
4834			 * to HSM_ST_LAST. so, the state is changed here
4835			 * before ata_pio_sectors().
4836			 */
4837			ap->hsm_task_state = HSM_ST;
4838			ata_pio_sectors(qc);
4839			ata_altstatus(ap); /* flush */
4840		} else
4841			/* send CDB */
4842			atapi_send_cdb(ap, qc);
4843
4844		if (in_wq)
4845			spin_unlock_irqrestore(ap->lock, flags);
4846
4847		/* if polling, ata_pio_task() handles the rest.
4848		 * otherwise, interrupt handler takes over from here.
4849		 */
4850		break;
4851
4852	case HSM_ST:
4853		/* complete command or read/write the data register */
4854		if (qc->tf.protocol == ATA_PROT_ATAPI) {
4855			/* ATAPI PIO protocol */
4856			if ((status & ATA_DRQ) == 0) {
4857				/* No more data to transfer or device error.
4858				 * Device error will be tagged in HSM_ST_LAST.
4859				 */
4860				ap->hsm_task_state = HSM_ST_LAST;
4861				goto fsm_start;
4862			}
4863
4864			/* Device should not ask for data transfer (DRQ=1)
4865			 * when it finds something wrong.
4866			 * We ignore DRQ here and stop the HSM by
4867			 * changing hsm_task_state to HSM_ST_ERR and
4868			 * let the EH abort the command or reset the device.
4869			 */
4870			if (unlikely(status & (ATA_ERR | ATA_DF))) {
4871				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4872						"device error, dev_stat 0x%X\n",
4873						status);
4874				qc->err_mask |= AC_ERR_HSM;
4875				ap->hsm_task_state = HSM_ST_ERR;
4876				goto fsm_start;
4877			}
4878
4879			atapi_pio_bytes(qc);
4880
4881			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4882				/* bad ireason reported by device */
4883				goto fsm_start;
4884
4885		} else {
4886			/* ATA PIO protocol */
4887			if (unlikely((status & ATA_DRQ) == 0)) {
4888				/* handle BSY=0, DRQ=0 as error */
4889				if (likely(status & (ATA_ERR | ATA_DF)))
4890					/* device stops HSM for abort/error */
4891					qc->err_mask |= AC_ERR_DEV;
4892				else
4893					/* HSM violation. Let EH handle this.
4894					 * Phantom devices also trigger this
					 * condition, so mark the NODEV hint.
4896					 */
4897					qc->err_mask |= AC_ERR_HSM |
4898							AC_ERR_NODEV_HINT;
4899
4900				ap->hsm_task_state = HSM_ST_ERR;
4901				goto fsm_start;
4902			}
4903
4904			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
4906			 * We respect DRQ here and transfer one
4907			 * block of junk data before changing the
4908			 * hsm_task_state to HSM_ST_ERR.
4909			 *
4910			 * For PIO writes, ERR=1 DRQ=1 doesn't make
4911			 * sense since the data block has been
4912			 * transferred to the device.
4913			 */
4914			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
4916				qc->err_mask |= AC_ERR_DEV;
4917
4918				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4919					ata_pio_sectors(qc);
4920					ata_altstatus(ap);
4921					status = ata_wait_idle(ap);
4922				}
4923
4924				if (status & (ATA_BUSY | ATA_DRQ))
4925					qc->err_mask |= AC_ERR_HSM;
4926
4927				/* ata_pio_sectors() might change the
4928				 * state to HSM_ST_LAST. so, the state
4929				 * is changed after ata_pio_sectors().
4930				 */
4931				ap->hsm_task_state = HSM_ST_ERR;
4932				goto fsm_start;
4933			}
4934
4935			ata_pio_sectors(qc);
4936
4937			if (ap->hsm_task_state == HSM_ST_LAST &&
4938			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4939				/* all data read */
4940				ata_altstatus(ap);
4941				status = ata_wait_idle(ap);
4942				goto fsm_start;
4943			}
4944		}
4945
4946		ata_altstatus(ap); /* flush */
4947		poll_next = 1;
4948		break;
4949
4950	case HSM_ST_LAST:
4951		if (unlikely(!ata_ok(status))) {
4952			qc->err_mask |= __ac_err_mask(status);
4953			ap->hsm_task_state = HSM_ST_ERR;
4954			goto fsm_start;
4955		}
4956
4957		/* no more data to transfer */
4958		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4959			ap->print_id, qc->dev->devno, status);
4960
4961		WARN_ON(qc->err_mask);
4962
4963		ap->hsm_task_state = HSM_ST_IDLE;
4964
4965		/* complete taskfile transaction */
4966		ata_hsm_qc_complete(qc, in_wq);
4967
4968		poll_next = 0;
4969		break;
4970
4971	case HSM_ST_ERR:
4972		/* make sure qc->err_mask is available to
4973		 * know what's wrong and recover
4974		 */
4975		WARN_ON(qc->err_mask == 0);
4976
4977		ap->hsm_task_state = HSM_ST_IDLE;
4978
4979		/* complete taskfile transaction */
4980		ata_hsm_qc_complete(qc, in_wq);
4981
4982		poll_next = 0;
4983		break;
4984	default:
4985		poll_next = 0;
4986		BUG();
4987	}
4988
4989	return poll_next;
4990}
4991
4992static void ata_pio_task(struct work_struct *work)
4993{
4994	struct ata_port *ap =
4995		container_of(work, struct ata_port, port_task.work);
4996	struct ata_queued_cmd *qc = ap->port_task_data;
4997	u8 status;
4998	int poll_next;
4999
5000fsm_start:
5001	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5002
5003	/*
5004	 * This is purely heuristic.  This is a fast path.
5005	 * Sometimes when we enter, BSY will be cleared in
5006	 * a chk-status or two.  If not, the drive is probably seeking
5007	 * or something.  Snooze for a couple msecs, then
5008	 * chk-status again.  If still busy, queue delayed work.
5009	 */
5010	status = ata_busy_wait(ap, ATA_BUSY, 5);
5011	if (status & ATA_BUSY) {
5012		msleep(2);
5013		status = ata_busy_wait(ap, ATA_BUSY, 10);
5014		if (status & ATA_BUSY) {
5015			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5016			return;
5017		}
5018	}
5019
5020	/* move the HSM */
5021	poll_next = ata_hsm_move(ap, qc, status, 1);
5022
5023	/* another command or interrupt handler
5024	 * may be running at this point.
5025	 */
5026	if (poll_next)
5027		goto fsm_start;
5028}
5029
5030/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port from which to request an available command structure
5034 *
5035 *	LOCKING:
5036 *	None.
5037 */
5038
5039static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5040{
5041	struct ata_queued_cmd *qc = NULL;
5042	unsigned int i;
5043
5044	/* no command while frozen */
5045	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5046		return NULL;
5047
	/* the last tag is reserved for the internal command. */
5049	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5050		if (!test_and_set_bit(i, &ap->qc_allocated)) {
5051			qc = __ata_qc_from_tag(ap, i);
5052			break;
5053		}
5054
5055	if (qc)
5056		qc->tag = i;
5057
5058	return qc;
5059}
5060
5061/**
5062 *	ata_qc_new_init - Request an available ATA command, and initialize it
5063 *	@dev: Device from whom we request an available command structure
5064 *
5065 *	LOCKING:
5066 *	None.
5067 */
5068
5069struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5070{
5071	struct ata_port *ap = dev->ap;
5072	struct ata_queued_cmd *qc;
5073
5074	qc = ata_qc_new(ap);
5075	if (qc) {
5076		qc->scsicmd = NULL;
5077		qc->ap = ap;
5078		qc->dev = dev;
5079
5080		ata_qc_reinit(qc);
5081	}
5082
5083	return qc;
5084}
5085
5086/**
5087 *	ata_qc_free - free unused ata_queued_cmd
5088 *	@qc: Command to complete
5089 *
5090 *	Designed to free unused ata_queued_cmd object
5091 *	in case something prevents using it.
5092 *
5093 *	LOCKING:
5094 *	spin_lock_irqsave(host lock)
5095 */
5096void ata_qc_free(struct ata_queued_cmd *qc)
5097{
5098	struct ata_port *ap = qc->ap;
5099	unsigned int tag;
5100
5101	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5102
5103	qc->flags = 0;
5104	tag = qc->tag;
5105	if (likely(ata_tag_valid(tag))) {
5106		qc->tag = ATA_TAG_POISON;
5107		clear_bit(tag, &ap->qc_allocated);
5108	}
5109}
5110
5111void __ata_qc_complete(struct ata_queued_cmd *qc)
5112{
5113	struct ata_port *ap = qc->ap;
5114
5115	WARN_ON(qc == NULL);	/* ata_qc_from_tag _might_ return NULL */
5116	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5117
5118	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5119		ata_sg_clean(qc);
5120
5121	/* command should be marked inactive atomically with qc completion */
5122	if (qc->tf.protocol == ATA_PROT_NCQ)
5123		ap->sactive &= ~(1 << qc->tag);
5124	else
5125		ap->active_tag = ATA_TAG_POISON;
5126
5127	/* atapi: mark qc as inactive to prevent the interrupt handler
5128	 * from completing the command twice later, before the error handler
5129	 * is called. (when rc != 0 and atapi request sense is needed)
5130	 */
5131	qc->flags &= ~ATA_QCFLAG_ACTIVE;
5132	ap->qc_active &= ~(1 << qc->tag);
5133
5134	/* call completion callback */
5135	qc->complete_fn(qc);
5136}
5137
5138static void fill_result_tf(struct ata_queued_cmd *qc)
5139{
5140	struct ata_port *ap = qc->ap;
5141
5142	qc->result_tf.flags = qc->tf.flags;
5143	ap->ops->tf_read(ap, &qc->result_tf);
5144}
5145
5146/**
5147 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
5150 *
5151 *	Indicate to the mid and upper layers that an ATA
5152 *	command has completed, with either an ok or not-ok status.
5153 *
5154 *	LOCKING:
5155 *	spin_lock_irqsave(host lock)
5156 */
5157void ata_qc_complete(struct ata_queued_cmd *qc)
5158{
5159	struct ata_port *ap = qc->ap;
5160
5161	if (ap->ops->error_handler) {
5162		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5163
5164		if (unlikely(qc->err_mask))
5165			qc->flags |= ATA_QCFLAG_FAILED;
5166
5167		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5168			if (!ata_tag_internal(qc->tag)) {
5169				/* always fill result TF for failed qc */
5170				fill_result_tf(qc);
5171				ata_qc_schedule_eh(qc);
5172				return;
5173			}
5174		}
5175
5176		/* read result TF if requested */
5177		if (qc->flags & ATA_QCFLAG_RESULT_TF)
5178			fill_result_tf(qc);
5179
5180		__ata_qc_complete(qc);
5181	} else {
5182		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5183			return;
5184
5185		/* read result TF if failed or requested */
5186		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5187			fill_result_tf(qc);
5188
5189		__ata_qc_complete(qc);
5190	}
5191}
5192
5193/**
5194 *	ata_qc_complete_multiple - Complete multiple qcs successfully
5195 *	@ap: port in question
5196 *	@qc_active: new qc_active mask
5197 *	@finish_qc: LLDD callback invoked before completing a qc
5198 *
 *	Complete in-flight commands.  This function is meant to be
 *	called from the low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active are compared
 *	and commands are completed accordingly.
5203 *
5204 *	LOCKING:
5205 *	spin_lock_irqsave(host lock)
5206 *
5207 *	RETURNS:
5208 *	Number of completed commands on success, -errno otherwise.
5209 */
5210int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5211			     void (*finish_qc)(struct ata_queued_cmd *))
5212{
5213	int nr_done = 0;
5214	u32 done_mask;
5215	int i;
5216
5217	done_mask = ap->qc_active ^ qc_active;
5218
5219	if (unlikely(done_mask & qc_active)) {
5220		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5221				"(%08x->%08x)\n", ap->qc_active, qc_active);
5222		return -EINVAL;
5223	}
5224
5225	for (i = 0; i < ATA_MAX_QUEUE; i++) {
5226		struct ata_queued_cmd *qc;
5227
5228		if (!(done_mask & (1 << i)))
5229			continue;
5230
5231		if ((qc = ata_qc_from_tag(ap, i))) {
5232			if (finish_qc)
5233				finish_qc(qc);
5234			ata_qc_complete(qc);
5235			nr_done++;
5236		}
5237	}
5238
5239	return nr_done;
5240}
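
/*
 * Example (sketch): an NCQ-capable LLD's interrupt handler reads the
 * hardware's view of the active tags and hands it over.  The register
 * name and mmio base below are hypothetical:
 *
 *	u32 qc_active = readl(port_mmio + FOO_SACTIVE);
 *	ata_qc_complete_multiple(ap, qc_active, NULL);
 */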
5241
5242static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5243{
5244	struct ata_port *ap = qc->ap;
5245
5246	switch (qc->tf.protocol) {
5247	case ATA_PROT_NCQ:
5248	case ATA_PROT_DMA:
5249	case ATA_PROT_ATAPI_DMA:
5250		return 1;
5251
5252	case ATA_PROT_ATAPI:
5253	case ATA_PROT_PIO:
5254		if (ap->flags & ATA_FLAG_PIO_DMA)
5255			return 1;
5256
5257		/* fall through */
5258
5259	default:
5260		return 0;
5261	}
5262
5263	/* never reached */
5264}
5265
5266/**
5267 *	ata_qc_issue - issue taskfile to device
5268 *	@qc: command to issue to device
5269 *
 *	Prepare an ATA command for submission to the device.
5271 *	This includes mapping the data into a DMA-able
5272 *	area, filling in the S/G table, and finally
5273 *	writing the taskfile to hardware, starting the command.
5274 *
5275 *	LOCKING:
5276 *	spin_lock_irqsave(host lock)
5277 */
5278void ata_qc_issue(struct ata_queued_cmd *qc)
5279{
5280	struct ata_port *ap = qc->ap;
5281
5282	/* Make sure only one non-NCQ command is outstanding.  The
5283	 * check is skipped for old EH because it reuses active qc to
5284	 * request ATAPI sense.
5285	 */
5286	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5287
5288	if (qc->tf.protocol == ATA_PROT_NCQ) {
5289		WARN_ON(ap->sactive & (1 << qc->tag));
5290		ap->sactive |= 1 << qc->tag;
5291	} else {
5292		WARN_ON(ap->sactive);
5293		ap->active_tag = qc->tag;
5294	}
5295
5296	qc->flags |= ATA_QCFLAG_ACTIVE;
5297	ap->qc_active |= 1 << qc->tag;
5298
5299	if (ata_should_dma_map(qc)) {
5300		if (qc->flags & ATA_QCFLAG_SG) {
5301			if (ata_sg_setup(qc))
5302				goto sg_err;
5303		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
5304			if (ata_sg_setup_one(qc))
5305				goto sg_err;
5306		}
5307	} else {
5308		qc->flags &= ~ATA_QCFLAG_DMAMAP;
5309	}
5310
5311	ap->ops->qc_prep(qc);
5312
5313	qc->err_mask |= ap->ops->qc_issue(qc);
5314	if (unlikely(qc->err_mask))
5315		goto err;
5316	return;
5317
5318sg_err:
5319	qc->flags &= ~ATA_QCFLAG_DMAMAP;
5320	qc->err_mask |= AC_ERR_SYSTEM;
5321err:
5322	ata_qc_complete(qc);
5323}
5324
5325/**
5326 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5327 *	@qc: command to issue to device
5328 *
5329 *	Using various libata functions and hooks, this function
5330 *	starts an ATA command.  ATA commands are grouped into
5331 *	classes called "protocols", and issuing each type of protocol
5332 *	is slightly different.
5333 *
5334 *	May be used as the qc_issue() entry in ata_port_operations.
5335 *
5336 *	LOCKING:
5337 *	spin_lock_irqsave(host lock)
5338 *
5339 *	RETURNS:
5340 *	Zero on success, AC_ERR_* mask on failure
5341 */
5342
5343unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5344{
5345	struct ata_port *ap = qc->ap;
5346
5347	/* Use polling pio if the LLD doesn't handle
5348	 * interrupt driven pio and atapi CDB interrupt.
5349	 */
5350	if (ap->flags & ATA_FLAG_PIO_POLLING) {
5351		switch (qc->tf.protocol) {
5352		case ATA_PROT_PIO:
5353		case ATA_PROT_NODATA:
5354		case ATA_PROT_ATAPI:
5355		case ATA_PROT_ATAPI_NODATA:
5356			qc->tf.flags |= ATA_TFLAG_POLLING;
5357			break;
5358		case ATA_PROT_ATAPI_DMA:
5359			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5360				/* see ata_dma_blacklisted() */
5361				BUG();
5362			break;
5363		default:
5364			break;
5365		}
5366	}
5367
5368	/* select the device */
5369	ata_dev_select(ap, qc->dev->devno, 1, 0);
5370
5371	/* start the command */
5372	switch (qc->tf.protocol) {
5373	case ATA_PROT_NODATA:
5374		if (qc->tf.flags & ATA_TFLAG_POLLING)
5375			ata_qc_set_polling(qc);
5376
5377		ata_tf_to_host(ap, &qc->tf);
5378		ap->hsm_task_state = HSM_ST_LAST;
5379
5380		if (qc->tf.flags & ATA_TFLAG_POLLING)
5381			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5382
5383		break;
5384
5385	case ATA_PROT_DMA:
5386		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5387
5388		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5389		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5390		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
5391		ap->hsm_task_state = HSM_ST_LAST;
5392		break;
5393
5394	case ATA_PROT_PIO:
5395		if (qc->tf.flags & ATA_TFLAG_POLLING)
5396			ata_qc_set_polling(qc);
5397
5398		ata_tf_to_host(ap, &qc->tf);
5399
5400		if (qc->tf.flags & ATA_TFLAG_WRITE) {
5401			/* PIO data out protocol */
5402			ap->hsm_task_state = HSM_ST_FIRST;
5403			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5404
5405			/* always send first data block using
5406			 * the ata_pio_task() codepath.
5407			 */
5408		} else {
5409			/* PIO data in protocol */
5410			ap->hsm_task_state = HSM_ST;
5411
5412			if (qc->tf.flags & ATA_TFLAG_POLLING)
5413				ata_port_queue_task(ap, ata_pio_task, qc, 0);
5414
5415			/* if polling, ata_pio_task() handles the rest.
5416			 * otherwise, interrupt handler takes over from here.
5417			 */
5418		}
5419
5420		break;
5421
5422	case ATA_PROT_ATAPI:
5423	case ATA_PROT_ATAPI_NODATA:
5424		if (qc->tf.flags & ATA_TFLAG_POLLING)
5425			ata_qc_set_polling(qc);
5426
5427		ata_tf_to_host(ap, &qc->tf);
5428
5429		ap->hsm_task_state = HSM_ST_FIRST;
5430
5431		/* send cdb by polling if no cdb interrupt */
5432		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5433		    (qc->tf.flags & ATA_TFLAG_POLLING))
5434			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5435		break;
5436
5437	case ATA_PROT_ATAPI_DMA:
5438		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5439
5440		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
5441		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
5442		ap->hsm_task_state = HSM_ST_FIRST;
5443
5444		/* send cdb by polling if no cdb interrupt */
5445		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5446			ata_port_queue_task(ap, ata_pio_task, qc, 0);
5447		break;
5448
5449	default:
5450		WARN_ON(1);
5451		return AC_ERR_SYSTEM;
5452	}
5453
5454	return 0;
5455}
5456
5457/**
5458 *	ata_host_intr - Handle host interrupt for given (port, task)
5459 *	@ap: Port on which interrupt arrived (possibly...)
5460 *	@qc: Taskfile currently active in engine
5461 *
5462 *	Handle host interrupt for given queued command.  Currently,
5463 *	only DMA interrupts are handled.  All other commands are
5464 *	handled via polling with interrupts disabled (nIEN bit).
5465 *
5466 *	LOCKING:
5467 *	spin_lock_irqsave(host lock)
5468 *
5469 *	RETURNS:
5470 *	One if interrupt was handled, zero if not (shared irq).
5471 */
5472
inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
5475{
5476	struct ata_eh_info *ehi = &ap->eh_info;
5477	u8 status, host_stat = 0;
5478
5479	VPRINTK("ata%u: protocol %d task_state %d\n",
5480		ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5481
5482	/* Check whether we are expecting interrupt in this state */
5483	switch (ap->hsm_task_state) {
5484	case HSM_ST_FIRST:
5485		/* Some pre-ATAPI-4 devices assert INTRQ
5486		 * at this state when ready to receive CDB.
5487		 */
5488
		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here,
		 * since the flag is turned on only for ATAPI devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
5492		 */
5493		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5494			goto idle_irq;
5495		break;
5496	case HSM_ST_LAST:
5497		if (qc->tf.protocol == ATA_PROT_DMA ||
5498		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5499			/* check status of DMA engine */
5500			host_stat = ap->ops->bmdma_status(ap);
5501			VPRINTK("ata%u: host_stat 0x%X\n",
5502				ap->print_id, host_stat);
5503
5504			/* if it's not our irq... */
5505			if (!(host_stat & ATA_DMA_INTR))
5506				goto idle_irq;
5507
5508			/* before we do anything else, clear DMA-Start bit */
5509			ap->ops->bmdma_stop(qc);
5510
5511			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
5513				qc->err_mask |= AC_ERR_HOST_BUS;
5514				ap->hsm_task_state = HSM_ST_ERR;
5515			}
5516		}
5517		break;
5518	case HSM_ST:
5519		break;
5520	default:
5521		goto idle_irq;
5522	}
5523
5524	/* check altstatus */
5525	status = ata_altstatus(ap);
5526	if (status & ATA_BUSY)
5527		goto idle_irq;
5528
5529	/* check main status, clearing INTRQ */
5530	status = ata_chk_status(ap);
5531	if (unlikely(status & ATA_BUSY))
5532		goto idle_irq;
5533
5534	/* ack bmdma irq events */
5535	ap->ops->irq_clear(ap);
5536
5537	ata_hsm_move(ap, qc, status, 0);
5538
5539	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5540				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5541		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5542
5543	return 1;	/* irq handled */
5544
5545idle_irq:
5546	ap->stats.idle_irq++;
5547
5548#ifdef ATA_IRQ_TRAP
5549	if ((ap->stats.idle_irq % 1000) == 0) {
5550		ap->ops->irq_ack(ap, 0); /* debug trap */
5551		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5552		return 1;
5553	}
5554#endif
5555	return 0;	/* irq not handled */
5556}
5557
5558/**
5559 *	ata_interrupt - Default ATA host interrupt handler
5560 *	@irq: irq line (unused)
5561 *	@dev_instance: pointer to our ata_host information structure
5562 *
5563 *	Default interrupt handler for PCI IDE devices.  Calls
5564 *	ata_host_intr() for each port that is not disabled.
5565 *
5566 *	LOCKING:
5567 *	Obtains host lock during operation.
5568 *
5569 *	RETURNS:
5570 *	IRQ_NONE or IRQ_HANDLED.
5571 */
5572
irqreturn_t ata_interrupt(int irq, void *dev_instance)
5574{
5575	struct ata_host *host = dev_instance;
5576	unsigned int i;
5577	unsigned int handled = 0;
5578	unsigned long flags;
5579
5580	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5581	spin_lock_irqsave(&host->lock, flags);
5582
5583	for (i = 0; i < host->n_ports; i++) {
5584		struct ata_port *ap;
5585
5586		ap = host->ports[i];
5587		if (ap &&
5588		    !(ap->flags & ATA_FLAG_DISABLED)) {
5589			struct ata_queued_cmd *qc;
5590
5591			qc = ata_qc_from_tag(ap, ap->active_tag);
5592			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5593			    (qc->flags & ATA_QCFLAG_ACTIVE))
5594				handled |= ata_host_intr(ap, qc);
5595		}
5596	}
5597
5598	spin_unlock_irqrestore(&host->lock, flags);
5599
5600	return IRQ_RETVAL(handled);
5601}
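
/*
 * Example (sketch): a PCI LLD typically installs this handler at probe
 * time with the host as the shared-irq cookie (DRV_NAME is the LLD's
 * own driver name):
 *
 *	rc = devm_request_irq(&pdev->dev, pdev->irq, ata_interrupt,
 *			      IRQF_SHARED, DRV_NAME, host);
 */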
5602
5603/**
5604 *	sata_scr_valid - test whether SCRs are accessible
5605 *	@ap: ATA port to test SCR accessibility for
5606 *
5607 *	Test whether SCRs are accessible for @ap.
5608 *
5609 *	LOCKING:
5610 *	None.
5611 *
5612 *	RETURNS:
5613 *	1 if SCRs are accessible, 0 otherwise.
5614 */
5615int sata_scr_valid(struct ata_port *ap)
5616{
5617	return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5618}
5619
5620/**
5621 *	sata_scr_read - read SCR register of the specified port
5622 *	@ap: ATA port to read SCR for
5623 *	@reg: SCR to read
5624 *	@val: Place to store read value
5625 *
5626 *	Read SCR register @reg of @ap into *@val.  This function is
5627 *	guaranteed to succeed if the cable type of the port is SATA
5628 *	and the port implements ->scr_read.
5629 *
5630 *	LOCKING:
5631 *	None.
5632 *
5633 *	RETURNS:
5634 *	0 on success, negative errno on failure.
5635 */
5636int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5637{
5638	if (sata_scr_valid(ap)) {
5639		*val = ap->ops->scr_read(ap, reg);
5640		return 0;
5641	}
5642	return -EOPNOTSUPP;
5643}
5644
5645/**
5646 *	sata_scr_write - write SCR register of the specified port
5647 *	@ap: ATA port to write SCR for
5648 *	@reg: SCR to write
5649 *	@val: value to write
5650 *
5651 *	Write @val to SCR register @reg of @ap.  This function is
5652 *	guaranteed to succeed if the cable type of the port is SATA
5653 *	and the port implements ->scr_read.
5654 *
5655 *	LOCKING:
5656 *	None.
5657 *
5658 *	RETURNS:
5659 *	0 on success, negative errno on failure.
5660 */
5661int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5662{
5663	if (sata_scr_valid(ap)) {
5664		ap->ops->scr_write(ap, reg, val);
5665		return 0;
5666	}
5667	return -EOPNOTSUPP;
5668}
5669
5670/**
5671 *	sata_scr_write_flush - write SCR register of the specified port and flush
5672 *	@ap: ATA port to write SCR for
5673 *	@reg: SCR to write
5674 *	@val: value to write
5675 *
5676 *	This function is identical to sata_scr_write() except that this
5677 *	function performs flush after writing to the register.
5678 *
5679 *	LOCKING:
5680 *	None.
5681 *
5682 *	RETURNS:
5683 *	0 on success, negative errno on failure.
5684 */
5685int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5686{
5687	if (sata_scr_valid(ap)) {
5688		ap->ops->scr_write(ap, reg, val);
5689		ap->ops->scr_read(ap, reg);
5690		return 0;
5691	}
5692	return -EOPNOTSUPP;
5693}
5694
5695/**
5696 *	ata_port_online - test whether the given port is online
5697 *	@ap: ATA port to test
5698 *
5699 *	Test whether @ap is online.  Note that this function returns 0
5700 *	if online status of @ap cannot be obtained, so
5701 *	ata_port_online(ap) != !ata_port_offline(ap).
5702 *
5703 *	LOCKING:
5704 *	None.
5705 *
5706 *	RETURNS:
5707 *	1 if the port online status is available and online.
5708 */
5709int ata_port_online(struct ata_port *ap)
5710{
5711	u32 sstatus;
5712
5713	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5714		return 1;
5715	return 0;
5716}
5717
5718/**
5719 *	ata_port_offline - test whether the given port is offline
5720 *	@ap: ATA port to test
5721 *
5722 *	Test whether @ap is offline.  Note that this function returns
5723 *	0 if offline status of @ap cannot be obtained, so
5724 *	ata_port_online(ap) != !ata_port_offline(ap).
5725 *
5726 *	LOCKING:
5727 *	None.
5728 *
5729 *	RETURNS:
5730 *	1 if the port offline status is available and offline.
5731 */
5732int ata_port_offline(struct ata_port *ap)
5733{
5734	u32 sstatus;
5735
5736	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5737		return 1;
5738	return 0;
5739}
5740
5741int ata_flush_cache(struct ata_device *dev)
5742{
5743	unsigned int err_mask;
5744	u8 cmd;
5745
5746	if (!ata_try_flush_cache(dev))
5747		return 0;
5748
5749	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5750		cmd = ATA_CMD_FLUSH_EXT;
5751	else
5752		cmd = ATA_CMD_FLUSH;
5753
5754	err_mask = ata_do_simple_cmd(dev, cmd);
5755	if (err_mask) {
5756		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5757		return -EIO;
5758	}
5759
5760	return 0;
5761}
5762
5763#ifdef CONFIG_PM
5764static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5765			       unsigned int action, unsigned int ehi_flags,
5766			       int wait)
5767{
5768	unsigned long flags;
5769	int i, rc;
5770
5771	for (i = 0; i < host->n_ports; i++) {
5772		struct ata_port *ap = host->ports[i];
5773
5774		/* Previous resume operation might still be in
5775		 * progress.  Wait for PM_PENDING to clear.
5776		 */
5777		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5778			ata_port_wait_eh(ap);
5779			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5780		}
5781
5782		/* request PM ops to EH */
5783		spin_lock_irqsave(ap->lock, flags);
5784
5785		ap->pm_mesg = mesg;
5786		if (wait) {
5787			rc = 0;
5788			ap->pm_result = &rc;
5789		}
5790
5791		ap->pflags |= ATA_PFLAG_PM_PENDING;
5792		ap->eh_info.action |= action;
5793		ap->eh_info.flags |= ehi_flags;
5794
5795		ata_port_schedule_eh(ap);
5796
5797		spin_unlock_irqrestore(ap->lock, flags);
5798
5799		/* wait and check result */
5800		if (wait) {
5801			ata_port_wait_eh(ap);
5802			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5803			if (rc)
5804				return rc;
5805		}
5806	}
5807
5808	return 0;
5809}
5810
5811/**
5812 *	ata_host_suspend - suspend host
5813 *	@host: host to suspend
5814 *	@mesg: PM message
5815 *
5816 *	Suspend @host.  Actual operation is performed by EH.  This
5817 *	function requests EH to perform PM operations and waits for EH
5818 *	to finish.
5819 *
5820 *	LOCKING:
5821 *	Kernel thread context (may sleep).
5822 *
5823 *	RETURNS:
5824 *	0 on success, -errno on failure.
5825 */
5826int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5827{
5828	int rc;
5829
5830	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5831	if (rc == 0)
5832		host->dev->power.power_state = mesg;
5833	return rc;
5834}
5835
5836/**
5837 *	ata_host_resume - resume host
5838 *	@host: host to resume
5839 *
5840 *	Resume @host.  Actual operation is performed by EH.  This
5841 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel.
5843 *
5844 *	LOCKING:
5845 *	Kernel thread context (may sleep).
5846 */
5847void ata_host_resume(struct ata_host *host)
5848{
5849	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5850			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5851	host->dev->power.power_state = PMSG_ON;
5852}
5853#endif
5854
5855/**
5856 *	ata_port_start - Set port up for dma.
5857 *	@ap: Port to initialize
5858 *
5859 *	Called just after data structures for each port are
5860 *	initialized.  Allocates space for PRD table.
5861 *
5862 *	May be used as the port_start() entry in ata_port_operations.
5863 *
5864 *	LOCKING:
5865 *	Inherited from caller.
5866 */
5867int ata_port_start(struct ata_port *ap)
5868{
5869	struct device *dev = ap->dev;
5870	int rc;
5871
5872	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5873				      GFP_KERNEL);
5874	if (!ap->prd)
5875		return -ENOMEM;
5876
5877	rc = ata_pad_alloc(ap, dev);
5878	if (rc)
5879		return rc;
5880
5881	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5882		(unsigned long long)ap->prd_dma);
5883	return 0;
5884}
5885
5886/**
5887 *	ata_dev_init - Initialize an ata_device structure
5888 *	@dev: Device structure to initialize
5889 *
5890 *	Initialize @dev in preparation for probing.
5891 *
5892 *	LOCKING:
5893 *	Inherited from caller.
5894 */
5895void ata_dev_init(struct ata_device *dev)
5896{
5897	struct ata_port *ap = dev->ap;
5898	unsigned long flags;
5899
5900	/* SATA spd limit is bound to the first device */
5901	ap->sata_spd_limit = ap->hw_sata_spd_limit;
5902
5903	/* High bits of dev->flags are used to record warm plug
5904	 * requests which occur asynchronously.  Synchronize using
5905	 * host lock.
5906	 */
5907	spin_lock_irqsave(ap->lock, flags);
5908	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5909	spin_unlock_irqrestore(ap->lock, flags);
5910
5911	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5912	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5913	dev->pio_mask = UINT_MAX;
5914	dev->mwdma_mask = UINT_MAX;
5915	dev->udma_mask = UINT_MAX;
5916}
5917
5918/**
5919 *	ata_port_alloc - allocate and initialize basic ATA port resources
5920 *	@host: ATA host this allocated port belongs to
5921 *
5922 *	Allocate and initialize basic ATA port resources.
5923 *
5924 *	RETURNS:
 *	Allocated ATA port on success, NULL on failure.
5926 *
5927 *	LOCKING:
5928 *	Inherited from calling layer (may sleep).
5929 */
5930struct ata_port *ata_port_alloc(struct ata_host *host)
5931{
5932	struct ata_port *ap;
5933	unsigned int i;
5934
5935	DPRINTK("ENTER\n");
5936
5937	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5938	if (!ap)
5939		return NULL;
5940
5941	ap->pflags |= ATA_PFLAG_INITIALIZING;
5942	ap->lock = &host->lock;
5943	ap->flags = ATA_FLAG_DISABLED;
5944	ap->print_id = -1;
5945	ap->ctl = ATA_DEVCTL_OBS;
5946	ap->host = host;
5947	ap->dev = host->dev;
5948
5949	ap->hw_sata_spd_limit = UINT_MAX;
5950	ap->active_tag = ATA_TAG_POISON;
5951	ap->last_ctl = 0xFF;
5952
5953#if defined(ATA_VERBOSE_DEBUG)
5954	/* turn on all debugging levels */
5955	ap->msg_enable = 0x00FF;
5956#elif defined(ATA_DEBUG)
5957	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5958#else
5959	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5960#endif
5961
5962	INIT_DELAYED_WORK(&ap->port_task, NULL);
5963	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5964	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5965	INIT_LIST_HEAD(&ap->eh_done_q);
5966	init_waitqueue_head(&ap->eh_wait_q);
5967
5968	ap->cbl = ATA_CBL_NONE;
5969
5970	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5971		struct ata_device *dev = &ap->device[i];
5972		dev->ap = ap;
5973		dev->devno = i;
5974		ata_dev_init(dev);
5975	}
5976
5977#ifdef ATA_IRQ_TRAP
5978	ap->stats.unhandled_irq = 1;
5979	ap->stats.idle_irq = 1;
5980#endif
5981	return ap;
5982}

static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}

/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  An LLD calls
 *	this function to allocate a host, fully initializes it, and then
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
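
/*
 * Illustrative sketch, not compiled: when the port count is only known
 * after probing the hardware, allocate the maximum and shrink
 * host->n_ports before registration; ata_host_register() frees the
 * surplus ports.  "my_second_channel_present" is invented.
 */
#if 0
static struct ata_host *my_alloc_host(struct device *dev)
{
	struct ata_host *host = ata_host_alloc(dev, 2);

	if (host && !my_second_channel_present(dev))
		host->n_ports = 1;	/* port 1 freed at registration */
	return host;
}
#endif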

/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize it with info from @ppi.  If
 *	@ppi is NULL terminated, it may contain fewer entries than
 *	@n_ports; the last entry is then used for all remaining ports.
 *
 *	RETURNS:
 *	Allocated ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->ops = pi->port_ops;

		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
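
/*
 * Illustrative sketch, not compiled: a typical two-channel setup from
 * a NULL-terminated @ppi; the single entry below is reused for the
 * second port.  "my_port_info" and "my_port_ops" are invented.
 */
#if 0
static const struct ata_port_info my_port_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= 0x1f,			/* PIO0-4 */
	.udma_mask	= 0x3f,			/* UDMA0-5 */
	.port_ops	= &my_port_ops,
};

static struct ata_host *my_alloc(struct device *dev)
{
	const struct ata_port_info *ppi[] = { &my_port_info, NULL };

	return ata_host_alloc_pinfo(dev, ppi, 2);
}
#endif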

/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, it's set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}

/**
 *	ata_host_init - Initialize a host struct
 *	@host:	host to initialize
 *	@dev:	device host is attached to
 *	@flags:	host flags
 *	@ops:	port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}

/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probes registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int irq_line;
		u32 scontrol;
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			if (spd)
				ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		/* report the secondary IRQ for the legacy second channel */
		irq_line = host->irq;
		if (i == 1 && host->irq2)
			irq_line = host->irq2;

		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		/* print per-port info to dmesg */
		if (!ata_port_is_dummy(ap))
			ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
					"ctl 0x%p bmdma 0x%p irq %d\n",
					ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->ioaddr.cmd_addr,
					ap->ioaddr.ctl_addr,
					ap->ioaddr.bmdma_addr,
					irq_line);
		else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int rc;

		/* probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	return 0;
}
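
/*
 * Illustrative sketch, not compiled: LLDs that can't use
 * ata_host_activate() (e.g. per-port IRQs) perform the steps by hand -
 * ata_host_start(), their own IRQ setup, then ata_host_register().
 */
#if 0
static int my_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* ... request per-port IRQs here ... */

	return ata_host_register(host, sht);
}
#endif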

/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes the necessary
 *	arguments and performs the three steps in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	/* Used to print device info at probe */
	host->irq = irq;

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
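
/*
 * Illustrative sketch, not compiled: the usual shape of a PCI LLD
 * probe routine built on the helper above.  "my_interrupt", "my_sht"
 * and "my_port_info" are invented; iomapping and ->ioaddr setup are
 * assumed to happen where indicated.
 */
#if 0
static int my_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
	struct ata_host *host;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* ... iomap BARs and fill each port's ->ioaddr here ... */

	/* start ports, grab the IRQ and register with SCSI in one go */
	return ata_host_activate(host, pdev->irq, my_interrupt,
				 IRQF_SHARED, &my_sht);
}
#endif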

/**
 *	ata_port_detach - Detach ATA port in preparation for device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retries will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
	cancel_delayed_work(&ap->hotplug_task);
	cancel_work_sync(&ap->hotplug_task.work);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}

/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);
}

/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
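
/*
 * Illustrative sketch, not compiled: callers fill cmd_addr (and
 * usually ctl_addr/altstatus_addr) from ioremapped BARs, then let
 * ata_std_ports() derive the rest.  "mmio" is an invented base.
 */
#if 0
static void my_setup_ioaddr(struct ata_ioports *ioaddr, void __iomem *mmio)
{
	ioaddr->cmd_addr = mmio;		/* taskfile register block */
	ioaddr->ctl_addr = mmio + 0x10;		/* device control */
	ioaddr->altstatus_addr = ioaddr->ctl_addr;
	ata_std_ports(ioaddr);			/* data...command offsets */
}
#endif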


#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that a hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
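
/*
 * Illustrative sketch, not compiled: checking per-channel enable bits
 * in PCI config space, in the style of several PATA LLDs.  The
 * register offset, mask and value below are invented.
 */
#if 0
static const struct pci_bits my_enable_bits[] = {
	{ 0x41, 1 /* byte */, 0x80, 0x80 },	/* primary channel */
	{ 0x43, 1 /* byte */, 0x80, 0x80 },	/* secondary channel */
};

static int my_port_enabled(struct pci_dev *pdev, unsigned int port)
{
	return pci_test_config_bits(pdev, &my_enable_bits[port]);
}
#endif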

#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
#endif /* CONFIG_PM */
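
/*
 * Illustrative sketch, not compiled: a PCI LLD usually wires the
 * helpers above straight into its pci_driver.  "my_pci_tbl" and
 * "my_init_one" are invented.
 */
#if 0
static struct pci_driver my_pci_driver = {
	.name			= "my_ata",
	.id_table		= my_pci_tbl,
	.probe			= my_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= ata_pci_device_resume,
#endif
};
#endif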

#endif /* CONFIG_PCI */


static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
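
/*
 * Illustrative sketch, not compiled: gating a noisy interrupt-path
 * message to at most one burst per HZ/5 with ata_ratelimit().
 */
#if 0
static void my_report_spurious_irq(struct ata_port *ap, u8 status)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt (status 0x%x)\n", status);
}
#endif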

/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads the 32-bit
 *	LE IO-mapped register @reg and tests for the following condition:
 *
 *	(*@reg & @mask) != @val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
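
/*
 * Illustrative sketch, not compiled: since the loop runs while
 * (*reg & mask) == val, passing ATA_BUSY for both spins until BSY
 * drops or the 1000ms timeout expires.  "mmio_status" is an invented
 * register address.
 */
#if 0
static int my_wait_idle(void __iomem *mmio_status)
{
	u32 status = ata_wait_register(mmio_status, ATA_BUSY, ATA_BUSY,
				       10, 1000);

	return (status & ATA_BUSY) ? -EBUSY : 0;
}
#endif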

/*
 * Dummy port_ops
 */
static void ata_dummy_noret(struct ata_port *ap)	{ }
static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
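
/*
 * Illustrative sketch, not compiled: an LLD with a dead or absent
 * second channel can point that slot at ata_dummy_port_info; the port
 * is still registered, but every command fails with AC_ERR_SYSTEM.
 * "my_port_info" is invented.
 */
#if 0
static struct ata_host *my_alloc_single_channel(struct device *dev)
{
	const struct ata_port_info *ppi[] =
		{ &my_port_info, &ata_dummy_port_info };

	return ata_host_alloc_pinfo(dev, ppi, 2);
}
#endif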

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
EXPORT_SYMBOL_GPL(ata_irq_ack);
EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);