1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 *  libata-core.c - helper library for ATA
4 *
5 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
6 *  Copyright 2003-2004 Jeff Garzik
7 *
8 *  libata documentation is available via 'make {ps|pdf}docs',
9 *  as Documentation/driver-api/libata.rst
10 *
11 *  Hardware documentation available from http://www.t13.org/ and
12 *  http://www.sata-io.org/
13 *
14 *  Standards documents from:
15 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
16 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
17 *	http://www.sata-io.org (SATA)
18 *	http://www.compactflash.org (CF)
19 *	http://www.qic.org (QIC157 - Tape and DSC)
20 *	http://www.ce-ata.org (CE-ATA: not supported)
21 *
22 * libata is essentially a library of internal helper functions for
23 * low-level ATA host controller drivers.  As such, the API/ABI is
24 * likely to change as new drivers are added and updated.
25 * Do not depend on ABI/API stability.
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/list.h>
33#include <linux/mm.h>
34#include <linux/spinlock.h>
35#include <linux/blkdev.h>
36#include <linux/delay.h>
37#include <linux/timer.h>
38#include <linux/time.h>
39#include <linux/interrupt.h>
40#include <linux/completion.h>
41#include <linux/suspend.h>
42#include <linux/workqueue.h>
43#include <linux/scatterlist.h>
44#include <linux/io.h>
45#include <linux/log2.h>
46#include <linux/slab.h>
47#include <linux/glob.h>
48#include <scsi/scsi.h>
49#include <scsi/scsi_cmnd.h>
50#include <scsi/scsi_host.h>
51#include <linux/libata.h>
52#include <asm/byteorder.h>
53#include <asm/unaligned.h>
54#include <linux/cdrom.h>
55#include <linux/ratelimit.h>
56#include <linux/leds.h>
57#include <linux/pm_runtime.h>
58#include <linux/platform_device.h>
59#include <asm/setup.h>
60
61#define CREATE_TRACE_POINTS
62#include <trace/events/libata.h>
63
64#include "libata.h"
65#include "libata-transport.h"
66
67const struct ata_port_operations ata_base_port_ops = {
68	.prereset		= ata_std_prereset,
69	.postreset		= ata_std_postreset,
70	.error_handler		= ata_std_error_handler,
71	.sched_eh		= ata_std_sched_eh,
72	.end_eh			= ata_std_end_eh,
73};
74
75const struct ata_port_operations sata_port_ops = {
76	.inherits		= &ata_base_port_ops,
77
78	.qc_defer		= ata_std_qc_defer,
79	.hardreset		= sata_std_hardreset,
80};
81EXPORT_SYMBOL_GPL(sata_port_ops);
82
83static unsigned int ata_dev_init_params(struct ata_device *dev,
84					u16 heads, u16 sectors);
85static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
86static void ata_dev_xfermask(struct ata_device *dev);
87static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
88
89atomic_t ata_print_id = ATOMIC_INIT(0);
90
91#ifdef CONFIG_ATA_FORCE
92struct ata_force_param {
93	const char	*name;
94	u8		cbl;
95	u8		spd_limit;
96	unsigned int	xfer_mask;
97	unsigned int	horkage_on;
98	unsigned int	horkage_off;
99	u16		lflags_on;
100	u16		lflags_off;
101};
102
103struct ata_force_ent {
104	int			port;
105	int			device;
106	struct ata_force_param	param;
107};
108
109static struct ata_force_ent *ata_force_tbl;
110static int ata_force_tbl_size;
111
112static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
113/* param_buf is thrown away after initialization, disallow read */
114module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
115MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
116#endif
117
118static int atapi_enabled = 1;
119module_param(atapi_enabled, int, 0444);
120MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
121
122static int atapi_dmadir = 0;
123module_param(atapi_dmadir, int, 0444);
124MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
125
126int atapi_passthru16 = 1;
127module_param(atapi_passthru16, int, 0444);
128MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
129
130int libata_fua = 0;
131module_param_named(fua, libata_fua, int, 0444);
132MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
133
134static int ata_ignore_hpa;
135module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
136MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
137
138static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
139module_param_named(dma, libata_dma_mask, int, 0444);
140MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
141
142static int ata_probe_timeout;
143module_param(ata_probe_timeout, int, 0444);
144MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
145
146int libata_noacpi = 0;
147module_param_named(noacpi, libata_noacpi, int, 0444);
148MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
149
150int libata_allow_tpm = 0;
151module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
152MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
153
154static int atapi_an;
155module_param(atapi_an, int, 0444);
156MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
157
158MODULE_AUTHOR("Jeff Garzik");
159MODULE_DESCRIPTION("Library module for ATA devices");
160MODULE_LICENSE("GPL");
161MODULE_VERSION(DRV_VERSION);
162
163static inline bool ata_dev_print_info(struct ata_device *dev)
164{
165	struct ata_eh_context *ehc = &dev->link->eh_context;
166
167	return ehc->i.flags & ATA_EHI_PRINTINFO;
168}
169
170static bool ata_sstatus_online(u32 sstatus)
171{
172	return (sstatus & 0xf) == 0x3;
173}
174
175/**
176 *	ata_link_next - link iteration helper
177 *	@link: the previous link, NULL to start
178 *	@ap: ATA port containing links to iterate
179 *	@mode: iteration mode, one of ATA_LITER_*
180 *
181 *	LOCKING:
182 *	Host lock or EH context.
183 *
184 *	RETURNS:
185 *	Pointer to the next link.
186 */
187struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
188			       enum ata_link_iter_mode mode)
189{
190	BUG_ON(mode != ATA_LITER_EDGE &&
191	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
192
193	/* NULL link indicates start of iteration */
194	if (!link)
195		switch (mode) {
196		case ATA_LITER_EDGE:
197		case ATA_LITER_PMP_FIRST:
198			if (sata_pmp_attached(ap))
199				return ap->pmp_link;
200			fallthrough;
201		case ATA_LITER_HOST_FIRST:
202			return &ap->link;
203		}
204
205	/* we just iterated over the host link, what's next? */
206	if (link == &ap->link)
207		switch (mode) {
208		case ATA_LITER_HOST_FIRST:
209			if (sata_pmp_attached(ap))
210				return ap->pmp_link;
211			fallthrough;
212		case ATA_LITER_PMP_FIRST:
213			if (unlikely(ap->slave_link))
214				return ap->slave_link;
215			fallthrough;
216		case ATA_LITER_EDGE:
217			return NULL;
218		}
219
220	/* slave_link excludes PMP */
221	if (unlikely(link == ap->slave_link))
222		return NULL;
223
224	/* we were over a PMP link */
225	if (++link < ap->pmp_link + ap->nr_pmp_links)
226		return link;
227
228	if (mode == ATA_LITER_PMP_FIRST)
229		return &ap->link;
230
231	return NULL;
232}
233EXPORT_SYMBOL_GPL(ata_link_next);
234
235/**
236 *	ata_dev_next - device iteration helper
237 *	@dev: the previous device, NULL to start
238 *	@link: ATA link containing devices to iterate
239 *	@mode: iteration mode, one of ATA_DITER_*
240 *
241 *	LOCKING:
242 *	Host lock or EH context.
243 *
244 *	RETURNS:
245 *	Pointer to the next device.
246 */
247struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
248				enum ata_dev_iter_mode mode)
249{
250	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
251	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
252
253	/* NULL dev indicates start of iteration */
254	if (!dev)
255		switch (mode) {
256		case ATA_DITER_ENABLED:
257		case ATA_DITER_ALL:
258			dev = link->device;
259			goto check;
260		case ATA_DITER_ENABLED_REVERSE:
261		case ATA_DITER_ALL_REVERSE:
262			dev = link->device + ata_link_max_devices(link) - 1;
263			goto check;
264		}
265
266 next:
267	/* move to the next one */
268	switch (mode) {
269	case ATA_DITER_ENABLED:
270	case ATA_DITER_ALL:
271		if (++dev < link->device + ata_link_max_devices(link))
272			goto check;
273		return NULL;
274	case ATA_DITER_ENABLED_REVERSE:
275	case ATA_DITER_ALL_REVERSE:
276		if (--dev >= link->device)
277			goto check;
278		return NULL;
279	}
280
281 check:
282	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
283	    !ata_dev_enabled(dev))
284		goto next;
285	return dev;
286}
287EXPORT_SYMBOL_GPL(ata_dev_next);
288
289/**
290 *	ata_dev_phys_link - find physical link for a device
291 *	@dev: ATA device to look up physical link for
292 *
293 *	Look up physical link which @dev is attached to.  Note that
294 *	this is different from @dev->link only when @dev is on slave
295 *	link.  For all other cases, it's the same as @dev->link.
296 *
297 *	LOCKING:
298 *	Don't care.
299 *
300 *	RETURNS:
301 *	Pointer to the found physical link.
302 */
303struct ata_link *ata_dev_phys_link(struct ata_device *dev)
304{
305	struct ata_port *ap = dev->link->ap;
306
307	if (!ap->slave_link)
308		return dev->link;
309	if (!dev->devno)
310		return &ap->link;
311	return ap->slave_link;
312}
313
314#ifdef CONFIG_ATA_FORCE
315/**
316 *	ata_force_cbl - force cable type according to libata.force
317 *	@ap: ATA port of interest
318 *
319 *	Force cable type according to libata.force and whine about it.
320 *	The last entry which has matching port number is used, so it
321 *	can be specified as part of device force parameters.  For
322 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
323 *	same effect.
324 *
325 *	LOCKING:
326 *	EH context.
327 */
328void ata_force_cbl(struct ata_port *ap)
329{
330	int i;
331
332	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
333		const struct ata_force_ent *fe = &ata_force_tbl[i];
334
335		if (fe->port != -1 && fe->port != ap->print_id)
336			continue;
337
338		if (fe->param.cbl == ATA_CBL_NONE)
339			continue;
340
341		ap->cbl = fe->param.cbl;
342		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
343		return;
344	}
345}
346
347/**
348 *	ata_force_link_limits - force link limits according to libata.force
349 *	@link: ATA link of interest
350 *
351 *	Force link flags and SATA spd limit according to libata.force
352 *	and whine about it.  When only the port part is specified
353 *	(e.g. 1:), the limit applies to all links connected to both
354 *	the host link and all fan-out ports connected via PMP.  If the
355 *	device part is specified as 0 (e.g. 1.00:), it specifies the
356 *	first fan-out link not the host link.  Device number 15 always
357 *	points to the host link whether PMP is attached or not.  If the
358 *	controller has slave link, device number 16 points to it.
359 *
360 *	LOCKING:
361 *	EH context.
362 */
363static void ata_force_link_limits(struct ata_link *link)
364{
365	bool did_spd = false;
366	int linkno = link->pmp;
367	int i;
368
369	if (ata_is_host_link(link))
370		linkno += 15;
371
372	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
373		const struct ata_force_ent *fe = &ata_force_tbl[i];
374
375		if (fe->port != -1 && fe->port != link->ap->print_id)
376			continue;
377
378		if (fe->device != -1 && fe->device != linkno)
379			continue;
380
381		/* only honor the first spd limit */
382		if (!did_spd && fe->param.spd_limit) {
383			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
384			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
385					fe->param.name);
386			did_spd = true;
387		}
388
389		/* let lflags stack */
390		if (fe->param.lflags_on) {
391			link->flags |= fe->param.lflags_on;
392			ata_link_notice(link,
393					"FORCE: link flag 0x%x forced -> 0x%x\n",
394					fe->param.lflags_on, link->flags);
395		}
396		if (fe->param.lflags_off) {
397			link->flags &= ~fe->param.lflags_off;
398			ata_link_notice(link,
399				"FORCE: link flag 0x%x cleared -> 0x%x\n",
400				fe->param.lflags_off, link->flags);
401		}
402	}
403}
404
405/**
406 *	ata_force_xfermask - force xfermask according to libata.force
407 *	@dev: ATA device of interest
408 *
409 *	Force xfer_mask according to libata.force and whine about it.
410 *	For consistency with link selection, device number 15 selects
411 *	the first device connected to the host link.
412 *
413 *	LOCKING:
414 *	EH context.
415 */
416static void ata_force_xfermask(struct ata_device *dev)
417{
418	int devno = dev->link->pmp + dev->devno;
419	int alt_devno = devno;
420	int i;
421
422	/* allow n.15/16 for devices attached to host port */
423	if (ata_is_host_link(dev->link))
424		alt_devno += 15;
425
426	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
427		const struct ata_force_ent *fe = &ata_force_tbl[i];
428		unsigned int pio_mask, mwdma_mask, udma_mask;
429
430		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
431			continue;
432
433		if (fe->device != -1 && fe->device != devno &&
434		    fe->device != alt_devno)
435			continue;
436
437		if (!fe->param.xfer_mask)
438			continue;
439
440		ata_unpack_xfermask(fe->param.xfer_mask,
441				    &pio_mask, &mwdma_mask, &udma_mask);
442		if (udma_mask)
443			dev->udma_mask = udma_mask;
444		else if (mwdma_mask) {
445			dev->udma_mask = 0;
446			dev->mwdma_mask = mwdma_mask;
447		} else {
448			dev->udma_mask = 0;
449			dev->mwdma_mask = 0;
450			dev->pio_mask = pio_mask;
451		}
452
453		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
454			       fe->param.name);
455		return;
456	}
457}
458
459/**
460 *	ata_force_horkage - force horkage according to libata.force
461 *	@dev: ATA device of interest
462 *
463 *	Force horkage according to libata.force and whine about it.
464 *	For consistency with link selection, device number 15 selects
465 *	the first device connected to the host link.
466 *
467 *	LOCKING:
468 *	EH context.
469 */
470static void ata_force_horkage(struct ata_device *dev)
471{
472	int devno = dev->link->pmp + dev->devno;
473	int alt_devno = devno;
474	int i;
475
476	/* allow n.15/16 for devices attached to host port */
477	if (ata_is_host_link(dev->link))
478		alt_devno += 15;
479
480	for (i = 0; i < ata_force_tbl_size; i++) {
481		const struct ata_force_ent *fe = &ata_force_tbl[i];
482
483		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
484			continue;
485
486		if (fe->device != -1 && fe->device != devno &&
487		    fe->device != alt_devno)
488			continue;
489
490		if (!(~dev->horkage & fe->param.horkage_on) &&
491		    !(dev->horkage & fe->param.horkage_off))
492			continue;
493
494		dev->horkage |= fe->param.horkage_on;
495		dev->horkage &= ~fe->param.horkage_off;
496
497		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
498			       fe->param.name);
499	}
500}
501#else
502static inline void ata_force_link_limits(struct ata_link *link) { }
503static inline void ata_force_xfermask(struct ata_device *dev) { }
504static inline void ata_force_horkage(struct ata_device *dev) { }
505#endif
506
507/**
508 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
509 *	@opcode: SCSI opcode
510 *
511 *	Determine ATAPI command type from @opcode.
512 *
513 *	LOCKING:
514 *	None.
515 *
516 *	RETURNS:
517 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
518 */
519int atapi_cmd_type(u8 opcode)
520{
521	switch (opcode) {
522	case GPCMD_READ_10:
523	case GPCMD_READ_12:
524		return ATAPI_READ;
525
526	case GPCMD_WRITE_10:
527	case GPCMD_WRITE_12:
528	case GPCMD_WRITE_AND_VERIFY_10:
529		return ATAPI_WRITE;
530
531	case GPCMD_READ_CD:
532	case GPCMD_READ_CD_MSF:
533		return ATAPI_READ_CD;
534
535	case ATA_16:
536	case ATA_12:
537		if (atapi_passthru16)
538			return ATAPI_PASS_THRU;
539		fallthrough;
540	default:
541		return ATAPI_MISC;
542	}
543}
544EXPORT_SYMBOL_GPL(atapi_cmd_type);
545
546static const u8 ata_rw_cmds[] = {
547	/* pio multi */
548	ATA_CMD_READ_MULTI,
549	ATA_CMD_WRITE_MULTI,
550	ATA_CMD_READ_MULTI_EXT,
551	ATA_CMD_WRITE_MULTI_EXT,
552	0,
553	0,
554	0,
555	0,
556	/* pio */
557	ATA_CMD_PIO_READ,
558	ATA_CMD_PIO_WRITE,
559	ATA_CMD_PIO_READ_EXT,
560	ATA_CMD_PIO_WRITE_EXT,
561	0,
562	0,
563	0,
564	0,
565	/* dma */
566	ATA_CMD_READ,
567	ATA_CMD_WRITE,
568	ATA_CMD_READ_EXT,
569	ATA_CMD_WRITE_EXT,
570	0,
571	0,
572	0,
573	ATA_CMD_WRITE_FUA_EXT
574};
575
576/**
577 *	ata_set_rwcmd_protocol - set taskfile r/w command and protocol
578 *	@dev: target device for the taskfile
579 *	@tf: taskfile to examine and configure
580 *
581 *	Examine the device configuration and tf->flags to determine
582 *	the proper read/write command and protocol to use for @tf.
583 *
584 *	LOCKING:
585 *	caller.
586 */
587static bool ata_set_rwcmd_protocol(struct ata_device *dev,
588				   struct ata_taskfile *tf)
589{
590	u8 cmd;
591
592	int index, fua, lba48, write;
593
594	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
595	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
596	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
597
598	if (dev->flags & ATA_DFLAG_PIO) {
599		tf->protocol = ATA_PROT_PIO;
600		index = dev->multi_count ? 0 : 8;
601	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
602		/* Unable to use DMA due to host limitation */
603		tf->protocol = ATA_PROT_PIO;
604		index = dev->multi_count ? 0 : 8;
605	} else {
606		tf->protocol = ATA_PROT_DMA;
607		index = 16;
608	}
609
610	cmd = ata_rw_cmds[index + fua + lba48 + write];
611	if (!cmd)
612		return false;
613
614	tf->command = cmd;
615
616	return true;
617}
618
619/**
620 *	ata_tf_read_block - Read block address from ATA taskfile
621 *	@tf: ATA taskfile of interest
622 *	@dev: ATA device @tf belongs to
623 *
624 *	LOCKING:
625 *	None.
626 *
627 *	Read block address from @tf.  This function can handle all
628 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
629 *	flags select the address format to use.
630 *
631 *	RETURNS:
632 *	Block address read from @tf.
633 */
634u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
635{
636	u64 block = 0;
637
638	if (tf->flags & ATA_TFLAG_LBA) {
639		if (tf->flags & ATA_TFLAG_LBA48) {
640			block |= (u64)tf->hob_lbah << 40;
641			block |= (u64)tf->hob_lbam << 32;
642			block |= (u64)tf->hob_lbal << 24;
643		} else
644			block |= (tf->device & 0xf) << 24;
645
646		block |= tf->lbah << 16;
647		block |= tf->lbam << 8;
648		block |= tf->lbal;
649	} else {
650		u32 cyl, head, sect;
651
652		cyl = tf->lbam | (tf->lbah << 8);
653		head = tf->device & 0xf;
654		sect = tf->lbal;
655
656		if (!sect) {
657			ata_dev_warn(dev,
658				     "device reported invalid CHS sector 0\n");
659			return U64_MAX;
660		}
661
662		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
663	}
664
665	return block;
666}
667
668/*
669 * Set a taskfile command duration limit index.
670 */
671static inline void ata_set_tf_cdl(struct ata_queued_cmd *qc, int cdl)
672{
673	struct ata_taskfile *tf = &qc->tf;
674
675	if (tf->protocol == ATA_PROT_NCQ)
676		tf->auxiliary |= cdl;
677	else
678		tf->feature |= cdl;
679
680	/*
681	 * Mark this command as having a CDL and request the result
682	 * task file so that we can inspect the sense data available
683	 * bit on completion.
684	 */
685	qc->flags |= ATA_QCFLAG_HAS_CDL | ATA_QCFLAG_RESULT_TF;
686}
687
688/**
689 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
690 *	@qc: Metadata associated with the taskfile to build
691 *	@block: Block address
692 *	@n_block: Number of blocks
693 *	@tf_flags: RW/FUA etc...
694 *	@cdl: Command duration limit index
695 *	@class: IO priority class
696 *
697 *	LOCKING:
698 *	None.
699 *
700 *	Build ATA taskfile for the command @qc for read/write request described
701 *	by @block, @n_block, @tf_flags and @class.
702 *
703 *	RETURNS:
704 *
705 *	0 on success, -ERANGE if the request is too large for @dev,
706 *	-EINVAL if the request is invalid.
707 */
708int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
709		    unsigned int tf_flags, int cdl, int class)
710{
711	struct ata_taskfile *tf = &qc->tf;
712	struct ata_device *dev = qc->dev;
713
714	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
715	tf->flags |= tf_flags;
716
717	if (ata_ncq_enabled(dev)) {
718		/* yay, NCQ */
719		if (!lba_48_ok(block, n_block))
720			return -ERANGE;
721
722		tf->protocol = ATA_PROT_NCQ;
723		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
724
725		if (tf->flags & ATA_TFLAG_WRITE)
726			tf->command = ATA_CMD_FPDMA_WRITE;
727		else
728			tf->command = ATA_CMD_FPDMA_READ;
729
730		tf->nsect = qc->hw_tag << 3;
731		tf->hob_feature = (n_block >> 8) & 0xff;
732		tf->feature = n_block & 0xff;
733
734		tf->hob_lbah = (block >> 40) & 0xff;
735		tf->hob_lbam = (block >> 32) & 0xff;
736		tf->hob_lbal = (block >> 24) & 0xff;
737		tf->lbah = (block >> 16) & 0xff;
738		tf->lbam = (block >> 8) & 0xff;
739		tf->lbal = block & 0xff;
740
741		tf->device = ATA_LBA;
742		if (tf->flags & ATA_TFLAG_FUA)
743			tf->device |= 1 << 7;
744
745		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED &&
746		    class == IOPRIO_CLASS_RT)
747			tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
748
749		if ((dev->flags & ATA_DFLAG_CDL_ENABLED) && cdl)
750			ata_set_tf_cdl(qc, cdl);
751
752	} else if (dev->flags & ATA_DFLAG_LBA) {
753		tf->flags |= ATA_TFLAG_LBA;
754
755		if ((dev->flags & ATA_DFLAG_CDL_ENABLED) && cdl)
756			ata_set_tf_cdl(qc, cdl);
757
758		/* Both FUA writes and a CDL index require 48-bit commands */
759		if (!(tf->flags & ATA_TFLAG_FUA) &&
760		    !(qc->flags & ATA_QCFLAG_HAS_CDL) &&
761		    lba_28_ok(block, n_block)) {
762			/* use LBA28 */
763			tf->device |= (block >> 24) & 0xf;
764		} else if (lba_48_ok(block, n_block)) {
765			if (!(dev->flags & ATA_DFLAG_LBA48))
766				return -ERANGE;
767
768			/* use LBA48 */
769			tf->flags |= ATA_TFLAG_LBA48;
770
771			tf->hob_nsect = (n_block >> 8) & 0xff;
772
773			tf->hob_lbah = (block >> 40) & 0xff;
774			tf->hob_lbam = (block >> 32) & 0xff;
775			tf->hob_lbal = (block >> 24) & 0xff;
776		} else {
777			/* request too large even for LBA48 */
778			return -ERANGE;
779		}
780
781		if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
782			return -EINVAL;
783
784		tf->nsect = n_block & 0xff;
785
786		tf->lbah = (block >> 16) & 0xff;
787		tf->lbam = (block >> 8) & 0xff;
788		tf->lbal = block & 0xff;
789
790		tf->device |= ATA_LBA;
791	} else {
792		/* CHS */
793		u32 sect, head, cyl, track;
794
795		/* The request -may- be too large for CHS addressing. */
796		if (!lba_28_ok(block, n_block))
797			return -ERANGE;
798
799		if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
800			return -EINVAL;
801
802		/* Convert LBA to CHS */
803		track = (u32)block / dev->sectors;
804		cyl   = track / dev->heads;
805		head  = track % dev->heads;
806		sect  = (u32)block % dev->sectors + 1;
807
808		/* Check whether the converted CHS can fit.
809		   Cylinder: 0-65535
810		   Head: 0-15
811		   Sector: 1-255*/
812		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
813			return -ERANGE;
814
815		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
816		tf->lbal = sect;
817		tf->lbam = cyl;
818		tf->lbah = cyl >> 8;
819		tf->device |= head;
820	}
821
822	return 0;
823}
824
825/**
826 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
827 *	@pio_mask: pio_mask
828 *	@mwdma_mask: mwdma_mask
829 *	@udma_mask: udma_mask
830 *
831 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
832 *	unsigned int xfer_mask.
833 *
834 *	LOCKING:
835 *	None.
836 *
837 *	RETURNS:
838 *	Packed xfer_mask.
839 */
840unsigned int ata_pack_xfermask(unsigned int pio_mask,
841			       unsigned int mwdma_mask,
842			       unsigned int udma_mask)
843{
844	return	((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
845		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
846		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
847}
848EXPORT_SYMBOL_GPL(ata_pack_xfermask);
849
850/**
851 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
852 *	@xfer_mask: xfer_mask to unpack
853 *	@pio_mask: resulting pio_mask
854 *	@mwdma_mask: resulting mwdma_mask
855 *	@udma_mask: resulting udma_mask
856 *
857 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
858 *	Any NULL destination masks will be ignored.
859 */
860void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
861			 unsigned int *mwdma_mask, unsigned int *udma_mask)
862{
863	if (pio_mask)
864		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
865	if (mwdma_mask)
866		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
867	if (udma_mask)
868		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
869}
870
871static const struct ata_xfer_ent {
872	int shift, bits;
873	u8 base;
874} ata_xfer_tbl[] = {
875	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
876	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
877	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
878	{ -1, },
879};
880
881/**
882 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
883 *	@xfer_mask: xfer_mask of interest
884 *
885 *	Return matching XFER_* value for @xfer_mask.  Only the highest
886 *	bit of @xfer_mask is considered.
887 *
888 *	LOCKING:
889 *	None.
890 *
891 *	RETURNS:
892 *	Matching XFER_* value, 0xff if no match found.
893 */
894u8 ata_xfer_mask2mode(unsigned int xfer_mask)
895{
896	int highbit = fls(xfer_mask) - 1;
897	const struct ata_xfer_ent *ent;
898
899	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
900		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
901			return ent->base + highbit - ent->shift;
902	return 0xff;
903}
904EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
905
906/**
907 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
908 *	@xfer_mode: XFER_* of interest
909 *
910 *	Return matching xfer_mask for @xfer_mode.
911 *
912 *	LOCKING:
913 *	None.
914 *
915 *	RETURNS:
916 *	Matching xfer_mask, 0 if no match found.
917 */
918unsigned int ata_xfer_mode2mask(u8 xfer_mode)
919{
920	const struct ata_xfer_ent *ent;
921
922	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
923		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
924			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
925				& ~((1 << ent->shift) - 1);
926	return 0;
927}
928EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
929
930/**
931 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
932 *	@xfer_mode: XFER_* of interest
933 *
934 *	Return matching xfer_shift for @xfer_mode.
935 *
936 *	LOCKING:
937 *	None.
938 *
939 *	RETURNS:
940 *	Matching xfer_shift, -1 if no match found.
941 */
942int ata_xfer_mode2shift(u8 xfer_mode)
943{
944	const struct ata_xfer_ent *ent;
945
946	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
947		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
948			return ent->shift;
949	return -1;
950}
951EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
952
953/**
954 *	ata_mode_string - convert xfer_mask to string
955 *	@xfer_mask: mask of bits supported; only highest bit counts.
956 *
957 *	Determine string which represents the highest speed
958 *	(highest bit in @modemask).
959 *
960 *	LOCKING:
961 *	None.
962 *
963 *	RETURNS:
964 *	Constant C string representing highest speed listed in
965 *	@mode_mask, or the constant C string "<n/a>".
966 */
967const char *ata_mode_string(unsigned int xfer_mask)
968{
969	static const char * const xfer_mode_str[] = {
970		"PIO0",
971		"PIO1",
972		"PIO2",
973		"PIO3",
974		"PIO4",
975		"PIO5",
976		"PIO6",
977		"MWDMA0",
978		"MWDMA1",
979		"MWDMA2",
980		"MWDMA3",
981		"MWDMA4",
982		"UDMA/16",
983		"UDMA/25",
984		"UDMA/33",
985		"UDMA/44",
986		"UDMA/66",
987		"UDMA/100",
988		"UDMA/133",
989		"UDMA7",
990	};
991	int highbit;
992
993	highbit = fls(xfer_mask) - 1;
994	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
995		return xfer_mode_str[highbit];
996	return "<n/a>";
997}
998EXPORT_SYMBOL_GPL(ata_mode_string);
999
1000const char *sata_spd_string(unsigned int spd)
1001{
1002	static const char * const spd_str[] = {
1003		"1.5 Gbps",
1004		"3.0 Gbps",
1005		"6.0 Gbps",
1006	};
1007
1008	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
1009		return "<unknown>";
1010	return spd_str[spd - 1];
1011}
1012
1013/**
1014 *	ata_dev_classify - determine device type based on ATA-spec signature
1015 *	@tf: ATA taskfile register set for device to be identified
1016 *
1017 *	Determine from taskfile register contents whether a device is
1018 *	ATA or ATAPI, as per "Signature and persistence" section
1019 *	of ATA/PI spec (volume 1, sect 5.14).
1020 *
1021 *	LOCKING:
1022 *	None.
1023 *
1024 *	RETURNS:
1025 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1026 *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN the event of failure.
1027 */
1028unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1029{
1030	/* Apple's open source Darwin code hints that some devices only
1031	 * put a proper signature into the LBA mid/high registers,
1032	 * So, we only check those.  It's sufficient for uniqueness.
1033	 *
1034	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1035	 * signatures for ATA and ATAPI devices attached on SerialATA,
1036	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
1037	 * spec has never mentioned about using different signatures
1038	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
1039	 * Multiplier specification began to use 0x69/0x96 to identify
1040	 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1041	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1042	 * 0x69/0x96 shortly and described them as reserved for
1043	 * SerialATA.
1044	 *
1045	 * We follow the current spec and consider that 0x69/0x96
1046	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1047	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1048	 * SEMB signature.  This is worked around in
1049	 * ata_dev_read_id().
1050	 */
1051	if (tf->lbam == 0 && tf->lbah == 0)
1052		return ATA_DEV_ATA;
1053
1054	if (tf->lbam == 0x14 && tf->lbah == 0xeb)
1055		return ATA_DEV_ATAPI;
1056
1057	if (tf->lbam == 0x69 && tf->lbah == 0x96)
1058		return ATA_DEV_PMP;
1059
1060	if (tf->lbam == 0x3c && tf->lbah == 0xc3)
1061		return ATA_DEV_SEMB;
1062
1063	if (tf->lbam == 0xcd && tf->lbah == 0xab)
1064		return ATA_DEV_ZAC;
1065
1066	return ATA_DEV_UNKNOWN;
1067}
1068EXPORT_SYMBOL_GPL(ata_dev_classify);
1069
1070/**
1071 *	ata_id_string - Convert IDENTIFY DEVICE page into string
1072 *	@id: IDENTIFY DEVICE results we will examine
1073 *	@s: string into which data is output
1074 *	@ofs: offset into identify device page
1075 *	@len: length of string to return. must be an even number.
1076 *
1077 *	The strings in the IDENTIFY DEVICE page are broken up into
1078 *	16-bit chunks.  Run through the string, and output each
1079 *	8-bit chunk linearly, regardless of platform.
1080 *
1081 *	LOCKING:
1082 *	caller.
1083 */
1084
1085void ata_id_string(const u16 *id, unsigned char *s,
1086		   unsigned int ofs, unsigned int len)
1087{
1088	unsigned int c;
1089
1090	BUG_ON(len & 1);
1091
1092	while (len > 0) {
1093		c = id[ofs] >> 8;
1094		*s = c;
1095		s++;
1096
1097		c = id[ofs] & 0xff;
1098		*s = c;
1099		s++;
1100
1101		ofs++;
1102		len -= 2;
1103	}
1104}
1105EXPORT_SYMBOL_GPL(ata_id_string);
1106
1107/**
1108 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1109 *	@id: IDENTIFY DEVICE results we will examine
1110 *	@s: string into which data is output
1111 *	@ofs: offset into identify device page
1112 *	@len: length of string to return. must be an odd number.
1113 *
1114 *	This function is identical to ata_id_string except that it
1115 *	trims trailing spaces and terminates the resulting string with
1116 *	null.  @len must be actual maximum length (even number) + 1.
1117 *
1118 *	LOCKING:
1119 *	caller.
1120 */
1121void ata_id_c_string(const u16 *id, unsigned char *s,
1122		     unsigned int ofs, unsigned int len)
1123{
1124	unsigned char *p;
1125
1126	ata_id_string(id, s, ofs, len - 1);
1127
1128	p = s + strnlen(s, len - 1);
1129	while (p > s && p[-1] == ' ')
1130		p--;
1131	*p = '\0';
1132}
1133EXPORT_SYMBOL_GPL(ata_id_c_string);
1134
1135static u64 ata_id_n_sectors(const u16 *id)
1136{
1137	if (ata_id_has_lba(id)) {
1138		if (ata_id_has_lba48(id))
1139			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1140
1141		return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1142	}
1143
1144	if (ata_id_current_chs_valid(id))
1145		return (u32)id[ATA_ID_CUR_CYLS] * (u32)id[ATA_ID_CUR_HEADS] *
1146		       (u32)id[ATA_ID_CUR_SECTORS];
1147
1148	return (u32)id[ATA_ID_CYLS] * (u32)id[ATA_ID_HEADS] *
1149	       (u32)id[ATA_ID_SECTORS];
1150}
1151
1152u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1153{
1154	u64 sectors = 0;
1155
1156	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1157	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1158	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1159	sectors |= (tf->lbah & 0xff) << 16;
1160	sectors |= (tf->lbam & 0xff) << 8;
1161	sectors |= (tf->lbal & 0xff);
1162
1163	return sectors;
1164}
1165
1166u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1167{
1168	u64 sectors = 0;
1169
1170	sectors |= (tf->device & 0x0f) << 24;
1171	sectors |= (tf->lbah & 0xff) << 16;
1172	sectors |= (tf->lbam & 0xff) << 8;
1173	sectors |= (tf->lbal & 0xff);
1174
1175	return sectors;
1176}
1177
1178/**
1179 *	ata_read_native_max_address - Read native max address
1180 *	@dev: target device
1181 *	@max_sectors: out parameter for the result native max address
1182 *
1183 *	Perform an LBA48 or LBA28 native size query upon the device in
1184 *	question.
1185 *
1186 *	RETURNS:
1187 *	0 on success, -EACCES if command is aborted by the drive.
1188 *	-EIO on other errors.
1189 */
1190static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1191{
1192	unsigned int err_mask;
1193	struct ata_taskfile tf;
1194	int lba48 = ata_id_has_lba48(dev->id);
1195
1196	ata_tf_init(dev, &tf);
1197
1198	/* always clear all address registers */
1199	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1200
1201	if (lba48) {
1202		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1203		tf.flags |= ATA_TFLAG_LBA48;
1204	} else
1205		tf.command = ATA_CMD_READ_NATIVE_MAX;
1206
1207	tf.protocol = ATA_PROT_NODATA;
1208	tf.device |= ATA_LBA;
1209
1210	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1211	if (err_mask) {
1212		ata_dev_warn(dev,
1213			     "failed to read native max address (err_mask=0x%x)\n",
1214			     err_mask);
1215		if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
1216			return -EACCES;
1217		return -EIO;
1218	}
1219
1220	if (lba48)
1221		*max_sectors = ata_tf_to_lba48(&tf) + 1;
1222	else
1223		*max_sectors = ata_tf_to_lba(&tf) + 1;
1224	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1225		(*max_sectors)--;
1226	return 0;
1227}
1228
1229/**
1230 *	ata_set_max_sectors - Set max sectors
1231 *	@dev: target device
1232 *	@new_sectors: new max sectors value to set for the device
1233 *
1234 *	Set max sectors of @dev to @new_sectors.
1235 *
1236 *	RETURNS:
1237 *	0 on success, -EACCES if command is aborted or denied (due to
1238 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
1239 *	errors.
1240 */
1241static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1242{
1243	unsigned int err_mask;
1244	struct ata_taskfile tf;
1245	int lba48 = ata_id_has_lba48(dev->id);
1246
1247	new_sectors--;
1248
1249	ata_tf_init(dev, &tf);
1250
1251	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1252
1253	if (lba48) {
1254		tf.command = ATA_CMD_SET_MAX_EXT;
1255		tf.flags |= ATA_TFLAG_LBA48;
1256
1257		tf.hob_lbal = (new_sectors >> 24) & 0xff;
1258		tf.hob_lbam = (new_sectors >> 32) & 0xff;
1259		tf.hob_lbah = (new_sectors >> 40) & 0xff;
1260	} else {
1261		tf.command = ATA_CMD_SET_MAX;
1262
1263		tf.device |= (new_sectors >> 24) & 0xf;
1264	}
1265
1266	tf.protocol = ATA_PROT_NODATA;
1267	tf.device |= ATA_LBA;
1268
1269	tf.lbal = (new_sectors >> 0) & 0xff;
1270	tf.lbam = (new_sectors >> 8) & 0xff;
1271	tf.lbah = (new_sectors >> 16) & 0xff;
1272
1273	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1274	if (err_mask) {
1275		ata_dev_warn(dev,
1276			     "failed to set max address (err_mask=0x%x)\n",
1277			     err_mask);
1278		if (err_mask == AC_ERR_DEV &&
1279		    (tf.error & (ATA_ABORTED | ATA_IDNF)))
1280			return -EACCES;
1281		return -EIO;
1282	}
1283
1284	return 0;
1285}
1286
1287/**
1288 *	ata_hpa_resize		-	Resize a device with an HPA set
1289 *	@dev: Device to resize
1290 *
1291 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
1292 *	it if required to the full size of the media. The caller must check
1293 *	the drive has the HPA feature set enabled.
1294 *
1295 *	RETURNS:
1296 *	0 on success, -errno on failure.
1297 */
1298static int ata_hpa_resize(struct ata_device *dev)
1299{
1300	bool print_info = ata_dev_print_info(dev);
1301	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1302	u64 sectors = ata_id_n_sectors(dev->id);
1303	u64 native_sectors;
1304	int rc;
1305
1306	/* do we need to do it? */
1307	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1308	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1309	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1310		return 0;
1311
1312	/* read native max address */
1313	rc = ata_read_native_max_address(dev, &native_sectors);
1314	if (rc) {
1315		/* If device aborted the command or HPA isn't going to
1316		 * be unlocked, skip HPA resizing.
1317		 */
1318		if (rc == -EACCES || !unlock_hpa) {
1319			ata_dev_warn(dev,
1320				     "HPA support seems broken, skipping HPA handling\n");
1321			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1322
1323			/* we can continue if device aborted the command */
1324			if (rc == -EACCES)
1325				rc = 0;
1326		}
1327
1328		return rc;
1329	}
1330	dev->n_native_sectors = native_sectors;
1331
1332	/* nothing to do? */
1333	if (native_sectors <= sectors || !unlock_hpa) {
1334		if (!print_info || native_sectors == sectors)
1335			return 0;
1336
1337		if (native_sectors > sectors)
1338			ata_dev_info(dev,
1339				"HPA detected: current %llu, native %llu\n",
1340				(unsigned long long)sectors,
1341				(unsigned long long)native_sectors);
1342		else if (native_sectors < sectors)
1343			ata_dev_warn(dev,
1344				"native sectors (%llu) is smaller than sectors (%llu)\n",
1345				(unsigned long long)native_sectors,
1346				(unsigned long long)sectors);
1347		return 0;
1348	}
1349
1350	/* let's unlock HPA */
1351	rc = ata_set_max_sectors(dev, native_sectors);
1352	if (rc == -EACCES) {
1353		/* if device aborted the command, skip HPA resizing */
1354		ata_dev_warn(dev,
1355			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1356			     (unsigned long long)sectors,
1357			     (unsigned long long)native_sectors);
1358		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1359		return 0;
1360	} else if (rc)
1361		return rc;
1362
1363	/* re-read IDENTIFY data */
1364	rc = ata_dev_reread_id(dev, 0);
1365	if (rc) {
1366		ata_dev_err(dev,
1367			    "failed to re-read IDENTIFY data after HPA resizing\n");
1368		return rc;
1369	}
1370
1371	if (print_info) {
1372		u64 new_sectors = ata_id_n_sectors(dev->id);
1373		ata_dev_info(dev,
1374			"HPA unlocked: %llu -> %llu, native %llu\n",
1375			(unsigned long long)sectors,
1376			(unsigned long long)new_sectors,
1377			(unsigned long long)native_sectors);
1378	}
1379
1380	return 0;
1381}
1382
1383/**
1384 *	ata_dump_id - IDENTIFY DEVICE info debugging output
1385 *	@dev: device from which the information is fetched
1386 *	@id: IDENTIFY DEVICE page to dump
1387 *
1388 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
1389 *	page.
1390 *
1391 *	LOCKING:
1392 *	caller.
1393 */
1394
1395static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
1396{
1397	ata_dev_dbg(dev,
1398		"49==0x%04x  53==0x%04x  63==0x%04x  64==0x%04x  75==0x%04x\n"
1399		"80==0x%04x  81==0x%04x  82==0x%04x  83==0x%04x  84==0x%04x\n"
1400		"88==0x%04x  93==0x%04x\n",
1401		id[49], id[53], id[63], id[64], id[75], id[80],
1402		id[81], id[82], id[83], id[84], id[88], id[93]);
1403}
1404
1405/**
1406 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1407 *	@id: IDENTIFY data to compute xfer mask from
1408 *
1409 *	Compute the xfermask for this device. This is not as trivial
1410 *	as it seems if we must consider early devices correctly.
1411 *
1412 *	FIXME: pre IDE drive timing (do we care ?).
1413 *
1414 *	LOCKING:
1415 *	None.
1416 *
1417 *	RETURNS:
1418 *	Computed xfermask
1419 */
1420unsigned int ata_id_xfermask(const u16 *id)
1421{
1422	unsigned int pio_mask, mwdma_mask, udma_mask;
1423
1424	/* Usual case. Word 53 indicates word 64 is valid */
1425	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1426		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1427		pio_mask <<= 3;
1428		pio_mask |= 0x7;
1429	} else {
1430		/* If word 64 isn't valid then Word 51 high byte holds
1431		 * the PIO timing number for the maximum. Turn it into
1432		 * a mask.
1433		 */
1434		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1435		if (mode < 5)	/* Valid PIO range */
1436			pio_mask = (2 << mode) - 1;
1437		else
1438			pio_mask = 1;
1439
1440		/* But wait.. there's more. Design your standards by
1441		 * committee and you too can get a free iordy field to
1442		 * process. However it is the speeds not the modes that
1443		 * are supported... Note drivers using the timing API
1444		 * will get this right anyway
1445		 */
1446	}
1447
1448	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1449
1450	if (ata_id_is_cfa(id)) {
1451		/*
1452		 *	Process compact flash extended modes
1453		 */
1454		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1455		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1456
1457		if (pio)
1458			pio_mask |= (1 << 5);
1459		if (pio > 1)
1460			pio_mask |= (1 << 6);
1461		if (dma)
1462			mwdma_mask |= (1 << 3);
1463		if (dma > 1)
1464			mwdma_mask |= (1 << 4);
1465	}
1466
1467	udma_mask = 0;
1468	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1469		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1470
1471	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1472}
1473EXPORT_SYMBOL_GPL(ata_id_xfermask);
1474
1475static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1476{
1477	struct completion *waiting = qc->private_data;
1478
1479	complete(waiting);
1480}
1481
1482/**
1483 *	ata_exec_internal_sg - execute libata internal command
1484 *	@dev: Device to which the command is sent
1485 *	@tf: Taskfile registers for the command and the result
1486 *	@cdb: CDB for packet command
1487 *	@dma_dir: Data transfer direction of the command
1488 *	@sgl: sg list for the data buffer of the command
1489 *	@n_elem: Number of sg entries
1490 *	@timeout: Timeout in msecs (0 for default)
1491 *
1492 *	Executes libata internal command with timeout.  @tf contains
1493 *	command on entry and result on return.  Timeout and error
1494 *	conditions are reported via return value.  No recovery action
1495 *	is taken after a command times out.  It's caller's duty to
1496 *	clean up after timeout.
1497 *
1498 *	LOCKING:
1499 *	None.  Should be called with kernel context, might sleep.
1500 *
1501 *	RETURNS:
1502 *	Zero on success, AC_ERR_* mask on failure
1503 */
1504static unsigned ata_exec_internal_sg(struct ata_device *dev,
1505				     struct ata_taskfile *tf, const u8 *cdb,
1506				     int dma_dir, struct scatterlist *sgl,
1507				     unsigned int n_elem, unsigned int timeout)
1508{
1509	struct ata_link *link = dev->link;
1510	struct ata_port *ap = link->ap;
1511	u8 command = tf->command;
1512	int auto_timeout = 0;
1513	struct ata_queued_cmd *qc;
1514	unsigned int preempted_tag;
1515	u32 preempted_sactive;
1516	u64 preempted_qc_active;
1517	int preempted_nr_active_links;
1518	DECLARE_COMPLETION_ONSTACK(wait);
1519	unsigned long flags;
1520	unsigned int err_mask;
1521	int rc;
1522
1523	spin_lock_irqsave(ap->lock, flags);
1524
1525	/* no internal command while frozen */
1526	if (ata_port_is_frozen(ap)) {
1527		spin_unlock_irqrestore(ap->lock, flags);
1528		return AC_ERR_SYSTEM;
1529	}
1530
1531	/* initialize internal qc */
1532	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
1533
1534	qc->tag = ATA_TAG_INTERNAL;
1535	qc->hw_tag = 0;
1536	qc->scsicmd = NULL;
1537	qc->ap = ap;
1538	qc->dev = dev;
1539	ata_qc_reinit(qc);
1540
1541	preempted_tag = link->active_tag;
1542	preempted_sactive = link->sactive;
1543	preempted_qc_active = ap->qc_active;
1544	preempted_nr_active_links = ap->nr_active_links;
1545	link->active_tag = ATA_TAG_POISON;
1546	link->sactive = 0;
1547	ap->qc_active = 0;
1548	ap->nr_active_links = 0;
1549
1550	/* prepare & issue qc */
1551	qc->tf = *tf;
1552	if (cdb)
1553		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1554
1555	/* some SATA bridges need us to indicate data xfer direction */
1556	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1557	    dma_dir == DMA_FROM_DEVICE)
1558		qc->tf.feature |= ATAPI_DMADIR;
1559
1560	qc->flags |= ATA_QCFLAG_RESULT_TF;
1561	qc->dma_dir = dma_dir;
1562	if (dma_dir != DMA_NONE) {
1563		unsigned int i, buflen = 0;
1564		struct scatterlist *sg;
1565
1566		for_each_sg(sgl, sg, n_elem, i)
1567			buflen += sg->length;
1568
1569		ata_sg_init(qc, sgl, n_elem);
1570		qc->nbytes = buflen;
1571	}
1572
1573	qc->private_data = &wait;
1574	qc->complete_fn = ata_qc_complete_internal;
1575
1576	ata_qc_issue(qc);
1577
1578	spin_unlock_irqrestore(ap->lock, flags);
1579
1580	if (!timeout) {
1581		if (ata_probe_timeout)
1582			timeout = ata_probe_timeout * 1000;
1583		else {
1584			timeout = ata_internal_cmd_timeout(dev, command);
1585			auto_timeout = 1;
1586		}
1587	}
1588
1589	ata_eh_release(ap);
1590
1591	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1592
1593	ata_eh_acquire(ap);
1594
1595	ata_sff_flush_pio_task(ap);
1596
1597	if (!rc) {
1598		spin_lock_irqsave(ap->lock, flags);
1599
1600		/* We're racing with irq here.  If we lose, the
1601		 * following test prevents us from completing the qc
1602		 * twice.  If we win, the port is frozen and will be
1603		 * cleaned up by ->post_internal_cmd().
1604		 */
1605		if (qc->flags & ATA_QCFLAG_ACTIVE) {
1606			qc->err_mask |= AC_ERR_TIMEOUT;
1607
1608			ata_port_freeze(ap);
1609
1610			ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
1611				     timeout, command);
1612		}
1613
1614		spin_unlock_irqrestore(ap->lock, flags);
1615	}
1616
1617	/* do post_internal_cmd */
1618	if (ap->ops->post_internal_cmd)
1619		ap->ops->post_internal_cmd(qc);
1620
1621	/* perform minimal error analysis */
1622	if (qc->flags & ATA_QCFLAG_EH) {
1623		if (qc->result_tf.status & (ATA_ERR | ATA_DF))
1624			qc->err_mask |= AC_ERR_DEV;
1625
1626		if (!qc->err_mask)
1627			qc->err_mask |= AC_ERR_OTHER;
1628
1629		if (qc->err_mask & ~AC_ERR_OTHER)
1630			qc->err_mask &= ~AC_ERR_OTHER;
1631	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1632		qc->result_tf.status |= ATA_SENSE;
1633	}
1634
1635	/* finish up */
1636	spin_lock_irqsave(ap->lock, flags);
1637
1638	*tf = qc->result_tf;
1639	err_mask = qc->err_mask;
1640
1641	ata_qc_free(qc);
1642	link->active_tag = preempted_tag;
1643	link->sactive = preempted_sactive;
1644	ap->qc_active = preempted_qc_active;
1645	ap->nr_active_links = preempted_nr_active_links;
1646
1647	spin_unlock_irqrestore(ap->lock, flags);
1648
1649	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1650		ata_internal_cmd_timed_out(dev, command);
1651
1652	return err_mask;
1653}
1654
1655/**
1656 *	ata_exec_internal - execute libata internal command
1657 *	@dev: Device to which the command is sent
1658 *	@tf: Taskfile registers for the command and the result
1659 *	@cdb: CDB for packet command
1660 *	@dma_dir: Data transfer direction of the command
1661 *	@buf: Data buffer of the command
1662 *	@buflen: Length of data buffer
1663 *	@timeout: Timeout in msecs (0 for default)
1664 *
1665 *	Wrapper around ata_exec_internal_sg() which takes simple
1666 *	buffer instead of sg list.
1667 *
1668 *	LOCKING:
1669 *	None.  Should be called with kernel context, might sleep.
1670 *
1671 *	RETURNS:
1672 *	Zero on success, AC_ERR_* mask on failure
1673 */
1674unsigned ata_exec_internal(struct ata_device *dev,
1675			   struct ata_taskfile *tf, const u8 *cdb,
1676			   int dma_dir, void *buf, unsigned int buflen,
1677			   unsigned int timeout)
1678{
1679	struct scatterlist *psg = NULL, sg;
1680	unsigned int n_elem = 0;
1681
1682	if (dma_dir != DMA_NONE) {
1683		WARN_ON(!buf);
1684		sg_init_one(&sg, buf, buflen);
1685		psg = &sg;
1686		n_elem++;
1687	}
1688
1689	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1690				    timeout);
1691}
1692
1693/**
1694 *	ata_pio_need_iordy	-	check if iordy needed
1695 *	@adev: ATA device
1696 *
1697 *	Check if the current speed of the device requires IORDY. Used
1698 *	by various controllers for chip configuration.
1699 */
1700unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1701{
1702	/* Don't set IORDY if we're preparing for reset.  IORDY may
1703	 * lead to controller lock up on certain controllers if the
1704	 * port is not occupied.  See bko#11703 for details.
1705	 */
1706	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1707		return 0;
1708	/* Controller doesn't support IORDY.  Probably a pointless
1709	 * check as the caller should know this.
1710	 */
1711	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1712		return 0;
1713	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
1714	if (ata_id_is_cfa(adev->id)
1715	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1716		return 0;
1717	/* PIO3 and higher it is mandatory */
1718	if (adev->pio_mode > XFER_PIO_2)
1719		return 1;
1720	/* We turn it on when possible */
1721	if (ata_id_has_iordy(adev->id))
1722		return 1;
1723	return 0;
1724}
1725EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
1726
1727/**
1728 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
1729 *	@adev: ATA device
1730 *
1731 *	Compute the highest mode possible if we are not using iordy. Return
1732 *	-1 if no iordy mode is available.
1733 */
1734static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1735{
1736	/* If we have no drive specific rule, then PIO 2 is non IORDY */
1737	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
1738		u16 pio = adev->id[ATA_ID_EIDE_PIO];
1739		/* Is the speed faster than the drive allows non IORDY ? */
1740		if (pio) {
1741			/* This is cycle times not frequency - watch the logic! */
1742			if (pio > 240)	/* PIO2 is 240nS per cycle */
1743				return 3 << ATA_SHIFT_PIO;
1744			return 7 << ATA_SHIFT_PIO;
1745		}
1746	}
1747	return 3 << ATA_SHIFT_PIO;
1748}
1749
1750/**
1751 *	ata_do_dev_read_id		-	default ID read method
1752 *	@dev: device
1753 *	@tf: proposed taskfile
1754 *	@id: data buffer
1755 *
1756 *	Issue the identify taskfile and hand back the buffer containing
1757 *	identify data. For some RAID controllers and for pre ATA devices
1758 *	this function is wrapped or replaced by the driver
1759 */
1760unsigned int ata_do_dev_read_id(struct ata_device *dev,
1761				struct ata_taskfile *tf, __le16 *id)
1762{
1763	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1764				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1765}
1766EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1767
1768/**
1769 *	ata_dev_read_id - Read ID data from the specified device
1770 *	@dev: target device
1771 *	@p_class: pointer to class of the target device (may be changed)
1772 *	@flags: ATA_READID_* flags
1773 *	@id: buffer to read IDENTIFY data into
1774 *
1775 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
1776 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1777 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
1778 *	for pre-ATA4 drives.
1779 *
1780 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1781 *	now we abort if we hit that case.
1782 *
1783 *	LOCKING:
1784 *	Kernel thread context (may sleep)
1785 *
1786 *	RETURNS:
1787 *	0 on success, -errno otherwise.
1788 */
1789int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1790		    unsigned int flags, u16 *id)
1791{
1792	struct ata_port *ap = dev->link->ap;
1793	unsigned int class = *p_class;
1794	struct ata_taskfile tf;
1795	unsigned int err_mask = 0;
1796	const char *reason;
1797	bool is_semb = class == ATA_DEV_SEMB;
1798	int may_fallback = 1, tried_spinup = 0;
1799	int rc;
1800
1801retry:
1802	ata_tf_init(dev, &tf);
1803
1804	switch (class) {
1805	case ATA_DEV_SEMB:
1806		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
1807		fallthrough;
1808	case ATA_DEV_ATA:
1809	case ATA_DEV_ZAC:
1810		tf.command = ATA_CMD_ID_ATA;
1811		break;
1812	case ATA_DEV_ATAPI:
1813		tf.command = ATA_CMD_ID_ATAPI;
1814		break;
1815	default:
1816		rc = -ENODEV;
1817		reason = "unsupported class";
1818		goto err_out;
1819	}
1820
1821	tf.protocol = ATA_PROT_PIO;
1822
1823	/* Some devices choke if TF registers contain garbage.  Make
1824	 * sure those are properly initialized.
1825	 */
1826	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1827
1828	/* Device presence detection is unreliable on some
1829	 * controllers.  Always poll IDENTIFY if available.
1830	 */
1831	tf.flags |= ATA_TFLAG_POLLING;
1832
1833	if (ap->ops->read_id)
1834		err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
1835	else
1836		err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);
1837
1838	if (err_mask) {
1839		if (err_mask & AC_ERR_NODEV_HINT) {
1840			ata_dev_dbg(dev, "NODEV after polling detection\n");
1841			return -ENOENT;
1842		}
1843
1844		if (is_semb) {
1845			ata_dev_info(dev,
1846		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1847			/* SEMB is not supported yet */
1848			*p_class = ATA_DEV_SEMB_UNSUP;
1849			return 0;
1850		}
1851
1852		if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
1853			/* Device or controller might have reported
1854			 * the wrong device class.  Give a shot at the
1855			 * other IDENTIFY if the current one is
1856			 * aborted by the device.
1857			 */
1858			if (may_fallback) {
1859				may_fallback = 0;
1860
1861				if (class == ATA_DEV_ATA)
1862					class = ATA_DEV_ATAPI;
1863				else
1864					class = ATA_DEV_ATA;
1865				goto retry;
1866			}
1867
1868			/* Control reaches here iff the device aborted
1869			 * both flavors of IDENTIFYs which happens
1870			 * sometimes with phantom devices.
1871			 */
1872			ata_dev_dbg(dev,
1873				    "both IDENTIFYs aborted, assuming NODEV\n");
1874			return -ENOENT;
1875		}
1876
1877		rc = -EIO;
1878		reason = "I/O error";
1879		goto err_out;
1880	}
1881
1882	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1883		ata_dev_info(dev, "dumping IDENTIFY data, "
1884			    "class=%d may_fallback=%d tried_spinup=%d\n",
1885			    class, may_fallback, tried_spinup);
1886		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
1887			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1888	}
1889
1890	/* Falling back doesn't make sense if ID data was read
1891	 * successfully at least once.
1892	 */
1893	may_fallback = 0;
1894
1895	swap_buf_le16(id, ATA_ID_WORDS);
1896
1897	/* sanity check */
1898	rc = -EINVAL;
1899	reason = "device reports invalid type";
1900
1901	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1902		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1903			goto err_out;
1904		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1905							ata_id_is_ata(id)) {
1906			ata_dev_dbg(dev,
1907				"host indicates ignore ATA devices, ignored\n");
1908			return -ENOENT;
1909		}
1910	} else {
1911		if (ata_id_is_ata(id))
1912			goto err_out;
1913	}
1914
1915	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1916		tried_spinup = 1;
1917		/*
1918		 * Drive powered-up in standby mode, and requires a specific
1919		 * SET_FEATURES spin-up subcommand before it will accept
1920		 * anything other than the original IDENTIFY command.
1921		 */
1922		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1923		if (err_mask && id[2] != 0x738c) {
1924			rc = -EIO;
1925			reason = "SPINUP failed";
1926			goto err_out;
1927		}
1928		/*
1929		 * If the drive initially returned incomplete IDENTIFY info,
1930		 * we now must reissue the IDENTIFY command.
1931		 */
1932		if (id[2] == 0x37c8)
1933			goto retry;
1934	}
1935
1936	if ((flags & ATA_READID_POSTRESET) &&
1937	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
1938		/*
1939		 * The exact sequence expected by certain pre-ATA4 drives is:
1940		 * SRST RESET
1941		 * IDENTIFY (optional in early ATA)
1942		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1943		 * anything else..
1944		 * Some drives were very specific about that exact sequence.
1945		 *
1946		 * Note that ATA4 says lba is mandatory so the second check
1947		 * should never trigger.
1948		 */
1949		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1950			err_mask = ata_dev_init_params(dev, id[3], id[6]);
1951			if (err_mask) {
1952				rc = -EIO;
1953				reason = "INIT_DEV_PARAMS failed";
1954				goto err_out;
1955			}
1956
1957			/* current CHS translation info (id[53-58]) might be
1958			 * changed. reread the identify device info.
1959			 */
1960			flags &= ~ATA_READID_POSTRESET;
1961			goto retry;
1962		}
1963	}
1964
1965	*p_class = class;
1966
1967	return 0;
1968
1969 err_out:
1970	ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
1971		     reason, err_mask);
1972	return rc;
1973}
1974
1975bool ata_dev_power_init_tf(struct ata_device *dev, struct ata_taskfile *tf,
1976			   bool set_active)
1977{
1978	/* Only applies to ATA and ZAC devices */
1979	if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
1980		return false;
1981
1982	ata_tf_init(dev, tf);
1983	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1984	tf->protocol = ATA_PROT_NODATA;
1985
1986	if (set_active) {
1987		/* VERIFY for 1 sector at lba=0 */
1988		tf->command = ATA_CMD_VERIFY;
1989		tf->nsect = 1;
1990		if (dev->flags & ATA_DFLAG_LBA) {
1991			tf->flags |= ATA_TFLAG_LBA;
1992			tf->device |= ATA_LBA;
1993		} else {
1994			/* CHS */
1995			tf->lbal = 0x1; /* sect */
1996		}
1997	} else {
1998		tf->command = ATA_CMD_STANDBYNOW1;
1999	}
2000
2001	return true;
2002}
2003
2004static bool ata_dev_power_is_active(struct ata_device *dev)
2005{
2006	struct ata_taskfile tf;
2007	unsigned int err_mask;
2008
2009	ata_tf_init(dev, &tf);
2010	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2011	tf.protocol = ATA_PROT_NODATA;
2012	tf.command = ATA_CMD_CHK_POWER;
2013
2014	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2015	if (err_mask) {
2016		ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
2017			    err_mask);
2018		/*
2019		 * Assume we are in standby mode so that we always force a
2020		 * spinup in ata_dev_power_set_active().
2021		 */
2022		return false;
2023	}
2024
2025	ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);
2026
2027	/* Active or idle */
2028	return tf.nsect == 0xff;
2029}
2030
2031/**
2032 *	ata_dev_power_set_standby - Set a device power mode to standby
2033 *	@dev: target device
2034 *
2035 *	Issue a STANDBY IMMEDIATE command to set a device power mode to standby.
2036 *	For an HDD device, this spins down the disks.
2037 *
2038 *	LOCKING:
2039 *	Kernel thread context (may sleep).
2040 */
2041void ata_dev_power_set_standby(struct ata_device *dev)
2042{
2043	unsigned long ap_flags = dev->link->ap->flags;
2044	struct ata_taskfile tf;
2045	unsigned int err_mask;
2046
2047	/* If the device is already sleeping or in standby, do nothing. */
2048	if ((dev->flags & ATA_DFLAG_SLEEPING) ||
2049	    !ata_dev_power_is_active(dev))
2050		return;
2051
2052	/*
2053	 * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
2054	 * causing some drives to spin up and down again. For these, do nothing
2055	 * if we are being called on shutdown.
2056	 */
2057	if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
2058	    system_state == SYSTEM_POWER_OFF)
2059		return;
2060
2061	if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
2062	    system_entering_hibernation())
2063		return;
2064
2065	/* Issue STANDBY IMMEDIATE command only if supported by the device */
2066	if (!ata_dev_power_init_tf(dev, &tf, false))
2067		return;
2068
2069	ata_dev_notice(dev, "Entering standby power mode\n");
2070
2071	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2072	if (err_mask)
2073		ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
2074			    err_mask);
2075}
2076
2077/**
2078 *	ata_dev_power_set_active -  Set a device power mode to active
2079 *	@dev: target device
2080 *
2081 *	Issue a VERIFY command to enter to ensure that the device is in the
2082 *	active power mode. For a spun-down HDD (standby or idle power mode),
2083 *	the VERIFY command will complete after the disk spins up.
2084 *
2085 *	LOCKING:
2086 *	Kernel thread context (may sleep).
2087 */
2088void ata_dev_power_set_active(struct ata_device *dev)
2089{
2090	struct ata_taskfile tf;
2091	unsigned int err_mask;
2092
2093	/*
2094	 * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
2095	 * if supported by the device.
2096	 */
2097	if (!ata_dev_power_init_tf(dev, &tf, true))
2098		return;
2099
2100	/*
2101	 * Check the device power state & condition and force a spinup with
2102	 * VERIFY command only if the drive is not already ACTIVE or IDLE.
2103	 */
2104	if (ata_dev_power_is_active(dev))
2105		return;
2106
2107	ata_dev_notice(dev, "Entering active power mode\n");
2108
2109	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
2110	if (err_mask)
2111		ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n",
2112			    err_mask);
2113}
2114
2115/**
2116 *	ata_read_log_page - read a specific log page
2117 *	@dev: target device
2118 *	@log: log to read
2119 *	@page: page to read
2120 *	@buf: buffer to store read page
2121 *	@sectors: number of sectors to read
2122 *
2123 *	Read log page using READ_LOG_EXT command.
2124 *
2125 *	LOCKING:
2126 *	Kernel thread context (may sleep).
2127 *
2128 *	RETURNS:
2129 *	0 on success, AC_ERR_* mask otherwise.
2130 */
2131unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
2132			       u8 page, void *buf, unsigned int sectors)
2133{
2134	unsigned long ap_flags = dev->link->ap->flags;
2135	struct ata_taskfile tf;
2136	unsigned int err_mask;
2137	bool dma = false;
2138
2139	ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);
2140
2141	/*
2142	 * Return error without actually issuing the command on controllers
2143	 * which e.g. lockup on a read log page.
2144	 */
2145	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
2146		return AC_ERR_DEV;
2147
2148retry:
2149	ata_tf_init(dev, &tf);
2150	if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
2151	    !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
2152		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
2153		tf.protocol = ATA_PROT_DMA;
2154		dma = true;
2155	} else {
2156		tf.command = ATA_CMD_READ_LOG_EXT;
2157		tf.protocol = ATA_PROT_PIO;
2158		dma = false;
2159	}
2160	tf.lbal = log;
2161	tf.lbam = page;
2162	tf.nsect = sectors;
2163	tf.hob_nsect = sectors >> 8;
2164	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
2165
2166	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2167				     buf, sectors * ATA_SECT_SIZE, 0);
2168
2169	if (err_mask) {
2170		if (dma) {
2171			dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
2172			if (!ata_port_is_frozen(dev->link->ap))
2173				goto retry;
2174		}
2175		ata_dev_err(dev,
2176			    "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
2177			    (unsigned int)log, (unsigned int)page, err_mask);
2178	}
2179
2180	return err_mask;
2181}
2182
2183static int ata_log_supported(struct ata_device *dev, u8 log)
2184{
2185	struct ata_port *ap = dev->link->ap;
2186
2187	if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
2188		return 0;
2189
2190	if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2191		return 0;
2192	return get_unaligned_le16(&ap->sector_buf[log * 2]);
2193}
2194
2195static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2196{
2197	struct ata_port *ap = dev->link->ap;
2198	unsigned int err, i;
2199
2200	if (dev->horkage & ATA_HORKAGE_NO_ID_DEV_LOG)
2201		return false;
2202
2203	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2204		/*
2205		 * IDENTIFY DEVICE data log is defined as mandatory starting
2206		 * with ACS-3 (ATA version 10). Warn about the missing log
2207		 * for drives which implement this ATA level or above.
2208		 */
2209		if (ata_id_major_version(dev->id) >= 10)
2210			ata_dev_warn(dev,
2211				"ATA Identify Device Log not supported\n");
2212		dev->horkage |= ATA_HORKAGE_NO_ID_DEV_LOG;
2213		return false;
2214	}
2215
2216	/*
2217	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2218	 * supported.
2219	 */
2220	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2221				1);
2222	if (err)
2223		return false;
2224
2225	for (i = 0; i < ap->sector_buf[8]; i++) {
2226		if (ap->sector_buf[9 + i] == page)
2227			return true;
2228	}
2229
2230	return false;
2231}
2232
2233static int ata_do_link_spd_horkage(struct ata_device *dev)
2234{
2235	struct ata_link *plink = ata_dev_phys_link(dev);
2236	u32 target, target_limit;
2237
2238	if (!sata_scr_valid(plink))
2239		return 0;
2240
2241	if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2242		target = 1;
2243	else
2244		return 0;
2245
2246	target_limit = (1 << target) - 1;
2247
2248	/* if already on stricter limit, no need to push further */
2249	if (plink->sata_spd_limit <= target_limit)
2250		return 0;
2251
2252	plink->sata_spd_limit = target_limit;
2253
2254	/* Request another EH round by returning -EAGAIN if link is
2255	 * going faster than the target speed.  Forward progress is
2256	 * guaranteed by setting sata_spd_limit to target_limit above.
2257	 */
2258	if (plink->sata_spd > target) {
2259		ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2260			     sata_spd_string(target));
2261		return -EAGAIN;
2262	}
2263	return 0;
2264}
2265
2266static inline u8 ata_dev_knobble(struct ata_device *dev)
2267{
2268	struct ata_port *ap = dev->link->ap;
2269
2270	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2271		return 0;
2272
2273	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2274}
2275
2276static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2277{
2278	struct ata_port *ap = dev->link->ap;
2279	unsigned int err_mask;
2280
2281	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2282		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2283		return;
2284	}
2285	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2286				     0, ap->sector_buf, 1);
2287	if (!err_mask) {
2288		u8 *cmds = dev->ncq_send_recv_cmds;
2289
2290		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2291		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2292
2293		if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2294			ata_dev_dbg(dev, "disabling queued TRIM support\n");
2295			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2296				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
2297		}
2298	}
2299}
2300
2301static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2302{
2303	struct ata_port *ap = dev->link->ap;
2304	unsigned int err_mask;
2305
2306	if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2307		ata_dev_warn(dev,
2308			     "NCQ Send/Recv Log not supported\n");
2309		return;
2310	}
2311	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2312				     0, ap->sector_buf, 1);
2313	if (!err_mask) {
2314		u8 *cmds = dev->ncq_non_data_cmds;
2315
2316		memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2317	}
2318}
2319
2320static void ata_dev_config_ncq_prio(struct ata_device *dev)
2321{
2322	struct ata_port *ap = dev->link->ap;
2323	unsigned int err_mask;
2324
2325	if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2326		return;
2327
2328	err_mask = ata_read_log_page(dev,
2329				     ATA_LOG_IDENTIFY_DEVICE,
2330				     ATA_LOG_SATA_SETTINGS,
2331				     ap->sector_buf,
2332				     1);
2333	if (err_mask)
2334		goto not_supported;
2335
2336	if (!(ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
2337		goto not_supported;
2338
2339	dev->flags |= ATA_DFLAG_NCQ_PRIO;
2340
2341	return;
2342
2343not_supported:
2344	dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
2345	dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2346}
2347
2348static bool ata_dev_check_adapter(struct ata_device *dev,
2349				  unsigned short vendor_id)
2350{
2351	struct pci_dev *pcidev = NULL;
2352	struct device *parent_dev = NULL;
2353
2354	for (parent_dev = dev->tdev.parent; parent_dev != NULL;
2355	     parent_dev = parent_dev->parent) {
2356		if (dev_is_pci(parent_dev)) {
2357			pcidev = to_pci_dev(parent_dev);
2358			if (pcidev->vendor == vendor_id)
2359				return true;
2360			break;
2361		}
2362	}
2363
2364	return false;
2365}
2366
2367static int ata_dev_config_ncq(struct ata_device *dev,
2368			       char *desc, size_t desc_sz)
2369{
2370	struct ata_port *ap = dev->link->ap;
2371	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2372	unsigned int err_mask;
2373	char *aa_desc = "";
2374
2375	if (!ata_id_has_ncq(dev->id)) {
2376		desc[0] = '\0';
2377		return 0;
2378	}
2379	if (!IS_ENABLED(CONFIG_SATA_HOST))
2380		return 0;
2381	if (dev->horkage & ATA_HORKAGE_NONCQ) {
2382		snprintf(desc, desc_sz, "NCQ (not used)");
2383		return 0;
2384	}
2385
2386	if (dev->horkage & ATA_HORKAGE_NO_NCQ_ON_ATI &&
2387	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
2388		snprintf(desc, desc_sz, "NCQ (not used)");
2389		return 0;
2390	}
2391
2392	if (ap->flags & ATA_FLAG_NCQ) {
2393		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
2394		dev->flags |= ATA_DFLAG_NCQ;
2395	}
2396
2397	if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2398		(ap->flags & ATA_FLAG_FPDMA_AA) &&
2399		ata_id_has_fpdma_aa(dev->id)) {
2400		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2401			SATA_FPDMA_AA);
2402		if (err_mask) {
2403			ata_dev_err(dev,
2404				    "failed to enable AA (error_mask=0x%x)\n",
2405				    err_mask);
2406			if (err_mask != AC_ERR_DEV) {
2407				dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2408				return -EIO;
2409			}
2410		} else
2411			aa_desc = ", AA";
2412	}
2413
2414	if (hdepth >= ddepth)
2415		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2416	else
2417		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2418			ddepth, aa_desc);
2419
2420	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2421		if (ata_id_has_ncq_send_and_recv(dev->id))
2422			ata_dev_config_ncq_send_recv(dev);
2423		if (ata_id_has_ncq_non_data(dev->id))
2424			ata_dev_config_ncq_non_data(dev);
2425		if (ata_id_has_ncq_prio(dev->id))
2426			ata_dev_config_ncq_prio(dev);
2427	}
2428
2429	return 0;
2430}
2431
2432static void ata_dev_config_sense_reporting(struct ata_device *dev)
2433{
2434	unsigned int err_mask;
2435
2436	if (!ata_id_has_sense_reporting(dev->id))
2437		return;
2438
2439	if (ata_id_sense_reporting_enabled(dev->id))
2440		return;
2441
2442	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2443	if (err_mask) {
2444		ata_dev_dbg(dev,
2445			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
2446			    err_mask);
2447	}
2448}
2449
2450static void ata_dev_config_zac(struct ata_device *dev)
2451{
2452	struct ata_port *ap = dev->link->ap;
2453	unsigned int err_mask;
2454	u8 *identify_buf = ap->sector_buf;
2455
2456	dev->zac_zones_optimal_open = U32_MAX;
2457	dev->zac_zones_optimal_nonseq = U32_MAX;
2458	dev->zac_zones_max_open = U32_MAX;
2459
2460	/*
2461	 * Always set the 'ZAC' flag for Host-managed devices.
2462	 */
2463	if (dev->class == ATA_DEV_ZAC)
2464		dev->flags |= ATA_DFLAG_ZAC;
2465	else if (ata_id_zoned_cap(dev->id) == 0x01)
2466		/*
2467		 * Check for host-aware devices.
2468		 */
2469		dev->flags |= ATA_DFLAG_ZAC;
2470
2471	if (!(dev->flags & ATA_DFLAG_ZAC))
2472		return;
2473
2474	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
2475		ata_dev_warn(dev,
2476			     "ATA Zoned Information Log not supported\n");
2477		return;
2478	}
2479
2480	/*
2481	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2482	 */
2483	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2484				     ATA_LOG_ZONED_INFORMATION,
2485				     identify_buf, 1);
2486	if (!err_mask) {
2487		u64 zoned_cap, opt_open, opt_nonseq, max_open;
2488
2489		zoned_cap = get_unaligned_le64(&identify_buf[8]);
2490		if ((zoned_cap >> 63))
2491			dev->zac_zoned_cap = (zoned_cap & 1);
2492		opt_open = get_unaligned_le64(&identify_buf[24]);
2493		if ((opt_open >> 63))
2494			dev->zac_zones_optimal_open = (u32)opt_open;
2495		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2496		if ((opt_nonseq >> 63))
2497			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2498		max_open = get_unaligned_le64(&identify_buf[40]);
2499		if ((max_open >> 63))
2500			dev->zac_zones_max_open = (u32)max_open;
2501	}
2502}
2503
2504static void ata_dev_config_trusted(struct ata_device *dev)
2505{
2506	struct ata_port *ap = dev->link->ap;
2507	u64 trusted_cap;
2508	unsigned int err;
2509
2510	if (!ata_id_has_trusted(dev->id))
2511		return;
2512
2513	if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2514		ata_dev_warn(dev,
2515			     "Security Log not supported\n");
2516		return;
2517	}
2518
2519	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2520			ap->sector_buf, 1);
2521	if (err)
2522		return;
2523
2524	trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2525	if (!(trusted_cap & (1ULL << 63))) {
2526		ata_dev_dbg(dev,
2527			    "Trusted Computing capability qword not valid!\n");
2528		return;
2529	}
2530
2531	if (trusted_cap & (1 << 0))
2532		dev->flags |= ATA_DFLAG_TRUSTED;
2533}
2534
2535static void ata_dev_config_cdl(struct ata_device *dev)
2536{
2537	struct ata_port *ap = dev->link->ap;
2538	unsigned int err_mask;
2539	bool cdl_enabled;
2540	u64 val;
2541
2542	if (ata_id_major_version(dev->id) < 11)
2543		goto not_supported;
2544
2545	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
2546	    !ata_identify_page_supported(dev, ATA_LOG_SUPPORTED_CAPABILITIES) ||
2547	    !ata_identify_page_supported(dev, ATA_LOG_CURRENT_SETTINGS))
2548		goto not_supported;
2549
2550	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2551				     ATA_LOG_SUPPORTED_CAPABILITIES,
2552				     ap->sector_buf, 1);
2553	if (err_mask)
2554		goto not_supported;
2555
2556	/* Check Command Duration Limit Supported bits */
2557	val = get_unaligned_le64(&ap->sector_buf[168]);
2558	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(0)))
2559		goto not_supported;
2560
2561	/* Warn the user if command duration guideline is not supported */
2562	if (!(val & BIT_ULL(1)))
2563		ata_dev_warn(dev,
2564			"Command duration guideline is not supported\n");
2565
2566	/*
2567	 * We must have support for the sense data for successful NCQ commands
2568	 * log indicated by the successful NCQ command sense data supported bit.
2569	 */
2570	val = get_unaligned_le64(&ap->sector_buf[8]);
2571	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(47))) {
2572		ata_dev_warn(dev,
2573			"CDL supported but Successful NCQ Command Sense Data is not supported\n");
2574		goto not_supported;
2575	}
2576
2577	/* Without NCQ autosense, the successful NCQ commands log is useless. */
2578	if (!ata_id_has_ncq_autosense(dev->id)) {
2579		ata_dev_warn(dev,
2580			"CDL supported but NCQ autosense is not supported\n");
2581		goto not_supported;
2582	}
2583
2584	/*
2585	 * If CDL is marked as enabled, make sure the feature is enabled too.
2586	 * Conversely, if CDL is disabled, make sure the feature is turned off.
2587	 */
2588	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2589				     ATA_LOG_CURRENT_SETTINGS,
2590				     ap->sector_buf, 1);
2591	if (err_mask)
2592		goto not_supported;
2593
2594	val = get_unaligned_le64(&ap->sector_buf[8]);
2595	cdl_enabled = val & BIT_ULL(63) && val & BIT_ULL(21);
2596	if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
2597		if (!cdl_enabled) {
2598			/* Enable CDL on the device */
2599			err_mask = ata_dev_set_feature(dev, SETFEATURES_CDL, 1);
2600			if (err_mask) {
2601				ata_dev_err(dev,
2602					    "Enable CDL feature failed\n");
2603				goto not_supported;
2604			}
2605		}
2606	} else {
2607		if (cdl_enabled) {
2608			/* Disable CDL on the device */
2609			err_mask = ata_dev_set_feature(dev, SETFEATURES_CDL, 0);
2610			if (err_mask) {
2611				ata_dev_err(dev,
2612					    "Disable CDL feature failed\n");
2613				goto not_supported;
2614			}
2615		}
2616	}
2617
2618	/*
2619	 * While CDL itself has to be enabled using sysfs, CDL requires that
2620	 * sense data for successful NCQ commands is enabled to work properly.
2621	 * Just like ata_dev_config_sense_reporting(), enable it unconditionally
2622	 * if supported.
2623	 */
2624	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(18))) {
2625		err_mask = ata_dev_set_feature(dev,
2626					SETFEATURE_SENSE_DATA_SUCC_NCQ, 0x1);
2627		if (err_mask) {
2628			ata_dev_warn(dev,
2629				     "failed to enable Sense Data for successful NCQ commands, Emask 0x%x\n",
2630				     err_mask);
2631			goto not_supported;
2632		}
2633	}
2634
2635	/*
2636	 * Allocate a buffer to handle reading the sense data for successful
2637	 * NCQ Commands log page for commands using a CDL with one of the limit
2638	 * policy set to 0xD (successful completion with sense data available
2639	 * bit set).
2640	 */
2641	if (!ap->ncq_sense_buf) {
2642		ap->ncq_sense_buf = kmalloc(ATA_LOG_SENSE_NCQ_SIZE, GFP_KERNEL);
2643		if (!ap->ncq_sense_buf)
2644			goto not_supported;
2645	}
2646
2647	/*
2648	 * Command duration limits is supported: cache the CDL log page 18h
2649	 * (command duration descriptors).
2650	 */
2651	err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, ap->sector_buf, 1);
2652	if (err_mask) {
2653		ata_dev_warn(dev, "Read Command Duration Limits log failed\n");
2654		goto not_supported;
2655	}
2656
2657	memcpy(dev->cdl, ap->sector_buf, ATA_LOG_CDL_SIZE);
2658	dev->flags |= ATA_DFLAG_CDL;
2659
2660	return;
2661
2662not_supported:
2663	dev->flags &= ~(ATA_DFLAG_CDL | ATA_DFLAG_CDL_ENABLED);
2664	kfree(ap->ncq_sense_buf);
2665	ap->ncq_sense_buf = NULL;
2666}
2667
2668static int ata_dev_config_lba(struct ata_device *dev)
2669{
2670	const u16 *id = dev->id;
2671	const char *lba_desc;
2672	char ncq_desc[32];
2673	int ret;
2674
2675	dev->flags |= ATA_DFLAG_LBA;
2676
2677	if (ata_id_has_lba48(id)) {
2678		lba_desc = "LBA48";
2679		dev->flags |= ATA_DFLAG_LBA48;
2680		if (dev->n_sectors >= (1UL << 28) &&
2681		    ata_id_has_flush_ext(id))
2682			dev->flags |= ATA_DFLAG_FLUSH_EXT;
2683	} else {
2684		lba_desc = "LBA";
2685	}
2686
2687	/* config NCQ */
2688	ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2689
2690	/* print device info to dmesg */
2691	if (ata_dev_print_info(dev))
2692		ata_dev_info(dev,
2693			     "%llu sectors, multi %u: %s %s\n",
2694			     (unsigned long long)dev->n_sectors,
2695			     dev->multi_count, lba_desc, ncq_desc);
2696
2697	return ret;
2698}
2699
2700static void ata_dev_config_chs(struct ata_device *dev)
2701{
2702	const u16 *id = dev->id;
2703
2704	if (ata_id_current_chs_valid(id)) {
2705		/* Current CHS translation is valid. */
2706		dev->cylinders = id[54];
2707		dev->heads     = id[55];
2708		dev->sectors   = id[56];
2709	} else {
2710		/* Default translation */
2711		dev->cylinders	= id[1];
2712		dev->heads	= id[3];
2713		dev->sectors	= id[6];
2714	}
2715
2716	/* print device info to dmesg */
2717	if (ata_dev_print_info(dev))
2718		ata_dev_info(dev,
2719			     "%llu sectors, multi %u, CHS %u/%u/%u\n",
2720			     (unsigned long long)dev->n_sectors,
2721			     dev->multi_count, dev->cylinders,
2722			     dev->heads, dev->sectors);
2723}
2724
2725static void ata_dev_config_fua(struct ata_device *dev)
2726{
2727	/* Ignore FUA support if its use is disabled globally */
2728	if (!libata_fua)
2729		goto nofua;
2730
2731	/* Ignore devices without support for WRITE DMA FUA EXT */
2732	if (!(dev->flags & ATA_DFLAG_LBA48) || !ata_id_has_fua(dev->id))
2733		goto nofua;
2734
2735	/* Ignore known bad devices and devices that lack NCQ support */
2736	if (!ata_ncq_supported(dev) || (dev->horkage & ATA_HORKAGE_NO_FUA))
2737		goto nofua;
2738
2739	dev->flags |= ATA_DFLAG_FUA;
2740
2741	return;
2742
2743nofua:
2744	dev->flags &= ~ATA_DFLAG_FUA;
2745}
2746
2747static void ata_dev_config_devslp(struct ata_device *dev)
2748{
2749	u8 *sata_setting = dev->link->ap->sector_buf;
2750	unsigned int err_mask;
2751	int i, j;
2752
2753	/*
2754	 * Check device sleep capability. Get DevSlp timing variables
2755	 * from SATA Settings page of Identify Device Data Log.
2756	 */
2757	if (!ata_id_has_devslp(dev->id) ||
2758	    !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
2759		return;
2760
2761	err_mask = ata_read_log_page(dev,
2762				     ATA_LOG_IDENTIFY_DEVICE,
2763				     ATA_LOG_SATA_SETTINGS,
2764				     sata_setting, 1);
2765	if (err_mask)
2766		return;
2767
2768	dev->flags |= ATA_DFLAG_DEVSLP;
2769	for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2770		j = ATA_LOG_DEVSLP_OFFSET + i;
2771		dev->devslp_timing[i] = sata_setting[j];
2772	}
2773}
2774
2775static void ata_dev_config_cpr(struct ata_device *dev)
2776{
2777	unsigned int err_mask;
2778	size_t buf_len;
2779	int i, nr_cpr = 0;
2780	struct ata_cpr_log *cpr_log = NULL;
2781	u8 *desc, *buf = NULL;
2782
2783	if (ata_id_major_version(dev->id) < 11)
2784		goto out;
2785
2786	buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
2787	if (buf_len == 0)
2788		goto out;
2789
2790	/*
2791	 * Read the concurrent positioning ranges log (0x47). We can have at
2792	 * most 255 32B range descriptors plus a 64B header. This log varies in
2793	 * size, so use the size reported in the GPL directory. Reading beyond
2794	 * the supported length will result in an error.
2795	 */
2796	buf_len <<= 9;
2797	buf = kzalloc(buf_len, GFP_KERNEL);
2798	if (!buf)
2799		goto out;
2800
2801	err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
2802				     0, buf, buf_len >> 9);
2803	if (err_mask)
2804		goto out;
2805
2806	nr_cpr = buf[0];
2807	if (!nr_cpr)
2808		goto out;
2809
2810	cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
2811	if (!cpr_log)
2812		goto out;
2813
2814	cpr_log->nr_cpr = nr_cpr;
2815	desc = &buf[64];
2816	for (i = 0; i < nr_cpr; i++, desc += 32) {
2817		cpr_log->cpr[i].num = desc[0];
2818		cpr_log->cpr[i].num_storage_elements = desc[1];
2819		cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
2820		cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
2821	}
2822
2823out:
2824	swap(dev->cpr_log, cpr_log);
2825	kfree(cpr_log);
2826	kfree(buf);
2827}
2828
2829static void ata_dev_print_features(struct ata_device *dev)
2830{
2831	if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
2832		return;
2833
2834	ata_dev_info(dev,
2835		     "Features:%s%s%s%s%s%s%s%s\n",
2836		     dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
2837		     dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
2838		     dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
2839		     dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
2840		     dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
2841		     dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
2842		     dev->flags & ATA_DFLAG_CDL ? " CDL" : "",
2843		     dev->cpr_log ? " CPR" : "");
2844}
2845
2846/**
2847 *	ata_dev_configure - Configure the specified ATA/ATAPI device
2848 *	@dev: Target device to configure
2849 *
2850 *	Configure @dev according to @dev->id.  Generic and low-level
2851 *	driver specific fixups are also applied.
2852 *
2853 *	LOCKING:
2854 *	Kernel thread context (may sleep)
2855 *
2856 *	RETURNS:
2857 *	0 on success, -errno otherwise
2858 */
2859int ata_dev_configure(struct ata_device *dev)
2860{
2861	struct ata_port *ap = dev->link->ap;
2862	bool print_info = ata_dev_print_info(dev);
2863	const u16 *id = dev->id;
2864	unsigned int xfer_mask;
2865	unsigned int err_mask;
2866	char revbuf[7];		/* XYZ-99\0 */
2867	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2868	char modelbuf[ATA_ID_PROD_LEN+1];
2869	int rc;
2870
2871	if (!ata_dev_enabled(dev)) {
2872		ata_dev_dbg(dev, "no device\n");
2873		return 0;
2874	}
2875
2876	/* set horkage */
2877	dev->horkage |= ata_dev_blacklisted(dev);
2878	ata_force_horkage(dev);
2879
2880	if (dev->horkage & ATA_HORKAGE_DISABLE) {
2881		ata_dev_info(dev, "unsupported device, disabling\n");
2882		ata_dev_disable(dev);
2883		return 0;
2884	}
2885
2886	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2887	    dev->class == ATA_DEV_ATAPI) {
2888		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2889			     atapi_enabled ? "not supported with this driver"
2890			     : "disabled");
2891		ata_dev_disable(dev);
2892		return 0;
2893	}
2894
2895	rc = ata_do_link_spd_horkage(dev);
2896	if (rc)
2897		return rc;
2898
2899	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2900	if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2901	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2902		dev->horkage |= ATA_HORKAGE_NOLPM;
2903
2904	if (ap->flags & ATA_FLAG_NO_LPM)
2905		dev->horkage |= ATA_HORKAGE_NOLPM;
2906
2907	if (dev->horkage & ATA_HORKAGE_NOLPM) {
2908		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2909		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2910	}
2911
2912	/* let ACPI work its magic */
2913	rc = ata_acpi_on_devcfg(dev);
2914	if (rc)
2915		return rc;
2916
2917	/* massage HPA, do it early as it might change IDENTIFY data */
2918	rc = ata_hpa_resize(dev);
2919	if (rc)
2920		return rc;
2921
2922	/* print device capabilities */
2923	ata_dev_dbg(dev,
2924		    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2925		    "85:%04x 86:%04x 87:%04x 88:%04x\n",
2926		    __func__,
2927		    id[49], id[82], id[83], id[84],
2928		    id[85], id[86], id[87], id[88]);
2929
2930	/* initialize to-be-configured parameters */
2931	dev->flags &= ~ATA_DFLAG_CFG_MASK;
2932	dev->max_sectors = 0;
2933	dev->cdb_len = 0;
2934	dev->n_sectors = 0;
2935	dev->cylinders = 0;
2936	dev->heads = 0;
2937	dev->sectors = 0;
2938	dev->multi_count = 0;
2939
2940	/*
2941	 * common ATA, ATAPI feature tests
2942	 */
2943
2944	/* find max transfer mode; for printk only */
2945	xfer_mask = ata_id_xfermask(id);
2946
2947	ata_dump_id(dev, id);
2948
2949	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2950	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2951			sizeof(fwrevbuf));
2952
2953	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2954			sizeof(modelbuf));
2955
2956	/* ATA-specific feature tests */
2957	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2958		if (ata_id_is_cfa(id)) {
2959			/* CPRM may make this media unusable */
2960			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2961				ata_dev_warn(dev,
2962	"supports DRM functions and may not be fully accessible\n");
2963			snprintf(revbuf, 7, "CFA");
2964		} else {
2965			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2966			/* Warn the user if the device has TPM extensions */
2967			if (ata_id_has_tpm(id))
2968				ata_dev_warn(dev,
2969	"supports DRM functions and may not be fully accessible\n");
2970		}
2971
2972		dev->n_sectors = ata_id_n_sectors(id);
2973
2974		/* get current R/W Multiple count setting */
2975		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2976			unsigned int max = dev->id[47] & 0xff;
2977			unsigned int cnt = dev->id[59] & 0xff;
2978			/* only recognize/allow powers of two here */
2979			if (is_power_of_2(max) && is_power_of_2(cnt))
2980				if (cnt <= max)
2981					dev->multi_count = cnt;
2982		}
2983
2984		/* print device info to dmesg */
2985		if (print_info)
2986			ata_dev_info(dev, "%s: %s, %s, max %s\n",
2987				     revbuf, modelbuf, fwrevbuf,
2988				     ata_mode_string(xfer_mask));
2989
2990		if (ata_id_has_lba(id)) {
2991			rc = ata_dev_config_lba(dev);
2992			if (rc)
2993				return rc;
2994		} else {
2995			ata_dev_config_chs(dev);
2996		}
2997
2998		ata_dev_config_fua(dev);
2999		ata_dev_config_devslp(dev);
3000		ata_dev_config_sense_reporting(dev);
3001		ata_dev_config_zac(dev);
3002		ata_dev_config_trusted(dev);
3003		ata_dev_config_cpr(dev);
3004		ata_dev_config_cdl(dev);
3005		dev->cdb_len = 32;
3006
3007		if (print_info)
3008			ata_dev_print_features(dev);
3009	}
3010
3011	/* ATAPI-specific feature tests */
3012	else if (dev->class == ATA_DEV_ATAPI) {
3013		const char *cdb_intr_string = "";
3014		const char *atapi_an_string = "";
3015		const char *dma_dir_string = "";
3016		u32 sntf;
3017
3018		rc = atapi_cdb_len(id);
3019		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
3020			ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
3021			rc = -EINVAL;
3022			goto err_out_nosup;
3023		}
3024		dev->cdb_len = (unsigned int) rc;
3025
3026		/* Enable ATAPI AN if both the host and device have
3027		 * the support.  If PMP is attached, SNTF is required
3028		 * to enable ATAPI AN to discern between PHY status
3029		 * changed notifications and ATAPI ANs.
3030		 */
3031		if (atapi_an &&
3032		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
3033		    (!sata_pmp_attached(ap) ||
3034		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
3035			/* issue SET feature command to turn this on */
3036			err_mask = ata_dev_set_feature(dev,
3037					SETFEATURES_SATA_ENABLE, SATA_AN);
3038			if (err_mask)
3039				ata_dev_err(dev,
3040					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
3041					    err_mask);
3042			else {
3043				dev->flags |= ATA_DFLAG_AN;
3044				atapi_an_string = ", ATAPI AN";
3045			}
3046		}
3047
3048		if (ata_id_cdb_intr(dev->id)) {
3049			dev->flags |= ATA_DFLAG_CDB_INTR;
3050			cdb_intr_string = ", CDB intr";
3051		}
3052
3053		if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
3054			dev->flags |= ATA_DFLAG_DMADIR;
3055			dma_dir_string = ", DMADIR";
3056		}
3057
3058		if (ata_id_has_da(dev->id)) {
3059			dev->flags |= ATA_DFLAG_DA;
3060			zpodd_init(dev);
3061		}
3062
3063		/* print device info to dmesg */
3064		if (print_info)
3065			ata_dev_info(dev,
3066				     "ATAPI: %s, %s, max %s%s%s%s\n",
3067				     modelbuf, fwrevbuf,
3068				     ata_mode_string(xfer_mask),
3069				     cdb_intr_string, atapi_an_string,
3070				     dma_dir_string);
3071	}
3072
3073	/* determine max_sectors */
3074	dev->max_sectors = ATA_MAX_SECTORS;
3075	if (dev->flags & ATA_DFLAG_LBA48)
3076		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
3077
3078	/* Limit PATA drive on SATA cable bridge transfers to udma5,
3079	   200 sectors */
3080	if (ata_dev_knobble(dev)) {
3081		if (print_info)
3082			ata_dev_info(dev, "applying bridge limits\n");
3083		dev->udma_mask &= ATA_UDMA5;
3084		dev->max_sectors = ATA_MAX_SECTORS;
3085	}
3086
3087	if ((dev->class == ATA_DEV_ATAPI) &&
3088	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
3089		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
3090		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
3091	}
3092
3093	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
3094		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
3095					 dev->max_sectors);
3096
3097	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
3098		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
3099					 dev->max_sectors);
3100
3101	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
3102		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
3103
3104	if (ap->ops->dev_config)
3105		ap->ops->dev_config(dev);
3106
3107	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
3108		/* Let the user know. We don't want to disallow opens for
3109		   rescue purposes, or in case the vendor is just a blithering
3110		   idiot. Do this after the dev_config call as some controllers
3111		   with buggy firmware may want to avoid reporting false device
3112		   bugs */
3113
3114		if (print_info) {
3115			ata_dev_warn(dev,
3116"Drive reports diagnostics failure. This may indicate a drive\n");
3117			ata_dev_warn(dev,
3118"fault or invalid emulation. Contact drive vendor for information.\n");
3119		}
3120	}
3121
3122	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
3123		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
3124		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
3125	}
3126
3127	return 0;
3128
3129err_out_nosup:
3130	return rc;
3131}
3132
3133/**
3134 *	ata_cable_40wire	-	return 40 wire cable type
3135 *	@ap: port
3136 *
3137 *	Helper method for drivers which want to hardwire 40 wire cable
3138 *	detection.
3139 */
3140
3141int ata_cable_40wire(struct ata_port *ap)
3142{
3143	return ATA_CBL_PATA40;
3144}
3145EXPORT_SYMBOL_GPL(ata_cable_40wire);
3146
3147/**
3148 *	ata_cable_80wire	-	return 80 wire cable type
3149 *	@ap: port
3150 *
3151 *	Helper method for drivers which want to hardwire 80 wire cable
3152 *	detection.
3153 */
3154
3155int ata_cable_80wire(struct ata_port *ap)
3156{
3157	return ATA_CBL_PATA80;
3158}
3159EXPORT_SYMBOL_GPL(ata_cable_80wire);
3160
3161/**
3162 *	ata_cable_unknown	-	return unknown PATA cable.
3163 *	@ap: port
3164 *
3165 *	Helper method for drivers which have no PATA cable detection.
3166 */
3167
3168int ata_cable_unknown(struct ata_port *ap)
3169{
3170	return ATA_CBL_PATA_UNK;
3171}
3172EXPORT_SYMBOL_GPL(ata_cable_unknown);
3173
3174/**
3175 *	ata_cable_ignore	-	return ignored PATA cable.
3176 *	@ap: port
3177 *
3178 *	Helper method for drivers which don't use cable type to limit
3179 *	transfer mode.
3180 */
3181int ata_cable_ignore(struct ata_port *ap)
3182{
3183	return ATA_CBL_PATA_IGN;
3184}
3185EXPORT_SYMBOL_GPL(ata_cable_ignore);
3186
3187/**
3188 *	ata_cable_sata	-	return SATA cable type
3189 *	@ap: port
3190 *
3191 *	Helper method for drivers which have SATA cables
3192 */
3193
3194int ata_cable_sata(struct ata_port *ap)
3195{
3196	return ATA_CBL_SATA;
3197}
3198EXPORT_SYMBOL_GPL(ata_cable_sata);
3199
3200/**
3201 *	sata_print_link_status - Print SATA link status
3202 *	@link: SATA link to printk link status about
3203 *
3204 *	This function prints link speed and status of a SATA link.
3205 *
3206 *	LOCKING:
3207 *	None.
3208 */
3209static void sata_print_link_status(struct ata_link *link)
3210{
3211	u32 sstatus, scontrol, tmp;
3212
3213	if (sata_scr_read(link, SCR_STATUS, &sstatus))
3214		return;
3215	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
3216		return;
3217
3218	if (ata_phys_link_online(link)) {
3219		tmp = (sstatus >> 4) & 0xf;
3220		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
3221			      sata_spd_string(tmp), sstatus, scontrol);
3222	} else {
3223		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
3224			      sstatus, scontrol);
3225	}
3226}
3227
3228/**
3229 *	ata_dev_pair		-	return other device on cable
3230 *	@adev: device
3231 *
3232 *	Obtain the other device on the same cable, or if none is
3233 *	present NULL is returned
3234 */
3235
3236struct ata_device *ata_dev_pair(struct ata_device *adev)
3237{
3238	struct ata_link *link = adev->link;
3239	struct ata_device *pair = &link->device[1 - adev->devno];
3240	if (!ata_dev_enabled(pair))
3241		return NULL;
3242	return pair;
3243}
3244EXPORT_SYMBOL_GPL(ata_dev_pair);
3245
3246/**
3247 *	sata_down_spd_limit - adjust SATA spd limit downward
3248 *	@link: Link to adjust SATA spd limit for
3249 *	@spd_limit: Additional limit
3250 *
3251 *	Adjust SATA spd limit of @link downward.  Note that this
3252 *	function only adjusts the limit.  The change must be applied
3253 *	using sata_set_spd().
3254 *
3255 *	If @spd_limit is non-zero, the speed is limited to equal to or
3256 *	lower than @spd_limit if such speed is supported.  If
3257 *	@spd_limit is slower than any supported speed, only the lowest
3258 *	supported speed is allowed.
3259 *
3260 *	LOCKING:
3261 *	Inherited from caller.
3262 *
3263 *	RETURNS:
3264 *	0 on success, negative errno on failure
3265 */
3266int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
3267{
3268	u32 sstatus, spd, mask;
3269	int rc, bit;
3270
3271	if (!sata_scr_valid(link))
3272		return -EOPNOTSUPP;
3273
3274	/* If SCR can be read, use it to determine the current SPD.
3275	 * If not, use cached value in link->sata_spd.
3276	 */
3277	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3278	if (rc == 0 && ata_sstatus_online(sstatus))
3279		spd = (sstatus >> 4) & 0xf;
3280	else
3281		spd = link->sata_spd;
3282
3283	mask = link->sata_spd_limit;
3284	if (mask <= 1)
3285		return -EINVAL;
3286
3287	/* unconditionally mask off the highest bit */
3288	bit = fls(mask) - 1;
3289	mask &= ~(1 << bit);
3290
3291	/*
3292	 * Mask off all speeds higher than or equal to the current one.  At
3293	 * this point, if current SPD is not available and we previously
3294	 * recorded the link speed from SStatus, the driver has already
3295	 * masked off the highest bit so mask should already be 1 or 0.
3296	 * Otherwise, we should not force 1.5Gbps on a link where we have
3297	 * not previously recorded speed from SStatus.  Just return in this
3298	 * case.
3299	 */
3300	if (spd > 1)
3301		mask &= (1 << (spd - 1)) - 1;
3302	else if (link->sata_spd)
3303		return -EINVAL;
3304
3305	/* were we already at the bottom? */
3306	if (!mask)
3307		return -EINVAL;
3308
3309	if (spd_limit) {
3310		if (mask & ((1 << spd_limit) - 1))
3311			mask &= (1 << spd_limit) - 1;
3312		else {
3313			bit = ffs(mask) - 1;
3314			mask = 1 << bit;
3315		}
3316	}
3317
3318	link->sata_spd_limit = mask;
3319
3320	ata_link_warn(link, "limiting SATA link speed to %s\n",
3321		      sata_spd_string(fls(mask)));
3322
3323	return 0;
3324}
3325
3326#ifdef CONFIG_ATA_ACPI
3327/**
3328 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3329 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3330 *	@cycle: cycle duration in ns
3331 *
3332 *	Return matching xfer mode for @cycle.  The returned mode is of
3333 *	the transfer type specified by @xfer_shift.  If @cycle is too
3334 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
3335 *	than the fastest known mode, the fasted mode is returned.
3336 *
3337 *	LOCKING:
3338 *	None.
3339 *
3340 *	RETURNS:
3341 *	Matching xfer_mode, 0xff if no match found.
3342 */
3343u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3344{
3345	u8 base_mode = 0xff, last_mode = 0xff;
3346	const struct ata_xfer_ent *ent;
3347	const struct ata_timing *t;
3348
3349	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3350		if (ent->shift == xfer_shift)
3351			base_mode = ent->base;
3352
3353	for (t = ata_timing_find_mode(base_mode);
3354	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3355		unsigned short this_cycle;
3356
3357		switch (xfer_shift) {
3358		case ATA_SHIFT_PIO:
3359		case ATA_SHIFT_MWDMA:
3360			this_cycle = t->cycle;
3361			break;
3362		case ATA_SHIFT_UDMA:
3363			this_cycle = t->udma;
3364			break;
3365		default:
3366			return 0xff;
3367		}
3368
3369		if (cycle > this_cycle)
3370			break;
3371
3372		last_mode = t->mode;
3373	}
3374
3375	return last_mode;
3376}
3377#endif
3378
3379/**
3380 *	ata_down_xfermask_limit - adjust dev xfer masks downward
3381 *	@dev: Device to adjust xfer masks
3382 *	@sel: ATA_DNXFER_* selector
3383 *
3384 *	Adjust xfer masks of @dev downward.  Note that this function
3385 *	does not apply the change.  Invoking ata_set_mode() afterwards
3386 *	will apply the limit.
3387 *
3388 *	LOCKING:
3389 *	Inherited from caller.
3390 *
3391 *	RETURNS:
3392 *	0 on success, negative errno on failure
3393 */
3394int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3395{
3396	char buf[32];
3397	unsigned int orig_mask, xfer_mask;
3398	unsigned int pio_mask, mwdma_mask, udma_mask;
3399	int quiet, highbit;
3400
3401	quiet = !!(sel & ATA_DNXFER_QUIET);
3402	sel &= ~ATA_DNXFER_QUIET;
3403
3404	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3405						  dev->mwdma_mask,
3406						  dev->udma_mask);
3407	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3408
3409	switch (sel) {
3410	case ATA_DNXFER_PIO:
3411		highbit = fls(pio_mask) - 1;
3412		pio_mask &= ~(1 << highbit);
3413		break;
3414
3415	case ATA_DNXFER_DMA:
3416		if (udma_mask) {
3417			highbit = fls(udma_mask) - 1;
3418			udma_mask &= ~(1 << highbit);
3419			if (!udma_mask)
3420				return -ENOENT;
3421		} else if (mwdma_mask) {
3422			highbit = fls(mwdma_mask) - 1;
3423			mwdma_mask &= ~(1 << highbit);
3424			if (!mwdma_mask)
3425				return -ENOENT;
3426		}
3427		break;
3428
3429	case ATA_DNXFER_40C:
3430		udma_mask &= ATA_UDMA_MASK_40C;
3431		break;
3432
3433	case ATA_DNXFER_FORCE_PIO0:
3434		pio_mask &= 1;
3435		fallthrough;
3436	case ATA_DNXFER_FORCE_PIO:
3437		mwdma_mask = 0;
3438		udma_mask = 0;
3439		break;
3440
3441	default:
3442		BUG();
3443	}
3444
3445	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3446
3447	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3448		return -ENOENT;
3449
3450	if (!quiet) {
3451		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3452			snprintf(buf, sizeof(buf), "%s:%s",
3453				 ata_mode_string(xfer_mask),
3454				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3455		else
3456			snprintf(buf, sizeof(buf), "%s",
3457				 ata_mode_string(xfer_mask));
3458
3459		ata_dev_warn(dev, "limiting speed to %s\n", buf);
3460	}
3461
3462	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3463			    &dev->udma_mask);
3464
3465	return 0;
3466}
3467
3468static int ata_dev_set_mode(struct ata_device *dev)
3469{
3470	struct ata_port *ap = dev->link->ap;
3471	struct ata_eh_context *ehc = &dev->link->eh_context;
3472	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3473	const char *dev_err_whine = "";
3474	int ign_dev_err = 0;
3475	unsigned int err_mask = 0;
3476	int rc;
3477
3478	dev->flags &= ~ATA_DFLAG_PIO;
3479	if (dev->xfer_shift == ATA_SHIFT_PIO)
3480		dev->flags |= ATA_DFLAG_PIO;
3481
3482	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3483		dev_err_whine = " (SET_XFERMODE skipped)";
3484	else {
3485		if (nosetxfer)
3486			ata_dev_warn(dev,
3487				     "NOSETXFER but PATA detected - can't "
3488				     "skip SETXFER, might malfunction\n");
3489		err_mask = ata_dev_set_xfermode(dev);
3490	}
3491
3492	if (err_mask & ~AC_ERR_DEV)
3493		goto fail;
3494
3495	/* revalidate */
3496	ehc->i.flags |= ATA_EHI_POST_SETMODE;
3497	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3498	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3499	if (rc)
3500		return rc;
3501
3502	if (dev->xfer_shift == ATA_SHIFT_PIO) {
3503		/* Old CFA may refuse this command, which is just fine */
3504		if (ata_id_is_cfa(dev->id))
3505			ign_dev_err = 1;
3506		/* Catch several broken garbage emulations plus some pre
3507		   ATA devices */
3508		if (ata_id_major_version(dev->id) == 0 &&
3509					dev->pio_mode <= XFER_PIO_2)
3510			ign_dev_err = 1;
3511		/* Some very old devices and some bad newer ones fail
3512		   any kind of SET_XFERMODE request but support PIO0-2
3513		   timings and no IORDY */
3514		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3515			ign_dev_err = 1;
3516	}
3517	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
3518	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3519	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3520	    dev->dma_mode == XFER_MW_DMA_0 &&
3521	    (dev->id[63] >> 8) & 1)
3522		ign_dev_err = 1;
3523
3524	/* if the device is actually configured correctly, ignore dev err */
3525	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3526		ign_dev_err = 1;
3527
3528	if (err_mask & AC_ERR_DEV) {
3529		if (!ign_dev_err)
3530			goto fail;
3531		else
3532			dev_err_whine = " (device error ignored)";
3533	}
3534
3535	ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
3536		    dev->xfer_shift, (int)dev->xfer_mode);
3537
3538	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3539	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
3540		ata_dev_info(dev, "configured for %s%s\n",
3541			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3542			     dev_err_whine);
3543
3544	return 0;
3545
3546 fail:
3547	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3548	return -EIO;
3549}
3550
3551/**
3552 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3553 *	@link: link on which timings will be programmed
3554 *	@r_failed_dev: out parameter for failed device
3555 *
3556 *	Standard implementation of the function used to tune and set
3557 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3558 *	ata_dev_set_mode() fails, pointer to the failing device is
3559 *	returned in @r_failed_dev.
3560 *
3561 *	LOCKING:
3562 *	PCI/etc. bus probe sem.
3563 *
3564 *	RETURNS:
3565 *	0 on success, negative errno otherwise
3566 */
3567
3568int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3569{
3570	struct ata_port *ap = link->ap;
3571	struct ata_device *dev;
3572	int rc = 0, used_dma = 0, found = 0;
3573
3574	/* step 1: calculate xfer_mask */
3575	ata_for_each_dev(dev, link, ENABLED) {
3576		unsigned int pio_mask, dma_mask;
3577		unsigned int mode_mask;
3578
3579		mode_mask = ATA_DMA_MASK_ATA;
3580		if (dev->class == ATA_DEV_ATAPI)
3581			mode_mask = ATA_DMA_MASK_ATAPI;
3582		else if (ata_id_is_cfa(dev->id))
3583			mode_mask = ATA_DMA_MASK_CFA;
3584
3585		ata_dev_xfermask(dev);
3586		ata_force_xfermask(dev);
3587
3588		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3589
3590		if (libata_dma_mask & mode_mask)
3591			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3592						     dev->udma_mask);
3593		else
3594			dma_mask = 0;
3595
3596		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3597		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3598
3599		found = 1;
3600		if (ata_dma_enabled(dev))
3601			used_dma = 1;
3602	}
3603	if (!found)
3604		goto out;
3605
3606	/* step 2: always set host PIO timings */
3607	ata_for_each_dev(dev, link, ENABLED) {
3608		if (dev->pio_mode == 0xff) {
3609			ata_dev_warn(dev, "no PIO support\n");
3610			rc = -EINVAL;
3611			goto out;
3612		}
3613
3614		dev->xfer_mode = dev->pio_mode;
3615		dev->xfer_shift = ATA_SHIFT_PIO;
3616		if (ap->ops->set_piomode)
3617			ap->ops->set_piomode(ap, dev);
3618	}
3619
3620	/* step 3: set host DMA timings */
3621	ata_for_each_dev(dev, link, ENABLED) {
3622		if (!ata_dma_enabled(dev))
3623			continue;
3624
3625		dev->xfer_mode = dev->dma_mode;
3626		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3627		if (ap->ops->set_dmamode)
3628			ap->ops->set_dmamode(ap, dev);
3629	}
3630
3631	/* step 4: update devices' xfer mode */
3632	ata_for_each_dev(dev, link, ENABLED) {
3633		rc = ata_dev_set_mode(dev);
3634		if (rc)
3635			goto out;
3636	}
3637
3638	/* Record simplex status. If we selected DMA then the other
3639	 * host channels are not permitted to do so.
3640	 */
3641	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3642		ap->host->simplex_claimed = ap;
3643
3644 out:
3645	if (rc)
3646		*r_failed_dev = dev;
3647	return rc;
3648}
3649EXPORT_SYMBOL_GPL(ata_do_set_mode);
3650
3651/**
3652 *	ata_wait_ready - wait for link to become ready
3653 *	@link: link to be waited on
3654 *	@deadline: deadline jiffies for the operation
3655 *	@check_ready: callback to check link readiness
3656 *
3657 *	Wait for @link to become ready.  @check_ready should return
3658 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
3659 *	link doesn't seem to be occupied, other errno for other error
3660 *	conditions.
3661 *
3662 *	Transient -ENODEV conditions are allowed for
3663 *	ATA_TMOUT_FF_WAIT.
3664 *
3665 *	LOCKING:
3666 *	EH context.
3667 *
3668 *	RETURNS:
3669 *	0 if @link is ready before @deadline; otherwise, -errno.
3670 */
3671int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3672		   int (*check_ready)(struct ata_link *link))
3673{
3674	unsigned long start = jiffies;
3675	unsigned long nodev_deadline;
3676	int warned = 0;
3677
3678	/* choose which 0xff timeout to use, read comment in libata.h */
3679	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3680		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3681	else
3682		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3683
3684	/* Slave readiness can't be tested separately from master.  On
3685	 * M/S emulation configuration, this function should be called
3686	 * only on the master and it will handle both master and slave.
3687	 */
3688	WARN_ON(link == link->ap->slave_link);
3689
3690	if (time_after(nodev_deadline, deadline))
3691		nodev_deadline = deadline;
3692
3693	while (1) {
3694		unsigned long now = jiffies;
3695		int ready, tmp;
3696
3697		ready = tmp = check_ready(link);
3698		if (ready > 0)
3699			return 0;
3700
3701		/*
3702		 * -ENODEV could be transient.  Ignore -ENODEV if link
3703		 * is online.  Also, some SATA devices take a long
3704		 * time to clear 0xff after reset.  Wait for
3705		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3706		 * offline.
3707		 *
3708		 * Note that some PATA controllers (pata_ali) explode
3709		 * if status register is read more than once when
3710		 * there's no device attached.
3711		 */
3712		if (ready == -ENODEV) {
3713			if (ata_link_online(link))
3714				ready = 0;
3715			else if ((link->ap->flags & ATA_FLAG_SATA) &&
3716				 !ata_link_offline(link) &&
3717				 time_before(now, nodev_deadline))
3718				ready = 0;
3719		}
3720
3721		if (ready)
3722			return ready;
3723		if (time_after(now, deadline))
3724			return -EBUSY;
3725
3726		if (!warned && time_after(now, start + 5 * HZ) &&
3727		    (deadline - now > 3 * HZ)) {
3728			ata_link_warn(link,
3729				"link is slow to respond, please be patient "
3730				"(ready=%d)\n", tmp);
3731			warned = 1;
3732		}
3733
3734		ata_msleep(link->ap, 50);
3735	}
3736}
3737
3738/**
3739 *	ata_wait_after_reset - wait for link to become ready after reset
3740 *	@link: link to be waited on
3741 *	@deadline: deadline jiffies for the operation
3742 *	@check_ready: callback to check link readiness
3743 *
3744 *	Wait for @link to become ready after reset.
3745 *
3746 *	LOCKING:
3747 *	EH context.
3748 *
3749 *	RETURNS:
3750 *	0 if @link is ready before @deadline; otherwise, -errno.
3751 */
3752int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3753				int (*check_ready)(struct ata_link *link))
3754{
3755	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3756
3757	return ata_wait_ready(link, deadline, check_ready);
3758}
3759EXPORT_SYMBOL_GPL(ata_wait_after_reset);
3760
3761/**
3762 *	ata_std_prereset - prepare for reset
3763 *	@link: ATA link to be reset
3764 *	@deadline: deadline jiffies for the operation
3765 *
3766 *	@link is about to be reset.  Initialize it.  Failure from
3767 *	prereset makes libata abort whole reset sequence and give up
3768 *	that port, so prereset should be best-effort.  It does its
3769 *	best to prepare for reset sequence but if things go wrong, it
3770 *	should just whine, not fail.
3771 *
3772 *	LOCKING:
3773 *	Kernel thread context (may sleep)
3774 *
3775 *	RETURNS:
3776 *	Always 0.
3777 */
3778int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3779{
3780	struct ata_port *ap = link->ap;
3781	struct ata_eh_context *ehc = &link->eh_context;
3782	const unsigned int *timing = sata_ehc_deb_timing(ehc);
3783	int rc;
3784
3785	/* if we're about to do hardreset, nothing more to do */
3786	if (ehc->i.action & ATA_EH_HARDRESET)
3787		return 0;
3788
3789	/* if SATA, resume link */
3790	if (ap->flags & ATA_FLAG_SATA) {
3791		rc = sata_link_resume(link, timing, deadline);
3792		/* whine about phy resume failure but proceed */
3793		if (rc && rc != -EOPNOTSUPP)
3794			ata_link_warn(link,
3795				      "failed to resume link for reset (errno=%d)\n",
3796				      rc);
3797	}
3798
3799	/* no point in trying softreset on offline link */
3800	if (ata_phys_link_offline(link))
3801		ehc->i.action &= ~ATA_EH_SOFTRESET;
3802
3803	return 0;
3804}
3805EXPORT_SYMBOL_GPL(ata_std_prereset);
3806
3807/**
3808 *	sata_std_hardreset - COMRESET w/o waiting or classification
3809 *	@link: link to reset
3810 *	@class: resulting class of attached device
3811 *	@deadline: deadline jiffies for the operation
3812 *
3813 *	Standard SATA COMRESET w/o waiting or classification.
3814 *
3815 *	LOCKING:
3816 *	Kernel thread context (may sleep)
3817 *
3818 *	RETURNS:
3819 *	0 if link offline, -EAGAIN if link online, -errno on errors.
3820 */
3821int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3822		       unsigned long deadline)
3823{
3824	const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
3825	bool online;
3826	int rc;
3827
3828	/* do hardreset */
3829	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3830	return online ? -EAGAIN : rc;
3831}
3832EXPORT_SYMBOL_GPL(sata_std_hardreset);
3833
3834/**
3835 *	ata_std_postreset - standard postreset callback
3836 *	@link: the target ata_link
3837 *	@classes: classes of attached devices
3838 *
3839 *	This function is invoked after a successful reset.  Note that
3840 *	the device might have been reset more than once using
3841 *	different reset methods before postreset is invoked.
3842 *
3843 *	LOCKING:
3844 *	Kernel thread context (may sleep)
3845 */
3846void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3847{
3848	u32 serror;
3849
3850	/* reset complete, clear SError */
3851	if (!sata_scr_read(link, SCR_ERROR, &serror))
3852		sata_scr_write(link, SCR_ERROR, serror);
3853
3854	/* print link status */
3855	sata_print_link_status(link);
3856}
3857EXPORT_SYMBOL_GPL(ata_std_postreset);
3858
3859/**
3860 *	ata_dev_same_device - Determine whether new ID matches configured device
3861 *	@dev: device to compare against
3862 *	@new_class: class of the new device
3863 *	@new_id: IDENTIFY page of the new device
3864 *
3865 *	Compare @new_class and @new_id against @dev and determine
3866 *	whether @dev is the device indicated by @new_class and
3867 *	@new_id.
3868 *
3869 *	LOCKING:
3870 *	None.
3871 *
3872 *	RETURNS:
3873 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
3874 */
3875static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3876			       const u16 *new_id)
3877{
3878	const u16 *old_id = dev->id;
3879	unsigned char model[2][ATA_ID_PROD_LEN + 1];
3880	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3881
3882	if (dev->class != new_class) {
3883		ata_dev_info(dev, "class mismatch %d != %d\n",
3884			     dev->class, new_class);
3885		return 0;
3886	}
3887
3888	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3889	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3890	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3891	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3892
3893	if (strcmp(model[0], model[1])) {
3894		ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3895			     model[0], model[1]);
3896		return 0;
3897	}
3898
3899	if (strcmp(serial[0], serial[1])) {
3900		ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3901			     serial[0], serial[1]);
3902		return 0;
3903	}
3904
3905	return 1;
3906}
3907
3908/**
3909 *	ata_dev_reread_id - Re-read IDENTIFY data
3910 *	@dev: target ATA device
3911 *	@readid_flags: read ID flags
3912 *
3913 *	Re-read IDENTIFY page and make sure @dev is still attached to
3914 *	the port.
3915 *
3916 *	LOCKING:
3917 *	Kernel thread context (may sleep)
3918 *
3919 *	RETURNS:
3920 *	0 on success, negative errno otherwise
3921 */
3922int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3923{
3924	unsigned int class = dev->class;
3925	u16 *id = (void *)dev->link->ap->sector_buf;
3926	int rc;
3927
3928	/* read ID data */
3929	rc = ata_dev_read_id(dev, &class, readid_flags, id);
3930	if (rc)
3931		return rc;
3932
3933	/* is the device still there? */
3934	if (!ata_dev_same_device(dev, class, id))
3935		return -ENODEV;
3936
3937	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3938	return 0;
3939}
3940
3941/**
3942 *	ata_dev_revalidate - Revalidate ATA device
3943 *	@dev: device to revalidate
3944 *	@new_class: new class code
3945 *	@readid_flags: read ID flags
3946 *
3947 *	Re-read IDENTIFY page, make sure @dev is still attached to the
3948 *	port and reconfigure it according to the new IDENTIFY page.
3949 *
3950 *	LOCKING:
3951 *	Kernel thread context (may sleep)
3952 *
3953 *	RETURNS:
3954 *	0 on success, negative errno otherwise
3955 */
3956int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3957		       unsigned int readid_flags)
3958{
3959	u64 n_sectors = dev->n_sectors;
3960	u64 n_native_sectors = dev->n_native_sectors;
3961	int rc;
3962
3963	if (!ata_dev_enabled(dev))
3964		return -ENODEV;
3965
3966	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3967	if (ata_class_enabled(new_class) && new_class == ATA_DEV_PMP) {
3968		ata_dev_info(dev, "class mismatch %u != %u\n",
3969			     dev->class, new_class);
3970		rc = -ENODEV;
3971		goto fail;
3972	}
3973
3974	/* re-read ID */
3975	rc = ata_dev_reread_id(dev, readid_flags);
3976	if (rc)
3977		goto fail;
3978
3979	/* configure device according to the new ID */
3980	rc = ata_dev_configure(dev);
3981	if (rc)
3982		goto fail;
3983
3984	/* verify n_sectors hasn't changed */
3985	if (dev->class != ATA_DEV_ATA || !n_sectors ||
3986	    dev->n_sectors == n_sectors)
3987		return 0;
3988
3989	/* n_sectors has changed */
3990	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3991		     (unsigned long long)n_sectors,
3992		     (unsigned long long)dev->n_sectors);
3993
3994	/*
3995	 * Something could have caused HPA to be unlocked
3996	 * involuntarily.  If n_native_sectors hasn't changed and the
3997	 * new size matches it, keep the device.
3998	 */
3999	if (dev->n_native_sectors == n_native_sectors &&
4000	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
4001		ata_dev_warn(dev,
4002			     "new n_sectors matches native, probably "
4003			     "late HPA unlock, n_sectors updated\n");
4004		/* use the larger n_sectors */
4005		return 0;
4006	}
4007
4008	/*
4009	 * Some BIOSes boot w/o HPA but resume w/ HPA locked.  Try
4010	 * unlocking HPA in those cases.
4011	 *
4012	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
4013	 */
4014	if (dev->n_native_sectors == n_native_sectors &&
4015	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
4016	    !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
4017		ata_dev_warn(dev,
4018			     "old n_sectors matches native, probably "
4019			     "late HPA lock, will try to unlock HPA\n");
4020		/* try unlocking HPA */
4021		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
4022		rc = -EIO;
4023	} else
4024		rc = -ENODEV;
4025
4026	/* restore original n_[native_]sectors and fail */
4027	dev->n_native_sectors = n_native_sectors;
4028	dev->n_sectors = n_sectors;
4029 fail:
4030	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
4031	return rc;
4032}
4033
4034struct ata_blacklist_entry {
4035	const char *model_num;
4036	const char *model_rev;
4037	unsigned long horkage;
4038};
4039
4040static const struct ata_blacklist_entry ata_device_blacklist [] = {
4041	/* Devices with DMA related problems under Linux */
4042	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
4043	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
4044	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
4045	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
4046	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
4047	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
4048	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
4049	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
4050	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
4051	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
4052	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
4053	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
4054	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
4055	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
4056	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
4057	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
4058	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
4059	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
4060	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
4061	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
4062	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
4063	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
4064	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
4065	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
4066	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4067	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
4068	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
4069	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
4070	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
4071	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_HORKAGE_NODMA },
4072	/* Odd clown on sil3726/4726 PMPs */
4073	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },
4074	/* Similar story with ASMedia 1092 */
4075	{ "ASMT109x- Config",	NULL,		ATA_HORKAGE_DISABLE },
4076
4077	/* Weird ATAPI devices */
4078	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
4079	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
4080	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4081	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
4082
4083	/*
4084	 * Causes silent data corruption with higher max sects.
4085	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4086	 */
4087	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
4088
4089	/*
4090	 * These devices time out with higher max sects.
4091	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
4092	 */
4093	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4094	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
4095
4096	/* Devices we expect to fail diagnostics */
4097
4098	/* Devices where NCQ should be avoided */
4099	/* NCQ is slow */
4100	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
4101	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ },
4102	/* http://thread.gmane.org/gmane.linux.ide/14907 */
4103	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
4104	/* NCQ is broken */
4105	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
4106	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
4107	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
4108	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
4109	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },
4110
4111	/* Seagate NCQ + FLUSH CACHE firmware bug */
4112	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4113						ATA_HORKAGE_FIRMWARE_WARN },
4114
4115	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4116						ATA_HORKAGE_FIRMWARE_WARN },
4117
4118	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4119						ATA_HORKAGE_FIRMWARE_WARN },
4120
4121	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
4122						ATA_HORKAGE_FIRMWARE_WARN },
4123
4124	/* drives which fail FPDMA_AA activation (some may freeze afterwards)
4125	   the ST disks also have LPM issues */
4126	{ "ST1000LM024 HN-M101MBB", NULL,	ATA_HORKAGE_BROKEN_FPDMA_AA |
4127						ATA_HORKAGE_NOLPM },
4128	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
4129
4130	/* Blacklist entries taken from Silicon Image 3124/3132
4131	   Windows driver .inf file - also several Linux problem reports */
4132	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ },
4133	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ },
4134	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ },
4135
4136	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
4137	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ },
4138
4139	/* Sandisk SD7/8/9s lock up hard on large trims */
4140	{ "SanDisk SD[789]*",	NULL,		ATA_HORKAGE_MAX_TRIM_128M },
4141
4142	/* devices which puke on READ_NATIVE_MAX */
4143	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA },
4144	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4145	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4146	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },
4147
4148	/* this one allows HPA unlocking but fails IOs on the area */
4149	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },
4150
4151	/* Devices which report 1 sector over size HPA */
4152	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE },
4153	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE },
4154	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE },
4155
4156	/* Devices which get the IVB wrong */
4157	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB },
4158	/* Maybe we should just blacklist TSSTcorp... */
4159	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB },
4160
4161	/* Devices that do not need bridging limits applied */
4162	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK },
4163	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK },
4164
4165	/* Devices which aren't very happy with higher link speeds */
4166	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS },
4167	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS },
4168
4169	/*
4170	 * Devices which choke on SETXFER.  Applies only if both the
4171	 * device and controller are SATA.
4172	 */
4173	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
4174	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
4175	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
4176	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
4177	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
4178
4179	/* These specific Pioneer models have LPM issues */
4180	{ "PIONEER BD-RW   BDR-207M",	NULL,	ATA_HORKAGE_NOLPM },
4181	{ "PIONEER BD-RW   BDR-205",	NULL,	ATA_HORKAGE_NOLPM },
4182
4183	/* Crucial BX100 SSD 500GB has broken LPM support */
4184	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
4185
4186	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4187	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4188						ATA_HORKAGE_ZERO_AFTER_TRIM |
4189						ATA_HORKAGE_NOLPM },
4190	/* 512GB MX100 with newer firmware has only LPM issues */
4191	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
4192						ATA_HORKAGE_NOLPM },
4193
4194	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4195	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4196						ATA_HORKAGE_ZERO_AFTER_TRIM |
4197						ATA_HORKAGE_NOLPM },
4198	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4199						ATA_HORKAGE_ZERO_AFTER_TRIM |
4200						ATA_HORKAGE_NOLPM },
4201
4202	/* These specific Samsung models/firmware-revs do not handle LPM well */
4203	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
4204	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM },
4205	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM },
4206	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM },
4207
4208	/* devices that don't properly handle queued TRIM commands */
4209	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4210						ATA_HORKAGE_ZERO_AFTER_TRIM },
4211	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4212						ATA_HORKAGE_ZERO_AFTER_TRIM },
4213	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4214						ATA_HORKAGE_ZERO_AFTER_TRIM },
4215	{ "Micron_1100_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4216						ATA_HORKAGE_ZERO_AFTER_TRIM, },
4217	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4218						ATA_HORKAGE_ZERO_AFTER_TRIM },
4219	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4220						ATA_HORKAGE_ZERO_AFTER_TRIM },
4221	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
4222						ATA_HORKAGE_ZERO_AFTER_TRIM },
4223	{ "Samsung SSD 840 EVO*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4224						ATA_HORKAGE_NO_DMA_LOG |
4225						ATA_HORKAGE_ZERO_AFTER_TRIM },
4226	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4227						ATA_HORKAGE_ZERO_AFTER_TRIM },
4228	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4229						ATA_HORKAGE_ZERO_AFTER_TRIM },
4230	{ "Samsung SSD 860*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4231						ATA_HORKAGE_ZERO_AFTER_TRIM |
4232						ATA_HORKAGE_NO_NCQ_ON_ATI },
4233	{ "Samsung SSD 870*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4234						ATA_HORKAGE_ZERO_AFTER_TRIM |
4235						ATA_HORKAGE_NO_NCQ_ON_ATI },
4236	{ "SAMSUNG*MZ7LH*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4237						ATA_HORKAGE_ZERO_AFTER_TRIM |
4238						ATA_HORKAGE_NO_NCQ_ON_ATI, },
4239	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
4240						ATA_HORKAGE_ZERO_AFTER_TRIM },
4241
4242	/* devices that don't properly handle TRIM commands */
4243	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM },
4244	{ "M88V29*",			NULL,	ATA_HORKAGE_NOTRIM },
4245
4246	/*
4247	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4248	 * (Return Zero After Trim) flags in the ATA Command Set are
4249	 * unreliable in the sense that they only define what happens if
4250	 * the device successfully executed the DSM TRIM command. TRIM
4251	 * is only advisory, however, and the device is free to silently
4252	 * ignore all or parts of the request.
4253	 *
4254	 * Whitelist drives that are known to reliably return zeroes
4255	 * after TRIM.
4256	 */
4257
4258	/*
4259	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
4260	 * that model before whitelisting all other intel SSDs.
4261	 */
4262	{ "INTEL*SSDSC2MH*",		NULL,	0 },
4263
4264	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4265	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4266	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4267	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4268	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4269	{ "SAMSUNG*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4270	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4271	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM },
4272
4273	/*
4274	 * Some WD SATA-I drives spin up and down erratically when the link
4275	 * is put into the slumber mode.  We don't have full list of the
4276	 * affected devices.  Disable LPM if the device matches one of the
4277	 * known prefixes and is SATA-1.  As a side effect LPM partial is
4278	 * lost too.
4279	 *
4280	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
4281	 */
4282	{ "WDC WD800JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4283	{ "WDC WD1200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4284	{ "WDC WD1600JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4285	{ "WDC WD2000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4286	{ "WDC WD2500JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4287	{ "WDC WD3000JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4288	{ "WDC WD3200JD-*",		NULL,	ATA_HORKAGE_WD_BROKEN_LPM },
4289
4290	/*
4291	 * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
4292	 * log page is accessed. Ensure we never ask for this log page with
4293	 * these devices.
4294	 */
4295	{ "SATADOM-ML 3ME",		NULL,	ATA_HORKAGE_NO_LOG_DIR },
4296
4297	/* Buggy FUA */
4298	{ "Maxtor",		"BANC1G10",	ATA_HORKAGE_NO_FUA },
4299	{ "WDC*WD2500J*",	NULL,		ATA_HORKAGE_NO_FUA },
4300	{ "OCZ-VERTEX*",	NULL,		ATA_HORKAGE_NO_FUA },
4301	{ "INTEL*SSDSC2CT*",	NULL,		ATA_HORKAGE_NO_FUA },
4302
4303	/* End Marker */
4304	{ }
4305};
4306
4307static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4308{
4309	unsigned char model_num[ATA_ID_PROD_LEN + 1];
4310	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4311	const struct ata_blacklist_entry *ad = ata_device_blacklist;
4312
4313	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4314	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4315
4316	while (ad->model_num) {
4317		if (glob_match(ad->model_num, model_num)) {
4318			if (ad->model_rev == NULL)
4319				return ad->horkage;
4320			if (glob_match(ad->model_rev, model_rev))
4321				return ad->horkage;
4322		}
4323		ad++;
4324	}
4325	return 0;
4326}
4327
4328static int ata_dma_blacklisted(const struct ata_device *dev)
4329{
4330	/* We don't support polling DMA.
4331	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4332	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4333	 */
4334	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4335	    (dev->flags & ATA_DFLAG_CDB_INTR))
4336		return 1;
4337	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4338}
4339
4340/**
4341 *	ata_is_40wire		-	check drive side detection
4342 *	@dev: device
4343 *
4344 *	Perform drive side detection decoding, allowing for device vendors
4345 *	who can't follow the documentation.
4346 */
4347
4348static int ata_is_40wire(struct ata_device *dev)
4349{
4350	if (dev->horkage & ATA_HORKAGE_IVB)
4351		return ata_drive_40wire_relaxed(dev->id);
4352	return ata_drive_40wire(dev->id);
4353}
4354
4355/**
4356 *	cable_is_40wire		-	40/80/SATA decider
4357 *	@ap: port to consider
4358 *
4359 *	This function encapsulates the policy for speed management
4360 *	in one place. At the moment we don't cache the result but
4361 *	there is a good case for setting ap->cbl to the result when
4362 *	we are called with unknown cables (and figuring out if it
4363 *	impacts hotplug at all).
4364 *
4365 *	Return 1 if the cable appears to be 40 wire.
4366 */
4367
4368static int cable_is_40wire(struct ata_port *ap)
4369{
4370	struct ata_link *link;
4371	struct ata_device *dev;
4372
4373	/* If the controller thinks we are 40 wire, we are. */
4374	if (ap->cbl == ATA_CBL_PATA40)
4375		return 1;
4376
4377	/* If the controller thinks we are 80 wire, we are. */
4378	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4379		return 0;
4380
4381	/* If the system is known to be 40 wire short cable (eg
4382	 * laptop), then we allow 80 wire modes even if the drive
4383	 * isn't sure.
4384	 */
4385	if (ap->cbl == ATA_CBL_PATA40_SHORT)
4386		return 0;
4387
4388	/* If the controller doesn't know, we scan.
4389	 *
4390	 * Note: We look for all 40 wire detects at this point.  Any
4391	 *       80 wire detect is taken to be 80 wire cable because
4392	 * - in many setups only the one drive (slave if present) will
4393	 *   give a valid detect
4394	 * - if you have a non detect capable drive you don't want it
4395	 *   to colour the choice
4396	 */
4397	ata_for_each_link(link, ap, EDGE) {
4398		ata_for_each_dev(dev, link, ENABLED) {
4399			if (!ata_is_40wire(dev))
4400				return 0;
4401		}
4402	}
4403	return 1;
4404}
4405
4406/**
4407 *	ata_dev_xfermask - Compute supported xfermask of the given device
4408 *	@dev: Device to compute xfermask for
4409 *
4410 *	Compute supported xfermask of @dev and store it in
4411 *	dev->*_mask.  This function is responsible for applying all
4412 *	known limits including host controller limits, device
4413 *	blacklist, etc...
4414 *
4415 *	LOCKING:
4416 *	None.
4417 */
4418static void ata_dev_xfermask(struct ata_device *dev)
4419{
4420	struct ata_link *link = dev->link;
4421	struct ata_port *ap = link->ap;
4422	struct ata_host *host = ap->host;
4423	unsigned int xfer_mask;
4424
4425	/* controller modes available */
4426	xfer_mask = ata_pack_xfermask(ap->pio_mask,
4427				      ap->mwdma_mask, ap->udma_mask);
4428
4429	/* drive modes available */
4430	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4431				       dev->mwdma_mask, dev->udma_mask);
4432	xfer_mask &= ata_id_xfermask(dev->id);
4433
4434	/*
4435	 *	CFA Advanced TrueIDE timings are not allowed on a shared
4436	 *	cable
4437	 */
4438	if (ata_dev_pair(dev)) {
4439		/* No PIO5 or PIO6 */
4440		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4441		/* No MWDMA3 or MWDMA 4 */
4442		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4443	}
4444
4445	if (ata_dma_blacklisted(dev)) {
4446		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4447		ata_dev_warn(dev,
4448			     "device is on DMA blacklist, disabling DMA\n");
4449	}
4450
4451	if ((host->flags & ATA_HOST_SIMPLEX) &&
4452	    host->simplex_claimed && host->simplex_claimed != ap) {
4453		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4454		ata_dev_warn(dev,
4455			     "simplex DMA is claimed by other device, disabling DMA\n");
4456	}
4457
4458	if (ap->flags & ATA_FLAG_NO_IORDY)
4459		xfer_mask &= ata_pio_mask_no_iordy(dev);
4460
4461	if (ap->ops->mode_filter)
4462		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4463
4464	/* Apply cable rule here.  Don't apply it early because when
4465	 * we handle hot plug the cable type can itself change.
4466	 * Check this last so that we know if the transfer rate was
4467	 * solely limited by the cable.
4468	 * Unknown or 80 wire cables reported host side are checked
4469	 * drive side as well. Cases where we know a 40wire cable
4470	 * is used safely for 80 are not checked here.
4471	 */
4472	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4473		/* UDMA/44 or higher would be available */
4474		if (cable_is_40wire(ap)) {
4475			ata_dev_warn(dev,
4476				     "limited to UDMA/33 due to 40-wire cable\n");
4477			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4478		}
4479
4480	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4481			    &dev->mwdma_mask, &dev->udma_mask);
4482}
4483
4484/**
4485 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4486 *	@dev: Device to which command will be sent
4487 *
4488 *	Issue SET FEATURES - XFER MODE command to device @dev
4489 *	on port @ap.
4490 *
4491 *	LOCKING:
4492 *	PCI/etc. bus probe sem.
4493 *
4494 *	RETURNS:
4495 *	0 on success, AC_ERR_* mask otherwise.
4496 */
4497
4498static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4499{
4500	struct ata_taskfile tf;
4501
4502	/* set up set-features taskfile */
4503	ata_dev_dbg(dev, "set features - xfer mode\n");
4504
4505	/* Some controllers and ATAPI devices show flaky interrupt
4506	 * behavior after setting xfer mode.  Use polling instead.
4507	 */
4508	ata_tf_init(dev, &tf);
4509	tf.command = ATA_CMD_SET_FEATURES;
4510	tf.feature = SETFEATURES_XFER;
4511	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4512	tf.protocol = ATA_PROT_NODATA;
4513	/* If we are using IORDY we must send the mode setting command */
4514	if (ata_pio_need_iordy(dev))
4515		tf.nsect = dev->xfer_mode;
4516	/* If the device has IORDY and the controller does not - turn it off */
4517 	else if (ata_id_has_iordy(dev->id))
4518		tf.nsect = 0x01;
4519	else /* In the ancient relic department - skip all of this */
4520		return 0;
4521
4522	/*
4523	 * On some disks, this command causes spin-up, so we need longer
4524	 * timeout.
4525	 */
4526	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4527}
4528
4529/**
4530 *	ata_dev_set_feature - Issue SET FEATURES
4531 *	@dev: Device to which command will be sent
4532 *	@subcmd: The SET FEATURES subcommand to be sent
4533 *	@action: The sector count represents a subcommand specific action
4534 *
4535 *	Issue SET FEATURES command to device @dev on port @ap with sector count
4536 *
4537 *	LOCKING:
4538 *	PCI/etc. bus probe sem.
4539 *
4540 *	RETURNS:
4541 *	0 on success, AC_ERR_* mask otherwise.
4542 */
4543unsigned int ata_dev_set_feature(struct ata_device *dev, u8 subcmd, u8 action)
4544{
4545	struct ata_taskfile tf;
4546	unsigned int timeout = 0;
4547
4548	/* set up set-features taskfile */
4549	ata_dev_dbg(dev, "set features\n");
4550
4551	ata_tf_init(dev, &tf);
4552	tf.command = ATA_CMD_SET_FEATURES;
4553	tf.feature = subcmd;
4554	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4555	tf.protocol = ATA_PROT_NODATA;
4556	tf.nsect = action;
4557
4558	if (subcmd == SETFEATURES_SPINUP)
4559		timeout = ata_probe_timeout ?
4560			  ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4561
4562	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4563}
4564EXPORT_SYMBOL_GPL(ata_dev_set_feature);
4565
4566/**
4567 *	ata_dev_init_params - Issue INIT DEV PARAMS command
4568 *	@dev: Device to which command will be sent
4569 *	@heads: Number of heads (taskfile parameter)
4570 *	@sectors: Number of sectors (taskfile parameter)
4571 *
4572 *	LOCKING:
4573 *	Kernel thread context (may sleep)
4574 *
4575 *	RETURNS:
4576 *	0 on success, AC_ERR_* mask otherwise.
4577 */
4578static unsigned int ata_dev_init_params(struct ata_device *dev,
4579					u16 heads, u16 sectors)
4580{
4581	struct ata_taskfile tf;
4582	unsigned int err_mask;
4583
4584	/* Number of sectors per track 1-255. Number of heads 1-16 */
4585	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4586		return AC_ERR_INVALID;
4587
4588	/* set up init dev params taskfile */
4589	ata_dev_dbg(dev, "init dev params \n");
4590
4591	ata_tf_init(dev, &tf);
4592	tf.command = ATA_CMD_INIT_DEV_PARAMS;
4593	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4594	tf.protocol = ATA_PROT_NODATA;
4595	tf.nsect = sectors;
4596	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4597
4598	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4599	/* A clean abort indicates an original or just out of spec drive
4600	   and we should continue as we issue the setup based on the
4601	   drive reported working geometry */
4602	if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
4603		err_mask = 0;
4604
4605	return err_mask;
4606}
4607
4608/**
4609 *	atapi_check_dma - Check whether ATAPI DMA can be supported
4610 *	@qc: Metadata associated with taskfile to check
4611 *
4612 *	Allow low-level driver to filter ATA PACKET commands, returning
4613 *	a status indicating whether or not it is OK to use DMA for the
4614 *	supplied PACKET command.
4615 *
4616 *	LOCKING:
4617 *	spin_lock_irqsave(host lock)
4618 *
4619 *	RETURNS: 0 when ATAPI DMA can be used
4620 *               nonzero otherwise
4621 */
4622int atapi_check_dma(struct ata_queued_cmd *qc)
4623{
4624	struct ata_port *ap = qc->ap;
4625
4626	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
4627	 * few ATAPI devices choke on such DMA requests.
4628	 */
4629	if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4630	    unlikely(qc->nbytes & 15))
4631		return 1;
4632
4633	if (ap->ops->check_atapi_dma)
4634		return ap->ops->check_atapi_dma(qc);
4635
4636	return 0;
4637}
4638
4639/**
4640 *	ata_std_qc_defer - Check whether a qc needs to be deferred
4641 *	@qc: ATA command in question
4642 *
4643 *	Non-NCQ commands cannot run with any other command, NCQ or
4644 *	not.  As upper layer only knows the queue depth, we are
4645 *	responsible for maintaining exclusion.  This function checks
4646 *	whether a new command @qc can be issued.
4647 *
4648 *	LOCKING:
4649 *	spin_lock_irqsave(host lock)
4650 *
4651 *	RETURNS:
4652 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
4653 */
4654int ata_std_qc_defer(struct ata_queued_cmd *qc)
4655{
4656	struct ata_link *link = qc->dev->link;
4657
4658	if (ata_is_ncq(qc->tf.protocol)) {
4659		if (!ata_tag_valid(link->active_tag))
4660			return 0;
4661	} else {
4662		if (!ata_tag_valid(link->active_tag) && !link->sactive)
4663			return 0;
4664	}
4665
4666	return ATA_DEFER_LINK;
4667}
4668EXPORT_SYMBOL_GPL(ata_std_qc_defer);
4669
4670enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
4671{
4672	return AC_ERR_OK;
4673}
4674EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4675
4676/**
4677 *	ata_sg_init - Associate command with scatter-gather table.
4678 *	@qc: Command to be associated
4679 *	@sg: Scatter-gather table.
4680 *	@n_elem: Number of elements in s/g table.
4681 *
4682 *	Initialize the data-related elements of queued_cmd @qc
4683 *	to point to a scatter-gather table @sg, containing @n_elem
4684 *	elements.
4685 *
4686 *	LOCKING:
4687 *	spin_lock_irqsave(host lock)
4688 */
4689void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4690		 unsigned int n_elem)
4691{
4692	qc->sg = sg;
4693	qc->n_elem = n_elem;
4694	qc->cursg = qc->sg;
4695}
4696
4697#ifdef CONFIG_HAS_DMA
4698
4699/**
4700 *	ata_sg_clean - Unmap DMA memory associated with command
4701 *	@qc: Command containing DMA memory to be released
4702 *
4703 *	Unmap all mapped DMA memory associated with this command.
4704 *
4705 *	LOCKING:
4706 *	spin_lock_irqsave(host lock)
4707 */
4708static void ata_sg_clean(struct ata_queued_cmd *qc)
4709{
4710	struct ata_port *ap = qc->ap;
4711	struct scatterlist *sg = qc->sg;
4712	int dir = qc->dma_dir;
4713
4714	WARN_ON_ONCE(sg == NULL);
4715
4716	if (qc->n_elem)
4717		dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4718
4719	qc->flags &= ~ATA_QCFLAG_DMAMAP;
4720	qc->sg = NULL;
4721}
4722
4723/**
4724 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4725 *	@qc: Command with scatter-gather table to be mapped.
4726 *
4727 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
4728 *
4729 *	LOCKING:
4730 *	spin_lock_irqsave(host lock)
4731 *
4732 *	RETURNS:
4733 *	Zero on success, negative on error.
4734 *
4735 */
4736static int ata_sg_setup(struct ata_queued_cmd *qc)
4737{
4738	struct ata_port *ap = qc->ap;
4739	unsigned int n_elem;
4740
4741	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4742	if (n_elem < 1)
4743		return -1;
4744
4745	qc->orig_n_elem = qc->n_elem;
4746	qc->n_elem = n_elem;
4747	qc->flags |= ATA_QCFLAG_DMAMAP;
4748
4749	return 0;
4750}
4751
4752#else /* !CONFIG_HAS_DMA */
4753
4754static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
4755static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
4756
4757#endif /* !CONFIG_HAS_DMA */
4758
4759/**
4760 *	swap_buf_le16 - swap halves of 16-bit words in place
4761 *	@buf:  Buffer to swap
4762 *	@buf_words:  Number of 16-bit words in buffer.
4763 *
4764 *	Swap halves of 16-bit words if needed to convert from
4765 *	little-endian byte order to native cpu byte order, or
4766 *	vice-versa.
4767 *
4768 *	LOCKING:
4769 *	Inherited from caller.
4770 */
4771void swap_buf_le16(u16 *buf, unsigned int buf_words)
4772{
4773#ifdef __BIG_ENDIAN
4774	unsigned int i;
4775
4776	for (i = 0; i < buf_words; i++)
4777		buf[i] = le16_to_cpu(buf[i]);
4778#endif /* __BIG_ENDIAN */
4779}
4780
4781/**
4782 *	ata_qc_free - free unused ata_queued_cmd
4783 *	@qc: Command to complete
4784 *
4785 *	Designed to free unused ata_queued_cmd object
4786 *	in case something prevents using it.
4787 *
4788 *	LOCKING:
4789 *	spin_lock_irqsave(host lock)
4790 */
4791void ata_qc_free(struct ata_queued_cmd *qc)
4792{
4793	qc->flags = 0;
4794	if (ata_tag_valid(qc->tag))
4795		qc->tag = ATA_TAG_POISON;
4796}
4797
4798void __ata_qc_complete(struct ata_queued_cmd *qc)
4799{
4800	struct ata_port *ap;
4801	struct ata_link *link;
4802
4803	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4804	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4805	ap = qc->ap;
4806	link = qc->dev->link;
4807
4808	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4809		ata_sg_clean(qc);
4810
4811	/* command should be marked inactive atomically with qc completion */
4812	if (ata_is_ncq(qc->tf.protocol)) {
4813		link->sactive &= ~(1 << qc->hw_tag);
4814		if (!link->sactive)
4815			ap->nr_active_links--;
4816	} else {
4817		link->active_tag = ATA_TAG_POISON;
4818		ap->nr_active_links--;
4819	}
4820
4821	/* clear exclusive status */
4822	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4823		     ap->excl_link == link))
4824		ap->excl_link = NULL;
4825
4826	/* atapi: mark qc as inactive to prevent the interrupt handler
4827	 * from completing the command twice later, before the error handler
4828	 * is called. (when rc != 0 and atapi request sense is needed)
4829	 */
4830	qc->flags &= ~ATA_QCFLAG_ACTIVE;
4831	ap->qc_active &= ~(1ULL << qc->tag);
4832
4833	/* call completion callback */
4834	qc->complete_fn(qc);
4835}
4836
4837static void fill_result_tf(struct ata_queued_cmd *qc)
4838{
4839	struct ata_port *ap = qc->ap;
4840
4841	qc->result_tf.flags = qc->tf.flags;
4842	ap->ops->qc_fill_rtf(qc);
4843}
4844
4845static void ata_verify_xfer(struct ata_queued_cmd *qc)
4846{
4847	struct ata_device *dev = qc->dev;
4848
4849	if (!ata_is_data(qc->tf.protocol))
4850		return;
4851
4852	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4853		return;
4854
4855	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4856}
4857
4858/**
4859 *	ata_qc_complete - Complete an active ATA command
4860 *	@qc: Command to complete
4861 *
4862 *	Indicate to the mid and upper layers that an ATA command has
4863 *	completed, with either an ok or not-ok status.
4864 *
4865 *	Refrain from calling this function multiple times when
4866 *	successfully completing multiple NCQ commands.
4867 *	ata_qc_complete_multiple() should be used instead, which will
4868 *	properly update IRQ expect state.
4869 *
4870 *	LOCKING:
4871 *	spin_lock_irqsave(host lock)
4872 */
4873void ata_qc_complete(struct ata_queued_cmd *qc)
4874{
4875	struct ata_port *ap = qc->ap;
4876	struct ata_device *dev = qc->dev;
4877	struct ata_eh_info *ehi = &dev->link->eh_info;
4878
4879	/* Trigger the LED (if available) */
4880	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
4881
4882	/*
4883	 * In order to synchronize EH with the regular execution path, a qc that
4884	 * is owned by EH is marked with ATA_QCFLAG_EH.
4885	 *
4886	 * The normal execution path is responsible for not accessing a qc owned
4887	 * by EH.  libata core enforces the rule by returning NULL from
4888	 * ata_qc_from_tag() for qcs owned by EH.
4889	 */
4890	if (unlikely(qc->err_mask))
4891		qc->flags |= ATA_QCFLAG_EH;
4892
4893	/*
4894	 * Finish internal commands without any further processing and always
4895	 * with the result TF filled.
4896	 */
4897	if (unlikely(ata_tag_internal(qc->tag))) {
4898		fill_result_tf(qc);
4899		trace_ata_qc_complete_internal(qc);
4900		__ata_qc_complete(qc);
4901		return;
4902	}
4903
4904	/* Non-internal qc has failed.  Fill the result TF and summon EH. */
4905	if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
4906		fill_result_tf(qc);
4907		trace_ata_qc_complete_failed(qc);
4908		ata_qc_schedule_eh(qc);
4909		return;
4910	}
4911
4912	WARN_ON_ONCE(ata_port_is_frozen(ap));
4913
4914	/* read result TF if requested */
4915	if (qc->flags & ATA_QCFLAG_RESULT_TF)
4916		fill_result_tf(qc);
4917
4918	trace_ata_qc_complete_done(qc);
4919
4920	/*
4921	 * For CDL commands that completed without an error, check if we have
4922	 * sense data (ATA_SENSE is set). If we do, then the command may have
4923	 * been aborted by the device due to a limit timeout using the policy
4924	 * 0xD. For these commands, invoke EH to get the command sense data.
4925	 */
4926	if (qc->flags & ATA_QCFLAG_HAS_CDL &&
4927	    qc->result_tf.status & ATA_SENSE) {
4928		/*
4929		 * Tell SCSI EH to not overwrite scmd->result even if this
4930		 * command is finished with result SAM_STAT_GOOD.
4931		 */
4932		qc->scsicmd->flags |= SCMD_FORCE_EH_SUCCESS;
4933		qc->flags |= ATA_QCFLAG_EH_SUCCESS_CMD;
4934		ehi->dev_action[dev->devno] |= ATA_EH_GET_SUCCESS_SENSE;
4935
4936		/*
4937		 * set pending so that ata_qc_schedule_eh() does not trigger
4938		 * fast drain, and freeze the port.
4939		 */
4940		ap->pflags |= ATA_PFLAG_EH_PENDING;
4941		ata_qc_schedule_eh(qc);
4942		return;
4943	}
4944
4945	/* Some commands need post-processing after successful completion. */
4946	switch (qc->tf.command) {
4947	case ATA_CMD_SET_FEATURES:
4948		if (qc->tf.feature != SETFEATURES_WC_ON &&
4949		    qc->tf.feature != SETFEATURES_WC_OFF &&
4950		    qc->tf.feature != SETFEATURES_RA_ON &&
4951		    qc->tf.feature != SETFEATURES_RA_OFF)
4952			break;
4953		fallthrough;
4954	case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4955	case ATA_CMD_SET_MULTI: /* multi_count changed */
4956		/* revalidate device */
4957		ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4958		ata_port_schedule_eh(ap);
4959		break;
4960
4961	case ATA_CMD_SLEEP:
4962		dev->flags |= ATA_DFLAG_SLEEPING;
4963		break;
4964	}
4965
4966	if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4967		ata_verify_xfer(qc);
4968
4969	__ata_qc_complete(qc);
4970}
4971EXPORT_SYMBOL_GPL(ata_qc_complete);
4972
4973/**
4974 *	ata_qc_get_active - get bitmask of active qcs
4975 *	@ap: port in question
4976 *
4977 *	LOCKING:
4978 *	spin_lock_irqsave(host lock)
4979 *
4980 *	RETURNS:
4981 *	Bitmask of active qcs
4982 */
4983u64 ata_qc_get_active(struct ata_port *ap)
4984{
4985	u64 qc_active = ap->qc_active;
4986
4987	/* ATA_TAG_INTERNAL is sent to hw as tag 0 */
4988	if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
4989		qc_active |= (1 << 0);
4990		qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
4991	}
4992
4993	return qc_active;
4994}
4995EXPORT_SYMBOL_GPL(ata_qc_get_active);
4996
4997/**
4998 *	ata_qc_issue - issue taskfile to device
4999 *	@qc: command to issue to device
5000 *
5001 *	Prepare an ATA command to submission to device.
5002 *	This includes mapping the data into a DMA-able
5003 *	area, filling in the S/G table, and finally
5004 *	writing the taskfile to hardware, starting the command.
5005 *
5006 *	LOCKING:
5007 *	spin_lock_irqsave(host lock)
5008 */
5009void ata_qc_issue(struct ata_queued_cmd *qc)
5010{
5011	struct ata_port *ap = qc->ap;
5012	struct ata_link *link = qc->dev->link;
5013	u8 prot = qc->tf.protocol;
5014
5015	/* Make sure only one non-NCQ command is outstanding. */
5016	WARN_ON_ONCE(ata_tag_valid(link->active_tag));
5017
5018	if (ata_is_ncq(prot)) {
5019		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
5020
5021		if (!link->sactive)
5022			ap->nr_active_links++;
5023		link->sactive |= 1 << qc->hw_tag;
5024	} else {
5025		WARN_ON_ONCE(link->sactive);
5026
5027		ap->nr_active_links++;
5028		link->active_tag = qc->tag;
5029	}
5030
5031	qc->flags |= ATA_QCFLAG_ACTIVE;
5032	ap->qc_active |= 1ULL << qc->tag;
5033
5034	/*
5035	 * We guarantee to LLDs that they will have at least one
5036	 * non-zero sg if the command is a data command.
5037	 */
5038	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
5039		goto sys_err;
5040
5041	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
5042				 (ap->flags & ATA_FLAG_PIO_DMA)))
5043		if (ata_sg_setup(qc))
5044			goto sys_err;
5045
5046	/* if device is sleeping, schedule reset and abort the link */
5047	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
5048		link->eh_info.action |= ATA_EH_RESET;
5049		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
5050		ata_link_abort(link);
5051		return;
5052	}
5053
5054	trace_ata_qc_prep(qc);
5055	qc->err_mask |= ap->ops->qc_prep(qc);
5056	if (unlikely(qc->err_mask))
5057		goto err;
5058	trace_ata_qc_issue(qc);
5059	qc->err_mask |= ap->ops->qc_issue(qc);
5060	if (unlikely(qc->err_mask))
5061		goto err;
5062	return;
5063
5064sys_err:
5065	qc->err_mask |= AC_ERR_SYSTEM;
5066err:
5067	ata_qc_complete(qc);
5068}
5069
5070/**
5071 *	ata_phys_link_online - test whether the given link is online
5072 *	@link: ATA link to test
5073 *
5074 *	Test whether @link is online.  Note that this function returns
5075 *	0 if online status of @link cannot be obtained, so
5076 *	ata_link_online(link) != !ata_link_offline(link).
5077 *
5078 *	LOCKING:
5079 *	None.
5080 *
5081 *	RETURNS:
5082 *	True if the port online status is available and online.
5083 */
5084bool ata_phys_link_online(struct ata_link *link)
5085{
5086	u32 sstatus;
5087
5088	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5089	    ata_sstatus_online(sstatus))
5090		return true;
5091	return false;
5092}
5093
5094/**
5095 *	ata_phys_link_offline - test whether the given link is offline
5096 *	@link: ATA link to test
5097 *
5098 *	Test whether @link is offline.  Note that this function
5099 *	returns 0 if offline status of @link cannot be obtained, so
5100 *	ata_link_online(link) != !ata_link_offline(link).
5101 *
5102 *	LOCKING:
5103 *	None.
5104 *
5105 *	RETURNS:
5106 *	True if the port offline status is available and offline.
5107 */
5108bool ata_phys_link_offline(struct ata_link *link)
5109{
5110	u32 sstatus;
5111
5112	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5113	    !ata_sstatus_online(sstatus))
5114		return true;
5115	return false;
5116}
5117
5118/**
5119 *	ata_link_online - test whether the given link is online
5120 *	@link: ATA link to test
5121 *
5122 *	Test whether @link is online.  This is identical to
5123 *	ata_phys_link_online() when there's no slave link.  When
5124 *	there's a slave link, this function should only be called on
5125 *	the master link and will return true if any of M/S links is
5126 *	online.
5127 *
5128 *	LOCKING:
5129 *	None.
5130 *
5131 *	RETURNS:
5132 *	True if the port online status is available and online.
5133 */
5134bool ata_link_online(struct ata_link *link)
5135{
5136	struct ata_link *slave = link->ap->slave_link;
5137
5138	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5139
5140	return ata_phys_link_online(link) ||
5141		(slave && ata_phys_link_online(slave));
5142}
5143EXPORT_SYMBOL_GPL(ata_link_online);
5144
5145/**
5146 *	ata_link_offline - test whether the given link is offline
5147 *	@link: ATA link to test
5148 *
5149 *	Test whether @link is offline.  This is identical to
5150 *	ata_phys_link_offline() when there's no slave link.  When
5151 *	there's a slave link, this function should only be called on
5152 *	the master link and will return true if both M/S links are
5153 *	offline.
5154 *
5155 *	LOCKING:
5156 *	None.
5157 *
5158 *	RETURNS:
5159 *	True if the port offline status is available and offline.
5160 */
5161bool ata_link_offline(struct ata_link *link)
5162{
5163	struct ata_link *slave = link->ap->slave_link;
5164
5165	WARN_ON(link == slave);	/* shouldn't be called on slave link */
5166
5167	return ata_phys_link_offline(link) &&
5168		(!slave || ata_phys_link_offline(slave));
5169}
5170EXPORT_SYMBOL_GPL(ata_link_offline);
5171
5172#ifdef CONFIG_PM
5173static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
5174				unsigned int action, unsigned int ehi_flags,
5175				bool async)
5176{
5177	struct ata_link *link;
5178	unsigned long flags;
5179
5180	spin_lock_irqsave(ap->lock, flags);
5181
5182	/*
5183	 * A previous PM operation might still be in progress. Wait for
5184	 * ATA_PFLAG_PM_PENDING to clear.
5185	 */
5186	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5187		spin_unlock_irqrestore(ap->lock, flags);
5188		ata_port_wait_eh(ap);
5189		spin_lock_irqsave(ap->lock, flags);
5190	}
5191
5192	/* Request PM operation to EH */
5193	ap->pm_mesg = mesg;
5194	ap->pflags |= ATA_PFLAG_PM_PENDING;
5195	ata_for_each_link(link, ap, HOST_FIRST) {
5196		link->eh_info.action |= action;
5197		link->eh_info.flags |= ehi_flags;
5198	}
5199
5200	ata_port_schedule_eh(ap);
5201
5202	spin_unlock_irqrestore(ap->lock, flags);
5203
5204	if (!async)
5205		ata_port_wait_eh(ap);
5206}
5207
5208static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg,
5209			     bool async)
5210{
5211	/*
5212	 * We are about to suspend the port, so we do not care about
5213	 * scsi_rescan_device() calls scheduled by previous resume operations.
5214	 * The next resume will schedule the rescan again. So cancel any rescan
5215	 * that is not done yet.
5216	 */
5217	cancel_delayed_work_sync(&ap->scsi_rescan_task);
5218
5219	/*
5220	 * On some hardware, device fails to respond after spun down for
5221	 * suspend. As the device will not be used until being resumed, we
5222	 * do not need to touch the device. Ask EH to skip the usual stuff
5223	 * and proceed directly to suspend.
5224	 *
5225	 * http://thread.gmane.org/gmane.linux.ide/46764
5226	 */
5227	ata_port_request_pm(ap, mesg, 0,
5228			    ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
5229			    ATA_EHI_NO_RECOVERY,
5230			    async);
5231}
5232
5233static int ata_port_pm_suspend(struct device *dev)
5234{
5235	struct ata_port *ap = to_ata_port(dev);
5236
5237	if (pm_runtime_suspended(dev))
5238		return 0;
5239
5240	ata_port_suspend(ap, PMSG_SUSPEND, false);
5241	return 0;
5242}
5243
5244static int ata_port_pm_freeze(struct device *dev)
5245{
5246	struct ata_port *ap = to_ata_port(dev);
5247
5248	if (pm_runtime_suspended(dev))
5249		return 0;
5250
5251	ata_port_suspend(ap, PMSG_FREEZE, false);
5252	return 0;
5253}
5254
5255static int ata_port_pm_poweroff(struct device *dev)
5256{
5257	if (!pm_runtime_suspended(dev))
5258		ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE, false);
5259	return 0;
5260}
5261
5262static void ata_port_resume(struct ata_port *ap, pm_message_t mesg,
5263			    bool async)
5264{
5265	ata_port_request_pm(ap, mesg, ATA_EH_RESET,
5266			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET,
5267			    async);
5268}
5269
5270static int ata_port_pm_resume(struct device *dev)
5271{
5272	if (!pm_runtime_suspended(dev))
5273		ata_port_resume(to_ata_port(dev), PMSG_RESUME, true);
5274	return 0;
5275}
5276
5277/*
5278 * For ODDs, the upper layer will poll for media change every few seconds,
5279 * which will make it enter and leave suspend state every few seconds. And
5280 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
5281 * is very little and the ODD may malfunction after constantly being reset.
5282 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5283 * ODD is attached to the port.
5284 */
5285static int ata_port_runtime_idle(struct device *dev)
5286{
5287	struct ata_port *ap = to_ata_port(dev);
5288	struct ata_link *link;
5289	struct ata_device *adev;
5290
5291	ata_for_each_link(link, ap, HOST_FIRST) {
5292		ata_for_each_dev(adev, link, ENABLED)
5293			if (adev->class == ATA_DEV_ATAPI &&
5294			    !zpodd_dev_enabled(adev))
5295				return -EBUSY;
5296	}
5297
5298	return 0;
5299}
5300
5301static int ata_port_runtime_suspend(struct device *dev)
5302{
5303	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND, false);
5304	return 0;
5305}
5306
5307static int ata_port_runtime_resume(struct device *dev)
5308{
5309	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME, false);
5310	return 0;
5311}
5312
5313static const struct dev_pm_ops ata_port_pm_ops = {
5314	.suspend = ata_port_pm_suspend,
5315	.resume = ata_port_pm_resume,
5316	.freeze = ata_port_pm_freeze,
5317	.thaw = ata_port_pm_resume,
5318	.poweroff = ata_port_pm_poweroff,
5319	.restore = ata_port_pm_resume,
5320
5321	.runtime_suspend = ata_port_runtime_suspend,
5322	.runtime_resume = ata_port_runtime_resume,
5323	.runtime_idle = ata_port_runtime_idle,
5324};
5325
5326/* sas ports don't participate in pm runtime management of ata_ports,
5327 * and need to resume ata devices at the domain level, not the per-port
5328 * level. sas suspend/resume is async to allow parallel port recovery
5329 * since sas has multiple ata_port instances per Scsi_Host.
5330 */
5331void ata_sas_port_suspend(struct ata_port *ap)
5332{
5333	ata_port_suspend(ap, PMSG_SUSPEND, true);
5334}
5335EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5336
5337void ata_sas_port_resume(struct ata_port *ap)
5338{
5339	ata_port_resume(ap, PMSG_RESUME, true);
5340}
5341EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5342
5343/**
5344 *	ata_host_suspend - suspend host
5345 *	@host: host to suspend
5346 *	@mesg: PM message
5347 *
5348 *	Suspend @host.  Actual operation is performed by port suspend.
5349 */
5350void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5351{
5352	host->dev->power.power_state = mesg;
5353}
5354EXPORT_SYMBOL_GPL(ata_host_suspend);
5355
5356/**
5357 *	ata_host_resume - resume host
5358 *	@host: host to resume
5359 *
5360 *	Resume @host.  Actual operation is performed by port resume.
5361 */
5362void ata_host_resume(struct ata_host *host)
5363{
5364	host->dev->power.power_state = PMSG_ON;
5365}
5366EXPORT_SYMBOL_GPL(ata_host_resume);
5367#endif
5368
5369const struct device_type ata_port_type = {
5370	.name = ATA_PORT_TYPE_NAME,
5371#ifdef CONFIG_PM
5372	.pm = &ata_port_pm_ops,
5373#endif
5374};
5375
5376/**
5377 *	ata_dev_init - Initialize an ata_device structure
5378 *	@dev: Device structure to initialize
5379 *
5380 *	Initialize @dev in preparation for probing.
5381 *
5382 *	LOCKING:
5383 *	Inherited from caller.
5384 */
5385void ata_dev_init(struct ata_device *dev)
5386{
5387	struct ata_link *link = ata_dev_phys_link(dev);
5388	struct ata_port *ap = link->ap;
5389	unsigned long flags;
5390
5391	/* SATA spd limit is bound to the attached device, reset together */
5392	link->sata_spd_limit = link->hw_sata_spd_limit;
5393	link->sata_spd = 0;
5394
5395	/* High bits of dev->flags are used to record warm plug
5396	 * requests which occur asynchronously.  Synchronize using
5397	 * host lock.
5398	 */
5399	spin_lock_irqsave(ap->lock, flags);
5400	dev->flags &= ~ATA_DFLAG_INIT_MASK;
5401	dev->horkage = 0;
5402	spin_unlock_irqrestore(ap->lock, flags);
5403
5404	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5405	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5406	dev->pio_mask = UINT_MAX;
5407	dev->mwdma_mask = UINT_MAX;
5408	dev->udma_mask = UINT_MAX;
5409}
5410
5411/**
5412 *	ata_link_init - Initialize an ata_link structure
5413 *	@ap: ATA port link is attached to
5414 *	@link: Link structure to initialize
5415 *	@pmp: Port multiplier port number
5416 *
5417 *	Initialize @link.
5418 *
5419 *	LOCKING:
5420 *	Kernel thread context (may sleep)
5421 */
5422void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5423{
5424	int i;
5425
5426	/* clear everything except for devices */
5427	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5428	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5429
5430	link->ap = ap;
5431	link->pmp = pmp;
5432	link->active_tag = ATA_TAG_POISON;
5433	link->hw_sata_spd_limit = UINT_MAX;
5434
5435	/* can't use iterator, ap isn't initialized yet */
5436	for (i = 0; i < ATA_MAX_DEVICES; i++) {
5437		struct ata_device *dev = &link->device[i];
5438
5439		dev->link = link;
5440		dev->devno = dev - link->device;
5441#ifdef CONFIG_ATA_ACPI
5442		dev->gtf_filter = ata_acpi_gtf_filter;
5443#endif
5444		ata_dev_init(dev);
5445	}
5446}
5447
5448/**
5449 *	sata_link_init_spd - Initialize link->sata_spd_limit
5450 *	@link: Link to configure sata_spd_limit for
5451 *
5452 *	Initialize ``link->[hw_]sata_spd_limit`` to the currently
5453 *	configured value.
5454 *
5455 *	LOCKING:
5456 *	Kernel thread context (may sleep).
5457 *
5458 *	RETURNS:
5459 *	0 on success, -errno on failure.
5460 */
5461int sata_link_init_spd(struct ata_link *link)
5462{
5463	u8 spd;
5464	int rc;
5465
5466	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5467	if (rc)
5468		return rc;
5469
5470	spd = (link->saved_scontrol >> 4) & 0xf;
5471	if (spd)
5472		link->hw_sata_spd_limit &= (1 << spd) - 1;
5473
5474	ata_force_link_limits(link);
5475
5476	link->sata_spd_limit = link->hw_sata_spd_limit;
5477
5478	return 0;
5479}
5480
5481/**
5482 *	ata_port_alloc - allocate and initialize basic ATA port resources
5483 *	@host: ATA host this allocated port belongs to
5484 *
5485 *	Allocate and initialize basic ATA port resources.
5486 *
5487 *	RETURNS:
5488 *	Allocate ATA port on success, NULL on failure.
5489 *
5490 *	LOCKING:
5491 *	Inherited from calling layer (may sleep).
5492 */
5493struct ata_port *ata_port_alloc(struct ata_host *host)
5494{
5495	struct ata_port *ap;
5496
5497	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5498	if (!ap)
5499		return NULL;
5500
5501	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5502	ap->lock = &host->lock;
5503	ap->print_id = -1;
5504	ap->local_port_no = -1;
5505	ap->host = host;
5506	ap->dev = host->dev;
5507
5508	mutex_init(&ap->scsi_scan_mutex);
5509	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5510	INIT_DELAYED_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5511	INIT_LIST_HEAD(&ap->eh_done_q);
5512	init_waitqueue_head(&ap->eh_wait_q);
5513	init_completion(&ap->park_req_pending);
5514	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
5515		    TIMER_DEFERRABLE);
5516
5517	ap->cbl = ATA_CBL_NONE;
5518
5519	ata_link_init(ap, &ap->link, 0);
5520
5521#ifdef ATA_IRQ_TRAP
5522	ap->stats.unhandled_irq = 1;
5523	ap->stats.idle_irq = 1;
5524#endif
5525	ata_sff_port_init(ap);
5526
5527	return ap;
5528}
5529
5530static void ata_devres_release(struct device *gendev, void *res)
5531{
5532	struct ata_host *host = dev_get_drvdata(gendev);
5533	int i;
5534
5535	for (i = 0; i < host->n_ports; i++) {
5536		struct ata_port *ap = host->ports[i];
5537
5538		if (!ap)
5539			continue;
5540
5541		if (ap->scsi_host)
5542			scsi_host_put(ap->scsi_host);
5543
5544	}
5545
5546	dev_set_drvdata(gendev, NULL);
5547	ata_host_put(host);
5548}
5549
5550static void ata_host_release(struct kref *kref)
5551{
5552	struct ata_host *host = container_of(kref, struct ata_host, kref);
5553	int i;
5554
5555	for (i = 0; i < host->n_ports; i++) {
5556		struct ata_port *ap = host->ports[i];
5557
5558		kfree(ap->pmp_link);
5559		kfree(ap->slave_link);
5560		kfree(ap->ncq_sense_buf);
5561		kfree(ap);
5562		host->ports[i] = NULL;
5563	}
5564	kfree(host);
5565}
5566
5567void ata_host_get(struct ata_host *host)
5568{
5569	kref_get(&host->kref);
5570}
5571
5572void ata_host_put(struct ata_host *host)
5573{
5574	kref_put(&host->kref, ata_host_release);
5575}
5576EXPORT_SYMBOL_GPL(ata_host_put);
5577
5578/**
5579 *	ata_host_alloc - allocate and init basic ATA host resources
5580 *	@dev: generic device this host is associated with
5581 *	@max_ports: maximum number of ATA ports associated with this host
5582 *
5583 *	Allocate and initialize basic ATA host resources.  LLD calls
5584 *	this function to allocate a host, initializes it fully and
5585 *	attaches it using ata_host_register().
5586 *
5587 *	@max_ports ports are allocated and host->n_ports is
5588 *	initialized to @max_ports.  The caller is allowed to decrease
5589 *	host->n_ports before calling ata_host_register().  The unused
5590 *	ports will be automatically freed on registration.
5591 *
5592 *	RETURNS:
5593 *	Allocate ATA host on success, NULL on failure.
5594 *
5595 *	LOCKING:
5596 *	Inherited from calling layer (may sleep).
5597 */
5598struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5599{
5600	struct ata_host *host;
5601	size_t sz;
5602	int i;
5603	void *dr;
5604
5605	/* alloc a container for our list of ATA ports (buses) */
5606	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5607	host = kzalloc(sz, GFP_KERNEL);
5608	if (!host)
5609		return NULL;
5610
5611	if (!devres_open_group(dev, NULL, GFP_KERNEL))
5612		goto err_free;
5613
5614	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
5615	if (!dr)
5616		goto err_out;
5617
5618	devres_add(dev, dr);
5619	dev_set_drvdata(dev, host);
5620
5621	spin_lock_init(&host->lock);
5622	mutex_init(&host->eh_mutex);
5623	host->dev = dev;
5624	host->n_ports = max_ports;
5625	kref_init(&host->kref);
5626
5627	/* allocate ports bound to this host */
5628	for (i = 0; i < max_ports; i++) {
5629		struct ata_port *ap;
5630
5631		ap = ata_port_alloc(host);
5632		if (!ap)
5633			goto err_out;
5634
5635		ap->port_no = i;
5636		host->ports[i] = ap;
5637	}
5638
5639	devres_remove_group(dev, NULL);
5640	return host;
5641
5642 err_out:
5643	devres_release_group(dev, NULL);
5644 err_free:
5645	kfree(host);
5646	return NULL;
5647}
5648EXPORT_SYMBOL_GPL(ata_host_alloc);
5649
5650/**
5651 *	ata_host_alloc_pinfo - alloc host and init with port_info array
5652 *	@dev: generic device this host is associated with
5653 *	@ppi: array of ATA port_info to initialize host with
5654 *	@n_ports: number of ATA ports attached to this host
5655 *
5656 *	Allocate ATA host and initialize with info from @ppi.  If NULL
5657 *	terminated, @ppi may contain fewer entries than @n_ports.  The
5658 *	last entry will be used for the remaining ports.
5659 *
5660 *	RETURNS:
5661 *	Allocate ATA host on success, NULL on failure.
5662 *
5663 *	LOCKING:
5664 *	Inherited from calling layer (may sleep).
5665 */
5666struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5667				      const struct ata_port_info * const * ppi,
5668				      int n_ports)
5669{
5670	const struct ata_port_info *pi = &ata_dummy_port_info;
5671	struct ata_host *host;
5672	int i, j;
5673
5674	host = ata_host_alloc(dev, n_ports);
5675	if (!host)
5676		return NULL;
5677
5678	for (i = 0, j = 0; i < host->n_ports; i++) {
5679		struct ata_port *ap = host->ports[i];
5680
5681		if (ppi[j])
5682			pi = ppi[j++];
5683
5684		ap->pio_mask = pi->pio_mask;
5685		ap->mwdma_mask = pi->mwdma_mask;
5686		ap->udma_mask = pi->udma_mask;
5687		ap->flags |= pi->flags;
5688		ap->link.flags |= pi->link_flags;
5689		ap->ops = pi->port_ops;
5690
5691		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5692			host->ops = pi->port_ops;
5693	}
5694
5695	return host;
5696}
5697EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
5698
5699static void ata_host_stop(struct device *gendev, void *res)
5700{
5701	struct ata_host *host = dev_get_drvdata(gendev);
5702	int i;
5703
5704	WARN_ON(!(host->flags & ATA_HOST_STARTED));
5705
5706	for (i = 0; i < host->n_ports; i++) {
5707		struct ata_port *ap = host->ports[i];
5708
5709		if (ap->ops->port_stop)
5710			ap->ops->port_stop(ap);
5711	}
5712
5713	if (host->ops->host_stop)
5714		host->ops->host_stop(host);
5715}
5716
5717/**
5718 *	ata_finalize_port_ops - finalize ata_port_operations
5719 *	@ops: ata_port_operations to finalize
5720 *
5721 *	An ata_port_operations can inherit from another ops and that
5722 *	ops can again inherit from another.  This can go on as many
5723 *	times as necessary as long as there is no loop in the
5724 *	inheritance chain.
5725 *
5726 *	Ops tables are finalized when the host is started.  NULL or
5727 *	unspecified entries are inherited from the closet ancestor
5728 *	which has the method and the entry is populated with it.
5729 *	After finalization, the ops table directly points to all the
5730 *	methods and ->inherits is no longer necessary and cleared.
5731 *
5732 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5733 *
5734 *	LOCKING:
5735 *	None.
5736 */
5737static void ata_finalize_port_ops(struct ata_port_operations *ops)
5738{
5739	static DEFINE_SPINLOCK(lock);
5740	const struct ata_port_operations *cur;
5741	void **begin = (void **)ops;
5742	void **end = (void **)&ops->inherits;
5743	void **pp;
5744
5745	if (!ops || !ops->inherits)
5746		return;
5747
5748	spin_lock(&lock);
5749
5750	for (cur = ops->inherits; cur; cur = cur->inherits) {
5751		void **inherit = (void **)cur;
5752
5753		for (pp = begin; pp < end; pp++, inherit++)
5754			if (!*pp)
5755				*pp = *inherit;
5756	}
5757
5758	for (pp = begin; pp < end; pp++)
5759		if (IS_ERR(*pp))
5760			*pp = NULL;
5761
5762	ops->inherits = NULL;
5763
5764	spin_unlock(&lock);
5765}
5766
5767/**
5768 *	ata_host_start - start and freeze ports of an ATA host
5769 *	@host: ATA host to start ports for
5770 *
5771 *	Start and then freeze ports of @host.  Started status is
5772 *	recorded in host->flags, so this function can be called
5773 *	multiple times.  Ports are guaranteed to get started only
5774 *	once.  If host->ops is not initialized yet, it is set to the
5775 *	first non-dummy port ops.
5776 *
5777 *	LOCKING:
5778 *	Inherited from calling layer (may sleep).
5779 *
5780 *	RETURNS:
5781 *	0 if all ports are started successfully, -errno otherwise.
5782 */
5783int ata_host_start(struct ata_host *host)
5784{
5785	int have_stop = 0;
5786	void *start_dr = NULL;
5787	int i, rc;
5788
5789	if (host->flags & ATA_HOST_STARTED)
5790		return 0;
5791
5792	ata_finalize_port_ops(host->ops);
5793
5794	for (i = 0; i < host->n_ports; i++) {
5795		struct ata_port *ap = host->ports[i];
5796
5797		ata_finalize_port_ops(ap->ops);
5798
5799		if (!host->ops && !ata_port_is_dummy(ap))
5800			host->ops = ap->ops;
5801
5802		if (ap->ops->port_stop)
5803			have_stop = 1;
5804	}
5805
5806	if (host->ops && host->ops->host_stop)
5807		have_stop = 1;
5808
5809	if (have_stop) {
5810		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5811		if (!start_dr)
5812			return -ENOMEM;
5813	}
5814
5815	for (i = 0; i < host->n_ports; i++) {
5816		struct ata_port *ap = host->ports[i];
5817
5818		if (ap->ops->port_start) {
5819			rc = ap->ops->port_start(ap);
5820			if (rc) {
5821				if (rc != -ENODEV)
5822					dev_err(host->dev,
5823						"failed to start port %d (errno=%d)\n",
5824						i, rc);
5825				goto err_out;
5826			}
5827		}
5828		ata_eh_freeze_port(ap);
5829	}
5830
5831	if (start_dr)
5832		devres_add(host->dev, start_dr);
5833	host->flags |= ATA_HOST_STARTED;
5834	return 0;
5835
5836 err_out:
5837	while (--i >= 0) {
5838		struct ata_port *ap = host->ports[i];
5839
5840		if (ap->ops->port_stop)
5841			ap->ops->port_stop(ap);
5842	}
5843	devres_free(start_dr);
5844	return rc;
5845}
5846EXPORT_SYMBOL_GPL(ata_host_start);
5847
5848/**
5849 *	ata_host_init - Initialize a host struct for sas (ipr, libsas)
5850 *	@host:	host to initialize
5851 *	@dev:	device host is attached to
5852 *	@ops:	port_ops
5853 *
5854 */
5855void ata_host_init(struct ata_host *host, struct device *dev,
5856		   struct ata_port_operations *ops)
5857{
5858	spin_lock_init(&host->lock);
5859	mutex_init(&host->eh_mutex);
5860	host->n_tags = ATA_MAX_QUEUE;
5861	host->dev = dev;
5862	host->ops = ops;
5863	kref_init(&host->kref);
5864}
5865EXPORT_SYMBOL_GPL(ata_host_init);
5866
5867void ata_port_probe(struct ata_port *ap)
5868{
5869	struct ata_eh_info *ehi = &ap->link.eh_info;
5870	unsigned long flags;
5871
5872	/* kick EH for boot probing */
5873	spin_lock_irqsave(ap->lock, flags);
5874
5875	ehi->probe_mask |= ATA_ALL_DEVICES;
5876	ehi->action |= ATA_EH_RESET;
5877	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5878
5879	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5880	ap->pflags |= ATA_PFLAG_LOADING;
5881	ata_port_schedule_eh(ap);
5882
5883	spin_unlock_irqrestore(ap->lock, flags);
5884}
5885EXPORT_SYMBOL_GPL(ata_port_probe);
5886
5887static void async_port_probe(void *data, async_cookie_t cookie)
5888{
5889	struct ata_port *ap = data;
5890
5891	/*
5892	 * If we're not allowed to scan this host in parallel,
5893	 * we need to wait until all previous scans have completed
5894	 * before going further.
5895	 * Jeff Garzik says this is only within a controller, so we
5896	 * don't need to wait for port 0, only for later ports.
5897	 */
5898	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5899		async_synchronize_cookie(cookie);
5900
5901	ata_port_probe(ap);
5902	ata_port_wait_eh(ap);
5903
5904	/* in order to keep device order, we need to synchronize at this point */
5905	async_synchronize_cookie(cookie);
5906
5907	ata_scsi_scan_host(ap, 1);
5908}
5909
5910/**
5911 *	ata_host_register - register initialized ATA host
5912 *	@host: ATA host to register
5913 *	@sht: template for SCSI host
5914 *
5915 *	Register initialized ATA host.  @host is allocated using
5916 *	ata_host_alloc() and fully initialized by LLD.  This function
5917 *	starts ports, registers @host with ATA and SCSI layers and
5918 *	probe registered devices.
5919 *
5920 *	LOCKING:
5921 *	Inherited from calling layer (may sleep).
5922 *
5923 *	RETURNS:
5924 *	0 on success, -errno otherwise.
5925 */
5926int ata_host_register(struct ata_host *host, const struct scsi_host_template *sht)
5927{
5928	int i, rc;
5929
5930	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
5931
5932	/* host must have been started */
5933	if (!(host->flags & ATA_HOST_STARTED)) {
5934		dev_err(host->dev, "BUG: trying to register unstarted host\n");
5935		WARN_ON(1);
5936		return -EINVAL;
5937	}
5938
5939	/* Blow away unused ports.  This happens when LLD can't
5940	 * determine the exact number of ports to allocate at
5941	 * allocation time.
5942	 */
5943	for (i = host->n_ports; host->ports[i]; i++)
5944		kfree(host->ports[i]);
5945
5946	/* give ports names and add SCSI hosts */
5947	for (i = 0; i < host->n_ports; i++) {
5948		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
5949		host->ports[i]->local_port_no = i + 1;
5950	}
5951
5952	/* Create associated sysfs transport objects  */
5953	for (i = 0; i < host->n_ports; i++) {
5954		rc = ata_tport_add(host->dev,host->ports[i]);
5955		if (rc) {
5956			goto err_tadd;
5957		}
5958	}
5959
5960	rc = ata_scsi_add_hosts(host, sht);
5961	if (rc)
5962		goto err_tadd;
5963
5964	/* set cable, sata_spd_limit and report */
5965	for (i = 0; i < host->n_ports; i++) {
5966		struct ata_port *ap = host->ports[i];
5967		unsigned int xfer_mask;
5968
5969		/* set SATA cable type if still unset */
5970		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5971			ap->cbl = ATA_CBL_SATA;
5972
5973		/* init sata_spd_limit to the current value */
5974		sata_link_init_spd(&ap->link);
5975		if (ap->slave_link)
5976			sata_link_init_spd(ap->slave_link);
5977
5978		/* print per-port info to dmesg */
5979		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5980					      ap->udma_mask);
5981
5982		if (!ata_port_is_dummy(ap)) {
5983			ata_port_info(ap, "%cATA max %s %s\n",
5984				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5985				      ata_mode_string(xfer_mask),
5986				      ap->link.eh_info.desc);
5987			ata_ehi_clear_desc(&ap->link.eh_info);
5988		} else
5989			ata_port_info(ap, "DUMMY\n");
5990	}
5991
5992	/* perform each probe asynchronously */
5993	for (i = 0; i < host->n_ports; i++) {
5994		struct ata_port *ap = host->ports[i];
5995		ap->cookie = async_schedule(async_port_probe, ap);
5996	}
5997
5998	return 0;
5999
6000 err_tadd:
6001	while (--i >= 0) {
6002		ata_tport_delete(host->ports[i]);
6003	}
6004	return rc;
6005
6006}
6007EXPORT_SYMBOL_GPL(ata_host_register);
6008
6009/**
6010 *	ata_host_activate - start host, request IRQ and register it
6011 *	@host: target ATA host
6012 *	@irq: IRQ to request
6013 *	@irq_handler: irq_handler used when requesting IRQ
6014 *	@irq_flags: irq_flags used when requesting IRQ
6015 *	@sht: scsi_host_template to use when registering the host
6016 *
6017 *	After allocating an ATA host and initializing it, most libata
6018 *	LLDs perform three steps to activate the host - start host,
6019 *	request IRQ and register it.  This helper takes necessary
6020 *	arguments and performs the three steps in one go.
6021 *
6022 *	An invalid IRQ skips the IRQ registration and expects the host to
6023 *	have set polling mode on the port. In this case, @irq_handler
6024 *	should be NULL.
6025 *
6026 *	LOCKING:
6027 *	Inherited from calling layer (may sleep).
6028 *
6029 *	RETURNS:
6030 *	0 on success, -errno otherwise.
6031 */
6032int ata_host_activate(struct ata_host *host, int irq,
6033		      irq_handler_t irq_handler, unsigned long irq_flags,
6034		      const struct scsi_host_template *sht)
6035{
6036	int i, rc;
6037	char *irq_desc;
6038
6039	rc = ata_host_start(host);
6040	if (rc)
6041		return rc;
6042
6043	/* Special case for polling mode */
6044	if (!irq) {
6045		WARN_ON(irq_handler);
6046		return ata_host_register(host, sht);
6047	}
6048
6049	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
6050				  dev_driver_string(host->dev),
6051				  dev_name(host->dev));
6052	if (!irq_desc)
6053		return -ENOMEM;
6054
6055	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6056			      irq_desc, host);
6057	if (rc)
6058		return rc;
6059
6060	for (i = 0; i < host->n_ports; i++)
6061		ata_port_desc_misc(host->ports[i], irq);
6062
6063	rc = ata_host_register(host, sht);
6064	/* if failed, just free the IRQ and leave ports alone */
6065	if (rc)
6066		devm_free_irq(host->dev, irq, host);
6067
6068	return rc;
6069}
6070EXPORT_SYMBOL_GPL(ata_host_activate);
6071
6072/**
6073 *	ata_port_detach - Detach ATA port in preparation of device removal
6074 *	@ap: ATA port to be detached
6075 *
6076 *	Detach all ATA devices and the associated SCSI devices of @ap;
6077 *	then, remove the associated SCSI host.  @ap is guaranteed to
6078 *	be quiescent on return from this function.
6079 *
6080 *	LOCKING:
6081 *	Kernel thread context (may sleep).
6082 */
6083static void ata_port_detach(struct ata_port *ap)
6084{
6085	unsigned long flags;
6086	struct ata_link *link;
6087	struct ata_device *dev;
6088
6089	/* Ensure ata_port probe has completed */
6090	async_synchronize_cookie(ap->cookie + 1);
6091
6092	/* Wait for any ongoing EH */
6093	ata_port_wait_eh(ap);
6094
6095	mutex_lock(&ap->scsi_scan_mutex);
6096	spin_lock_irqsave(ap->lock, flags);
6097
6098	/* Remove scsi devices */
6099	ata_for_each_link(link, ap, HOST_FIRST) {
6100		ata_for_each_dev(dev, link, ALL) {
6101			if (dev->sdev) {
6102				spin_unlock_irqrestore(ap->lock, flags);
6103				scsi_remove_device(dev->sdev);
6104				spin_lock_irqsave(ap->lock, flags);
6105				dev->sdev = NULL;
6106			}
6107		}
6108	}
6109
6110	/* Tell EH to disable all devices */
6111	ap->pflags |= ATA_PFLAG_UNLOADING;
6112	ata_port_schedule_eh(ap);
6113
6114	spin_unlock_irqrestore(ap->lock, flags);
6115	mutex_unlock(&ap->scsi_scan_mutex);
6116
6117	/* wait till EH commits suicide */
6118	ata_port_wait_eh(ap);
6119
6120	/* it better be dead now */
6121	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
6122
6123	cancel_delayed_work_sync(&ap->hotplug_task);
6124	cancel_delayed_work_sync(&ap->scsi_rescan_task);
6125
6126	/* clean up zpodd on port removal */
6127	ata_for_each_link(link, ap, HOST_FIRST) {
6128		ata_for_each_dev(dev, link, ALL) {
6129			if (zpodd_dev_enabled(dev))
6130				zpodd_exit(dev);
6131		}
6132	}
6133	if (ap->pmp_link) {
6134		int i;
6135		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
6136			ata_tlink_delete(&ap->pmp_link[i]);
6137	}
6138	/* remove the associated SCSI host */
6139	scsi_remove_host(ap->scsi_host);
6140	ata_tport_delete(ap);
6141}
6142
6143/**
6144 *	ata_host_detach - Detach all ports of an ATA host
6145 *	@host: Host to detach
6146 *
6147 *	Detach all ports of @host.
6148 *
6149 *	LOCKING:
6150 *	Kernel thread context (may sleep).
6151 */
6152void ata_host_detach(struct ata_host *host)
6153{
6154	int i;
6155
6156	for (i = 0; i < host->n_ports; i++)
6157		ata_port_detach(host->ports[i]);
6158
6159	/* the host is dead now, dissociate ACPI */
6160	ata_acpi_dissociate(host);
6161}
6162EXPORT_SYMBOL_GPL(ata_host_detach);
6163
6164#ifdef CONFIG_PCI
6165
6166/**
6167 *	ata_pci_remove_one - PCI layer callback for device removal
6168 *	@pdev: PCI device that was removed
6169 *
6170 *	PCI layer indicates to libata via this hook that hot-unplug or
6171 *	module unload event has occurred.  Detach all ports.  Resource
6172 *	release is handled via devres.
6173 *
6174 *	LOCKING:
6175 *	Inherited from PCI layer (may sleep).
6176 */
6177void ata_pci_remove_one(struct pci_dev *pdev)
6178{
6179	struct ata_host *host = pci_get_drvdata(pdev);
6180
6181	ata_host_detach(host);
6182}
6183EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6184
6185void ata_pci_shutdown_one(struct pci_dev *pdev)
6186{
6187	struct ata_host *host = pci_get_drvdata(pdev);
6188	int i;
6189
6190	for (i = 0; i < host->n_ports; i++) {
6191		struct ata_port *ap = host->ports[i];
6192
6193		ap->pflags |= ATA_PFLAG_FROZEN;
6194
6195		/* Disable port interrupts */
6196		if (ap->ops->freeze)
6197			ap->ops->freeze(ap);
6198
6199		/* Stop the port DMA engines */
6200		if (ap->ops->port_stop)
6201			ap->ops->port_stop(ap);
6202	}
6203}
6204EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
6205
6206/* move to PCI subsystem */
6207int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6208{
6209	unsigned long tmp = 0;
6210
6211	switch (bits->width) {
6212	case 1: {
6213		u8 tmp8 = 0;
6214		pci_read_config_byte(pdev, bits->reg, &tmp8);
6215		tmp = tmp8;
6216		break;
6217	}
6218	case 2: {
6219		u16 tmp16 = 0;
6220		pci_read_config_word(pdev, bits->reg, &tmp16);
6221		tmp = tmp16;
6222		break;
6223	}
6224	case 4: {
6225		u32 tmp32 = 0;
6226		pci_read_config_dword(pdev, bits->reg, &tmp32);
6227		tmp = tmp32;
6228		break;
6229	}
6230
6231	default:
6232		return -EINVAL;
6233	}
6234
6235	tmp &= bits->mask;
6236
6237	return (tmp == bits->val) ? 1 : 0;
6238}
6239EXPORT_SYMBOL_GPL(pci_test_config_bits);
6240
6241#ifdef CONFIG_PM
6242void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6243{
6244	pci_save_state(pdev);
6245	pci_disable_device(pdev);
6246
6247	if (mesg.event & PM_EVENT_SLEEP)
6248		pci_set_power_state(pdev, PCI_D3hot);
6249}
6250EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6251
6252int ata_pci_device_do_resume(struct pci_dev *pdev)
6253{
6254	int rc;
6255
6256	pci_set_power_state(pdev, PCI_D0);
6257	pci_restore_state(pdev);
6258
6259	rc = pcim_enable_device(pdev);
6260	if (rc) {
6261		dev_err(&pdev->dev,
6262			"failed to enable device after resume (%d)\n", rc);
6263		return rc;
6264	}
6265
6266	pci_set_master(pdev);
6267	return 0;
6268}
6269EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6270
6271int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6272{
6273	struct ata_host *host = pci_get_drvdata(pdev);
6274
6275	ata_host_suspend(host, mesg);
6276
6277	ata_pci_device_do_suspend(pdev, mesg);
6278
6279	return 0;
6280}
6281EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6282
6283int ata_pci_device_resume(struct pci_dev *pdev)
6284{
6285	struct ata_host *host = pci_get_drvdata(pdev);
6286	int rc;
6287
6288	rc = ata_pci_device_do_resume(pdev);
6289	if (rc == 0)
6290		ata_host_resume(host);
6291	return rc;
6292}
6293EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6294#endif /* CONFIG_PM */
6295#endif /* CONFIG_PCI */
6296
6297/**
6298 *	ata_platform_remove_one - Platform layer callback for device removal
6299 *	@pdev: Platform device that was removed
6300 *
6301 *	Platform layer indicates to libata via this hook that hot-unplug or
6302 *	module unload event has occurred.  Detach all ports.  Resource
6303 *	release is handled via devres.
6304 *
6305 *	LOCKING:
6306 *	Inherited from platform layer (may sleep).
6307 */
6308void ata_platform_remove_one(struct platform_device *pdev)
6309{
6310	struct ata_host *host = platform_get_drvdata(pdev);
6311
6312	ata_host_detach(host);
6313}
6314EXPORT_SYMBOL_GPL(ata_platform_remove_one);
6315
6316#ifdef CONFIG_ATA_FORCE
6317
6318#define force_cbl(name, flag)				\
6319	{ #name,	.cbl		= (flag) }
6320
6321#define force_spd_limit(spd, val)			\
6322	{ #spd,	.spd_limit		= (val) }
6323
6324#define force_xfer(mode, shift)				\
6325	{ #mode,	.xfer_mask	= (1UL << (shift)) }
6326
6327#define force_lflag_on(name, flags)			\
6328	{ #name,	.lflags_on	= (flags) }
6329
6330#define force_lflag_onoff(name, flags)			\
6331	{ "no" #name,	.lflags_on	= (flags) },	\
6332	{ #name,	.lflags_off	= (flags) }
6333
6334#define force_horkage_on(name, flag)			\
6335	{ #name,	.horkage_on	= (flag) }
6336
6337#define force_horkage_onoff(name, flag)			\
6338	{ "no" #name,	.horkage_on	= (flag) },	\
6339	{ #name,	.horkage_off	= (flag) }
6340
6341static const struct ata_force_param force_tbl[] __initconst = {
6342	force_cbl(40c,			ATA_CBL_PATA40),
6343	force_cbl(80c,			ATA_CBL_PATA80),
6344	force_cbl(short40c,		ATA_CBL_PATA40_SHORT),
6345	force_cbl(unk,			ATA_CBL_PATA_UNK),
6346	force_cbl(ign,			ATA_CBL_PATA_IGN),
6347	force_cbl(sata,			ATA_CBL_SATA),
6348
6349	force_spd_limit(1.5Gbps,	1),
6350	force_spd_limit(3.0Gbps,	2),
6351
6352	force_xfer(pio0,		ATA_SHIFT_PIO + 0),
6353	force_xfer(pio1,		ATA_SHIFT_PIO + 1),
6354	force_xfer(pio2,		ATA_SHIFT_PIO + 2),
6355	force_xfer(pio3,		ATA_SHIFT_PIO + 3),
6356	force_xfer(pio4,		ATA_SHIFT_PIO + 4),
6357	force_xfer(pio5,		ATA_SHIFT_PIO + 5),
6358	force_xfer(pio6,		ATA_SHIFT_PIO + 6),
6359	force_xfer(mwdma0,		ATA_SHIFT_MWDMA + 0),
6360	force_xfer(mwdma1,		ATA_SHIFT_MWDMA + 1),
6361	force_xfer(mwdma2,		ATA_SHIFT_MWDMA + 2),
6362	force_xfer(mwdma3,		ATA_SHIFT_MWDMA + 3),
6363	force_xfer(mwdma4,		ATA_SHIFT_MWDMA + 4),
6364	force_xfer(udma0,		ATA_SHIFT_UDMA + 0),
6365	force_xfer(udma16,		ATA_SHIFT_UDMA + 0),
6366	force_xfer(udma/16,		ATA_SHIFT_UDMA + 0),
6367	force_xfer(udma1,		ATA_SHIFT_UDMA + 1),
6368	force_xfer(udma25,		ATA_SHIFT_UDMA + 1),
6369	force_xfer(udma/25,		ATA_SHIFT_UDMA + 1),
6370	force_xfer(udma2,		ATA_SHIFT_UDMA + 2),
6371	force_xfer(udma33,		ATA_SHIFT_UDMA + 2),
6372	force_xfer(udma/33,		ATA_SHIFT_UDMA + 2),
6373	force_xfer(udma3,		ATA_SHIFT_UDMA + 3),
6374	force_xfer(udma44,		ATA_SHIFT_UDMA + 3),
6375	force_xfer(udma/44,		ATA_SHIFT_UDMA + 3),
6376	force_xfer(udma4,		ATA_SHIFT_UDMA + 4),
6377	force_xfer(udma66,		ATA_SHIFT_UDMA + 4),
6378	force_xfer(udma/66,		ATA_SHIFT_UDMA + 4),
6379	force_xfer(udma5,		ATA_SHIFT_UDMA + 5),
6380	force_xfer(udma100,		ATA_SHIFT_UDMA + 5),
6381	force_xfer(udma/100,		ATA_SHIFT_UDMA + 5),
6382	force_xfer(udma6,		ATA_SHIFT_UDMA + 6),
6383	force_xfer(udma133,		ATA_SHIFT_UDMA + 6),
6384	force_xfer(udma/133,		ATA_SHIFT_UDMA + 6),
6385	force_xfer(udma7,		ATA_SHIFT_UDMA + 7),
6386
6387	force_lflag_on(nohrst,		ATA_LFLAG_NO_HRST),
6388	force_lflag_on(nosrst,		ATA_LFLAG_NO_SRST),
6389	force_lflag_on(norst,		ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST),
6390	force_lflag_on(rstonce,		ATA_LFLAG_RST_ONCE),
6391	force_lflag_onoff(dbdelay,	ATA_LFLAG_NO_DEBOUNCE_DELAY),
6392
6393	force_horkage_onoff(ncq,	ATA_HORKAGE_NONCQ),
6394	force_horkage_onoff(ncqtrim,	ATA_HORKAGE_NO_NCQ_TRIM),
6395	force_horkage_onoff(ncqati,	ATA_HORKAGE_NO_NCQ_ON_ATI),
6396
6397	force_horkage_onoff(trim,	ATA_HORKAGE_NOTRIM),
6398	force_horkage_on(trim_zero,	ATA_HORKAGE_ZERO_AFTER_TRIM),
6399	force_horkage_on(max_trim_128m, ATA_HORKAGE_MAX_TRIM_128M),
6400
6401	force_horkage_onoff(dma,	ATA_HORKAGE_NODMA),
6402	force_horkage_on(atapi_dmadir,	ATA_HORKAGE_ATAPI_DMADIR),
6403	force_horkage_on(atapi_mod16_dma, ATA_HORKAGE_ATAPI_MOD16_DMA),
6404
6405	force_horkage_onoff(dmalog,	ATA_HORKAGE_NO_DMA_LOG),
6406	force_horkage_onoff(iddevlog,	ATA_HORKAGE_NO_ID_DEV_LOG),
6407	force_horkage_onoff(logdir,	ATA_HORKAGE_NO_LOG_DIR),
6408
6409	force_horkage_on(max_sec_128,	ATA_HORKAGE_MAX_SEC_128),
6410	force_horkage_on(max_sec_1024,	ATA_HORKAGE_MAX_SEC_1024),
6411	force_horkage_on(max_sec_lba48,	ATA_HORKAGE_MAX_SEC_LBA48),
6412
6413	force_horkage_onoff(lpm,	ATA_HORKAGE_NOLPM),
6414	force_horkage_onoff(setxfer,	ATA_HORKAGE_NOSETXFER),
6415	force_horkage_on(dump_id,	ATA_HORKAGE_DUMP_ID),
6416	force_horkage_onoff(fua,	ATA_HORKAGE_NO_FUA),
6417
6418	force_horkage_on(disable,	ATA_HORKAGE_DISABLE),
6419};
6420
6421static int __init ata_parse_force_one(char **cur,
6422				      struct ata_force_ent *force_ent,
6423				      const char **reason)
6424{
6425	char *start = *cur, *p = *cur;
6426	char *id, *val, *endp;
6427	const struct ata_force_param *match_fp = NULL;
6428	int nr_matches = 0, i;
6429
6430	/* find where this param ends and update *cur */
6431	while (*p != '\0' && *p != ',')
6432		p++;
6433
6434	if (*p == '\0')
6435		*cur = p;
6436	else
6437		*cur = p + 1;
6438
6439	*p = '\0';
6440
6441	/* parse */
6442	p = strchr(start, ':');
6443	if (!p) {
6444		val = strstrip(start);
6445		goto parse_val;
6446	}
6447	*p = '\0';
6448
6449	id = strstrip(start);
6450	val = strstrip(p + 1);
6451
6452	/* parse id */
6453	p = strchr(id, '.');
6454	if (p) {
6455		*p++ = '\0';
6456		force_ent->device = simple_strtoul(p, &endp, 10);
6457		if (p == endp || *endp != '\0') {
6458			*reason = "invalid device";
6459			return -EINVAL;
6460		}
6461	}
6462
6463	force_ent->port = simple_strtoul(id, &endp, 10);
6464	if (id == endp || *endp != '\0') {
6465		*reason = "invalid port/link";
6466		return -EINVAL;
6467	}
6468
6469 parse_val:
6470	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6471	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6472		const struct ata_force_param *fp = &force_tbl[i];
6473
6474		if (strncasecmp(val, fp->name, strlen(val)))
6475			continue;
6476
6477		nr_matches++;
6478		match_fp = fp;
6479
6480		if (strcasecmp(val, fp->name) == 0) {
6481			nr_matches = 1;
6482			break;
6483		}
6484	}
6485
6486	if (!nr_matches) {
6487		*reason = "unknown value";
6488		return -EINVAL;
6489	}
6490	if (nr_matches > 1) {
6491		*reason = "ambiguous value";
6492		return -EINVAL;
6493	}
6494
6495	force_ent->param = *match_fp;
6496
6497	return 0;
6498}
6499
6500static void __init ata_parse_force_param(void)
6501{
6502	int idx = 0, size = 1;
6503	int last_port = -1, last_device = -1;
6504	char *p, *cur, *next;
6505
6506	/* Calculate maximum number of params and allocate ata_force_tbl */
6507	for (p = ata_force_param_buf; *p; p++)
6508		if (*p == ',')
6509			size++;
6510
6511	ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
6512	if (!ata_force_tbl) {
6513		printk(KERN_WARNING "ata: failed to extend force table, "
6514		       "libata.force ignored\n");
6515		return;
6516	}
6517
6518	/* parse and populate the table */
6519	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6520		const char *reason = "";
6521		struct ata_force_ent te = { .port = -1, .device = -1 };
6522
6523		next = cur;
6524		if (ata_parse_force_one(&next, &te, &reason)) {
6525			printk(KERN_WARNING "ata: failed to parse force "
6526			       "parameter \"%s\" (%s)\n",
6527			       cur, reason);
6528			continue;
6529		}
6530
6531		if (te.port == -1) {
6532			te.port = last_port;
6533			te.device = last_device;
6534		}
6535
6536		ata_force_tbl[idx++] = te;
6537
6538		last_port = te.port;
6539		last_device = te.device;
6540	}
6541
6542	ata_force_tbl_size = idx;
6543}
6544
6545static void ata_free_force_param(void)
6546{
6547	kfree(ata_force_tbl);
6548}
6549#else
6550static inline void ata_parse_force_param(void) { }
6551static inline void ata_free_force_param(void) { }
6552#endif
6553
6554static int __init ata_init(void)
6555{
6556	int rc;
6557
6558	ata_parse_force_param();
6559
6560	rc = ata_sff_init();
6561	if (rc) {
6562		ata_free_force_param();
6563		return rc;
6564	}
6565
6566	libata_transport_init();
6567	ata_scsi_transport_template = ata_attach_transport();
6568	if (!ata_scsi_transport_template) {
6569		ata_sff_exit();
6570		rc = -ENOMEM;
6571		goto err_out;
6572	}
6573
6574	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6575	return 0;
6576
6577err_out:
6578	return rc;
6579}
6580
6581static void __exit ata_exit(void)
6582{
6583	ata_release_transport(ata_scsi_transport_template);
6584	libata_transport_exit();
6585	ata_sff_exit();
6586	ata_free_force_param();
6587}
6588
6589subsys_initcall(ata_init);
6590module_exit(ata_exit);
6591
6592static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);
6593
6594int ata_ratelimit(void)
6595{
6596	return __ratelimit(&ratelimit);
6597}
6598EXPORT_SYMBOL_GPL(ata_ratelimit);
6599
6600/**
6601 *	ata_msleep - ATA EH owner aware msleep
6602 *	@ap: ATA port to attribute the sleep to
6603 *	@msecs: duration to sleep in milliseconds
6604 *
6605 *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
6606 *	ownership is released before going to sleep and reacquired
6607 *	after the sleep is complete.  IOW, other ports sharing the
6608 *	@ap->host will be allowed to own the EH while this task is
6609 *	sleeping.
6610 *
6611 *	LOCKING:
6612 *	Might sleep.
6613 */
6614void ata_msleep(struct ata_port *ap, unsigned int msecs)
6615{
6616	bool owns_eh = ap && ap->host->eh_owner == current;
6617
6618	if (owns_eh)
6619		ata_eh_release(ap);
6620
6621	if (msecs < 20) {
6622		unsigned long usecs = msecs * USEC_PER_MSEC;
6623		usleep_range(usecs, usecs + 50);
6624	} else {
6625		msleep(msecs);
6626	}
6627
6628	if (owns_eh)
6629		ata_eh_acquire(ap);
6630}
6631EXPORT_SYMBOL_GPL(ata_msleep);
6632
6633/**
6634 *	ata_wait_register - wait until register value changes
6635 *	@ap: ATA port to wait register for, can be NULL
6636 *	@reg: IO-mapped register
6637 *	@mask: Mask to apply to read register value
6638 *	@val: Wait condition
6639 *	@interval: polling interval in milliseconds
6640 *	@timeout: timeout in milliseconds
6641 *
6642 *	Waiting for some bits of register to change is a common
6643 *	operation for ATA controllers.  This function reads 32bit LE
6644 *	IO-mapped register @reg and tests for the following condition.
6645 *
6646 *	(*@reg & mask) != val
6647 *
6648 *	If the condition is met, it returns; otherwise, the process is
6649 *	repeated after @interval_msec until timeout.
6650 *
6651 *	LOCKING:
6652 *	Kernel thread context (may sleep)
6653 *
6654 *	RETURNS:
6655 *	The final register value.
6656 */
6657u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6658		      unsigned int interval, unsigned int timeout)
6659{
6660	unsigned long deadline;
6661	u32 tmp;
6662
6663	tmp = ioread32(reg);
6664
6665	/* Calculate timeout _after_ the first read to make sure
6666	 * preceding writes reach the controller before starting to
6667	 * eat away the timeout.
6668	 */
6669	deadline = ata_deadline(jiffies, timeout);
6670
6671	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6672		ata_msleep(ap, interval);
6673		tmp = ioread32(reg);
6674	}
6675
6676	return tmp;
6677}
6678EXPORT_SYMBOL_GPL(ata_wait_register);
6679
6680/*
6681 * Dummy port_ops
6682 */
6683static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6684{
6685	return AC_ERR_SYSTEM;
6686}
6687
6688static void ata_dummy_error_handler(struct ata_port *ap)
6689{
6690	/* truly dummy */
6691}
6692
6693struct ata_port_operations ata_dummy_port_ops = {
6694	.qc_prep		= ata_noop_qc_prep,
6695	.qc_issue		= ata_dummy_qc_issue,
6696	.error_handler		= ata_dummy_error_handler,
6697	.sched_eh		= ata_std_sched_eh,
6698	.end_eh			= ata_std_end_eh,
6699};
6700EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6701
6702const struct ata_port_info ata_dummy_port_info = {
6703	.port_ops		= &ata_dummy_port_ops,
6704};
6705EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6706
6707void ata_print_version(const struct device *dev, const char *version)
6708{
6709	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
6710}
6711EXPORT_SYMBOL(ata_print_version);
6712
6713EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
6714EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
6715EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
6716EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
6717EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);
6718