/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *    		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		=  5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		=  5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	=  3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents a timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and slow devices.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for slow devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};
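
/* Illustrative walk-through (editor's note, not in the original source):
 * with the table above, the reset logic waits up to 10s for the first
 * reset to succeed, another 10s for the second, then allows one long
 * 35s try for stubborn hardware and a final 5s attempt, giving up once
 * the ULONG_MAX sentinel is reached -- roughly one minute in total,
 * with ATA_EH_RESET_COOL_DOWN enforced between attempts.
 */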

static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,  /* combined time till here is enough even for media access */
	30000,	/* for exceptionally slow devices */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,  /* ditto */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines the timeouts to use for EH internal
 * commands.  Each table entry is a command class: it lists the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS
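
/* Escalation example (editor's note, not in the original source):
 * SET_FEATURES belongs to the ata_eh_other_timeouts class, so EH
 * first gives it 5000ms.  If that try times out,
 * ata_internal_cmd_timed_out() advances cmd_timeout_idx for the
 * class and the next SET_FEATURES issued through EH gets 10000ms.
 * The index stops advancing once the next entry is ULONG_MAX.
 */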

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				     ATA_EH_DESC_LEN - ehi->desc_len,
				     fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
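
/* Usage sketch (editor's note, not in the original source): LLDDs
 * typically build up a description incrementally, e.g.
 *
 *	ata_ehi_clear_desc(ehi);
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ata_ehi_push_desc(ehi, "SError 0x%x", serror);
 *
 * which yields "irq_stat 0x..., SError 0x..." with the ", "
 * separator inserted automatically by the second push.
 */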

/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}

/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}

#ifdef CONFIG_PCI

/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only the name and the offset address are appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}
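
/* Output example (editor's note, not in the original source): for a
 * hypothetical memory BAR of 2048 bytes at 0xf7ff4000,
 *
 *	ata_port_pbar_desc(ap, 5, -1, "abar");
 *	ata_port_pbar_desc(ap, 5, 0x100, "port");
 *
 * would append "abar m2048@0xf7ff4000 port 0xf7ff4100" to the port
 * description printed at host registration.
 */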

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}
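
/* Caller sketch (editor's note, not in the original source; a minimal
 * illustration of how the two helpers above pair up around
 * ata_exec_internal(), which accepts an explicit timeout as its last
 * argument):
 *
 *	timeout = ata_internal_cmd_timeout(dev, ATA_CMD_ID_ATA);
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *				     id, ATA_ID_WORDS * 2, timeout);
 *	if (err_mask & AC_ERR_TIMEOUT)
 *		ata_internal_cmd_timed_out(dev, ATA_CMD_ID_ATA);
 */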

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	memset(ering, 0, sizeof(*ering));
}

static int ata_ering_map(struct ata_ering *ering,
			 int (*map_fn)(struct ata_ering_entry *, void *),
			 void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}
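
/* Traversal note (editor's note, not in the original source):
 * ata_ering_map() visits entries newest-first, starting at the
 * cursor and walking backwards through the ring.  It stops early at
 * the first unused slot (err_mask == 0) or when map_fn returns
 * non-zero, and that return value is propagated to the caller --
 * speed_down_verdict_cb() below uses this to cut the scan off at
 * entries older than its "since" timestamp.
 */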

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting ATA_QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If we raced normal completion and there is nothing to
	   recover (nr_timedout == 0), why exactly are we doing error
	   recovery? */

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* An exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such a case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}
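
/* Worked example (editor's note, not in the original source): suppose
 * EH is scheduled with fast drain while 3 qcs are in flight.  The
 * timer fires ATA_EH_FASTDRAIN_INTERVAL (3000ms) later.  If, say,
 * one qc has completed (cnt dropped to 2), the timer is re-armed for
 * another interval; if none completed, every remaining qc is marked
 * AC_ERR_TIMEOUT and the port is frozen so EH can take over
 * immediately instead of waiting out each command's full timeout.
 */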

/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  A frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}

/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->retries is decremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}
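
/* Usage note (editor's note, not in the original source): EH recovery
 * paths pick one of the two helpers above per failed qc, e.g.
 *
 *	if (qc->flags & ATA_QCFLAG_RETRY)
 *		ata_eh_qc_retry(qc);
 *	else
 *		ata_eh_qc_complete(qc);
 *
 * Because scmd->retries counts retries already used, decrementing it
 * in ata_eh_qc_retry() effectively grants the command a free retry
 * when the failure wasn't its own fault (qc->err_mask == 0).
 */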

/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}

/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_done - EH action complete
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}

/**
 *	ata_err_string - convert err_mask to descriptive string
 *	@err_mask: error mask to convert to string
 *
 *	Convert @err_mask to descriptive string.  Errors are
 *	prioritized according to severity and only the most severe
 *	error is reported.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	return "unknown error";
}

/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}

/**
 *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *	@dev: target ATAPI device
 *	@r_sense_key: out parameter for sense_key
 *
 *	Perform ATAPI TEST_UNIT_READY.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}

/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}

/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@link: ATA link to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	hotplug_mask = 0;

	if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}

/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all LLDDs have to do
 *	is set AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}

static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}
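
/* Categorization example (editor's note, not in the original source):
 * an AC_ERR_DEV-only failure on an IO command whose data transfer
 * hasn't been verified (ATA_EFLAG_IS_IO | ATA_EFLAG_DUBIOUS_XFER)
 * lands in ATA_ECAT_DUBIOUS_UNK_DEV: base is ATA_ECAT_DUBIOUS_NONE
 * (4) and the unknown-device offset adds 3.  The same error with a
 * verified transfer would be plain ATA_ECAT_UNK_DEV (3).
 */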

struct speed_down_verdict_arg {
	u64 since;
	int xfer_ok;
	int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if (ent->timestamp < arg->since)
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}

/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines the error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
 *			  IO commands
 *
 *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
 *			  data transfer hasn't been verified.
 *
 *	Verdicts are
 *
 *	NCQ_OFF		: Turn off NCQ.
 *
 *	SPEED_DOWN	: Speed down transfer speed but don't fall back
 *			  to PIO.
 *
 *	FALLBACK_TO_PIO	: Fall back to PIO.
 *
 *	Even if multiple verdicts are returned, only one action is
 *	taken per error.  An action triggered by non-DUBIOUS errors
 *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
 *	This is to expedite speed down decisions right after device is
 *	initially configured.
 *
 *	The following are the speed down rules.  #1 and #2 deal with
 *	DUBIOUS errors.
 *
 *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *	   occurred during last 5 mins, NCQ_OFF.
 *
 *	3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *	   occurred during last 5 mins, FALLBACK_TO_PIO
 *
 *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *	   during last 10 mins, NCQ_OFF.
 *
 *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
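
/* Worked example (editor's note, not in the original source): two
 * DUBIOUS_TOUT_HSM errors within 5 minutes trip both rule #1 and
 * rule #2, so the verdict is SPEED_DOWN | FALLBACK_TO_PIO | NCQ_OFF
 * with KEEP_ERRORS set -- the error ring survives so that a device
 * misbehaving right after configuration keeps escalating.  By
 * contrast, four plain TOUT_HSM errors in 10 minutes yield
 * NCQ_OFF | SPEED_DOWN without KEEP_ERRORS, and ata_eh_speed_down()
 * below clears the ring once it acts on the verdict.
 */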
1866
1867/**
1868 *	ata_eh_speed_down - record error and speed down if necessary
1869 *	@dev: Failed device
1870 *	@eflags: mask of ATA_EFLAG_* flags
1871 *	@err_mask: err_mask of the error
1872 *
1873 *	Record error and examine error history to determine whether
1874 *	adjusting transmission speed is necessary.  It also sets
1875 *	transmission limits appropriately if such adjustment is
1876 *	necessary.
1877 *
1878 *	LOCKING:
1879 *	Kernel thread context (may sleep).
1880 *
1881 *	RETURNS:
1882 *	Determined recovery action.
1883 */
1884static unsigned int ata_eh_speed_down(struct ata_device *dev,
1885				unsigned int eflags, unsigned int err_mask)
1886{
1887	struct ata_link *link = ata_dev_phys_link(dev);
1888	int xfer_ok = 0;
1889	unsigned int verdict;
1890	unsigned int action = 0;
1891
1892	/* don't bother if Cat-0 error */
1893	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
1894		return 0;
1895
1896	/* record error and determine whether speed down is necessary */
1897	ata_ering_record(&dev->ering, eflags, err_mask);
1898	verdict = ata_eh_speed_down_verdict(dev);
1899
1900	/* turn off NCQ? */
1901	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
1902	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
1903			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
1904		dev->flags |= ATA_DFLAG_NCQ_OFF;
1905		ata_dev_printk(dev, KERN_WARNING,
1906			       "NCQ disabled due to excessive errors\n");
1907		goto done;
1908	}
1909
1910	/* speed down? */
1911	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
1912		/* speed down SATA link speed if possible */
1913		if (sata_down_spd_limit(link, 0) == 0) {
1914			action |= ATA_EH_RESET;
1915			goto done;
1916		}
1917
1918		/* lower transfer mode */
1919		if (dev->spdn_cnt < 2) {
1920			static const int dma_dnxfer_sel[] =
1921				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
1922			static const int pio_dnxfer_sel[] =
1923				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
1924			int sel;
1925
1926			if (dev->xfer_shift != ATA_SHIFT_PIO)
1927				sel = dma_dnxfer_sel[dev->spdn_cnt];
1928			else
1929				sel = pio_dnxfer_sel[dev->spdn_cnt];
1930
1931			dev->spdn_cnt++;
1932
1933			if (ata_down_xfermask_limit(dev, sel) == 0) {
1934				action |= ATA_EH_RESET;
1935				goto done;
1936			}
1937		}
1938	}
1939
1940	/* Fall back to PIO?  Slowing down to PIO is meaningless for
1941	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
1942	 */
1943	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
1944	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
1945	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
1946		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
1947			dev->spdn_cnt = 0;
1948			action |= ATA_EH_RESET;
1949			goto done;
1950		}
1951	}
1952
1953	return 0;
1954 done:
1955	/* device has been slowed down, blow error history */
1956	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
1957		ata_ering_clear(&dev->ering);
1958	return action;
1959}
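/*
 * Illustration only (compiled out): the order of remedies tried above,
 * condensed into one hypothetical helper.  NCQ is sacrificed first,
 * then SATA link speed, then the transfer mode (at most twice, tracked
 * by spdn_cnt), and forcing PIO is the last resort for PATA and ATAPI.
 * This is an editorial summary, not a kernel interface.
 */
#if 0
static const char *spdn_next_remedy(unsigned int verdict, int spdn_cnt,
				    int sata_disk)
{
	if (verdict & ATA_EH_SPDN_NCQ_OFF)
		return "disable NCQ";
	if (verdict & ATA_EH_SPDN_SPEED_DOWN)
		return "lower SATA link speed, then transfer mode";
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && spdn_cnt >= 2 &&
	    !sata_disk)
		return "force PIO";
	return "no further action";
}
#endif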
1960
1961/**
1962 *	ata_eh_link_autopsy - analyze error and determine recovery action
1963 *	@link: host link to perform autopsy on
1964 *
1965 *	Analyze why @link failed and determine which recovery actions
1966 *	are needed.  This function also sets more detailed AC_ERR_*
1967 *	values and fills sense data for ATAPI CHECK SENSE.
1968 *
1969 *	LOCKING:
1970 *	Kernel thread context (may sleep).
1971 */
1972static void ata_eh_link_autopsy(struct ata_link *link)
1973{
1974	struct ata_port *ap = link->ap;
1975	struct ata_eh_context *ehc = &link->eh_context;
1976	struct ata_device *dev;
1977	unsigned int all_err_mask = 0, eflags = 0;
1978	int tag;
1979	u32 serror;
1980	int rc;
1981
1982	DPRINTK("ENTER\n");
1983
1984	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
1985		return;
1986
1987	/* obtain and analyze SError */
1988	rc = sata_scr_read(link, SCR_ERROR, &serror);
1989	if (rc == 0) {
1990		ehc->i.serror |= serror;
1991		ata_eh_analyze_serror(link);
1992	} else if (rc != -EOPNOTSUPP) {
1993		/* SError read failed, force reset and probing */
1994		ehc->i.probe_mask |= ATA_ALL_DEVICES;
1995		ehc->i.action |= ATA_EH_RESET;
1996		ehc->i.err_mask |= AC_ERR_OTHER;
1997	}
1998
1999	/* analyze NCQ failure */
2000	ata_eh_analyze_ncq_error(link);
2001
2002	/* any real error trumps AC_ERR_OTHER */
2003	if (ehc->i.err_mask & ~AC_ERR_OTHER)
2004		ehc->i.err_mask &= ~AC_ERR_OTHER;
2005
2006	all_err_mask |= ehc->i.err_mask;
2007
2008	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2009		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2010
2011		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2012		    ata_dev_phys_link(qc->dev) != link)
2013			continue;
2014
2015		/* inherit upper level err_mask */
2016		qc->err_mask |= ehc->i.err_mask;
2017
2018		/* analyze TF */
2019		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);
2020
2021		/* DEV errors are probably spurious in case of ATA_BUS error */
2022		if (qc->err_mask & AC_ERR_ATA_BUS)
2023			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
2024					  AC_ERR_INVALID);
2025
2026		/* any real error trumps unknown error */
2027		if (qc->err_mask & ~AC_ERR_OTHER)
2028			qc->err_mask &= ~AC_ERR_OTHER;
2029
2030		/* SENSE_VALID trumps dev/unknown error and revalidation */
2031		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
2032			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
2033
2034		/* determine whether the command is worth retrying */
2035		if (qc->flags & ATA_QCFLAG_IO ||
2036		    (!(qc->err_mask & AC_ERR_INVALID) &&
2037		     qc->err_mask != AC_ERR_DEV))
2038			qc->flags |= ATA_QCFLAG_RETRY;
2039
2040		/* accumulate error info */
2041		ehc->i.dev = qc->dev;
2042		all_err_mask |= qc->err_mask;
2043		if (qc->flags & ATA_QCFLAG_IO)
2044			eflags |= ATA_EFLAG_IS_IO;
2045	}
2046
2047	/* enforce default EH actions */
2048	if (ap->pflags & ATA_PFLAG_FROZEN ||
2049	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
2050		ehc->i.action |= ATA_EH_RESET;
2051	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
2052		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
2053		ehc->i.action |= ATA_EH_REVALIDATE;
2054
2055	/* If we have offending qcs and the associated failed device,
2056	 * perform per-dev EH action only on the offending device.
2057	 */
2058	if (ehc->i.dev) {
2059		ehc->i.dev_action[ehc->i.dev->devno] |=
2060			ehc->i.action & ATA_EH_PERDEV_MASK;
2061		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
2062	}
2063
2064	/* propagate timeout to host link */
2065	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
2066		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;
2067
2068	/* record error and consider speeding down */
2069	dev = ehc->i.dev;
2070	if (!dev && ata_link_max_devices(link) == 1 &&
2071	    ata_dev_enabled(link->device))
2072		dev = link->device;
2073
2074	if (dev) {
2075		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
2076			eflags |= ATA_EFLAG_DUBIOUS_XFER;
2077		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
2078	}
2079
2080	DPRINTK("EXIT\n");
2081}
2082
2083/**
2084 *	ata_eh_autopsy - analyze error and determine recovery action
2085 *	@ap: host port to perform autopsy on
2086 *
2087 *	Analyze all links of @ap and determine why they failed and
2088 *	which recovery actions are needed.
2089 *
2090 *	LOCKING:
2091 *	Kernel thread context (may sleep).
2092 */
2093void ata_eh_autopsy(struct ata_port *ap)
2094{
2095	struct ata_link *link;
2096
2097	ata_for_each_link(link, ap, EDGE)
2098		ata_eh_link_autopsy(link);
2099
2100	/* Handle the frigging slave link.  Autopsy is done similarly
2101	 * but actions and flags are transferred over to the master
2102	 * link and handled from there.
2103	 */
2104	if (ap->slave_link) {
2105		struct ata_eh_context *mehc = &ap->link.eh_context;
2106		struct ata_eh_context *sehc = &ap->slave_link->eh_context;
2107
2108		/* transfer control flags from master to slave */
2109		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;
2110
2111		/* perform autopsy on the slave link */
2112		ata_eh_link_autopsy(ap->slave_link);
2113
2114		/* transfer actions from slave to master and clear slave */
2115		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2116		mehc->i.action		|= sehc->i.action;
2117		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
2118		mehc->i.flags		|= sehc->i.flags;
2119		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
2120	}
2121
2122	/* Autopsy of fanout ports can affect host link autopsy.
2123	 * Perform host link autopsy last.
2124	 */
2125	if (sata_pmp_attached(ap))
2126		ata_eh_link_autopsy(&ap->link);
2127}
2128
2129/**
2130 *	ata_get_cmd_descript - get description for ATA command
2131 *	@command: ATA command code to get description for
2132 *
2133 *	Return a textual description of the given command, or NULL if the
2134 *	command is not known.
2135 *
2136 *	LOCKING:
2137 *	None
2138 */
2139const char *ata_get_cmd_descript(u8 command)
2140{
2141#ifdef CONFIG_ATA_VERBOSE_ERROR
2142	static const struct
2143	{
2144		u8 command;
2145		const char *text;
2146	} cmd_descr[] = {
2147		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
2148		{ ATA_CMD_CHK_POWER, 		"CHECK POWER MODE" },
2149		{ ATA_CMD_STANDBY, 		"STANDBY" },
2150		{ ATA_CMD_IDLE, 		"IDLE" },
2151		{ ATA_CMD_EDD, 			"EXECUTE DEVICE DIAGNOSTIC" },
2152		{ ATA_CMD_DOWNLOAD_MICRO,   	"DOWNLOAD MICROCODE" },
2153		{ ATA_CMD_NOP,			"NOP" },
2154		{ ATA_CMD_FLUSH, 		"FLUSH CACHE" },
2155		{ ATA_CMD_FLUSH_EXT, 		"FLUSH CACHE EXT" },
2156		{ ATA_CMD_ID_ATA,  		"IDENTIFY DEVICE" },
2157		{ ATA_CMD_ID_ATAPI, 		"IDENTIFY PACKET DEVICE" },
2158		{ ATA_CMD_SERVICE, 		"SERVICE" },
2159		{ ATA_CMD_READ, 		"READ DMA" },
2160		{ ATA_CMD_READ_EXT, 		"READ DMA EXT" },
2161		{ ATA_CMD_READ_QUEUED, 		"READ DMA QUEUED" },
2162		{ ATA_CMD_READ_STREAM_EXT, 	"READ STREAM EXT" },
2163		{ ATA_CMD_READ_STREAM_DMA_EXT,  "READ STREAM DMA EXT" },
2164		{ ATA_CMD_WRITE, 		"WRITE DMA" },
2165		{ ATA_CMD_WRITE_EXT, 		"WRITE DMA EXT" },
2166		{ ATA_CMD_WRITE_QUEUED, 	"WRITE DMA QUEUED EXT" },
2167		{ ATA_CMD_WRITE_STREAM_EXT, 	"WRITE STREAM EXT" },
2168		{ ATA_CMD_WRITE_STREAM_DMA_EXT, "WRITE STREAM DMA EXT" },
2169		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
2170		{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
2171		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
2172		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
2173		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
2174		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
2175		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
2176		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
2177		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
2178		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
2179		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
2180		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
2181		{ ATA_CMD_WRITE_MULTI_FUA_EXT, 	"WRITE MULTIPLE FUA EXT" },
2182		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
2183		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
2184		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
2185		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
2186		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
2187		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
2188		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
2189		{ ATA_CMD_SLEEP,		"SLEEP" },
2190		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
2191		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
2192		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
2193		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
2194		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
2195		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
2196		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
2197		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
2198		{ ATA_CMD_WRITE_LOG_DMA_EXT, 	"WRITE LOG DMA EXT" },
2199		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
2200		{ ATA_CMD_TRUSTED_RCV_DMA, 	"TRUSTED RECEIVE DMA" },
2201		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
2202		{ ATA_CMD_TRUSTED_SND_DMA, 	"TRUSTED SEND DMA" },
2203		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
2204		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
2205		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
2206		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
2207		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
2208		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
2209		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
2210		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
2211		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
2212		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
2213		{ ATA_CMD_SMART,		"SMART" },
2214		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
2215		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
2216		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
2217		{ ATA_CMD_CHK_MED_CRD_TYP, 	"CHECK MEDIA CARD TYPE" },
2218		{ ATA_CMD_CFA_REQ_EXT_ERR, 	"CFA REQUEST EXTENDED ERROR" },
2219		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
2220		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
2221		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
2222		{ ATA_CMD_CFA_WRITE_MULT_NE, 	"CFA WRITE MULTIPLE WITHOUT ERASE" },
2223		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
2224		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
2225		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
2226		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
2227		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
2228		{ 0,				NULL } /* terminate list */
2229	};
2230
2231	unsigned int i;
2232	for (i = 0; cmd_descr[i].text; i++)
2233		if (cmd_descr[i].command == command)
2234			return cmd_descr[i].text;
2235#endif
2236
2237	return NULL;
2238}
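/*
 * Illustration only (compiled out): ata_get_cmd_descript() returns NULL
 * both for unknown opcodes and whenever CONFIG_ATA_VERBOSE_ERROR is not
 * set, so every caller must guard the result, as ata_eh_link_report()
 * below does before printing "failed command:".  demo_cmd_name() is a
 * hypothetical caller.
 */
#if 0
static void demo_cmd_name(struct ata_device *dev, u8 command)
{
	const char *descr = ata_get_cmd_descript(command);

	if (descr)
		ata_dev_printk(dev, KERN_INFO, "cmd 0x%02x: %s\n",
			       command, descr);
}
#endif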
2239
2240/**
2241 *	ata_eh_link_report - report error handling to user
2242 *	@link: ATA link EH is going on
2243 *
2244 *	Report EH to user.
2245 *
2246 *	LOCKING:
2247 *	None.
2248 */
2249static void ata_eh_link_report(struct ata_link *link)
2250{
2251	struct ata_port *ap = link->ap;
2252	struct ata_eh_context *ehc = &link->eh_context;
2253	const char *frozen, *desc;
2254	char tries_buf[6];
2255	int tag, nr_failed = 0;
2256
2257	if (ehc->i.flags & ATA_EHI_QUIET)
2258		return;
2259
2260	desc = NULL;
2261	if (ehc->i.desc[0] != '\0')
2262		desc = ehc->i.desc;
2263
2264	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2265		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2266
2267		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2268		    ata_dev_phys_link(qc->dev) != link ||
2269		    ((qc->flags & ATA_QCFLAG_QUIET) &&
2270		     qc->err_mask == AC_ERR_DEV))
2271			continue;
2272		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
2273			continue;
2274
2275		nr_failed++;
2276	}
2277
2278	if (!nr_failed && !ehc->i.err_mask)
2279		return;
2280
2281	frozen = "";
2282	if (ap->pflags & ATA_PFLAG_FROZEN)
2283		frozen = " frozen";
2284
2285	memset(tries_buf, 0, sizeof(tries_buf));
2286	if (ap->eh_tries < ATA_EH_MAX_TRIES)
2287		snprintf(tries_buf, sizeof(tries_buf), " t%d",
2288			 ap->eh_tries);
2289
2290	if (ehc->i.dev) {
2291		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
2292			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2293			       ehc->i.err_mask, link->sactive, ehc->i.serror,
2294			       ehc->i.action, frozen, tries_buf);
2295		if (desc)
2296			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
2297	} else {
2298		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
2299				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
2300				ehc->i.err_mask, link->sactive, ehc->i.serror,
2301				ehc->i.action, frozen, tries_buf);
2302		if (desc)
2303			ata_link_printk(link, KERN_ERR, "%s\n", desc);
2304	}
2305
2306#ifdef CONFIG_ATA_VERBOSE_ERROR
2307	if (ehc->i.serror)
2308		ata_link_printk(link, KERN_ERR,
2309		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2310		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2311		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2312		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2313		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2314		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2315		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2316		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2317		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2318		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2319		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2320		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2321		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2322		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2323		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2324		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2325		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2326		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2327#endif
2328
2329	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2330		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2331		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2332		const u8 *cdb = qc->cdb;
2333		char data_buf[20] = "";
2334		char cdb_buf[70] = "";
2335
2336		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2337		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2338			continue;
2339
2340		if (qc->dma_dir != DMA_NONE) {
2341			static const char *dma_str[] = {
2342				[DMA_BIDIRECTIONAL]	= "bidi",
2343				[DMA_TO_DEVICE]		= "out",
2344				[DMA_FROM_DEVICE]	= "in",
2345			};
2346			static const char *prot_str[] = {
2347				[ATA_PROT_PIO]		= "pio",
2348				[ATA_PROT_DMA]		= "dma",
2349				[ATA_PROT_NCQ]		= "ncq",
2350				[ATAPI_PROT_PIO]	= "pio",
2351				[ATAPI_PROT_DMA]	= "dma",
2352			};
2353
2354			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2355				 prot_str[qc->tf.protocol], qc->nbytes,
2356				 dma_str[qc->dma_dir]);
2357		}
2358
2359		if (ata_is_atapi(qc->tf.protocol)) {
2360			if (qc->scsicmd)
2361				scsi_print_command(qc->scsicmd);
2362			else
2363				snprintf(cdb_buf, sizeof(cdb_buf),
2364				 "cdb %02x %02x %02x %02x %02x %02x %02x %02x  "
2365				 "%02x %02x %02x %02x %02x %02x %02x %02x\n         ",
2366				 cdb[0], cdb[1], cdb[2], cdb[3],
2367				 cdb[4], cdb[5], cdb[6], cdb[7],
2368				 cdb[8], cdb[9], cdb[10], cdb[11],
2369				 cdb[12], cdb[13], cdb[14], cdb[15]);
2370		} else {
2371			const char *descr = ata_get_cmd_descript(cmd->command);
2372			if (descr)
2373				ata_dev_printk(qc->dev, KERN_ERR,
2374					"failed command: %s\n", descr);
2375		}
2376
2377		ata_dev_printk(qc->dev, KERN_ERR,
2378			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2379			"tag %d%s\n         %s"
2380			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2381			"Emask 0x%x (%s)%s\n",
2382			cmd->command, cmd->feature, cmd->nsect,
2383			cmd->lbal, cmd->lbam, cmd->lbah,
2384			cmd->hob_feature, cmd->hob_nsect,
2385			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2386			cmd->device, qc->tag, data_buf, cdb_buf,
2387			res->command, res->feature, res->nsect,
2388			res->lbal, res->lbam, res->lbah,
2389			res->hob_feature, res->hob_nsect,
2390			res->hob_lbal, res->hob_lbam, res->hob_lbah,
2391			res->device, qc->err_mask, ata_err_string(qc->err_mask),
2392			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
2393
2394#ifdef CONFIG_ATA_VERBOSE_ERROR
2395		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2396				    ATA_ERR)) {
2397			if (res->command & ATA_BUSY)
2398				ata_dev_printk(qc->dev, KERN_ERR,
2399				  "status: { Busy }\n");
2400			else
2401				ata_dev_printk(qc->dev, KERN_ERR,
2402				  "status: { %s%s%s%s}\n",
2403				  res->command & ATA_DRDY ? "DRDY " : "",
2404				  res->command & ATA_DF ? "DF " : "",
2405				  res->command & ATA_DRQ ? "DRQ " : "",
2406				  res->command & ATA_ERR ? "ERR " : "");
2407		}
2408
2409		if (cmd->command != ATA_CMD_PACKET &&
2410		    (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2411				     ATA_ABORTED)))
2412			ata_dev_printk(qc->dev, KERN_ERR,
2413			  "error: { %s%s%s%s}\n",
2414			  res->feature & ATA_ICRC ? "ICRC " : "",
2415			  res->feature & ATA_UNC ? "UNC " : "",
2416			  res->feature & ATA_IDNF ? "IDNF " : "",
2417			  res->feature & ATA_ABORTED ? "ABRT " : "");
2418#endif
2419	}
2420}
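/*
 * Illustration only (compiled out): the SError dump above is one long
 * format string; a table-driven decoder over the same SERR_* bits is a
 * sketch of an alternative that is easier to extend, at the cost of
 * emitting one log line per bit instead of a single line.
 */
#if 0
static void decode_serror(struct ata_link *link, u32 serror)
{
	static const struct {
		u32 bit;
		const char *name;
	} serr_tbl[] = {
		{ SERR_PHYRDY_CHG,	"PHYRdyChg" },
		{ SERR_10B_8B_ERR,	"10B8B" },
		{ SERR_DISPARITY,	"Dispar" },
		{ SERR_CRC,		"BadCRC" },
		{ SERR_HANDSHAKE,	"Handshk" },
		{ SERR_DEV_XCHG,	"DevExch" },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(serr_tbl); i++)
		if (serror & serr_tbl[i].bit)
			ata_link_printk(link, KERN_ERR, "SError bit: %s\n",
					serr_tbl[i].name);
}
#endif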
2421
2422/**
2423 *	ata_eh_report - report error handling to user
2424 *	@ap: ATA port to report EH about
2425 *
2426 *	Report EH to user.
2427 *
2428 *	LOCKING:
2429 *	None.
2430 */
2431void ata_eh_report(struct ata_port *ap)
2432{
2433	struct ata_link *link;
2434
2435	ata_for_each_link(link, ap, HOST_FIRST)
2436		ata_eh_link_report(link);
2437}
2438
2439static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2440			unsigned int *classes, unsigned long deadline,
2441			bool clear_classes)
2442{
2443	struct ata_device *dev;
2444
2445	if (clear_classes)
2446		ata_for_each_dev(dev, link, ALL)
2447			classes[dev->devno] = ATA_DEV_UNKNOWN;
2448
2449	return reset(link, classes, deadline);
2450}
2451
2452static int ata_eh_followup_srst_needed(struct ata_link *link,
2453				       int rc, const unsigned int *classes)
2454{
2455	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2456		return 0;
2457	if (rc == -EAGAIN)
2458		return 1;
2459	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2460		return 1;
2461	return 0;
2462}
2463
2464int ata_eh_reset(struct ata_link *link, int classify,
2465		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2466		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2467{
2468	struct ata_port *ap = link->ap;
2469	struct ata_link *slave = ap->slave_link;
2470	struct ata_eh_context *ehc = &link->eh_context;
2471	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2472	unsigned int *classes = ehc->classes;
2473	unsigned int lflags = link->flags;
2474	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2475	int max_tries = 0, try = 0;
2476	struct ata_link *failed_link;
2477	struct ata_device *dev;
2478	unsigned long deadline, now;
2479	ata_reset_fn_t reset;
2480	unsigned long flags;
2481	u32 sstatus;
2482	int nr_unknown, rc;
2483
2484	/*
2485	 * Prepare to reset
2486	 */
2487	while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2488		max_tries++;
2489	if (link->flags & ATA_LFLAG_NO_HRST)
2490		hardreset = NULL;
2491	if (link->flags & ATA_LFLAG_NO_SRST)
2492		softreset = NULL;
2493
2494	/* make sure reset attempts are at least COOL_DOWN apart */
2495	if (ehc->i.flags & ATA_EHI_DID_RESET) {
2496		now = jiffies;
2497		WARN_ON(time_after(ehc->last_reset, now));
2498		deadline = ata_deadline(ehc->last_reset,
2499					ATA_EH_RESET_COOL_DOWN);
2500		if (time_before(now, deadline))
2501			schedule_timeout_uninterruptible(deadline - now);
2502	}
2503
2504	spin_lock_irqsave(ap->lock, flags);
2505	ap->pflags |= ATA_PFLAG_RESETTING;
2506	spin_unlock_irqrestore(ap->lock, flags);
2507
2508	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2509
2510	ata_for_each_dev(dev, link, ALL) {
2511		/* If we issue an SRST then an ATA drive (not ATAPI)
2512		 * may change configuration and be in PIO0 timing. If
2513		 * we do a hard reset (or are coming from power on)
2514		 * this is true for ATA or ATAPI. Until we've set a
2515		 * suitable controller mode we should not touch the
2516		 * bus as we may be talking too fast.
2517		 */
2518		dev->pio_mode = XFER_PIO_0;
2519
2520		/* If the controller has a pio mode setup function
2521		 * then use it to set the chipset to rights. Don't
2522		 * touch the DMA setup as that will be dealt with when
2523		 * configuring devices.
2524		 */
2525		if (ap->ops->set_piomode)
2526			ap->ops->set_piomode(ap, dev);
2527	}
2528
2529	/* prefer hardreset */
2530	reset = NULL;
2531	ehc->i.action &= ~ATA_EH_RESET;
2532	if (hardreset) {
2533		reset = hardreset;
2534		ehc->i.action |= ATA_EH_HARDRESET;
2535	} else if (softreset) {
2536		reset = softreset;
2537		ehc->i.action |= ATA_EH_SOFTRESET;
2538	}
2539
2540	if (prereset) {
2541		unsigned long deadline = ata_deadline(jiffies,
2542						      ATA_EH_PRERESET_TIMEOUT);
2543
2544		if (slave) {
2545			sehc->i.action &= ~ATA_EH_RESET;
2546			sehc->i.action |= ehc->i.action;
2547		}
2548
2549		rc = prereset(link, deadline);
2550
2551		/* If present, do prereset on slave link too.  Reset
2552		 * is skipped iff both master and slave links report
2553		 * -ENOENT or clear ATA_EH_RESET.
2554		 */
2555		if (slave && (rc == 0 || rc == -ENOENT)) {
2556			int tmp;
2557
2558			tmp = prereset(slave, deadline);
2559			if (tmp != -ENOENT)
2560				rc = tmp;
2561
2562			ehc->i.action |= sehc->i.action;
2563		}
2564
2565		if (rc) {
2566			if (rc == -ENOENT) {
2567				ata_link_printk(link, KERN_DEBUG,
2568						"port disabled. ignoring.\n");
2569				ehc->i.action &= ~ATA_EH_RESET;
2570
2571				ata_for_each_dev(dev, link, ALL)
2572					classes[dev->devno] = ATA_DEV_NONE;
2573
2574				rc = 0;
2575			} else
2576				ata_link_printk(link, KERN_ERR,
2577					"prereset failed (errno=%d)\n", rc);
2578			goto out;
2579		}
2580
2581		/* prereset() might have cleared ATA_EH_RESET.  If so,
2582		 * bang classes, thaw and return.
2583		 */
2584		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2585			ata_for_each_dev(dev, link, ALL)
2586				classes[dev->devno] = ATA_DEV_NONE;
2587			if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2588			    ata_is_host_link(link))
2589				ata_eh_thaw_port(ap);
2590			rc = 0;
2591			goto out;
2592		}
2593	}
2594
2595 retry:
2596	/*
2597	 * Perform reset
2598	 */
2599	if (ata_is_host_link(link))
2600		ata_eh_freeze_port(ap);
2601
2602	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2603
2604	if (reset) {
2605		if (verbose)
2606			ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2607					reset == softreset ? "soft" : "hard");
2608
2609		/* mark that this EH session started with reset */
2610		ehc->last_reset = jiffies;
2611		if (reset == hardreset)
2612			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2613		else
2614			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2615
2616		rc = ata_do_reset(link, reset, classes, deadline, true);
2617		if (rc && rc != -EAGAIN) {
2618			failed_link = link;
2619			goto fail;
2620		}
2621
2622		/* hardreset slave link if existent */
2623		if (slave && reset == hardreset) {
2624			int tmp;
2625
2626			if (verbose)
2627				ata_link_printk(slave, KERN_INFO,
2628						"hard resetting link\n");
2629
2630			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2631			tmp = ata_do_reset(slave, reset, classes, deadline,
2632					   false);
2633			switch (tmp) {
2634			case -EAGAIN:
2635				rc = -EAGAIN;
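				/* fall through */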
2636			case 0:
2637				break;
2638			default:
2639				failed_link = slave;
2640				rc = tmp;
2641				goto fail;
2642			}
2643		}
2644
2645		/* perform follow-up SRST if necessary */
2646		if (reset == hardreset &&
2647		    ata_eh_followup_srst_needed(link, rc, classes)) {
2648			reset = softreset;
2649
2650			if (!reset) {
2651				ata_link_printk(link, KERN_ERR,
2652						"follow-up softreset required "
2653						"but no softreset available\n");
2654				failed_link = link;
2655				rc = -EINVAL;
2656				goto fail;
2657			}
2658
2659			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2660			rc = ata_do_reset(link, reset, classes, deadline, true);
2661			if (rc) {
2662				failed_link = link;
2663				goto fail;
2664			}
2665		}
2666	} else {
2667		if (verbose)
2668			ata_link_printk(link, KERN_INFO, "no reset method "
2669					"available, skipping reset\n");
2670		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2671			lflags |= ATA_LFLAG_ASSUME_ATA;
2672	}
2673
2674	/*
2675	 * Post-reset processing
2676	 */
2677	ata_for_each_dev(dev, link, ALL) {
2678		/* After the reset, the device state is PIO 0 and the
2679		 * controller state is undefined.  Reset also wakes up
2680		 * drives from sleeping mode.
2681		 */
2682		dev->pio_mode = XFER_PIO_0;
2683		dev->flags &= ~ATA_DFLAG_SLEEPING;
2684
2685		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2686			continue;
2687
2688		/* apply class override */
2689		if (lflags & ATA_LFLAG_ASSUME_ATA)
2690			classes[dev->devno] = ATA_DEV_ATA;
2691		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2692			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2693	}
2694
2695	/* record current link speed */
2696	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2697		link->sata_spd = (sstatus >> 4) & 0xf;
2698	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2699		slave->sata_spd = (sstatus >> 4) & 0xf;
2700
2701	/* thaw the port */
2702	if (ata_is_host_link(link))
2703		ata_eh_thaw_port(ap);
2704
2705	/* postreset() should clear hardware SError.  Although SError
2706	 * is cleared during link resume, clearing SError here is
2707	 * necessary as some PHYs raise hotplug events after SRST.
2708	 * This introduces a race condition where hotplug occurs between
2709	 * reset and here.  This race is mitigated by cross-checking
2710	 * link onlineness and the classification result later.
2711	 */
2712	if (postreset) {
2713		postreset(link, classes);
2714		if (slave)
2715			postreset(slave, classes);
2716	}
2717
2718	/*
2719	 * Some controllers can't be frozen very well and may set
2720	 * spurious error conditions during reset.  Clear accumulated
2721	 * error information.  As reset is the final recovery action,
2722	 * nothing is lost by doing this.
2723	 */
2724	spin_lock_irqsave(link->ap->lock, flags);
2725	memset(&link->eh_info, 0, sizeof(link->eh_info));
2726	if (slave)
2727		memset(&slave->eh_info, 0, sizeof(slave->eh_info));
2728	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2729	spin_unlock_irqrestore(link->ap->lock, flags);
2730
2731	/*
2732	 * Make sure onlineness and classification result correspond.
2733	 * Hotplug could have happened during reset and some
2734	 * controllers fail to wait while a drive is spinning up after
2735	 * being hotplugged, causing misdetection.  By cross-checking
2736	 * link on/offlineness and classification result, those
2737	 * conditions can be reliably detected and retried.
2738	 */
2739	nr_unknown = 0;
2740	ata_for_each_dev(dev, link, ALL) {
2741		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2742			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2743				ata_dev_printk(dev, KERN_DEBUG, "link online "
2744					       "but device misclassified\n");
2745				classes[dev->devno] = ATA_DEV_NONE;
2746				nr_unknown++;
2747			}
2748		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2749			if (ata_class_enabled(classes[dev->devno]))
2750				ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2751					       "clearing class %d to NONE\n",
2752					       classes[dev->devno]);
2753			classes[dev->devno] = ATA_DEV_NONE;
2754		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2755			ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2756				       "clearing UNKNOWN to NONE\n");
2757			classes[dev->devno] = ATA_DEV_NONE;
2758		}
2759	}
2760
2761	if (classify && nr_unknown) {
2762		if (try < max_tries) {
2763			ata_link_printk(link, KERN_WARNING, "link online but "
2764					"%d devices misclassified, retrying\n",
2765					nr_unknown);
2766			failed_link = link;
2767			rc = -EAGAIN;
2768			goto fail;
2769		}
2770		ata_link_printk(link, KERN_WARNING,
2771				"link online but %d devices misclassified, "
2772				"device detection might fail\n", nr_unknown);
2773	}
2774
2775	/* reset successful, schedule revalidation */
2776	ata_eh_done(link, NULL, ATA_EH_RESET);
2777	if (slave)
2778		ata_eh_done(slave, NULL, ATA_EH_RESET);
2779	ehc->last_reset = jiffies;	/* update to completion time */
2780	ehc->i.action |= ATA_EH_REVALIDATE;
2781
2782	rc = 0;
2783 out:
2784	/* clear hotplug flag */
2785	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2786	if (slave)
2787		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2788
2789	spin_lock_irqsave(ap->lock, flags);
2790	ap->pflags &= ~ATA_PFLAG_RESETTING;
2791	spin_unlock_irqrestore(ap->lock, flags);
2792
2793	return rc;
2794
2795 fail:
2796	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2797	if (!ata_is_host_link(link) &&
2798	    sata_scr_read(link, SCR_STATUS, &sstatus))
2799		rc = -ERESTART;
2800
2801	if (rc == -ERESTART || try >= max_tries)
2802		goto out;
2803
2804	now = jiffies;
2805	if (time_before(now, deadline)) {
2806		unsigned long delta = deadline - now;
2807
2808		ata_link_printk(failed_link, KERN_WARNING,
2809			"reset failed (errno=%d), retrying in %u secs\n",
2810			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2811
2812		while (delta)
2813			delta = schedule_timeout_uninterruptible(delta);
2814	}
2815
2816	if (try == max_tries - 1) {
2817		sata_down_spd_limit(link, 0);
2818		if (slave)
2819			sata_down_spd_limit(slave, 0);
2820	} else if (rc == -EPIPE)
2821		sata_down_spd_limit(failed_link, 0);
2822
2823	if (hardreset)
2824		reset = hardreset;
2825	goto retry;
2826}
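/*
 * Illustration only (compiled out): how the retry budget above unwinds.
 * Each pass through the "retry:" label consumes the next entry of the
 * ULONG_MAX-terminated ata_eh_reset_timeouts[] table near the top of
 * this file, and ATA_EH_RESET_COOL_DOWN is enforced between attempts.
 */
#if 0
static void dump_reset_budget(void)
{
	int try;

	for (try = 0; ata_eh_reset_timeouts[try] != ULONG_MAX; try++)
		printk(KERN_DEBUG "reset try %d: %lu ms window\n",
		       try, ata_eh_reset_timeouts[try]);
}
#endif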
2827
2828static inline void ata_eh_pull_park_action(struct ata_port *ap)
2829{
2830	struct ata_link *link;
2831	struct ata_device *dev;
2832	unsigned long flags;
2833
2834	/*
2835	 * This function can be thought of as an extended version of
2836	 * ata_eh_about_to_do() specially crafted to accommodate the
2837	 * requirements of ATA_EH_PARK handling. Since the EH thread
2838	 * does not leave the do {} while () loop in ata_eh_recover as
2839	 * long as the timeout for a park request to *one* device on
2840	 * the port has not expired, and since we still want to pick
2841	 * up park requests to other devices on the same port or
2842	 * timeout updates for the same device, we have to pull
2843	 * ATA_EH_PARK actions from eh_info into eh_context.i
2844	 * ourselves at the beginning of each pass over the loop.
2845	 *
2846	 * Additionally, all write accesses to &ap->park_req_pending
2847	 * through INIT_COMPLETION() (see below) or complete_all()
2848	 * (see ata_scsi_park_store()) are protected by the host lock.
2849	 * As a result, park_req_pending.done is zero on
2850	 * exit from this function, i.e. when ATA_EH_PARK actions for
2851	 * *all* devices on port ap have been pulled into the
2852	 * respective eh_context structs. If, and only if,
2853	 * park_req_pending.done is non-zero by the time we reach
2854	 * wait_for_completion_timeout(), another ATA_EH_PARK action
2855	 * has been scheduled for at least one of the devices on port
2856	 * ap and we have to cycle over the do {} while () loop in
2857	 * ata_eh_recover() again.
2858	 */
2859
2860	spin_lock_irqsave(ap->lock, flags);
2861	INIT_COMPLETION(ap->park_req_pending);
2862	ata_for_each_link(link, ap, EDGE) {
2863		ata_for_each_dev(dev, link, ALL) {
2864			struct ata_eh_info *ehi = &link->eh_info;
2865
2866			link->eh_context.i.dev_action[dev->devno] |=
2867				ehi->dev_action[dev->devno] & ATA_EH_PARK;
2868			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2869		}
2870	}
2871	spin_unlock_irqrestore(ap->lock, flags);
2872}
2873
2874static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2875{
2876	struct ata_eh_context *ehc = &dev->link->eh_context;
2877	struct ata_taskfile tf;
2878	unsigned int err_mask;
2879
2880	ata_tf_init(dev, &tf);
2881	if (park) {
2882		ehc->unloaded_mask |= 1 << dev->devno;
2883		tf.command = ATA_CMD_IDLEIMMEDIATE;
2884		tf.feature = 0x44;	/* ATA8-ACS unload subcommand */
2885		tf.lbal = 0x4c;		/* LBA 0x554e4c, ASCII "UNL" */
2886		tf.lbam = 0x4e;
2887		tf.lbah = 0x55;
2888	} else {
2889		ehc->unloaded_mask &= ~(1 << dev->devno);
2890		tf.command = ATA_CMD_CHK_POWER;
2891	}
2892
2893	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2894	tf.protocol |= ATA_PROT_NODATA;
2895	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
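	/* ATA8-ACS: a successful unload is acked with 0xc4 in LBA low */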
2896	if (park && (err_mask || tf.lbal != 0xc4)) {
2897		ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2898		ehc->unloaded_mask &= ~(1 << dev->devno);
2899	}
2900}
2901
2902static int ata_eh_revalidate_and_attach(struct ata_link *link,
2903					struct ata_device **r_failed_dev)
2904{
2905	struct ata_port *ap = link->ap;
2906	struct ata_eh_context *ehc = &link->eh_context;
2907	struct ata_device *dev;
2908	unsigned int new_mask = 0;
2909	unsigned long flags;
2910	int rc = 0;
2911
2912	DPRINTK("ENTER\n");
2913
2914	/* For PATA drive side cable detection to work, IDENTIFY must
2915	 * be done backwards such that PDIAG- is released by the slave
2916	 * device before the master device is identified.
2917	 */
2918	ata_for_each_dev(dev, link, ALL_REVERSE) {
2919		unsigned int action = ata_eh_dev_action(dev);
2920		unsigned int readid_flags = 0;
2921
2922		if (ehc->i.flags & ATA_EHI_DID_RESET)
2923			readid_flags |= ATA_READID_POSTRESET;
2924
2925		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2926			WARN_ON(dev->class == ATA_DEV_PMP);
2927
2928			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2929				rc = -EIO;
2930				goto err;
2931			}
2932
2933			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
2934			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2935						readid_flags);
2936			if (rc)
2937				goto err;
2938
2939			ata_eh_done(link, dev, ATA_EH_REVALIDATE);
2940
2941			/* Configuration may have changed, reconfigure
2942			 * transfer mode.
2943			 */
2944			ehc->i.flags |= ATA_EHI_SETMODE;
2945
2946			/* schedule the scsi_rescan_device() here */
2947			schedule_work(&(ap->scsi_rescan_task));
2948		} else if (dev->class == ATA_DEV_UNKNOWN &&
2949			   ehc->tries[dev->devno] &&
2950			   ata_class_enabled(ehc->classes[dev->devno])) {
2951			/* Temporarily set dev->class, it will be
2952			 * permanently set once all configurations are
2953			 * complete.  This is necessary because new
2954			 * device configuration is done in two
2955			 * separate loops.
2956			 */
2957			dev->class = ehc->classes[dev->devno];
2958
2959			if (dev->class == ATA_DEV_PMP)
2960				rc = sata_pmp_attach(dev);
2961			else
2962				rc = ata_dev_read_id(dev, &dev->class,
2963						     readid_flags, dev->id);
2964
2965			/* read_id might have changed class, store and reset */
2966			ehc->classes[dev->devno] = dev->class;
2967			dev->class = ATA_DEV_UNKNOWN;
2968
2969			switch (rc) {
2970			case 0:
2971				/* clear error info accumulated during probe */
2972				ata_ering_clear(&dev->ering);
2973				new_mask |= 1 << dev->devno;
2974				break;
2975			case -ENOENT:
2976				/* IDENTIFY was issued to non-existent
2977				 * device.  No need to reset.  Just
2978				 * thaw and ignore the device.
2979				 */
2980				ata_eh_thaw_port(ap);
2981				break;
2982			default:
2983				goto err;
2984			}
2985		}
2986	}
2987
2988	/* PDIAG- should have been released, ask cable type if post-reset */
2989	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
2990		if (ap->ops->cable_detect)
2991			ap->cbl = ap->ops->cable_detect(ap);
2992		ata_force_cbl(ap);
2993	}
2994
2995	/* Configure new devices forward so that the user doesn't see
2996	 * device detection messages backwards.
2997	 */
2998	ata_for_each_dev(dev, link, ALL) {
2999		if (!(new_mask & (1 << dev->devno)))
3000			continue;
3001
3002		dev->class = ehc->classes[dev->devno];
3003
3004		if (dev->class == ATA_DEV_PMP)
3005			continue;
3006
3007		ehc->i.flags |= ATA_EHI_PRINTINFO;
3008		rc = ata_dev_configure(dev);
3009		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3010		if (rc) {
3011			dev->class = ATA_DEV_UNKNOWN;
3012			goto err;
3013		}
3014
3015		spin_lock_irqsave(ap->lock, flags);
3016		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3017		spin_unlock_irqrestore(ap->lock, flags);
3018
3019		/* new device discovered, configure xfermode */
3020		ehc->i.flags |= ATA_EHI_SETMODE;
3021	}
3022
3023	return 0;
3024
3025 err:
3026	*r_failed_dev = dev;
3027	DPRINTK("EXIT rc=%d\n", rc);
3028	return rc;
3029}
3030
3031/**
3032 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
3033 *	@link: link on which timings will be programmed
3034 *	@r_failed_dev: out parameter for failed device
3035 *
3036 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
3037 *	ata_set_mode() fails, pointer to the failing device is
3038 *	returned in @r_failed_dev.
3039 *
3040 *	LOCKING:
3041 *	PCI/etc. bus probe sem.
3042 *
3043 *	RETURNS:
3044 *	0 on success, negative errno otherwise
3045 */
3046int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3047{
3048	struct ata_port *ap = link->ap;
3049	struct ata_device *dev;
3050	int rc;
3051
3052	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3053	ata_for_each_dev(dev, link, ENABLED) {
3054		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3055			struct ata_ering_entry *ent;
3056
3057			ent = ata_ering_top(&dev->ering);
3058			if (ent)
3059				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3060		}
3061	}
3062
3063	/* has private set_mode? */
3064	if (ap->ops->set_mode)
3065		rc = ap->ops->set_mode(link, r_failed_dev);
3066	else
3067		rc = ata_do_set_mode(link, r_failed_dev);
3068
3069	/* if transfer mode has changed, set DUBIOUS_XFER on device */
3070	ata_for_each_dev(dev, link, ENABLED) {
3071		struct ata_eh_context *ehc = &link->eh_context;
3072		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3073		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3074
3075		if (dev->xfer_mode != saved_xfer_mode ||
3076		    ata_ncq_enabled(dev) != saved_ncq)
3077			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
3078	}
3079
3080	return rc;
3081}
3082
3083/**
3084 *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3085 *	@dev: ATAPI device to clear UA for
3086 *
3087 *	Resets and other operations can make an ATAPI device raise
3088 *	UNIT ATTENTION which causes the next operation to fail.  This
3089 *	function clears UA.
3090 *
3091 *	LOCKING:
3092 *	EH context (may sleep).
3093 *
3094 *	RETURNS:
3095 *	0 on success, -errno on failure.
3096 */
3097static int atapi_eh_clear_ua(struct ata_device *dev)
3098{
3099	int i;
3100
3101	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3102		u8 *sense_buffer = dev->link->ap->sector_buf;
3103		u8 sense_key = 0;
3104		unsigned int err_mask;
3105
3106		err_mask = atapi_eh_tur(dev, &sense_key);
3107		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3108			ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3109				"failed (err_mask=0x%x)\n", err_mask);
3110			return -EIO;
3111		}
3112
3113		if (!err_mask || sense_key != UNIT_ATTENTION)
3114			return 0;
3115
3116		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3117		if (err_mask) {
3118			ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3119				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3120			return -EIO;
3121		}
3122	}
3123
3124	ata_dev_printk(dev, KERN_WARNING,
3125		"UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3126
3127	return 0;
3128}
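/*
 * Illustration only (compiled out): the loop above distinguishes three
 * TEST UNIT READY outcomes.  A hypothetical helper making the decision
 * explicit:
 */
#if 0
static int ua_next_step(unsigned int err_mask, u8 sense_key)
{
	if (err_mask && err_mask != AC_ERR_DEV)
		return -EIO;	/* hard failure, give up */
	if (!err_mask || sense_key != UNIT_ATTENTION)
		return 0;	/* device ready (or failing for non-UA) */
	return 1;		/* fetch sense to clear UA, then retry */
}
#endif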
3129
3130/**
3131 *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3132 *	@dev: ATA device which may need FLUSH retry
3133 *
3134 *	If @dev failed FLUSH, it needs to be reported to the upper layer
3135 *	immediately, as it means that @dev failed to remap and has already
3136 *	lost at least a sector; further FLUSH retries won't make any
3137 *	difference to the lost sector.  However, if FLUSH failed for
3138 *	another reason, for example a transmission error, FLUSH needs
3139 *	to be retried.
3140 *
3141 *	This function determines whether FLUSH failure retry is
3142 *	necessary and performs it if so.
3143 *
3144 *	RETURNS:
3145 *	0 if EH can continue, -errno if EH needs to be repeated.
3146 */
3147static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3148{
3149	struct ata_link *link = dev->link;
3150	struct ata_port *ap = link->ap;
3151	struct ata_queued_cmd *qc;
3152	struct ata_taskfile tf;
3153	unsigned int err_mask;
3154	int rc = 0;
3155
3156	/* did flush fail for this device? */
3157	if (!ata_tag_valid(link->active_tag))
3158		return 0;
3159
3160	qc = __ata_qc_from_tag(ap, link->active_tag);
3161	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3162			       qc->tf.command != ATA_CMD_FLUSH))
3163		return 0;
3164
3165	/* if the device failed it, it should be reported to upper layers */
3166	if (qc->err_mask & AC_ERR_DEV)
3167		return 0;
3168
3169	/* flush failed for some other reason, give it another shot */
3170	ata_tf_init(dev, &tf);
3171
3172	tf.command = qc->tf.command;
3173	tf.flags |= ATA_TFLAG_DEVICE;
3174	tf.protocol = ATA_PROT_NODATA;
3175
3176	ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
3177		       tf.command, qc->err_mask);
3178
3179	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3180	if (!err_mask) {
3181		/*
3182		 * FLUSH is complete but there's no way to
3183		 * successfully complete a failed command from EH.
3184		 * Making sure retry is allowed at least once and
3185		 * retrying it should do the trick - whatever was in
3186		 * the cache is already on the platter and this won't
3187		 * cause an infinite loop.
3188		 */
3189		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3190	} else {
3191		ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
3192			       err_mask);
3193		rc = -EIO;
3194
3195		/* if device failed it, report it to upper layers */
3196		if (err_mask & AC_ERR_DEV) {
3197			qc->err_mask |= AC_ERR_DEV;
3198			qc->result_tf = tf;
3199			if (!(ap->pflags & ATA_PFLAG_FROZEN))
3200				rc = 0;
3201		}
3202	}
3203	return rc;
3204}
3205
3206static int ata_link_nr_enabled(struct ata_link *link)
3207{
3208	struct ata_device *dev;
3209	int cnt = 0;
3210
3211	ata_for_each_dev(dev, link, ENABLED)
3212		cnt++;
3213	return cnt;
3214}
3215
3216static int ata_link_nr_vacant(struct ata_link *link)
3217{
3218	struct ata_device *dev;
3219	int cnt = 0;
3220
3221	ata_for_each_dev(dev, link, ALL)
3222		if (dev->class == ATA_DEV_UNKNOWN)
3223			cnt++;
3224	return cnt;
3225}
3226
3227static int ata_eh_skip_recovery(struct ata_link *link)
3228{
3229	struct ata_port *ap = link->ap;
3230	struct ata_eh_context *ehc = &link->eh_context;
3231	struct ata_device *dev;
3232
3233	/* skip disabled links */
3234	if (link->flags & ATA_LFLAG_DISABLED)
3235		return 1;
3236
3237	/* skip if explicitly requested */
3238	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
3239		return 1;
3240
3241	/* thaw frozen port and recover failed devices */
3242	if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3243		return 0;
3244
3245	/* reset at least once if reset is requested */
3246	if ((ehc->i.action & ATA_EH_RESET) &&
3247	    !(ehc->i.flags & ATA_EHI_DID_RESET))
3248		return 0;
3249
3250	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
3251	ata_for_each_dev(dev, link, ALL) {
3252		if (dev->class == ATA_DEV_UNKNOWN &&
3253		    ehc->classes[dev->devno] != ATA_DEV_NONE)
3254			return 0;
3255	}
3256
3257	return 1;
3258}
3259
3260static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3261{
3262	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3263	u64 now = get_jiffies_64();
3264	int *trials = void_arg;
3265
3266	if (ent->timestamp < now - min(now, interval))
3267		return -1;
3268
3269	(*trials)++;
3270	return 0;
3271}
3272
3273static int ata_eh_schedule_probe(struct ata_device *dev)
3274{
3275	struct ata_eh_context *ehc = &dev->link->eh_context;
3276	struct ata_link *link = ata_dev_phys_link(dev);
3277	int trials = 0;
3278
3279	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3280	    (ehc->did_probe_mask & (1 << dev->devno)))
3281		return 0;
3282
3283	ata_eh_detach_dev(dev);
3284	ata_dev_init(dev);
3285	ehc->did_probe_mask |= (1 << dev->devno);
3286	ehc->i.action |= ATA_EH_RESET;
3287	ehc->saved_xfer_mode[dev->devno] = 0;
3288	ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3289
3290	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3291	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3292
3293	if (trials > ATA_EH_PROBE_TRIALS)
3294		sata_down_spd_limit(link, 1);
3295
3296	return 1;
3297}
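/*
 * Illustration only (compiled out): the trial accounting above in
 * miniature.  ata_eh_schedule_probe() records a marker in the ering,
 * then counts entries younger than ATA_EH_PROBE_TRIAL_INTERVAL;
 * exceeding ATA_EH_PROBE_TRIALS lowers the link speed before the next
 * probe.  The flat stamps[] array is a hypothetical stand-in for the
 * ering, which is walked newest-first by ata_ering_map().
 */
#if 0
static int count_recent_trials(const u64 *stamps, int n, u64 now, u64 window)
{
	int i, trials = 0;

	for (i = 0; i < n; i++) {
		if (stamps[i] < now - min(now, window))
			break;	/* an older entry terminates the walk */
		trials++;
	}
	return trials;
}
#endif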
3298
3299static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3300{
3301	struct ata_eh_context *ehc = &dev->link->eh_context;
3302
3303	/* -EAGAIN from EH routine indicates retry without prejudice.
3304	 * The requester is responsible for ensuring forward progress.
3305	 */
3306	if (err != -EAGAIN)
3307		ehc->tries[dev->devno]--;
3308
3309	switch (err) {
3310	case -ENODEV:
3311		/* device missing or wrong IDENTIFY data, schedule probing */
3312		ehc->i.probe_mask |= (1 << dev->devno);
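		/* fall through */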
3313	case -EINVAL:
3314		/* give it just one more chance */
3315		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
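		/* fall through */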
3316	case -EIO:
3317		if (ehc->tries[dev->devno] == 1) {
3318			/* This is the last chance, better to slow
3319			 * down than lose it.
3320			 */
3321			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3322			if (dev->pio_mode > XFER_PIO_0)
3323				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3324		}
3325	}
3326
3327	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3328		/* disable device if it has used up all its chances */
3329		ata_dev_disable(dev);
3330
3331		/* detach if offline */
3332		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3333			ata_eh_detach_dev(dev);
3334
3335		/* schedule probe if necessary */
3336		if (ata_eh_schedule_probe(dev)) {
3337			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3338			memset(ehc->cmd_timeout_idx[dev->devno], 0,
3339			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
3340		}
3341
3342		return 1;
3343	} else {
3344		ehc->i.action |= ATA_EH_RESET;
3345		return 0;
3346	}
3347}
3348
3349/**
3350 *	ata_eh_recover - recover host port after error
3351 *	@ap: host port to recover
3352 *	@prereset: prereset method (can be NULL)
3353 *	@softreset: softreset method (can be NULL)
3354 *	@hardreset: hardreset method (can be NULL)
3355 *	@postreset: postreset method (can be NULL)
3356 *	@r_failed_link: out parameter for failed link
3357 *
3358 *	This is the alpha and omega, eum and yang, heart and soul of
3359 *	libata exception handling.  On entry, actions required to
3360 *	recover each link and hotplug requests are recorded in the
3361 *	link's eh_context.  This function executes all the operations
3362 *	with appropriate retries and fallbacks to resurrect failed
3363 *	devices, detach goners and greet newcomers.
3364 *
3365 *	LOCKING:
3366 *	Kernel thread context (may sleep).
3367 *
3368 *	RETURNS:
3369 *	0 on success, -errno on failure.
3370 */
3371int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3372		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3373		   ata_postreset_fn_t postreset,
3374		   struct ata_link **r_failed_link)
3375{
3376	struct ata_link *link;
3377	struct ata_device *dev;
3378	int nr_failed_devs;
3379	int rc;
3380	unsigned long flags, deadline;
3381
3382	DPRINTK("ENTER\n");
3383
3384	/* prep for recovery */
3385	ata_for_each_link(link, ap, EDGE) {
3386		struct ata_eh_context *ehc = &link->eh_context;
3387
3388		/* re-enable link? */
3389		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3390			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3391			spin_lock_irqsave(ap->lock, flags);
3392			link->flags &= ~ATA_LFLAG_DISABLED;
3393			spin_unlock_irqrestore(ap->lock, flags);
3394			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3395		}
3396
3397		ata_for_each_dev(dev, link, ALL) {
3398			if (link->flags & ATA_LFLAG_NO_RETRY)
3399				ehc->tries[dev->devno] = 1;
3400			else
3401				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3402
3403			/* collect port action mask recorded in dev actions */
3404			ehc->i.action |= ehc->i.dev_action[dev->devno] &
3405					 ~ATA_EH_PERDEV_MASK;
3406			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3407
3408			/* process hotplug request */
3409			if (dev->flags & ATA_DFLAG_DETACH)
3410				ata_eh_detach_dev(dev);
3411
3412			/* schedule probe if necessary */
3413			if (!ata_dev_enabled(dev))
3414				ata_eh_schedule_probe(dev);
3415		}
3416	}
3417
3418 retry:
3419	rc = 0;
3420	nr_failed_devs = 0;
3421
3422	/* if UNLOADING, finish immediately */
3423	if (ap->pflags & ATA_PFLAG_UNLOADING)
3424		goto out;
3425
3426	/* prep for EH */
3427	ata_for_each_link(link, ap, EDGE) {
3428		struct ata_eh_context *ehc = &link->eh_context;
3429
3430		/* skip EH if possible. */
3431		if (ata_eh_skip_recovery(link))
3432			ehc->i.action = 0;
3433
3434		ata_for_each_dev(dev, link, ALL)
3435			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3436	}
3437
3438	/* reset */
3439	ata_for_each_link(link, ap, EDGE) {
3440		struct ata_eh_context *ehc = &link->eh_context;
3441
3442		if (!(ehc->i.action & ATA_EH_RESET))
3443			continue;
3444
3445		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3446				  prereset, softreset, hardreset, postreset);
3447		if (rc) {
3448			ata_link_printk(link, KERN_ERR,
3449					"reset failed, giving up\n");
3450			goto out;
3451		}
3452	}
3453
3454	do {
3455		unsigned long now;
3456
3457		/*
3458		 * clears ATA_EH_PARK in eh_info and resets
3459		 * ap->park_req_pending
3460		 */
3461		ata_eh_pull_park_action(ap);
3462
3463		deadline = jiffies;
3464		ata_for_each_link(link, ap, EDGE) {
3465			ata_for_each_dev(dev, link, ALL) {
3466				struct ata_eh_context *ehc = &link->eh_context;
3467				unsigned long tmp;
3468
3469				if (dev->class != ATA_DEV_ATA)
3470					continue;
3471				if (!(ehc->i.dev_action[dev->devno] &
3472				      ATA_EH_PARK))
3473					continue;
3474				tmp = dev->unpark_deadline;
3475				if (time_before(deadline, tmp))
3476					deadline = tmp;
3477				else if (time_before_eq(tmp, jiffies))
3478					continue;
3479				if (ehc->unloaded_mask & (1 << dev->devno))
3480					continue;
3481
3482				ata_eh_park_issue_cmd(dev, 1);
3483			}
3484		}
3485
3486		now = jiffies;
3487		if (time_before_eq(deadline, now))
3488			break;
3489
3490		deadline = wait_for_completion_timeout(&ap->park_req_pending,
3491						       deadline - now);
3492	} while (deadline);
3493	ata_for_each_link(link, ap, EDGE) {
3494		ata_for_each_dev(dev, link, ALL) {
3495			if (!(link->eh_context.unloaded_mask &
3496			      (1 << dev->devno)))
3497				continue;
3498
3499			ata_eh_park_issue_cmd(dev, 0);
3500			ata_eh_done(link, dev, ATA_EH_PARK);
3501		}
3502	}
3503
3504	/* the rest */
3505	ata_for_each_link(link, ap, EDGE) {
3506		struct ata_eh_context *ehc = &link->eh_context;
3507
3508		/* revalidate existing devices and attach new ones */
3509		rc = ata_eh_revalidate_and_attach(link, &dev);
3510		if (rc)
3511			goto dev_fail;
3512
3513		/* if PMP got attached, return, pmp EH will take care of it */
3514		if (link->device->class == ATA_DEV_PMP) {
3515			ehc->i.action = 0;
3516			return 0;
3517		}
3518
3519		/* configure transfer mode if necessary */
3520		if (ehc->i.flags & ATA_EHI_SETMODE) {
3521			rc = ata_set_mode(link, &dev);
3522			if (rc)
3523				goto dev_fail;
3524			ehc->i.flags &= ~ATA_EHI_SETMODE;
3525		}
3526
3527		/* If reset has been issued, clear UA to avoid
3528		 * disrupting the current users of the device.
3529		 */
3530		if (ehc->i.flags & ATA_EHI_DID_RESET) {
3531			ata_for_each_dev(dev, link, ALL) {
3532				if (dev->class != ATA_DEV_ATAPI)
3533					continue;
3534				rc = atapi_eh_clear_ua(dev);
3535				if (rc)
3536					goto dev_fail;
3537			}
3538		}
3539
3540		/* retry flush if necessary */
3541		ata_for_each_dev(dev, link, ALL) {
3542			if (dev->class != ATA_DEV_ATA)
3543				continue;
3544			rc = ata_eh_maybe_retry_flush(dev);
3545			if (rc)
3546				goto dev_fail;
3547		}
3548
3549		/* configure link power saving */
3550		if (ehc->i.action & ATA_EH_LPM)
3551			ata_for_each_dev(dev, link, ALL)
3552				ata_dev_enable_pm(dev, ap->pm_policy);
3553
3554		/* this link is okay now */
3555		ehc->i.flags = 0;
3556		continue;
3557
3558dev_fail:
3559		nr_failed_devs++;
3560		ata_eh_handle_dev_fail(dev, rc);
3561
3562		if (ap->pflags & ATA_PFLAG_FROZEN) {
3563			/* PMP reset requires working host port.
3564			 * Can't retry if it's frozen.
3565			 */
3566			if (sata_pmp_attached(ap))
3567				goto out;
3568			break;
3569		}
3570	}
3571
3572	if (nr_failed_devs)
3573		goto retry;
3574
3575 out:
3576	if (rc && r_failed_link)
3577		*r_failed_link = link;
3578
3579	DPRINTK("EXIT, rc=%d\n", rc);
3580	return rc;
3581}
3582
3583/**
3584 *	ata_eh_finish - finish up EH
3585 *	@ap: host port to finish EH for
3586 *
3587 *	Recovery is complete.  Clean up EH states and retry or finish
3588 *	failed qcs.
3589 *
3590 *	LOCKING:
3591 *	None.
3592 */
3593void ata_eh_finish(struct ata_port *ap)
3594{
3595	int tag;
3596
3597	/* retry or finish qcs */
3598	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3599		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3600
3601		if (!(qc->flags & ATA_QCFLAG_FAILED))
3602			continue;
3603
3604		if (qc->err_mask) {
3605			if (qc->flags & ATA_QCFLAG_RETRY)
3606				ata_eh_qc_retry(qc);
3607			else
3608				ata_eh_qc_complete(qc);
3609		} else {
3610			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3611				ata_eh_qc_complete(qc);
3612			} else {
3613				/* feed zero TF to sense generation */
3614				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3615				ata_eh_qc_retry(qc);
3616			}
3617		}
3618	}
3619
3620	/* make sure nr_active_links is zero after EH */
3621	WARN_ON(ap->nr_active_links);
3622	ap->nr_active_links = 0;
3623}
3624
3625/**
3626 *	ata_do_eh - do standard error handling
3627 *	@ap: host port to handle error for
3628 *
3629 *	@prereset: prereset method (can be NULL)
3630 *	@softreset: softreset method (can be NULL)
3631 *	@hardreset: hardreset method (can be NULL)
3632 *	@postreset: postreset method (can be NULL)
3633 *
3634 *	Perform standard error handling sequence.
3635 *
3636 *	LOCKING:
3637 *	Kernel thread context (may sleep).
3638 */
3639void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3640	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3641	       ata_postreset_fn_t postreset)
3642{
3643	struct ata_device *dev;
3644	int rc;
3645
3646	ata_eh_autopsy(ap);
3647	ata_eh_report(ap);
3648
3649	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3650			    NULL);
3651	if (rc) {
3652		ata_for_each_dev(dev, &ap->link, ALL)
3653			ata_dev_disable(dev);
3654	}
3655
3656	ata_eh_finish(ap);
3657}
3658
3659/**
3660 *	ata_std_error_handler - standard error handler
3661 *	@ap: host port to handle error for
3662 *
3663 *	Standard error handler
3664 *
3665 *	LOCKING:
3666 *	Kernel thread context (may sleep).
3667 */
3668void ata_std_error_handler(struct ata_port *ap)
3669{
3670	struct ata_port_operations *ops = ap->ops;
3671	ata_reset_fn_t hardreset = ops->hardreset;
3672
3673	/* ignore built-in hardreset if SCR access is not available */
3674	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
3675		hardreset = NULL;
3676
3677	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3678}
3679
3680#ifdef CONFIG_PM
3681/**
3682 *	ata_eh_handle_port_suspend - perform port suspend operation
3683 *	@ap: port to suspend
3684 *
3685 *	Suspend @ap.
3686 *
3687 *	LOCKING:
3688 *	Kernel thread context (may sleep).
3689 */
3690static void ata_eh_handle_port_suspend(struct ata_port *ap)
3691{
3692	unsigned long flags;
3693	int rc = 0;
3694
3695	/* are we suspending? */
3696	spin_lock_irqsave(ap->lock, flags);
3697	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3698	    ap->pm_mesg.event == PM_EVENT_ON) {
3699		spin_unlock_irqrestore(ap->lock, flags);
3700		return;
3701	}
3702	spin_unlock_irqrestore(ap->lock, flags);
3703
3704	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
3705
3706	/* tell ACPI we're suspending */
3707	rc = ata_acpi_on_suspend(ap);
3708	if (rc)
3709		goto out;
3710
3711	/* suspend */
3712	ata_eh_freeze_port(ap);
3713
3714	if (ap->ops->port_suspend)
3715		rc = ap->ops->port_suspend(ap, ap->pm_mesg);
3716
3717	ata_acpi_set_state(ap, PMSG_SUSPEND);
3718 out:
3719	/* report result */
3720	spin_lock_irqsave(ap->lock, flags);
3721
3722	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
3723	if (rc == 0)
3724		ap->pflags |= ATA_PFLAG_SUSPENDED;
3725	else if (ap->pflags & ATA_PFLAG_FROZEN)
3726		ata_port_schedule_eh(ap);
3727
3728	if (ap->pm_result) {
3729		*ap->pm_result = rc;
3730		ap->pm_result = NULL;
3731	}
3732
3733	spin_unlock_irqrestore(ap->lock, flags);
3734
3735	return;
3736}
3737
3738/**
3739 *	ata_eh_handle_port_resume - perform port resume operation
3740 *	@ap: port to resume
3741 *
3742 *	Resume @ap.
3743 *
3744 *	LOCKING:
3745 *	Kernel thread context (may sleep).
3746 */
3747static void ata_eh_handle_port_resume(struct ata_port *ap)
3748{
3749	struct ata_link *link;
3750	struct ata_device *dev;
3751	unsigned long flags;
3752	int rc = 0;
3753
3754	/* are we resuming? */
3755	spin_lock_irqsave(ap->lock, flags);
3756	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3757	    ap->pm_mesg.event != PM_EVENT_ON) {
3758		spin_unlock_irqrestore(ap->lock, flags);
3759		return;
3760	}
3761	spin_unlock_irqrestore(ap->lock, flags);
3762
3763	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
3764
3765	/*
3766	 * Error timestamps are in jiffies, which doesn't advance while
3767	 * suspended, and PHY events during resume aren't too uncommon.
3768	 * When the two are combined, it can lead to unnecessary speed
3769	 * downs if the machine is suspended and resumed repeatedly.
3770	 * Clear error history.
3771	 */
3772	ata_for_each_link(link, ap, HOST_FIRST)
3773		ata_for_each_dev(dev, link, ALL)
3774			ata_ering_clear(&dev->ering);
3775
3776	ata_acpi_set_state(ap, PMSG_ON);
3777
3778	if (ap->ops->port_resume)
3779		rc = ap->ops->port_resume(ap);
3780
3781	/* tell ACPI that we're resuming */
3782	ata_acpi_on_resume(ap);
3783
3784	/* report result */
3785	spin_lock_irqsave(ap->lock, flags);
3786	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
3787	if (ap->pm_result) {
3788		*ap->pm_result = rc;
3789		ap->pm_result = NULL;
3790	}
3791	spin_unlock_irqrestore(ap->lock, flags);
3792}
3793#endif /* CONFIG_PM */
3794