/*-
 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/cam/ata/ata_da.c 298649 2016-04-26 15:38:17Z pfg $");

#include "opt_ada.h"

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_sim.h>
#include <cam/cam_iosched.h>

#include <cam/ata/ata_all.h>

#include <machine/md_var.h>	/* geometry translation */

68#ifdef _KERNEL
69
70#define ATA_MAX_28BIT_LBA               268435455UL
71
72extern int iosched_debug;
73
74typedef enum {
75	ADA_STATE_RAHEAD,
76	ADA_STATE_WCACHE,
77	ADA_STATE_NORMAL
78} ada_state;
79
80typedef enum {
81	ADA_FLAG_CAN_48BIT	= 0x0002,
82	ADA_FLAG_CAN_FLUSHCACHE	= 0x0004,
83	ADA_FLAG_CAN_NCQ	= 0x0008,
84	ADA_FLAG_CAN_DMA	= 0x0010,
85	ADA_FLAG_NEED_OTAG	= 0x0020,
86	ADA_FLAG_WAS_OTAG	= 0x0040,
87	ADA_FLAG_CAN_TRIM	= 0x0080,
88	ADA_FLAG_OPEN		= 0x0100,
89	ADA_FLAG_SCTX_INIT	= 0x0200,
90	ADA_FLAG_CAN_CFA        = 0x0400,
91	ADA_FLAG_CAN_POWERMGT   = 0x0800,
92	ADA_FLAG_CAN_DMA48	= 0x1000,
93	ADA_FLAG_DIRTY		= 0x2000,
94	ADA_FLAG_CAN_NCQ_TRIM	= 0x4000,	/* CAN_TRIM also set */
95	ADA_FLAG_PIM_CAN_NCQ_TRIM = 0x8000
96} ada_flags;
97
98typedef enum {
99	ADA_Q_NONE		= 0x00,
100	ADA_Q_4K		= 0x01,
101	ADA_Q_NCQ_TRIM_BROKEN	= 0x02,
102} ada_quirks;
103
104#define ADA_Q_BIT_STRING	\
105	"\020"			\
106	"\0014K"		\
107	"\002NCQ_TRIM_BROKEN"
108
109typedef enum {
110	ADA_CCB_RAHEAD		= 0x01,
111	ADA_CCB_WCACHE		= 0x02,
112	ADA_CCB_BUFFER_IO	= 0x03,
113	ADA_CCB_DUMP		= 0x05,
114	ADA_CCB_TRIM		= 0x06,
115	ADA_CCB_TYPE_MASK	= 0x0F,
116} ada_ccb_state;
117
118/* Offsets into our private area for storing information */
119#define ccb_state	ppriv_field0
120#define ccb_bp		ppriv_ptr1
121
122typedef enum {
123	ADA_DELETE_NONE,
124	ADA_DELETE_DISABLE,
125	ADA_DELETE_CFA_ERASE,
126	ADA_DELETE_DSM_TRIM,
127	ADA_DELETE_NCQ_DSM_TRIM,
128	ADA_DELETE_MIN = ADA_DELETE_CFA_ERASE,
129	ADA_DELETE_MAX = ADA_DELETE_NCQ_DSM_TRIM,
130} ada_delete_methods;
131
132static const char *ada_delete_method_names[] =
133    { "NONE", "DISABLE", "CFA_ERASE", "DSM_TRIM", "NCQ_DSM_TRIM" };
134#if 0
135static const char *ada_delete_method_desc[] =
136    { "NONE", "DISABLED", "CFA Erase", "DSM Trim", "DSM Trim via NCQ" };
137#endif
138
139struct disk_params {
140	u_int8_t  heads;
141	u_int8_t  secs_per_track;
142	u_int32_t cylinders;
143	u_int32_t secsize;	/* Number of bytes/logical sector */
144	u_int64_t sectors;	/* Total number sectors */
145};
146
147#define TRIM_MAX_BLOCKS	8
148#define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * ATA_DSM_BLK_RANGES)
149struct trim_request {
150	uint8_t		data[TRIM_MAX_RANGES * ATA_DSM_RANGE_SIZE];
151	TAILQ_HEAD(, bio) bps;
152};
153
154struct ada_softc {
155	struct   cam_iosched_softc *cam_iosched;
156	int	 outstanding_cmds;	/* Number of active commands */
157	int	 refcount;		/* Active xpt_action() calls */
158	ada_state state;
159	ada_flags flags;
160	ada_quirks quirks;
161	ada_delete_methods delete_method;
162	int	 trim_max_ranges;
163	int	 read_ahead;
164	int	 write_cache;
165	int	 unmappedio;
166	int	 rotating;
167#ifdef ADA_TEST_FAILURE
168	int      force_read_error;
169	int      force_write_error;
170	int      periodic_read_error;
171	int      periodic_read_count;
172#endif
173	struct	 disk_params params;
174	struct	 disk *disk;
175	struct task		sysctl_task;
176	struct sysctl_ctx_list	sysctl_ctx;
177	struct sysctl_oid	*sysctl_tree;
178	struct callout		sendordered_c;
179	struct trim_request	trim_req;
180#ifdef CAM_IO_STATS
181	struct sysctl_ctx_list	sysctl_stats_ctx;
182	struct sysctl_oid	*sysctl_stats_tree;
183	u_int	timeouts;
184	u_int	errors;
185	u_int	invalidations;
186#endif
187};
188
189struct ada_quirk_entry {
190	struct scsi_inquiry_pattern inq_pat;
191	ada_quirks quirks;
192};
193
194static struct ada_quirk_entry ada_quirk_table[] =
195{
196	{
197		/* Hitachi Advanced Format (4k) drives */
198		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Hitachi H??????????E3*", "*" },
199		/*quirks*/ADA_Q_4K
200	},
201	{
202		/* Samsung Advanced Format (4k) drives */
203		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD155UI*", "*" },
204		/*quirks*/ADA_Q_4K
205	},
206	{
207		/* Samsung Advanced Format (4k) drives */
208		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD204UI*", "*" },
209		/*quirks*/ADA_Q_4K
210	},
211	{
212		/* Seagate Barracuda Green Advanced Format (4k) drives */
213		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DL*", "*" },
214		/*quirks*/ADA_Q_4K
215	},
216	{
217		/* Seagate Barracuda Advanced Format (4k) drives */
218		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???DM*", "*" },
219		/*quirks*/ADA_Q_4K
220	},
221	{
222		/* Seagate Barracuda Advanced Format (4k) drives */
223		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DM*", "*" },
224		/*quirks*/ADA_Q_4K
225	},
226	{
227		/* Seagate Momentus Advanced Format (4k) drives */
228		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500423AS*", "*" },
229		/*quirks*/ADA_Q_4K
230	},
231	{
232		/* Seagate Momentus Advanced Format (4k) drives */
233		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500424AS*", "*" },
234		/*quirks*/ADA_Q_4K
235	},
236	{
237		/* Seagate Momentus Advanced Format (4k) drives */
238		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640423AS*", "*" },
239		/*quirks*/ADA_Q_4K
240	},
241	{
242		/* Seagate Momentus Advanced Format (4k) drives */
243		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640424AS*", "*" },
244		/*quirks*/ADA_Q_4K
245	},
246	{
247		/* Seagate Momentus Advanced Format (4k) drives */
248		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750420AS*", "*" },
249		/*quirks*/ADA_Q_4K
250	},
251	{
252		/* Seagate Momentus Advanced Format (4k) drives */
253		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750422AS*", "*" },
254		/*quirks*/ADA_Q_4K
255	},
256	{
257		/* Seagate Momentus Advanced Format (4k) drives */
258		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750423AS*", "*" },
259		/*quirks*/ADA_Q_4K
260	},
261	{
262		/* Seagate Momentus Thin Advanced Format (4k) drives */
263		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???LT*", "*" },
264		/*quirks*/ADA_Q_4K
265	},
266	{
267		/* WDC Caviar Red Advanced Format (4k) drives */
268		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????CX*", "*" },
269		/*quirks*/ADA_Q_4K
270	},
271	{
272		/* WDC Caviar Green Advanced Format (4k) drives */
273		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RS*", "*" },
274		/*quirks*/ADA_Q_4K
275	},
276	{
277		/* WDC Caviar Green/Red Advanced Format (4k) drives */
278		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RX*", "*" },
279		/*quirks*/ADA_Q_4K
280	},
281	{
282		/* WDC Caviar Red Advanced Format (4k) drives */
283		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????CX*", "*" },
284		/*quirks*/ADA_Q_4K
285	},
286	{
287		/* WDC Caviar Black Advanced Format (4k) drives */
288		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????EX*", "*" },
289		/*quirks*/ADA_Q_4K
290	},
291	{
292		/* WDC Caviar Green Advanced Format (4k) drives */
293		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RS*", "*" },
294		/*quirks*/ADA_Q_4K
295	},
296	{
297		/* WDC Caviar Green Advanced Format (4k) drives */
298		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RX*", "*" },
299		/*quirks*/ADA_Q_4K
300	},
301	{
302		/* WDC Scorpio Black Advanced Format (4k) drives */
303		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PKT*", "*" },
304		/*quirks*/ADA_Q_4K
305	},
306	{
307		/* WDC Scorpio Black Advanced Format (4k) drives */
308		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PKT*", "*" },
309		/*quirks*/ADA_Q_4K
310	},
311	{
312		/* WDC Scorpio Blue Advanced Format (4k) drives */
313		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PVT*", "*" },
314		/*quirks*/ADA_Q_4K
315	},
316	{
317		/* WDC Scorpio Blue Advanced Format (4k) drives */
318		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PVT*", "*" },
319		/*quirks*/ADA_Q_4K
320	},
321	/* SSDs */
322	{
323		/*
324		 * Corsair Force 2 SSDs
325		 * 4k optimised & trim only works in 4k requests + 4k aligned
326		 */
327		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair CSSD-F*", "*" },
328		/*quirks*/ADA_Q_4K
329	},
330	{
331		/*
332		 * Corsair Force 3 SSDs
333		 * 4k optimised & trim only works in 4k requests + 4k aligned
334		 */
335		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force 3*", "*" },
336		/*quirks*/ADA_Q_4K
337	},
338	{
339		/*
340		 * Corsair Neutron GTX SSDs
341		 * 4k optimised & trim only works in 4k requests + 4k aligned
342		 */
343		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
344		/*quirks*/ADA_Q_4K
345	},
346	{
347		/*
348		 * Corsair Force GT & GS SSDs
349		 * 4k optimised & trim only works in 4k requests + 4k aligned
350		 */
351		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force G*", "*" },
352		/*quirks*/ADA_Q_4K
353	},
354	{
355		/*
356		 * Crucial M4 SSDs
357		 * 4k optimised & trim only works in 4k requests + 4k aligned
358		 */
359		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "M4-CT???M4SSD2*", "*" },
360		/*quirks*/ADA_Q_4K
361	},
362	{
363		/*
364		 * Crucial M500 SSDs MU07 firmware
365		 * NCQ Trim works
366		 */
367		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "MU07" },
368		/*quirks*/0
369	},
370	{
371		/*
372		 * Crucial M500 SSDs all other firmware
373		 * NCQ Trim doesn't work
374		 */
375		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "*" },
376		/*quirks*/ADA_Q_NCQ_TRIM_BROKEN
377	},
378	{
379		/*
380		 * Crucial M550 SSDs
381		 * NCQ Trim doesn't work, but only on MU01 firmware
382		 */
383		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M550*", "MU01" },
384		/*quirks*/ADA_Q_NCQ_TRIM_BROKEN
385	},
386	{
387		/*
388		 * Crucial MX100 SSDs
389		 * NCQ Trim doesn't work, but only on MU01 firmware
390		 */
391		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*MX100*", "MU01" },
392		/*quirks*/ADA_Q_NCQ_TRIM_BROKEN
393	},
394	{
395		/*
396		 * Crucial RealSSD C300 SSDs
397		 * 4k optimised
398		 */
399		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "C300-CTFDDAC???MAG*",
400		"*" }, /*quirks*/ADA_Q_4K
401	},
402	{
403		/*
404		 * FCCT M500 SSDs
405		 * NCQ Trim doesn't work
406		 */
407		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "FCCT*M500*", "*" },
408		/*quirks*/ADA_Q_NCQ_TRIM_BROKEN
409	},
410	{
411		/*
412		 * Intel 320 Series SSDs
413		 * 4k optimised & trim only works in 4k requests + 4k aligned
414		 */
415		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2CW*", "*" },
416		/*quirks*/ADA_Q_4K
417	},
418	{
419		/*
420		 * Intel 330 Series SSDs
421		 * 4k optimised & trim only works in 4k requests + 4k aligned
422		 */
423		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2CT*", "*" },
424		/*quirks*/ADA_Q_4K
425	},
426	{
427		/*
428		 * Intel 510 Series SSDs
429		 * 4k optimised & trim only works in 4k requests + 4k aligned
430		 */
431		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2MH*", "*" },
432		/*quirks*/ADA_Q_4K
433	},
434	{
435		/*
436		 * Intel 520 Series SSDs
437		 * 4k optimised & trim only works in 4k requests + 4k aligned
438		 */
439		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2BW*", "*" },
440		/*quirks*/ADA_Q_4K
441	},
442	{
443		/*
444		 * Intel X25-M Series SSDs
445		 * 4k optimised & trim only works in 4k requests + 4k aligned
446		 */
447		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2M*", "*" },
448		/*quirks*/ADA_Q_4K
449	},
450	{
451		/*
452		 * Kingston E100 Series SSDs
453		 * 4k optimised & trim only works in 4k requests + 4k aligned
454		 */
455		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SE100S3*", "*" },
456		/*quirks*/ADA_Q_4K
457	},
458	{
459		/*
460		 * Kingston HyperX 3k SSDs
461		 * 4k optimised & trim only works in 4k requests + 4k aligned
462		 */
463		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SH103S3*", "*" },
464		/*quirks*/ADA_Q_4K
465	},
466	{
467		/*
468		 * Marvell SSDs (entry taken from OpenSolaris)
469		 * 4k optimised & trim only works in 4k requests + 4k aligned
470		 */
471		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "MARVELL SD88SA02*", "*" },
472		/*quirks*/ADA_Q_4K
473	},
474	{
475		/*
476		 * Micron M500 SSDs firmware MU07
477		 * NCQ Trim works?
478		 */
479		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "MU07" },
480		/*quirks*/0
481	},
482	{
483		/*
484		 * Micron M500 SSDs all other firmware
485		 * NCQ Trim doesn't work
486		 */
487		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "*" },
488		/*quirks*/ADA_Q_NCQ_TRIM_BROKEN
489	},
490	{
		/*
		 * Micron M5[15]0 SSDs
		 * NCQ Trim doesn't work, but only on MU01 firmware
		 */
495		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M5[15]0*", "MU01" },
496		/*quirks*/ADA_Q_NCQ_TRIM_BROKEN
497	},
498	{
499		/*
500		 * OCZ Agility 2 SSDs
501		 * 4k optimised & trim only works in 4k requests + 4k aligned
502		 */
503		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
504		/*quirks*/ADA_Q_4K
505	},
506	{
507		/*
508		 * OCZ Agility 3 SSDs
509		 * 4k optimised & trim only works in 4k requests + 4k aligned
510		 */
511		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY3*", "*" },
512		/*quirks*/ADA_Q_4K
513	},
514	{
515		/*
516		 * OCZ Deneva R Series SSDs
517		 * 4k optimised & trim only works in 4k requests + 4k aligned
518		 */
519		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "DENRSTE251M45*", "*" },
520		/*quirks*/ADA_Q_4K
521	},
522	{
523		/*
524		 * OCZ Vertex 2 SSDs (inc pro series)
525		 * 4k optimised & trim only works in 4k requests + 4k aligned
526		 */
527		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ?VERTEX2*", "*" },
528		/*quirks*/ADA_Q_4K
529	},
530	{
531		/*
532		 * OCZ Vertex 3 SSDs
533		 * 4k optimised & trim only works in 4k requests + 4k aligned
534		 */
535		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX3*", "*" },
536		/*quirks*/ADA_Q_4K
537	},
538	{
539		/*
540		 * OCZ Vertex 4 SSDs
541		 * 4k optimised & trim only works in 4k requests + 4k aligned
542		 */
543		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX4*", "*" },
544		/*quirks*/ADA_Q_4K
545	},
546	{
547		/*
548		 * Samsung 830 Series SSDs
549		 * 4k optimised, NCQ TRIM Broken (normal TRIM is fine)
550		 */
551		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD 830 Series*", "*" },
552		/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
553	},
554	{
555		/*
556		 * Samsung 840 SSDs
557		 * 4k optimised, NCQ TRIM Broken (normal TRIM is fine)
558		 */
559		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 840*", "*" },
560		/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
561	},
562	{
563		/*
564		 * Samsung 850 SSDs
565		 * 4k optimised, NCQ TRIM broken (normal TRIM fine)
566		 */
567		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 850*", "*" },
568		/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
569	},
570	{
571		/*
572		 * Samsung SM863 Series SSDs (MZ7KM*)
573		 * 4k optimised, NCQ believed to be working
574		 */
575		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7KM*", "*" },
576		/*quirks*/ADA_Q_4K
577	},
578	{
		/*
		 * Samsung 843T Series SSDs (MZ7WD*)
		 * Samsung PM851 Series SSDs (MZ7TE*)
		 * Samsung PM853T Series SSDs (MZ7GE*)
		 * 4k optimised, NCQ believed to be broken since these appear
		 * to be built with the same controllers as the 840/850.
		 */
586		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7*", "*" },
587		/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
588	},
589	{
590		/*
591		 * Samsung PM851 Series SSDs Dell OEM
592		 * device model          "SAMSUNG SSD PM851 mSATA 256GB"
593		 * 4k optimised, NCQ broken
594		 */
595		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD PM851*", "*" },
596		/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
597	},
598	{
599		/*
600		 * SuperTalent TeraDrive CT SSDs
601		 * 4k optimised & trim only works in 4k requests + 4k aligned
602		 */
603		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "FTM??CT25H*", "*" },
604		/*quirks*/ADA_Q_4K
605	},
606	{
607		/*
608		 * XceedIOPS SATA SSDs
609		 * 4k optimised
610		 */
611		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SG9XCS2D*", "*" },
612		/*quirks*/ADA_Q_4K
613	},
614	{
615		/* Default */
616		{
617		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
618		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
619		},
620		/*quirks*/0
621	},
622};
623
624static	disk_strategy_t	adastrategy;
625static	dumper_t	adadump;
626static	periph_init_t	adainit;
627static	void		adaasync(void *callback_arg, u_int32_t code,
628				struct cam_path *path, void *arg);
629static	void		adasysctlinit(void *context, int pending);
630static	periph_ctor_t	adaregister;
631static	periph_dtor_t	adacleanup;
632static	periph_start_t	adastart;
633static	periph_oninv_t	adaoninvalidate;
634static	void		adadone(struct cam_periph *periph,
635			       union ccb *done_ccb);
636static  int		adaerror(union ccb *ccb, u_int32_t cam_flags,
637				u_int32_t sense_flags);
638static void		adagetparams(struct cam_periph *periph,
639				struct ccb_getdev *cgd);
640static timeout_t	adasendorderedtag;
641static void		adashutdown(void *arg, int howto);
642static void		adasuspend(void *arg);
643static void		adaresume(void *arg);
644
645#ifndef	ADA_DEFAULT_LEGACY_ALIASES
646#define	ADA_DEFAULT_LEGACY_ALIASES	1
647#endif
648
649#ifndef ADA_DEFAULT_TIMEOUT
650#define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
651#endif
652
653#ifndef	ADA_DEFAULT_RETRY
654#define	ADA_DEFAULT_RETRY	4
655#endif
656
657#ifndef	ADA_DEFAULT_SEND_ORDERED
658#define	ADA_DEFAULT_SEND_ORDERED	1
659#endif
660
661#ifndef	ADA_DEFAULT_SPINDOWN_SHUTDOWN
662#define	ADA_DEFAULT_SPINDOWN_SHUTDOWN	1
663#endif
664
665#ifndef	ADA_DEFAULT_SPINDOWN_SUSPEND
666#define	ADA_DEFAULT_SPINDOWN_SUSPEND	1
667#endif
668
669#ifndef	ADA_DEFAULT_READ_AHEAD
670#define	ADA_DEFAULT_READ_AHEAD	1
671#endif
672
673#ifndef	ADA_DEFAULT_WRITE_CACHE
674#define	ADA_DEFAULT_WRITE_CACHE	1
675#endif
676
677#define	ADA_RA	(softc->read_ahead >= 0 ? \
678		 softc->read_ahead : ada_read_ahead)
679#define	ADA_WC	(softc->write_cache >= 0 ? \
680		 softc->write_cache : ada_write_cache)
681
/*
 * Most platforms map firmware geometry to the actual device geometry, but
 * some don't.  If not overridden, default to a no-op.
 */
686#ifndef ata_disk_firmware_geom_adjust
687#define	ata_disk_firmware_geom_adjust(disk)
688#endif
689
690static int ada_retry_count = ADA_DEFAULT_RETRY;
691static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
692static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
693static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
694static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND;
695static int ada_read_ahead = ADA_DEFAULT_READ_AHEAD;
696static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;
697
698static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
699            "CAM Direct Access Disk driver");
700SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RWTUN,
701           &ada_retry_count, 0, "Normal I/O retry count");
702SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
703           &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
704SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
705           &ada_send_ordered, 0, "Send Ordered Tags");
706SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RWTUN,
707           &ada_spindown_shutdown, 0, "Spin down upon shutdown");
708SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RWTUN,
709           &ada_spindown_suspend, 0, "Spin down upon suspend");
710SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RWTUN,
711           &ada_read_ahead, 0, "Enable disk read-ahead");
712SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RWTUN,
713           &ada_write_cache, 0, "Enable disk write cache");
714
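/*
 * Example (hypothetical values): the knobs above are CTLFLAG_RWTUN, so they
 * can be set as loader tunables or changed at runtime, e.g.
 *
 *	kern.cam.ada.default_timeout="45"	(in loader.conf)
 *	sysctl kern.cam.ada.write_cache=0	(at runtime)
 *
 * Per-unit overrides such as kern.cam.ada.0.write_cache are fetched as
 * tunables in adaregister() below.
 */
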
/*
 * ADA_ORDEREDTAG_INTERVAL determines how often, relative to the default
 * timeout, we check whether an ordered tagged transaction is appropriate
 * to prevent simple tag starvation.  Since we'd like to ensure that there
 * is at least 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least four
 * times in every timeout period.  This takes care of the worst case, where
 * a starved transaction starts during an interval that passes the "don't
 * send an ordered tag" test, so it takes us two intervals to determine
 * that a tag must be sent.
 */
727#ifndef ADA_ORDEREDTAG_INTERVAL
728#define ADA_ORDEREDTAG_INTERVAL 4
729#endif
730
731static struct periph_driver adadriver =
732{
733	adainit, "ada",
734	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
735};
736
737static int adadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
738
739PERIPHDRIVER_DECLARE(ada, adadriver);
740
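/*
 * GEOM disk open method: take a reference on the peripheral and mark the
 * device open.
 */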
741static int
742adaopen(struct disk *dp)
743{
744	struct cam_periph *periph;
745	struct ada_softc *softc;
746	int error;
747
748	periph = (struct cam_periph *)dp->d_drv1;
749	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
750		return(ENXIO);
751	}
752
753	cam_periph_lock(periph);
754	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
755		cam_periph_unlock(periph);
756		cam_periph_release(periph);
757		return (error);
758	}
759
760	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
761	    ("adaopen\n"));
762
763	softc = (struct ada_softc *)periph->softc;
764	softc->flags |= ADA_FLAG_OPEN;
765
766	cam_periph_unhold(periph);
767	cam_periph_unlock(periph);
768	return (0);
769}
770
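/*
 * GEOM disk close method: flush the drive's write cache if it is dirty and
 * capable of flushing, then clear the open flag, wait for outstanding
 * commands to drain and drop our reference on the peripheral.
 */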
771static int
772adaclose(struct disk *dp)
773{
774	struct	cam_periph *periph;
775	struct	ada_softc *softc;
776	union ccb *ccb;
777	int error;
778
779	periph = (struct cam_periph *)dp->d_drv1;
780	softc = (struct ada_softc *)periph->softc;
781	cam_periph_lock(periph);
782
783	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
784	    ("adaclose\n"));
785
786	/* We only sync the cache if the drive is capable of it. */
787	if ((softc->flags & ADA_FLAG_DIRTY) != 0 &&
788	    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 &&
789	    (periph->flags & CAM_PERIPH_INVALID) == 0 &&
790	    cam_periph_hold(periph, PRIBIO) == 0) {
791
792		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
793		cam_fill_ataio(&ccb->ataio,
794				    1,
795				    adadone,
796				    CAM_DIR_NONE,
797				    0,
798				    NULL,
799				    0,
800				    ada_default_timeout*1000);
801
802		if (softc->flags & ADA_FLAG_CAN_48BIT)
803			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
804		else
805			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
806		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
807		    /*sense_flags*/0, softc->disk->d_devstat);
808
809		if (error != 0)
810			xpt_print(periph->path, "Synchronize cache failed\n");
811		else
812			softc->flags &= ~ADA_FLAG_DIRTY;
813		xpt_release_ccb(ccb);
814		cam_periph_unhold(periph);
815	}
816
817	softc->flags &= ~ADA_FLAG_OPEN;
818
819	while (softc->refcount != 0)
820		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "adaclose", 1);
821	cam_periph_unlock(periph);
822	cam_periph_release(periph);
823	return (0);
824}
825
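/*
 * Ask the I/O scheduler to start the next transaction, but only once the
 * periph has finished its initial read-ahead/write-cache configuration.
 */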
826static void
827adaschedule(struct cam_periph *periph)
828{
829	struct ada_softc *softc = (struct ada_softc *)periph->softc;
830
831	if (softc->state != ADA_STATE_NORMAL)
832		return;
833
834	cam_iosched_schedule(softc->cam_iosched, periph);
835}
836
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a bio and will include
 * only one physical transfer.
 */
842static void
843adastrategy(struct bio *bp)
844{
845	struct cam_periph *periph;
846	struct ada_softc *softc;
847
848	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
849	softc = (struct ada_softc *)periph->softc;
850
851	cam_periph_lock(periph);
852
853	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastrategy(%p)\n", bp));
854
855	/*
856	 * If the device has been made invalid, error out
857	 */
858	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
859		cam_periph_unlock(periph);
860		biofinish(bp, NULL, ENXIO);
861		return;
862	}
863
864	/*
865	 * Place it in the queue of disk activities for this disk
866	 */
867	cam_iosched_queue_work(softc->cam_iosched, bp);
868
869	/*
870	 * Schedule ourselves for performing the work.
871	 */
872	adaschedule(periph);
873	cam_periph_unlock(periph);
874
875	return;
876}
877
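/*
 * Kernel dump method.  Write the supplied buffer to the dump device using
 * polled WRITE_DMA(48) commands; a zero-length call flushes the drive's
 * cache (if supported) to finish the dump.
 */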
878static int
879adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
880{
881	struct	    cam_periph *periph;
882	struct	    ada_softc *softc;
883	u_int	    secsize;
884	union	    ccb ccb;
885	struct	    disk *dp;
886	uint64_t    lba;
887	uint16_t    count;
888	int	    error = 0;
889
890	dp = arg;
891	periph = dp->d_drv1;
892	softc = (struct ada_softc *)periph->softc;
893	cam_periph_lock(periph);
894	secsize = softc->params.secsize;
895	lba = offset / secsize;
896	count = length / secsize;
897
898	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
899		cam_periph_unlock(periph);
900		return (ENXIO);
901	}
902
903	if (length > 0) {
904		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
905		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
906		cam_fill_ataio(&ccb.ataio,
907		    0,
908		    adadone,
909		    CAM_DIR_OUT,
910		    0,
911		    (u_int8_t *) virtual,
912		    length,
913		    ada_default_timeout*1000);
914		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
915		    (lba + count >= ATA_MAX_28BIT_LBA ||
916		    count >= 256)) {
917			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
918			    0, lba, count);
919		} else {
920			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
921			    0, lba, count);
922		}
923		xpt_polled_action(&ccb);
924
925		error = cam_periph_error(&ccb,
926		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
927		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
928			cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0,
929			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
930		if (error != 0)
931			printf("Aborting dump due to I/O error.\n");
932
933		cam_periph_unlock(periph);
934		return (error);
935	}
936
937	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
938		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
939
		/*
		 * Tell the drive to flush its internal cache.  If we
		 * can't flush in 5 seconds we have big problems.  There is
		 * no need to wait the full default timeout to detect that.
		 */
945		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
946		cam_fill_ataio(&ccb.ataio,
947				    0,
948				    adadone,
949				    CAM_DIR_NONE,
950				    0,
951				    NULL,
952				    0,
953				    5*1000);
954
955		if (softc->flags & ADA_FLAG_CAN_48BIT)
956			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
957		else
958			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
959		xpt_polled_action(&ccb);
960
961		error = cam_periph_error(&ccb,
962		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
963		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
964			cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0,
965			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
966		if (error != 0)
967			xpt_print(periph->path, "Synchronize cache failed\n");
968	}
969	cam_periph_unlock(periph);
970	return (error);
971}
972
973static void
974adainit(void)
975{
976	cam_status status;
977
	/*
	 * Install a global async callback.  This callback will
	 * receive async events such as "new device found".
	 */
982	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
983
984	if (status != CAM_REQ_CMP) {
985		printf("ada: Failed to attach master async callback "
986		       "due to status 0x%x!\n", status);
987	} else if (ada_send_ordered) {
988
989		/* Register our event handlers */
990		if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend,
991					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
992		    printf("adainit: power event registration failed!\n");
993		if ((EVENTHANDLER_REGISTER(power_resume, adaresume,
994					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
995		    printf("adainit: power event registration failed!\n");
996		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
997					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
998		    printf("adainit: shutdown event registration failed!\n");
999	}
1000}
1001
1002/*
1003 * Callback from GEOM, called when it has finished cleaning up its
1004 * resources.
1005 */
1006static void
1007adadiskgonecb(struct disk *dp)
1008{
1009	struct cam_periph *periph;
1010
1011	periph = (struct cam_periph *)dp->d_drv1;
1012
1013	cam_periph_release(periph);
1014}
1015
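/*
 * The peripheral is going away: deregister our async callbacks, fail all
 * queued I/O with ENXIO and tell GEOM that the disk is gone.
 */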
1016static void
1017adaoninvalidate(struct cam_periph *periph)
1018{
1019	struct ada_softc *softc;
1020
1021	softc = (struct ada_softc *)periph->softc;
1022
1023	/*
1024	 * De-register any async callbacks.
1025	 */
1026	xpt_register_async(0, adaasync, periph, periph->path);
1027#ifdef CAM_IO_STATS
1028	softc->invalidations++;
1029#endif
1030
1031	/*
1032	 * Return all queued I/O with ENXIO.
1033	 * XXX Handle any transactions queued to the card
1034	 *     with XPT_ABORT_CCB.
1035	 */
1036	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);
1037
1038	disk_gone(softc->disk);
1039}
1040
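/*
 * Final destructor for the peripheral: tear down the I/O scheduler, sysctl
 * trees, disk and ordered-tag callout, then free the softc.
 */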
1041static void
1042adacleanup(struct cam_periph *periph)
1043{
1044	struct ada_softc *softc;
1045
1046	softc = (struct ada_softc *)periph->softc;
1047
1048	cam_periph_unlock(periph);
1049
1050	cam_iosched_fini(softc->cam_iosched);
1051
1052	/*
1053	 * If we can't free the sysctl tree, oh well...
1054	 */
1055	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0) {
1056#ifdef CAM_IO_STATS
1057		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
1058			xpt_print(periph->path,
1059			    "can't remove sysctl stats context\n");
1060#endif
1061		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
1062			xpt_print(periph->path,
1063			    "can't remove sysctl context\n");
1064	}
1065
1066	disk_destroy(softc->disk);
1067	callout_drain(&softc->sendordered_c);
1068	free(softc, M_DEVBUF);
1069	cam_periph_lock(periph);
1070}
1071
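/*
 * Pick the best available BIO_DELETE method: NCQ DSM TRIM, then DSM TRIM,
 * then CFA erase (28-bit-only CFA devices), otherwise none.
 */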
1072static void
1073adasetdeletemethod(struct ada_softc *softc)
1074{
1075
1076	if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM)
1077		softc->delete_method = ADA_DELETE_NCQ_DSM_TRIM;
1078	else if (softc->flags & ADA_FLAG_CAN_TRIM)
1079		softc->delete_method = ADA_DELETE_DSM_TRIM;
1080	else if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT))
1081		softc->delete_method = ADA_DELETE_CFA_ERASE;
1082	else
1083		softc->delete_method = ADA_DELETE_NONE;
1084}
1085
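/*
 * Async event handler: attach to newly found ATA devices, refresh capability
 * flags when the identify data changes, propagate physical path changes, and
 * re-apply read-ahead/write-cache settings after a bus or device reset.
 */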
1086static void
1087adaasync(void *callback_arg, u_int32_t code,
1088	struct cam_path *path, void *arg)
1089{
1090	struct ccb_getdev cgd;
1091	struct cam_periph *periph;
1092	struct ada_softc *softc;
1093
1094	periph = (struct cam_periph *)callback_arg;
1095	switch (code) {
1096	case AC_FOUND_DEVICE:
1097	{
1098		struct ccb_getdev *cgd;
1099		cam_status status;
1100
1101		cgd = (struct ccb_getdev *)arg;
1102		if (cgd == NULL)
1103			break;
1104
1105		if (cgd->protocol != PROTO_ATA)
1106			break;
1107
1108		/*
1109		 * Allocate a peripheral instance for
1110		 * this device and start the probe
1111		 * process.
1112		 */
1113		status = cam_periph_alloc(adaregister, adaoninvalidate,
1114					  adacleanup, adastart,
1115					  "ada", CAM_PERIPH_BIO,
1116					  path, adaasync,
1117					  AC_FOUND_DEVICE, cgd);
1118
1119		if (status != CAM_REQ_CMP
1120		 && status != CAM_REQ_INPROG)
1121			printf("adaasync: Unable to attach to new device "
1122				"due to status 0x%x\n", status);
1123		break;
1124	}
1125	case AC_GETDEV_CHANGED:
1126	{
1127		softc = (struct ada_softc *)periph->softc;
1128		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1129		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1130		xpt_action((union ccb *)&cgd);
1131
1132		if ((cgd.ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
1133		    (cgd.inq_flags & SID_DMA))
1134			softc->flags |= ADA_FLAG_CAN_DMA;
1135		else
1136			softc->flags &= ~ADA_FLAG_CAN_DMA;
1137		if (cgd.ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
1138			softc->flags |= ADA_FLAG_CAN_48BIT;
1139			if (cgd.inq_flags & SID_DMA48)
1140				softc->flags |= ADA_FLAG_CAN_DMA48;
1141			else
1142				softc->flags &= ~ADA_FLAG_CAN_DMA48;
1143		} else
1144			softc->flags &= ~(ADA_FLAG_CAN_48BIT |
1145			    ADA_FLAG_CAN_DMA48);
1146		if ((cgd.ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
1147		    (cgd.inq_flags & SID_DMA) && (cgd.inq_flags & SID_CmdQue))
1148			softc->flags |= ADA_FLAG_CAN_NCQ;
1149		else
1150			softc->flags &= ~ADA_FLAG_CAN_NCQ;
1151
1152		if ((cgd.ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
1153		    (cgd.inq_flags & SID_DMA)) {
1154			softc->flags |= ADA_FLAG_CAN_TRIM;
			/*
			 * If we can do RCVSND_FPDMA_QUEUED commands, we may be
			 * able to do NCQ trims, if we support trims at all.  We
			 * also need support from the SIM to do things properly.
			 * Perhaps we should also check that log 13 dword 0 bit 0
			 * and dword 1 bit 0 are set...
			 */
1161			if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
1162			    (softc->flags & ADA_FLAG_PIM_CAN_NCQ_TRIM) != 0 &&
1163			    (cgd.ident_data.satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 &&
1164			    (softc->flags & ADA_FLAG_CAN_TRIM) != 0)
1165				softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
1166			else
1167				softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM;
1168		} else
1169			softc->flags &= ~(ADA_FLAG_CAN_TRIM | ADA_FLAG_CAN_NCQ_TRIM);
1170		adasetdeletemethod(softc);
1171
1172		cam_periph_async(periph, code, path, arg);
1173		break;
1174	}
1175	case AC_ADVINFO_CHANGED:
1176	{
1177		uintptr_t buftype;
1178
1179		buftype = (uintptr_t)arg;
1180		if (buftype == CDAI_TYPE_PHYS_PATH) {
1181			struct ada_softc *softc;
1182
1183			softc = periph->softc;
1184			disk_attr_changed(softc->disk, "GEOM::physpath",
1185					  M_NOWAIT);
1186		}
1187		break;
1188	}
1189	case AC_SENT_BDR:
1190	case AC_BUS_RESET:
1191	{
1192		softc = (struct ada_softc *)periph->softc;
1193		cam_periph_async(periph, code, path, arg);
1194		if (softc->state != ADA_STATE_NORMAL)
1195			break;
1196		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1197		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1198		xpt_action((union ccb *)&cgd);
1199		if (ADA_RA >= 0 &&
1200		    cgd.ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD)
1201			softc->state = ADA_STATE_RAHEAD;
1202		else if (ADA_WC >= 0 &&
1203		    cgd.ident_data.support.command1 & ATA_SUPPORT_WRITECACHE)
1204			softc->state = ADA_STATE_WCACHE;
1205		else
1206		    break;
1207		if (cam_periph_acquire(periph) != CAM_REQ_CMP)
1208			softc->state = ADA_STATE_NORMAL;
1209		else
1210			xpt_schedule(periph, CAM_PRIORITY_DEV);
1211	}
1212	default:
1213		cam_periph_async(periph, code, path, arg);
1214		break;
1215	}
1216}
1217
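/*
 * Task to create the per-unit sysctl tree and its variables; enqueued from
 * adaregister() and run from the thread taskqueue.
 */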
1218static void
1219adasysctlinit(void *context, int pending)
1220{
1221	struct cam_periph *periph;
1222	struct ada_softc *softc;
1223	char tmpstr[80], tmpstr2[80];
1224
1225	periph = (struct cam_periph *)context;
1226
1227	/* periph was held for us when this task was enqueued */
1228	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
1229		cam_periph_release(periph);
1230		return;
1231	}
1232
1233	softc = (struct ada_softc *)periph->softc;
1234	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
1235	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
1236
1237	sysctl_ctx_init(&softc->sysctl_ctx);
1238	softc->flags |= ADA_FLAG_SCTX_INIT;
1239	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1240		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
1241		CTLFLAG_RD, 0, tmpstr);
1242	if (softc->sysctl_tree == NULL) {
1243		printf("adasysctlinit: unable to allocate sysctl tree\n");
1244		cam_periph_release(periph);
1245		return;
1246	}
1247
1248	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1249		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RW,
1250		softc, 0, adadeletemethodsysctl, "A",
1251		"BIO_DELETE execution method");
1252	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1253		OID_AUTO, "read_ahead", CTLFLAG_RW | CTLFLAG_MPSAFE,
1254		&softc->read_ahead, 0, "Enable disk read ahead.");
1255	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1256		OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE,
1257		&softc->write_cache, 0, "Enable disk write cache.");
1258	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1259		OID_AUTO, "unmapped_io", CTLFLAG_RD | CTLFLAG_MPSAFE,
1260		&softc->unmappedio, 0, "Unmapped I/O leaf");
1261	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1262		OID_AUTO, "rotating", CTLFLAG_RD | CTLFLAG_MPSAFE,
1263		&softc->rotating, 0, "Rotating media");
1264#ifdef ADA_TEST_FAILURE
1265	/*
1266	 * Add a 'door bell' sysctl which allows one to set it from userland
1267	 * and cause something bad to happen.  For the moment, we only allow
1268	 * whacking the next read or write.
1269	 */
1270	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1271		OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1272		&softc->force_read_error, 0,
1273		"Force a read error for the next N reads.");
1274	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1275		OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1276		&softc->force_write_error, 0,
1277		"Force a write error for the next N writes.");
1278	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1279		OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1280		&softc->periodic_read_error, 0,
1281		"Force a read error every N reads (don't set too low).");
1282#endif
1283
1284#ifdef CAM_IO_STATS
1285	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
1286		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
1287		CTLFLAG_RD, 0, "Statistics");
1288	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
1289		SYSCTL_CHILDREN(softc->sysctl_stats_tree),
1290		OID_AUTO, "timeouts", CTLFLAG_RD | CTLFLAG_MPSAFE,
1291		&softc->timeouts, 0,
1292		"Device timeouts reported by the SIM");
1293	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
1294		SYSCTL_CHILDREN(softc->sysctl_stats_tree),
1295		OID_AUTO, "errors", CTLFLAG_RD | CTLFLAG_MPSAFE,
1296		&softc->errors, 0,
1297		"Transport errors reported by the SIM.");
1298	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
1299		SYSCTL_CHILDREN(softc->sysctl_stats_tree),
1300		OID_AUTO, "pack_invalidations", CTLFLAG_RD | CTLFLAG_MPSAFE,
1301		&softc->invalidations, 0,
1302		"Device pack invalidations.");
1303#endif
1304
1305	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
1306	    softc->sysctl_tree);
1307
1308	cam_periph_release(periph);
1309}
1310
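/*
 * GEOM attribute method: forward GEOM::* attribute requests to the XPT layer.
 */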
1311static int
1312adagetattr(struct bio *bp)
1313{
1314	int ret;
1315	struct cam_periph *periph;
1316
1317	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1318	cam_periph_lock(periph);
1319	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1320	    periph->path);
1321	cam_periph_unlock(periph);
1322	if (ret == 0)
1323		bp->bio_completed = bp->bio_length;
1324	return ret;
1325}
1326
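/*
 * Sysctl handler for the per-unit delete_method node: report the current
 * BIO_DELETE method and allow it to be changed to any method the drive
 * actually supports.
 */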
1327static int
1328adadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
1329{
1330	char buf[16];
1331	const char *p;
1332	struct ada_softc *softc;
1333	int i, error, value, methods;
1334
1335	softc = (struct ada_softc *)arg1;
1336
1337	value = softc->delete_method;
1338	if (value < 0 || value > ADA_DELETE_MAX)
1339		p = "UNKNOWN";
1340	else
1341		p = ada_delete_method_names[value];
1342	strncpy(buf, p, sizeof(buf));
1343	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
1344	if (error != 0 || req->newptr == NULL)
1345		return (error);
1346	methods = 1 << ADA_DELETE_DISABLE;
1347	if ((softc->flags & ADA_FLAG_CAN_CFA) &&
1348	    !(softc->flags & ADA_FLAG_CAN_48BIT))
1349		methods |= 1 << ADA_DELETE_CFA_ERASE;
1350	if (softc->flags & ADA_FLAG_CAN_TRIM)
1351		methods |= 1 << ADA_DELETE_DSM_TRIM;
1352	if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM)
1353		methods |= 1 << ADA_DELETE_NCQ_DSM_TRIM;
1354	for (i = 0; i <= ADA_DELETE_MAX; i++) {
1355		if (!(methods & (1 << i)) ||
1356		    strcmp(buf, ada_delete_method_names[i]) != 0)
1357			continue;
1358		softc->delete_method = i;
1359		return (0);
1360	}
1361	return (EINVAL);
1362}
1363
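/*
 * Peripheral constructor: parse the identify data into capability flags,
 * apply quirks and per-unit tunables, create and announce the GEOM disk,
 * register async callbacks and kick off read-ahead/write-cache setup.
 */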
1364static cam_status
1365adaregister(struct cam_periph *periph, void *arg)
1366{
1367	struct ada_softc *softc;
1368	struct ccb_pathinq cpi;
1369	struct ccb_getdev *cgd;
1370	char   announce_buf[80];
1371	struct disk_params *dp;
1372	caddr_t match;
1373	u_int maxio;
1374	int quirks;
1375
1376	cgd = (struct ccb_getdev *)arg;
1377	if (cgd == NULL) {
1378		printf("adaregister: no getdev CCB, can't register device\n");
1379		return(CAM_REQ_CMP_ERR);
1380	}
1381
1382	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
1383	    M_NOWAIT|M_ZERO);
1384
1385	if (softc == NULL) {
1386		printf("adaregister: Unable to probe new device. "
1387		    "Unable to allocate softc\n");
1388		return(CAM_REQ_CMP_ERR);
1389	}
1390
	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
		printf("adaregister: Unable to probe new device. "
		       "Unable to allocate iosched memory\n");
		free(softc, M_DEVBUF);
		return(CAM_REQ_CMP_ERR);
	}
1396
1397	if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
1398	    (cgd->inq_flags & SID_DMA))
1399		softc->flags |= ADA_FLAG_CAN_DMA;
1400	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
1401		softc->flags |= ADA_FLAG_CAN_48BIT;
1402		if (cgd->inq_flags & SID_DMA48)
1403			softc->flags |= ADA_FLAG_CAN_DMA48;
1404	}
1405	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
1406		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
1407	if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
1408		softc->flags |= ADA_FLAG_CAN_POWERMGT;
1409	if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
1410	    (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
1411		softc->flags |= ADA_FLAG_CAN_NCQ;
1412	if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
1413	    (cgd->inq_flags & SID_DMA)) {
1414		softc->flags |= ADA_FLAG_CAN_TRIM;
1415		softc->trim_max_ranges = TRIM_MAX_RANGES;
1416		if (cgd->ident_data.max_dsm_blocks != 0) {
1417			softc->trim_max_ranges =
1418			    min(cgd->ident_data.max_dsm_blocks *
1419				ATA_DSM_BLK_RANGES, softc->trim_max_ranges);
1420		}
1421	}
1422	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
1423		softc->flags |= ADA_FLAG_CAN_CFA;
1424
1425	adasetdeletemethod(softc);
1426
1427	periph->softc = softc;
1428
1429	/*
1430	 * See if this device has any quirks.
1431	 */
1432	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
1433			       (caddr_t)ada_quirk_table,
1434			       nitems(ada_quirk_table),
1435			       sizeof(*ada_quirk_table), ata_identify_match);
1436	if (match != NULL)
1437		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
1438	else
1439		softc->quirks = ADA_Q_NONE;
1440
1441	bzero(&cpi, sizeof(cpi));
1442	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
1443	cpi.ccb_h.func_code = XPT_PATH_INQ;
1444	xpt_action((union ccb *)&cpi);
1445
1446	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
1447
1448	/*
1449	 * Register this media as a disk
1450	 */
1451	(void)cam_periph_hold(periph, PRIBIO);
1452	cam_periph_unlock(periph);
1453	snprintf(announce_buf, sizeof(announce_buf),
1454	    "kern.cam.ada.%d.quirks", periph->unit_number);
1455	quirks = softc->quirks;
1456	TUNABLE_INT_FETCH(announce_buf, &quirks);
1457	softc->quirks = quirks;
1458	softc->read_ahead = -1;
1459	snprintf(announce_buf, sizeof(announce_buf),
1460	    "kern.cam.ada.%d.read_ahead", periph->unit_number);
1461	TUNABLE_INT_FETCH(announce_buf, &softc->read_ahead);
1462	softc->write_cache = -1;
1463	snprintf(announce_buf, sizeof(announce_buf),
1464	    "kern.cam.ada.%d.write_cache", periph->unit_number);
1465	TUNABLE_INT_FETCH(announce_buf, &softc->write_cache);
1466	/* Disable queue sorting for non-rotational media by default. */
1467	if (cgd->ident_data.media_rotation_rate == ATA_RATE_NON_ROTATING) {
1468		softc->rotating = 0;
1469	} else {
1470		softc->rotating = 1;
1471	}
1472	cam_iosched_set_sort_queue(softc->cam_iosched,  softc->rotating ? -1 : 0);
1473	adagetparams(periph, cgd);
1474	softc->disk = disk_alloc();
1475	softc->disk->d_rotation_rate = cgd->ident_data.media_rotation_rate;
1476	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
1477			  periph->unit_number, softc->params.secsize,
1478			  DEVSTAT_ALL_SUPPORTED,
1479			  DEVSTAT_TYPE_DIRECT |
1480			  XPORT_DEVSTAT_TYPE(cpi.transport),
1481			  DEVSTAT_PRIORITY_DISK);
1482	softc->disk->d_open = adaopen;
1483	softc->disk->d_close = adaclose;
1484	softc->disk->d_strategy = adastrategy;
1485	softc->disk->d_getattr = adagetattr;
1486	softc->disk->d_dump = adadump;
1487	softc->disk->d_gone = adadiskgonecb;
1488	softc->disk->d_name = "ada";
1489	softc->disk->d_drv1 = periph;
1490	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
1491	if (maxio == 0)
1492		maxio = DFLTPHYS;	/* traditional default */
1493	else if (maxio > MAXPHYS)
1494		maxio = MAXPHYS;	/* for safety */
1495	if (softc->flags & ADA_FLAG_CAN_48BIT)
1496		maxio = min(maxio, 65536 * softc->params.secsize);
1497	else					/* 28bit ATA command limit */
1498		maxio = min(maxio, 256 * softc->params.secsize);
1499	softc->disk->d_maxsize = maxio;
1500	softc->disk->d_unit = periph->unit_number;
1501	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION;
1502	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
1503		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
1504	if (softc->flags & ADA_FLAG_CAN_TRIM) {
1505		softc->disk->d_flags |= DISKFLAG_CANDELETE;
1506		softc->disk->d_delmaxsize = softc->params.secsize *
1507					    ATA_DSM_RANGE_MAX *
1508					    softc->trim_max_ranges;
1509	} else if ((softc->flags & ADA_FLAG_CAN_CFA) &&
1510	    !(softc->flags & ADA_FLAG_CAN_48BIT)) {
1511		softc->disk->d_flags |= DISKFLAG_CANDELETE;
1512		softc->disk->d_delmaxsize = 256 * softc->params.secsize;
1513	} else
1514		softc->disk->d_delmaxsize = maxio;
1515	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
1516		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
1517		softc->unmappedio = 1;
1518	}
	/*
	 * If we can do RCVSND_FPDMA_QUEUED commands, we may be able to do
	 * NCQ trims, if we support trims at all.  We also need support from
	 * the SIM to do things properly.  Perhaps we should also check that
	 * log 13 dword 0 bit 0 and dword 1 bit 0 are set...
	 */
1525	if (cpi.hba_misc & PIM_ATA_EXT)
1526		softc->flags |= ADA_FLAG_PIM_CAN_NCQ_TRIM;
1527	if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
1528	    (softc->flags & ADA_FLAG_PIM_CAN_NCQ_TRIM) != 0 &&
1529	    (cgd->ident_data.satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 &&
1530	    (softc->flags & ADA_FLAG_CAN_TRIM) != 0)
1531		softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
1532	strlcpy(softc->disk->d_descr, cgd->ident_data.model,
1533	    MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
1534	strlcpy(softc->disk->d_ident, cgd->ident_data.serial,
1535	    MIN(sizeof(softc->disk->d_ident), sizeof(cgd->ident_data.serial)));
1536	softc->disk->d_hba_vendor = cpi.hba_vendor;
1537	softc->disk->d_hba_device = cpi.hba_device;
1538	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
1539	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
1540
1541	softc->disk->d_sectorsize = softc->params.secsize;
1542	softc->disk->d_mediasize = (off_t)softc->params.sectors *
1543	    softc->params.secsize;
1544	if (ata_physical_sector_size(&cgd->ident_data) !=
1545	    softc->params.secsize) {
1546		softc->disk->d_stripesize =
1547		    ata_physical_sector_size(&cgd->ident_data);
1548		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
1549		    ata_logical_sector_offset(&cgd->ident_data)) %
1550		    softc->disk->d_stripesize;
1551	} else if (softc->quirks & ADA_Q_4K) {
1552		softc->disk->d_stripesize = 4096;
1553		softc->disk->d_stripeoffset = 0;
1554	}
1555	softc->disk->d_fwsectors = softc->params.secs_per_track;
1556	softc->disk->d_fwheads = softc->params.heads;
1557	ata_disk_firmware_geom_adjust(softc->disk);
1558	adasetdeletemethod(softc);
1559
1560	/*
1561	 * Acquire a reference to the periph before we register with GEOM.
1562	 * We'll release this reference once GEOM calls us back (via
1563	 * adadiskgonecb()) telling us that our provider has been freed.
1564	 */
1565	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1566		xpt_print(periph->path, "%s: lost periph during "
1567			  "registration!\n", __func__);
1568		cam_periph_lock(periph);
1569		return (CAM_REQ_CMP_ERR);
1570	}
1571	disk_create(softc->disk, DISK_VERSION);
1572	cam_periph_lock(periph);
1573	cam_periph_unhold(periph);
1574
1575	dp = &softc->params;
1576	snprintf(announce_buf, sizeof(announce_buf),
1577	    "%juMB (%ju %u byte sectors)",
1578	    ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024),
1579	    (uintmax_t)dp->sectors, dp->secsize);
1580	xpt_announce_periph(periph, announce_buf);
1581	xpt_announce_quirks(periph, softc->quirks, ADA_Q_BIT_STRING);
1582
1583	/*
1584	 * Create our sysctl variables, now that we know
1585	 * we have successfully attached.
1586	 */
1587	if (cam_periph_acquire(periph) == CAM_REQ_CMP)
1588		taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
1589
1590	/*
1591	 * Add async callbacks for bus reset and
1592	 * bus device reset calls.  I don't bother
1593	 * checking if this fails as, in most cases,
1594	 * the system will function just fine without
1595	 * them and the only alternative would be to
1596	 * not attach the device on failure.
1597	 */
1598	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
1599	    AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED,
1600	    adaasync, periph, periph->path);
1601
1602	/*
1603	 * Schedule a periodic event to occasionally send an
1604	 * ordered tag to a device.
1605	 */
1606	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
1607	callout_reset(&softc->sendordered_c,
1608	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
1609	    adasendorderedtag, softc);
1610
1611	if (ADA_RA >= 0 &&
1612	    cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) {
1613		softc->state = ADA_STATE_RAHEAD;
1614	} else if (ADA_WC >= 0 &&
1615	    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
1616		softc->state = ADA_STATE_WCACHE;
1617	} else {
1618		softc->state = ADA_STATE_NORMAL;
1619		return(CAM_REQ_CMP);
1620	}
1621	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
1622		softc->state = ADA_STATE_NORMAL;
1623	else
1624		xpt_schedule(periph, CAM_PRIORITY_DEV);
1625	return(CAM_REQ_CMP);
1626}
1627
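/*
 * Fill a trim_request with DSM TRIM ranges built from the given bio and any
 * further delete bios the I/O scheduler can supply.  Each 8-byte range entry
 * holds a 48-bit starting LBA followed by a 16-bit sector count; contiguous
 * requests are merged where possible.  Returns the number of ranges used.
 */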
1628static int
1629ada_dsmtrim_req_create(struct ada_softc *softc, struct bio *bp, struct trim_request *req)
1630{
1631	uint64_t lastlba = (uint64_t)-1;
1632	int c, lastcount = 0, off, ranges = 0;
1633
1634	bzero(req, sizeof(*req));
1635	TAILQ_INIT(&req->bps);
1636	do {
1637		uint64_t lba = bp->bio_pblkno;
1638		int count = bp->bio_bcount / softc->params.secsize;
1639
1640		/* Try to extend the previous range. */
1641		if (lba == lastlba) {
1642			c = min(count, ATA_DSM_RANGE_MAX - lastcount);
1643			lastcount += c;
1644			off = (ranges - 1) * ATA_DSM_RANGE_SIZE;
1645			req->data[off + 6] = lastcount & 0xff;
1646			req->data[off + 7] =
1647				(lastcount >> 8) & 0xff;
1648			count -= c;
1649			lba += c;
1650		}
1651
1652		while (count > 0) {
1653			c = min(count, ATA_DSM_RANGE_MAX);
1654			off = ranges * ATA_DSM_RANGE_SIZE;
1655			req->data[off + 0] = lba & 0xff;
1656			req->data[off + 1] = (lba >> 8) & 0xff;
1657			req->data[off + 2] = (lba >> 16) & 0xff;
1658			req->data[off + 3] = (lba >> 24) & 0xff;
1659			req->data[off + 4] = (lba >> 32) & 0xff;
1660			req->data[off + 5] = (lba >> 40) & 0xff;
1661			req->data[off + 6] = c & 0xff;
1662			req->data[off + 7] = (c >> 8) & 0xff;
1663			lba += c;
1664			count -= c;
1665			lastcount = c;
1666			ranges++;
			/*
			 * It's the caller's responsibility to ensure the
			 * request will fit, so we don't need to check for
			 * overrun here.
			 */
1672		}
1673		lastlba = lba;
1674		TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue);
1675
1676		bp = cam_iosched_next_trim(softc->cam_iosched);
1677		if (bp == NULL)
1678			break;
1679		if (bp->bio_bcount / softc->params.secsize >
1680		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
1681			cam_iosched_put_back_trim(softc->cam_iosched, bp);
1682			break;
1683		}
1684	} while (1);
1685
1686	return (ranges);
1687}
1688
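/*
 * Build a DATA SET MANAGEMENT/TRIM command from the coalesced ranges.
 */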
1689static void
1690ada_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
1691{
1692	struct trim_request *req = &softc->trim_req;
1693	int ranges;
1694
1695	ranges = ada_dsmtrim_req_create(softc, bp, req);
1696	cam_fill_ataio(ataio,
1697	    ada_retry_count,
1698	    adadone,
1699	    CAM_DIR_OUT,
1700	    0,
1701	    req->data,
1702	    howmany(ranges, ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
1703	    ada_default_timeout * 1000);
1704	ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
1705	    ATA_DSM_TRIM, 0, howmany(ranges, ATA_DSM_BLK_RANGES));
1706}
1707
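/*
 * Build an NCQ SEND FPDMA QUEUED (DSM TRIM) command from the coalesced
 * ranges.
 */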
1708static void
1709ada_ncq_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
1710{
1711	struct trim_request *req = &softc->trim_req;
1712	int ranges;
1713
1714	ranges = ada_dsmtrim_req_create(softc, bp, req);
1715	cam_fill_ataio(ataio,
1716	    ada_retry_count,
1717	    adadone,
1718	    CAM_DIR_OUT,
1719	    0,
1720	    req->data,
1721	    howmany(ranges, ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
1722	    ada_default_timeout * 1000);
1723	ata_ncq_cmd(ataio,
1724	    ATA_SEND_FPDMA_QUEUED,
1725	    0,
1726	    howmany(ranges, ATA_DSM_BLK_RANGES));
1727	ataio->cmd.sector_count_exp = ATA_SFPDMA_DSM;
1728	ataio->ata_flags |= ATA_FLAG_AUX;
1729	ataio->aux = 1;
1730}
1731
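/*
 * Build a CFA ERASE SECTORS command covering a single delete bio; a count
 * of 0 requests 256 sectors in the 28-bit command encoding.
 */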
1732static void
1733ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
1734{
1735	struct trim_request *req = &softc->trim_req;
1736	uint64_t lba = bp->bio_pblkno;
1737	uint16_t count = bp->bio_bcount / softc->params.secsize;
1738
1739	bzero(req, sizeof(*req));
1740	TAILQ_INIT(&req->bps);
1741	TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue);
1742
1743	cam_fill_ataio(ataio,
1744	    ada_retry_count,
1745	    adadone,
1746	    CAM_DIR_NONE,
1747	    0,
1748	    NULL,
1749	    0,
1750	    ada_default_timeout*1000);
1751
1752	if (count >= 256)
1753		count = 0;
1754	ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
1755}
1756
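/*
 * Action routine, called when the periph has work scheduled: pull the next
 * bio from the I/O scheduler and translate it into the appropriate ATA
 * command in the supplied CCB.
 */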
1757static void
1758adastart(struct cam_periph *periph, union ccb *start_ccb)
1759{
1760	struct ada_softc *softc = (struct ada_softc *)periph->softc;
1761	struct ccb_ataio *ataio = &start_ccb->ataio;
1762
1763	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastart\n"));
1764
1765	switch (softc->state) {
1766	case ADA_STATE_NORMAL:
1767	{
1768		struct bio *bp;
1769		u_int8_t tag_code;
1770
1771		bp = cam_iosched_next_bio(softc->cam_iosched);
1772		if (bp == NULL) {
1773			xpt_release_ccb(start_ccb);
1774			break;
1775		}
1776
1777		if ((bp->bio_flags & BIO_ORDERED) != 0 ||
1778		    (bp->bio_cmd != BIO_DELETE && (softc->flags & ADA_FLAG_NEED_OTAG) != 0)) {
1779			softc->flags &= ~ADA_FLAG_NEED_OTAG;
1780			softc->flags |= ADA_FLAG_WAS_OTAG;
1781			tag_code = 0;
1782		} else {
1783			tag_code = 1;
1784		}
1785		switch (bp->bio_cmd) {
1786		case BIO_WRITE:
1787		case BIO_READ:
1788		{
1789			uint64_t lba = bp->bio_pblkno;
1790			uint16_t count = bp->bio_bcount / softc->params.secsize;
1791			void *data_ptr;
1792			int rw_op;
1793
1794			if (bp->bio_cmd == BIO_WRITE) {
1795				softc->flags |= ADA_FLAG_DIRTY;
1796				rw_op = CAM_DIR_OUT;
1797			} else {
1798				rw_op = CAM_DIR_IN;
1799			}
1800
1801			data_ptr = bp->bio_data;
1802			if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
1803				rw_op |= CAM_DATA_BIO;
1804				data_ptr = bp;
1805			}
1806
1807#ifdef ADA_TEST_FAILURE
1808			int fail = 0;
1809
1810			/*
1811			 * Support the failure ioctls.  If the command is a
			 * read and there are pending forced read errors, or a
			 * write and there are pending forced write errors, then
			 * fail this operation with EIO.  This is useful for testing
1815			 * purposes.  Also, support having every Nth read fail.
1816			 *
1817			 * This is a rather blunt tool.
1818			 */
1819			if (bp->bio_cmd == BIO_READ) {
1820				if (softc->force_read_error) {
1821					softc->force_read_error--;
1822					fail = 1;
1823				}
1824				if (softc->periodic_read_error > 0) {
1825					if (++softc->periodic_read_count >=
1826					    softc->periodic_read_error) {
1827						softc->periodic_read_count = 0;
1828						fail = 1;
1829					}
1830				}
1831			} else {
1832				if (softc->force_write_error) {
1833					softc->force_write_error--;
1834					fail = 1;
1835				}
1836			}
1837			if (fail) {
1838				biofinish(bp, NULL, EIO);
1839				xpt_release_ccb(start_ccb);
1840				adaschedule(periph);
1841				return;
1842			}
1843#endif
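			/*
			 * For unmapped bios, sanity check that the page list
			 * covers the whole transfer before issuing the command.
			 */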
1844			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
1845			    round_page(bp->bio_bcount + bp->bio_ma_offset) /
1846			    PAGE_SIZE == bp->bio_ma_n,
1847			    ("Short bio %p", bp));
1848			cam_fill_ataio(ataio,
1849			    ada_retry_count,
1850			    adadone,
1851			    rw_op,
1852			    0,
1853			    data_ptr,
1854			    bp->bio_bcount,
1855			    ada_default_timeout*1000);
1856
1857			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
1858				if (bp->bio_cmd == BIO_READ) {
1859					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
1860					    lba, count);
1861				} else {
1862					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
1863					    lba, count);
1864				}
1865			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
1866			    (lba + count >= ATA_MAX_28BIT_LBA ||
1867			    count > 256)) {
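				/*
				 * The transfer does not fit a 28-bit command
				 * (LBA beyond ATA_MAX_28BIT_LBA or more than
				 * 256 sectors), so use the 48-bit opcodes.
				 */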
1868				if (softc->flags & ADA_FLAG_CAN_DMA48) {
1869					if (bp->bio_cmd == BIO_READ) {
1870						ata_48bit_cmd(ataio, ATA_READ_DMA48,
1871						    0, lba, count);
1872					} else {
1873						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
1874						    0, lba, count);
1875					}
1876				} else {
1877					if (bp->bio_cmd == BIO_READ) {
1878						ata_48bit_cmd(ataio, ATA_READ_MUL48,
1879						    0, lba, count);
1880					} else {
1881						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
1882						    0, lba, count);
1883					}
1884				}
1885			} else {
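				/* A count of 0 means 256 sectors in 28-bit commands. */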
1886				if (count == 256)
1887					count = 0;
1888				if (softc->flags & ADA_FLAG_CAN_DMA) {
1889					if (bp->bio_cmd == BIO_READ) {
1890						ata_28bit_cmd(ataio, ATA_READ_DMA,
1891						    0, lba, count);
1892					} else {
1893						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
1894						    0, lba, count);
1895					}
1896				} else {
1897					if (bp->bio_cmd == BIO_READ) {
1898						ata_28bit_cmd(ataio, ATA_READ_MUL,
1899						    0, lba, count);
1900					} else {
1901						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
1902						    0, lba, count);
1903					}
1904				}
1905			}
1906			break;
1907		}
1908		case BIO_DELETE:
1909			switch (softc->delete_method) {
1910			case ADA_DELETE_NCQ_DSM_TRIM:
1911				ada_ncq_dsmtrim(softc, bp, ataio);
1912				break;
1913			case ADA_DELETE_DSM_TRIM:
1914				ada_dsmtrim(softc, bp, ataio);
1915				break;
1916			case ADA_DELETE_CFA_ERASE:
1917				ada_cfaerase(softc, bp, ataio);
1918				break;
1919			default:
1920				biofinish(bp, NULL, EOPNOTSUPP);
1921				xpt_release_ccb(start_ccb);
1922				adaschedule(periph);
1923				return;
1924			}
1925			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
1926			start_ccb->ccb_h.flags |= CAM_UNLOCKED;
1927			cam_iosched_submit_trim(softc->cam_iosched);
1928			goto out;
1929		case BIO_FLUSH:
1930			cam_fill_ataio(ataio,
1931			    1,
1932			    adadone,
1933			    CAM_DIR_NONE,
1934			    0,
1935			    NULL,
1936			    0,
1937			    ada_default_timeout*1000);
1938
1939			if (softc->flags & ADA_FLAG_CAN_48BIT)
1940				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1941			else
1942				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
1943			break;
1944		}
1945		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
1946		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
1947out:
1948		start_ccb->ccb_h.ccb_bp = bp;
1949		softc->outstanding_cmds++;
1950		softc->refcount++;
1951		cam_periph_unlock(periph);
1952		xpt_action(start_ccb);
1953		cam_periph_lock(periph);
1954		softc->refcount--;
1955
1956		/* May have more work to do, so ensure we stay scheduled */
1957		adaschedule(periph);
1958		break;
1959	}
1960	case ADA_STATE_RAHEAD:
1961	case ADA_STATE_WCACHE:
1962	{
1963		cam_fill_ataio(ataio,
1964		    1,
1965		    adadone,
1966		    CAM_DIR_NONE,
1967		    0,
1968		    NULL,
1969		    0,
1970		    ada_default_timeout*1000);
1971
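		/*
		 * Enable or disable the drive's read-ahead or write cache
		 * with SETFEATURES, according to the ADA_RA / ADA_WC
		 * settings.
		 */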
1972		if (softc->state == ADA_STATE_RAHEAD) {
1973			ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_RA ?
1974			    ATA_SF_ENAB_RCACHE : ATA_SF_DIS_RCACHE, 0, 0);
1975			start_ccb->ccb_h.ccb_state = ADA_CCB_RAHEAD;
1976		} else {
1977			ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_WC ?
1978			    ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0);
1979			start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE;
1980		}
1981		start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1982		xpt_action(start_ccb);
1983		break;
1984	}
1985	}
1986}
1987
1988static void
1989adadone(struct cam_periph *periph, union ccb *done_ccb)
1990{
1991	struct ada_softc *softc;
1992	struct ccb_ataio *ataio;
1993	struct ccb_getdev *cgd;
1994	struct cam_path *path;
1995	int state;
1996
1997	softc = (struct ada_softc *)periph->softc;
1998	ataio = &done_ccb->ataio;
1999	path = done_ccb->ccb_h.path;
2000
2001	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("adadone\n"));
2002
2003	state = ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK;
2004	switch (state) {
2005	case ADA_CCB_BUFFER_IO:
2006	case ADA_CCB_TRIM:
2007	{
2008		struct bio *bp;
2009		int error;
2010
2011		cam_periph_lock(periph);
2012		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
2013		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2014			error = adaerror(done_ccb, 0, 0);
2015			if (error == ERESTART) {
2016				/* A retry was scheduled, so just return. */
2017				cam_periph_unlock(periph);
2018				return;
2019			}
2020			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2021				cam_release_devq(path,
2022						 /*relsim_flags*/0,
2023						 /*reduction*/0,
2024						 /*timeout*/0,
2025						 /*getcount_only*/0);
2026			/*
2027			 * If we get an error on an NCQ DSM TRIM, fall back
2028			 * to a non-NCQ DSM TRIM forever. Please note that if
2029			 * CAN_NCQ_TRIM is set, CAN_TRIM is necessarily set too.
2030			 * However, for this one trim, we treat it as advisory
2031			 * and return success up the stack.
2032			 */
2033			if (state == ADA_CCB_TRIM &&
2034			    error != 0 &&
2035			    (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) != 0) {
2036				softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM;
2037				error = 0;
2038				adasetdeletemethod(softc);
2039			}
2040		} else {
2041			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2042				panic("REQ_CMP with QFRZN");
2043			error = 0;
2044		}
2045		bp->bio_error = error;
2046		if (error != 0) {
2047			bp->bio_resid = bp->bio_bcount;
2048			bp->bio_flags |= BIO_ERROR;
2049		} else {
2050			if (state == ADA_CCB_TRIM)
2051				bp->bio_resid = 0;
2052			else
2053				bp->bio_resid = ataio->resid;
2054			if (bp->bio_resid > 0)
2055				bp->bio_flags |= BIO_ERROR;
2056		}
2057		softc->outstanding_cmds--;
2058		if (softc->outstanding_cmds == 0)
2059			softc->flags |= ADA_FLAG_WAS_OTAG;
2060
2061		cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
2062		xpt_release_ccb(done_ccb);
2063		if (state == ADA_CCB_TRIM) {
2064			TAILQ_HEAD(, bio) queue;
2065			struct bio *bp1;
2066
2067			TAILQ_INIT(&queue);
2068			TAILQ_CONCAT(&queue, &softc->trim_req.bps, bio_queue);
2069			/*
2070			 * Normally, the xpt_release_ccb() above would make sure
2071			 * that when we have more work to do, that work would
2072			 * get kicked off. However, we have deferred telling the
2073			 * I/O scheduler that this trim is done, to allow other
2074			 * I/O to progress when many BIO_DELETE requests are
2075			 * pushed down. We call cam_iosched_trim_done() and then
2076			 * adaschedule() here so that we don't stall if there are
2077			 * no other I/Os pending apart from BIO_DELETEs.
2078			 */
2079			cam_iosched_trim_done(softc->cam_iosched);
2080			adaschedule(periph);
2081			cam_periph_unlock(periph);
2082			while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
2083				TAILQ_REMOVE(&queue, bp1, bio_queue);
2084				bp1->bio_error = error;
2085				if (error != 0) {
2086					bp1->bio_flags |= BIO_ERROR;
2087					bp1->bio_resid = bp1->bio_bcount;
2088				} else
2089					bp1->bio_resid = 0;
2090				biodone(bp1);
2091			}
2092		} else {
2093			adaschedule(periph);
2094			cam_periph_unlock(periph);
2095			biodone(bp);
2096		}
2097		return;
2098	}
2099	case ADA_CCB_RAHEAD:
2100	{
2101		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2102			if (adaerror(done_ccb, 0, 0) == ERESTART) {
2103out:
2104				/* Drop freeze taken due to CAM_DEV_QFREEZE */
2105				cam_release_devq(path, 0, 0, 0, FALSE);
2106				return;
2107			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
2108				cam_release_devq(path,
2109				    /*relsim_flags*/0,
2110				    /*reduction*/0,
2111				    /*timeout*/0,
2112				    /*getcount_only*/0);
2113			}
2114		}
2115
2116		/*
2117		 * Since our peripheral may be invalidated by an error
2118		 * above or an external event, we must release our CCB
2119		 * before releasing the reference on the peripheral.
2120		 * The peripheral will only go away once the last reference
2121		 * is removed, and we need it around for the CCB release
2122		 * operation.
2123		 */
2124		cgd = (struct ccb_getdev *)done_ccb;
2125		xpt_setup_ccb(&cgd->ccb_h, path, CAM_PRIORITY_NORMAL);
2126		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
2127		xpt_action((union ccb *)cgd);
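		/*
		 * If write-cache control is enabled (ADA_WC >= 0) and the
		 * drive reports write cache support, go configure the write
		 * cache next; otherwise return to normal operation.
		 */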
2128		if (ADA_WC >= 0 &&
2129		    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
2130			softc->state = ADA_STATE_WCACHE;
2131			xpt_release_ccb(done_ccb);
2132			xpt_schedule(periph, CAM_PRIORITY_DEV);
2133			goto out;
2134		}
2135		softc->state = ADA_STATE_NORMAL;
2136		xpt_release_ccb(done_ccb);
2137		/* Drop freeze taken due to CAM_DEV_QFREEZE */
2138		cam_release_devq(path, 0, 0, 0, FALSE);
2139		adaschedule(periph);
2140		cam_periph_release_locked(periph);
2141		return;
2142	}
2143	case ADA_CCB_WCACHE:
2144	{
2145		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2146			if (adaerror(done_ccb, 0, 0) == ERESTART) {
2147				goto out;
2148			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
2149				cam_release_devq(path,
2150				    /*relsim_flags*/0,
2151				    /*reduction*/0,
2152				    /*timeout*/0,
2153				    /*getcount_only*/0);
2154			}
2155		}
2156
2157		softc->state = ADA_STATE_NORMAL;
2158		/*
2159		 * Since our peripheral may be invalidated by an error
2160		 * above or an external event, we must release our CCB
2161		 * before releasing the reference on the peripheral.
2162		 * The peripheral will only go away once the last reference
2163		 * is removed, and we need it around for the CCB release
2164		 * operation.
2165		 */
2166		xpt_release_ccb(done_ccb);
2167		/* Drop freeze taken due to CAM_DEV_QFREEZE */
2168		cam_release_devq(path, 0, 0, 0, FALSE);
2169		adaschedule(periph);
2170		cam_periph_release_locked(periph);
2171		return;
2172	}
2173	case ADA_CCB_DUMP:
2174		/* No-op.  We're polling */
2175		return;
2176	default:
2177		break;
2178	}
2179	xpt_release_ccb(done_ccb);
2180}
2181
2182static int
2183adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
2184{
2185#ifdef CAM_IO_STATS
2186	struct ada_softc *softc;
2187	struct cam_periph *periph;
2188
2189	periph = xpt_path_periph(ccb->ccb_h.path);
2190	softc = (struct ada_softc *)periph->softc;
2191
2192	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
2193	case CAM_CMD_TIMEOUT:
2194		softc->timeouts++;
2195		break;
2196	case CAM_REQ_ABORTED:
2197	case CAM_REQ_CMP_ERR:
2198	case CAM_REQ_TERMIO:
2199	case CAM_UNREC_HBA_ERROR:
2200	case CAM_DATA_RUN_ERR:
2201	case CAM_ATA_STATUS_ERROR:
2202		softc->errors++;
2203		break;
2204	default:
2205		break;
2206	}
2207#endif
2208
2209	return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
2210}
2211
2212static void
2213adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
2214{
2215	struct ada_softc *softc = (struct ada_softc *)periph->softc;
2216	struct disk_params *dp = &softc->params;
2217	u_int64_t lbasize48;
2218	u_int32_t lbasize;
2219
2220	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
2221	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
2222		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
2223		dp->heads = cgd->ident_data.current_heads;
2224		dp->secs_per_track = cgd->ident_data.current_sectors;
2225		dp->cylinders = cgd->ident_data.cylinders;
2226		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
2227			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
2228	} else {
2229		dp->heads = cgd->ident_data.heads;
2230		dp->secs_per_track = cgd->ident_data.sectors;
2231		dp->cylinders = cgd->ident_data.cylinders;
2232		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
2233	}
2234	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
2235		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
2236
2237	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
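	/* (16383 cylinders is the ATA convention for capacity beyond CHS) */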
2238	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
2239		dp->sectors = lbasize;
2240
2241	/* use the 48bit LBA size if valid */
2242	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
2243		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
2244		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
2245		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
2246	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
2247	    lbasize48 > ATA_MAX_28BIT_LBA)
2248		dp->sectors = lbasize48;
2249}
2250
2251static void
2252adasendorderedtag(void *arg)
2253{
2254	struct ada_softc *softc = arg;
2255
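	/*
	 * If commands were outstanding for a whole interval without any of
	 * them going out untagged, ask adastart() to issue the next
	 * eligible command as an ordered (non-queued) one.
	 */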
2256	if (ada_send_ordered) {
2257		if (softc->outstanding_cmds > 0) {
2258			if ((softc->flags & ADA_FLAG_WAS_OTAG) == 0)
2259				softc->flags |= ADA_FLAG_NEED_OTAG;
2260			softc->flags &= ~ADA_FLAG_WAS_OTAG;
2261		}
2262	}
2263	/* Queue us up again */
2264	callout_reset(&softc->sendordered_c,
2265	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
2266	    adasendorderedtag, softc);
2267}
2268
2269/*
2270 * Step through all ADA peripheral drivers, and if the device is still open,
2271 * sync the disk cache to physical media.
2272 */
2273static void
2274adaflush(void)
2275{
2276	struct cam_periph *periph;
2277	struct ada_softc *softc;
2278	union ccb *ccb;
2279	int error;
2280
2281	CAM_PERIPH_FOREACH(periph, &adadriver) {
2282		softc = (struct ada_softc *)periph->softc;
2283		if (SCHEDULER_STOPPED()) {
2284			/* If we panicked with the lock held, do not recurse. */
2285			if (!cam_periph_owned(periph) &&
2286			    (softc->flags & ADA_FLAG_OPEN)) {
2287				adadump(softc->disk, NULL, 0, 0, 0);
2288			}
2289			continue;
2290		}
2291		cam_periph_lock(periph);
2292		/*
2293		 * We only sync the cache if the drive is still open and
2294		 * is capable of it.
2295		 */
2296		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
2297		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
2298			cam_periph_unlock(periph);
2299			continue;
2300		}
2301
2302		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
2303		cam_fill_ataio(&ccb->ataio,
2304				    0,
2305				    adadone,
2306				    CAM_DIR_NONE,
2307				    0,
2308				    NULL,
2309				    0,
2310				    ada_default_timeout*1000);
2311		if (softc->flags & ADA_FLAG_CAN_48BIT)
2312			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
2313		else
2314			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
2315
2316		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
2317		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
2318		    softc->disk->d_devstat);
2319		if (error != 0)
2320			xpt_print(periph->path, "Synchronize cache failed\n");
2321		xpt_release_ccb(ccb);
2322		cam_periph_unlock(periph);
2323	}
2324}
2325
2326static void
2327adaspindown(uint8_t cmd, int flags)
2328{
2329	struct cam_periph *periph;
2330	struct ada_softc *softc;
2331	union ccb *ccb;
2332	int error;
2333
2334	CAM_PERIPH_FOREACH(periph, &adadriver) {
2335		/* If we panicked with the lock held, do not recurse here. */
2336		if (cam_periph_owned(periph))
2337			continue;
2338		cam_periph_lock(periph);
2339		softc = (struct ada_softc *)periph->softc;
2340		/*
2341		 * We only spin down the drive if it is capable of it.
2342		 */
2343		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
2344			cam_periph_unlock(periph);
2345			continue;
2346		}
2347
2348		if (bootverbose)
2349			xpt_print(periph->path, "spin-down\n");
2350
2351		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
2352		cam_fill_ataio(&ccb->ataio,
2353				    0,
2354				    adadone,
2355				    CAM_DIR_NONE | flags,
2356				    0,
2357				    NULL,
2358				    0,
2359				    ada_default_timeout*1000);
2360		ata_28bit_cmd(&ccb->ataio, cmd, 0, 0, 0);
2361
2362		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
2363		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
2364		    softc->disk->d_devstat);
2365		if (error != 0)
2366			xpt_print(periph->path, "Spin-down disk failed\n");
2367		xpt_release_ccb(ccb);
2368		cam_periph_unlock(periph);
2369	}
2370}
2371
2372static void
2373adashutdown(void *arg, int howto)
2374{
2375
2376	adaflush();
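	/*
	 * Spin the disks down only on halt or power-off (not on reboot),
	 * and only when enabled via the spindown_shutdown setting.
	 */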
2377	if (ada_spindown_shutdown != 0 &&
2378	    (howto & (RB_HALT | RB_POWEROFF)) != 0)
2379		adaspindown(ATA_STANDBY_IMMEDIATE, 0);
2380}
2381
2382static void
2383adasuspend(void *arg)
2384{
2385
2386	adaflush();
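	/*
	 * Put the disks to sleep with the devq frozen (CAM_DEV_QFREEZE);
	 * adaresume() releases the freeze on wakeup.
	 */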
2387	if (ada_spindown_suspend != 0)
2388		adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE);
2389}
2390
2391static void
2392adaresume(void *arg)
2393{
2394	struct cam_periph *periph;
2395	struct ada_softc *softc;
2396
2397	if (ada_spindown_suspend == 0)
2398		return;
2399
2400	CAM_PERIPH_FOREACH(periph, &adadriver) {
2401		cam_periph_lock(periph);
2402		softc = (struct ada_softc *)periph->softc;
2403		/*
2404		 * Only drives capable of power management were put to sleep.
2405		 */
2406		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
2407			cam_periph_unlock(periph);
2408			continue;
2409		}
2410
2411		if (bootverbose)
2412			xpt_print(periph->path, "resume\n");
2413
2414		/*
2415		 * Drop freeze taken due to CAM_DEV_QFREEZE flag set on
2416		 * sleep request.
2417		 */
2418		cam_release_devq(periph->path,
2419			 /*relsim_flags*/0,
2420			 /*openings*/0,
2421			 /*timeout*/0,
2422			 /*getcount_only*/0);
2423
2424		cam_periph_unlock(periph);
2425	}
2426}
2427
2428#endif /* _KERNEL */
2429