ata_da.c revision 298035
1/*-
2 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer,
10 *    without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/cam/ata/ata_da.c 298035 2016-04-15 05:10:31Z imp $");
29
30#include "opt_ada.h"
31
32#include <sys/param.h>
33
34#ifdef _KERNEL
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/taskqueue.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/conf.h>
43#include <sys/devicestat.h>
44#include <sys/eventhandler.h>
45#include <sys/malloc.h>
46#include <sys/cons.h>
47#include <sys/proc.h>
48#include <sys/reboot.h>
49#include <geom/geom_disk.h>
50#endif /* _KERNEL */
51
52#ifndef _KERNEL
53#include <stdio.h>
54#include <string.h>
55#endif /* _KERNEL */
56
57#include <cam/cam.h>
58#include <cam/cam_ccb.h>
59#include <cam/cam_periph.h>
60#include <cam/cam_xpt_periph.h>
61#include <cam/cam_sim.h>
62#include <cam/cam_iosched.h>
63
64#include <cam/ata/ata_all.h>
65
66#include <machine/md_var.h>	/* geometry translation */
67
68#ifdef _KERNEL
69
70#define ATA_MAX_28BIT_LBA               268435455UL
71
72extern int iosched_debug;
73
74typedef enum {
75	ADA_STATE_RAHEAD,
76	ADA_STATE_WCACHE,
77	ADA_STATE_NORMAL
78} ada_state;
79
80typedef enum {
81	ADA_FLAG_CAN_48BIT	= 0x0002,
82	ADA_FLAG_CAN_FLUSHCACHE	= 0x0004,
83	ADA_FLAG_CAN_NCQ	= 0x0008,
84	ADA_FLAG_CAN_DMA	= 0x0010,
85	ADA_FLAG_NEED_OTAG	= 0x0020,
86	ADA_FLAG_WAS_OTAG	= 0x0040,
87	ADA_FLAG_CAN_TRIM	= 0x0080,
88	ADA_FLAG_OPEN		= 0x0100,
89	ADA_FLAG_SCTX_INIT	= 0x0200,
90	ADA_FLAG_CAN_CFA        = 0x0400,
91	ADA_FLAG_CAN_POWERMGT   = 0x0800,
92	ADA_FLAG_CAN_DMA48	= 0x1000,
93	ADA_FLAG_DIRTY		= 0x2000,
94	ADA_FLAG_CAN_NCQ_TRIM	= 0x4000,	/* CAN_TRIM also set */
95	ADA_FLAG_PIM_CAN_NCQ_TRIM = 0x8000
96} ada_flags;
97
98typedef enum {
99	ADA_Q_NONE		= 0x00,
100	ADA_Q_4K		= 0x01,
101	ADA_Q_NCQ_TRIM_BROKEN	= 0x02,
102} ada_quirks;
103
104#define ADA_Q_BIT_STRING	\
105	"\020"			\
106	"\0014K"		\
107	"\002NCQ_TRIM_BROKEN"
108
109typedef enum {
110	ADA_CCB_RAHEAD		= 0x01,
111	ADA_CCB_WCACHE		= 0x02,
112	ADA_CCB_BUFFER_IO	= 0x03,
113	ADA_CCB_DUMP		= 0x05,
114	ADA_CCB_TRIM		= 0x06,
115	ADA_CCB_TYPE_MASK	= 0x0F,
116} ada_ccb_state;
117
118/* Offsets into our private area for storing information */
119#define ccb_state	ppriv_field0
120#define ccb_bp		ppriv_ptr1
121
122typedef enum {
123	ADA_DELETE_NONE,
124	ADA_DELETE_DISABLE,
125	ADA_DELETE_CFA_ERASE,
126	ADA_DELETE_DSM_TRIM,
127	ADA_DELETE_NCQ_DSM_TRIM,
128	ADA_DELETE_MIN = ADA_DELETE_CFA_ERASE,
129	ADA_DELETE_MAX = ADA_DELETE_NCQ_DSM_TRIM,
130} ada_delete_methods;
131
132static const char *ada_delete_method_names[] =
133    { "NONE", "DISABLE", "CFA_ERASE", "DSM_TRIM", "NCQ_DSM_TRIM" };
134#if 0
135static const char *ada_delete_method_desc[] =
136    { "NONE", "DISABLED", "CFA Erase", "DSM Trim", "DSM Trim via NCQ" };
137#endif
138
139struct disk_params {
140	u_int8_t  heads;
141	u_int8_t  secs_per_track;
142	u_int32_t cylinders;
143	u_int32_t secsize;	/* Number of bytes/logical sector */
144	u_int64_t sectors;	/* Total number sectors */
145};
146
147#define TRIM_MAX_BLOCKS	8
148#define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * ATA_DSM_BLK_RANGES)
149struct trim_request {
150	uint8_t		data[TRIM_MAX_RANGES * ATA_DSM_RANGE_SIZE];
151	TAILQ_HEAD(, bio) bps;
152};
153
154struct ada_softc {
155	struct   cam_iosched_softc *cam_iosched;
156	int	 outstanding_cmds;	/* Number of active commands */
157	int	 refcount;		/* Active xpt_action() calls */
158	ada_state state;
159	ada_flags flags;
160	ada_quirks quirks;
161	ada_delete_methods delete_method;
162	int	 trim_max_ranges;
163	int	 read_ahead;
164	int	 write_cache;
165	int	 unmappedio;
166	int	 rotating;
167#ifdef ADA_TEST_FAILURE
168	int      force_read_error;
169	int      force_write_error;
170	int      periodic_read_error;
171	int      periodic_read_count;
172#endif
173	struct	 disk_params params;
174	struct	 disk *disk;
175	struct task		sysctl_task;
176	struct sysctl_ctx_list	sysctl_ctx;
177	struct sysctl_oid	*sysctl_tree;
178	struct callout		sendordered_c;
179	struct trim_request	trim_req;
180#ifdef CAM_IO_STATS
181	struct sysctl_ctx_list	sysctl_stats_ctx;
182	struct sysctl_oid	*sysctl_stats_tree;
183	u_int	timeouts;
184	u_int	errors;
185	u_int	invalidations;
186#endif
187};
188
189struct ada_quirk_entry {
190	struct scsi_inquiry_pattern inq_pat;
191	ada_quirks quirks;
192};
193
194static struct ada_quirk_entry ada_quirk_table[] =
195{
196	{
197		/* Hitachi Advanced Format (4k) drives */
198		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Hitachi H??????????E3*", "*" },
199		/*quirks*/ADA_Q_4K
200	},
201	{
202		/* Samsung Advanced Format (4k) drives */
203		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD155UI*", "*" },
204		/*quirks*/ADA_Q_4K
205	},
206	{
207		/* Samsung Advanced Format (4k) drives */
208		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD204UI*", "*" },
209		/*quirks*/ADA_Q_4K
210	},
211	{
212		/* Seagate Barracuda Green Advanced Format (4k) drives */
213		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DL*", "*" },
214		/*quirks*/ADA_Q_4K
215	},
216	{
217		/* Seagate Barracuda Advanced Format (4k) drives */
218		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???DM*", "*" },
219		/*quirks*/ADA_Q_4K
220	},
221	{
222		/* Seagate Barracuda Advanced Format (4k) drives */
223		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DM*", "*" },
224		/*quirks*/ADA_Q_4K
225	},
226	{
227		/* Seagate Momentus Advanced Format (4k) drives */
228		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500423AS*", "*" },
229		/*quirks*/ADA_Q_4K
230	},
231	{
232		/* Seagate Momentus Advanced Format (4k) drives */
233		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500424AS*", "*" },
234		/*quirks*/ADA_Q_4K
235	},
236	{
237		/* Seagate Momentus Advanced Format (4k) drives */
238		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640423AS*", "*" },
239		/*quirks*/ADA_Q_4K
240	},
241	{
242		/* Seagate Momentus Advanced Format (4k) drives */
243		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640424AS*", "*" },
244		/*quirks*/ADA_Q_4K
245	},
246	{
247		/* Seagate Momentus Advanced Format (4k) drives */
248		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750420AS*", "*" },
249		/*quirks*/ADA_Q_4K
250	},
251	{
252		/* Seagate Momentus Advanced Format (4k) drives */
253		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750422AS*", "*" },
254		/*quirks*/ADA_Q_4K
255	},
256	{
257		/* Seagate Momentus Advanced Format (4k) drives */
258		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750423AS*", "*" },
259		/*quirks*/ADA_Q_4K
260	},
261	{
262		/* Seagate Momentus Thin Advanced Format (4k) drives */
263		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???LT*", "*" },
264		/*quirks*/ADA_Q_4K
265	},
266	{
267		/* WDC Caviar Red Advanced Format (4k) drives */
268		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????CX*", "*" },
269		/*quirks*/ADA_Q_4K
270	},
271	{
272		/* WDC Caviar Green Advanced Format (4k) drives */
273		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RS*", "*" },
274		/*quirks*/ADA_Q_4K
275	},
276	{
277		/* WDC Caviar Green/Red Advanced Format (4k) drives */
278		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RX*", "*" },
279		/*quirks*/ADA_Q_4K
280	},
281	{
282		/* WDC Caviar Red Advanced Format (4k) drives */
283		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????CX*", "*" },
284		/*quirks*/ADA_Q_4K
285	},
286	{
287		/* WDC Caviar Black Advanced Format (4k) drives */
288		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????EX*", "*" },
289		/*quirks*/ADA_Q_4K
290	},
291	{
292		/* WDC Caviar Green Advanced Format (4k) drives */
293		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RS*", "*" },
294		/*quirks*/ADA_Q_4K
295	},
296	{
297		/* WDC Caviar Green Advanced Format (4k) drives */
298		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RX*", "*" },
299		/*quirks*/ADA_Q_4K
300	},
301	{
302		/* WDC Scorpio Black Advanced Format (4k) drives */
303		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PKT*", "*" },
304		/*quirks*/ADA_Q_4K
305	},
306	{
307		/* WDC Scorpio Black Advanced Format (4k) drives */
308		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PKT*", "*" },
309		/*quirks*/ADA_Q_4K
310	},
311	{
312		/* WDC Scorpio Blue Advanced Format (4k) drives */
313		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PVT*", "*" },
314		/*quirks*/ADA_Q_4K
315	},
316	{
317		/* WDC Scorpio Blue Advanced Format (4k) drives */
318		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PVT*", "*" },
319		/*quirks*/ADA_Q_4K
320	},
321	/* SSDs */
322	{
323		/*
324		 * Corsair Force 2 SSDs
325		 * 4k optimised & trim only works in 4k requests + 4k aligned
326		 */
327		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair CSSD-F*", "*" },
328		/*quirks*/ADA_Q_4K
329	},
330	{
331		/*
332		 * Corsair Force 3 SSDs
333		 * 4k optimised & trim only works in 4k requests + 4k aligned
334		 */
335		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force 3*", "*" },
336		/*quirks*/ADA_Q_4K
337	},
338	{
339		/*
340		 * Corsair Neutron GTX SSDs
341		 * 4k optimised & trim only works in 4k requests + 4k aligned
342		 */
343		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
344		/*quirks*/ADA_Q_4K
345	},
346	{
347		/*
348		 * Corsair Force GT & GS SSDs
349		 * 4k optimised & trim only works in 4k requests + 4k aligned
350		 */
351		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force G*", "*" },
352		/*quirks*/ADA_Q_4K
353	},
354	{
355		/*
356		 * Crucial M4 SSDs
357		 * 4k optimised & trim only works in 4k requests + 4k aligned
358		 */
359		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "M4-CT???M4SSD2*", "*" },
360		/*quirks*/ADA_Q_4K
361	},
362	{
363		/*
364		 * Crucial M500 SSDs MU07 firmware
365		 * NCQ Trim works
366		 */
367		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "MU07" },
368		/*quirks*/0
369	},
370	{
371		/*
372		 * Crucial M500 SSDs all other firmware
373		 * NCQ Trim doesn't work
374		 */
375		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M500*", "*" },
376		/*quirks*/ADA_Q_NCQ_TRIM_BROKEN
377	},
378	{
379		/*
380		 * Crucial M550 SSDs
381		 * NCQ Trim doesn't work, but only on MU01 firmware
382		 */
383		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*M550*", "MU01" },
384		/*quirks*/ADA_Q_NCQ_TRIM_BROKEN
385	},
386	{
387		/*
388		 * Crucial MX100 SSDs
389		 * NCQ Trim doesn't work, but only on MU01 firmware
390		 */
391		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Crucial CT*MX100*", "MU01" },
392		/*quirks*/ADA_Q_NCQ_TRIM_BROKEN
393	},
394	{
395		/*
396		 * Crucial RealSSD C300 SSDs
397		 * 4k optimised
398		 */
399		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "C300-CTFDDAC???MAG*",
400		"*" }, /*quirks*/ADA_Q_4K
401	},
402	{
403		/*
404		 * FCCT M500 SSDs
405		 * NCQ Trim doesn't work
406		 */
407		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "FCCT*M500*", "*" },
408		/*quirks*/ADA_Q_NCQ_TRIM_BROKEN
409	},
410	{
411		/*
412		 * Intel 320 Series SSDs
413		 * 4k optimised & trim only works in 4k requests + 4k aligned
414		 */
415		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2CW*", "*" },
416		/*quirks*/ADA_Q_4K
417	},
418	{
419		/*
420		 * Intel 330 Series SSDs
421		 * 4k optimised & trim only works in 4k requests + 4k aligned
422		 */
423		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2CT*", "*" },
424		/*quirks*/ADA_Q_4K
425	},
426	{
427		/*
428		 * Intel 510 Series SSDs
429		 * 4k optimised & trim only works in 4k requests + 4k aligned
430		 */
431		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2MH*", "*" },
432		/*quirks*/ADA_Q_4K
433	},
434	{
435		/*
436		 * Intel 520 Series SSDs
437		 * 4k optimised & trim only works in 4k requests + 4k aligned
438		 */
439		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2BW*", "*" },
440		/*quirks*/ADA_Q_4K
441	},
442	{
443		/*
444		 * Intel X25-M Series SSDs
445		 * 4k optimised & trim only works in 4k requests + 4k aligned
446		 */
447		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSA2M*", "*" },
448		/*quirks*/ADA_Q_4K
449	},
450	{
451		/*
452		 * Kingston E100 Series SSDs
453		 * 4k optimised & trim only works in 4k requests + 4k aligned
454		 */
455		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SE100S3*", "*" },
456		/*quirks*/ADA_Q_4K
457	},
458	{
459		/*
460		 * Kingston HyperX 3k SSDs
461		 * 4k optimised & trim only works in 4k requests + 4k aligned
462		 */
463		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SH103S3*", "*" },
464		/*quirks*/ADA_Q_4K
465	},
466	{
467		/*
468		 * Marvell SSDs (entry taken from OpenSolaris)
469		 * 4k optimised & trim only works in 4k requests + 4k aligned
470		 */
471		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "MARVELL SD88SA02*", "*" },
472		/*quirks*/ADA_Q_4K
473	},
474	{
475		/*
476		 * Micron M500 SSDs firmware MU07
477		 * NCQ Trim works?
478		 */
479		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "MU07" },
480		/*quirks*/0
481	},
482	{
483		/*
484		 * Micron M500 SSDs all other firmware
485		 * NCQ Trim doesn't work
486		 */
487		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M500*", "*" },
488		/*quirks*/ADA_Q_NCQ_TRIM_BROKEN
489	},
490	{
491		/*
492		 * Micron M5[15]0 SSDs
493		 * NCQ Trim doesn't work, but only MU01 firmware
494		 */
495		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Micron M5[15]0*", "MU01" },
496		/*quirks*/ADA_Q_NCQ_TRIM_BROKEN
497	},
498	{
499		/*
500		 * OCZ Agility 2 SSDs
501		 * 4k optimised & trim only works in 4k requests + 4k aligned
502		 */
503		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
504		/*quirks*/ADA_Q_4K
505	},
506	{
507		/*
508		 * OCZ Agility 3 SSDs
509		 * 4k optimised & trim only works in 4k requests + 4k aligned
510		 */
511		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY3*", "*" },
512		/*quirks*/ADA_Q_4K
513	},
514	{
515		/*
516		 * OCZ Deneva R Series SSDs
517		 * 4k optimised & trim only works in 4k requests + 4k aligned
518		 */
519		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "DENRSTE251M45*", "*" },
520		/*quirks*/ADA_Q_4K
521	},
522	{
523		/*
524		 * OCZ Vertex 2 SSDs (inc pro series)
525		 * 4k optimised & trim only works in 4k requests + 4k aligned
526		 */
527		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ?VERTEX2*", "*" },
528		/*quirks*/ADA_Q_4K
529	},
530	{
531		/*
532		 * OCZ Vertex 3 SSDs
533		 * 4k optimised & trim only works in 4k requests + 4k aligned
534		 */
535		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX3*", "*" },
536		/*quirks*/ADA_Q_4K
537	},
538	{
539		/*
540		 * OCZ Vertex 4 SSDs
541		 * 4k optimised & trim only works in 4k requests + 4k aligned
542		 */
543		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX4*", "*" },
544		/*quirks*/ADA_Q_4K
545	},
546	{
547		/*
548		 * Samsung 830 Series SSDs
549		 * 4k optimised, NCQ TRIM Broken (normal TRIM is fine)
550		 */
551		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG SSD 830 Series*", "*" },
552		/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
553	},
554	{
555		/*
556		 * Samsung 840 SSDs
557		 * 4k optimised, NCQ TRIM Broken (normal TRIM is fine)
558		 */
559		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 840*", "*" },
560		/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
561	},
562	{
563		/*
564		 * Samsung 850 SSDs
565		 * 4k optimised, NCQ TRIM broken (normal TRIM fine)
566		 */
567		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Samsung SSD 850*", "*" },
568		/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
569	},
570	{
571		/*
572		 * Samsung SM863 Series SSDs (MZ7KM*)
573		 * 4k optimised, NCQ believed to be working
574		 */
575		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7KM*", "*" },
576		/*quirks*/ADA_Q_4K
577	},
578	{
579		/*
580		 * Samsung 843T Series SSDs (MZ7WD*)
581		 * Samsung PM851 Series SSDs (MZ7TE*)
582		 * Samsung PM853T Series SSDs (MZ7GE*)
583		 * 4k optimised, NCQ believed to be broken since these are
584		 * appear to be built with the same controllers as the 840/850.
585		 */
586		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG MZ7*", "*" },
587		/*quirks*/ADA_Q_4K | ADA_Q_NCQ_TRIM_BROKEN
588	},
589	{
590		/*
591		 * SuperTalent TeraDrive CT SSDs
592		 * 4k optimised & trim only works in 4k requests + 4k aligned
593		 */
594		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "FTM??CT25H*", "*" },
595		/*quirks*/ADA_Q_4K
596	},
597	{
598		/*
599		 * XceedIOPS SATA SSDs
600		 * 4k optimised
601		 */
602		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SG9XCS2D*", "*" },
603		/*quirks*/ADA_Q_4K
604	},
605	{
606		/* Default */
607		{
608		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
609		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
610		},
611		/*quirks*/0
612	},
613};
614
615static	disk_strategy_t	adastrategy;
616static	dumper_t	adadump;
617static	periph_init_t	adainit;
618static	void		adaasync(void *callback_arg, u_int32_t code,
619				struct cam_path *path, void *arg);
620static	void		adasysctlinit(void *context, int pending);
621static	periph_ctor_t	adaregister;
622static	periph_dtor_t	adacleanup;
623static	periph_start_t	adastart;
624static	periph_oninv_t	adaoninvalidate;
625static	void		adadone(struct cam_periph *periph,
626			       union ccb *done_ccb);
627static  int		adaerror(union ccb *ccb, u_int32_t cam_flags,
628				u_int32_t sense_flags);
629static void		adagetparams(struct cam_periph *periph,
630				struct ccb_getdev *cgd);
631static timeout_t	adasendorderedtag;
632static void		adashutdown(void *arg, int howto);
633static void		adasuspend(void *arg);
634static void		adaresume(void *arg);
635
636#ifndef	ADA_DEFAULT_LEGACY_ALIASES
637#define	ADA_DEFAULT_LEGACY_ALIASES	1
638#endif
639
640#ifndef ADA_DEFAULT_TIMEOUT
641#define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
642#endif
643
644#ifndef	ADA_DEFAULT_RETRY
645#define	ADA_DEFAULT_RETRY	4
646#endif
647
648#ifndef	ADA_DEFAULT_SEND_ORDERED
649#define	ADA_DEFAULT_SEND_ORDERED	1
650#endif
651
652#ifndef	ADA_DEFAULT_SPINDOWN_SHUTDOWN
653#define	ADA_DEFAULT_SPINDOWN_SHUTDOWN	1
654#endif
655
656#ifndef	ADA_DEFAULT_SPINDOWN_SUSPEND
657#define	ADA_DEFAULT_SPINDOWN_SUSPEND	1
658#endif
659
660#ifndef	ADA_DEFAULT_READ_AHEAD
661#define	ADA_DEFAULT_READ_AHEAD	1
662#endif
663
664#ifndef	ADA_DEFAULT_WRITE_CACHE
665#define	ADA_DEFAULT_WRITE_CACHE	1
666#endif
667
668#define	ADA_RA	(softc->read_ahead >= 0 ? \
669		 softc->read_ahead : ada_read_ahead)
670#define	ADA_WC	(softc->write_cache >= 0 ? \
671		 softc->write_cache : ada_write_cache)
672
673/*
674 * Most platforms map firmware geometry to actual, but some don't.  If
675 * not overridden, default to nothing.
676 */
677#ifndef ata_disk_firmware_geom_adjust
678#define	ata_disk_firmware_geom_adjust(disk)
679#endif
680
681static int ada_retry_count = ADA_DEFAULT_RETRY;
682static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
683static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
684static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
685static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND;
686static int ada_read_ahead = ADA_DEFAULT_READ_AHEAD;
687static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;
688
689static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
690            "CAM Direct Access Disk driver");
691SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RWTUN,
692           &ada_retry_count, 0, "Normal I/O retry count");
693SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
694           &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
695SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
696           &ada_send_ordered, 0, "Send Ordered Tags");
697SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RWTUN,
698           &ada_spindown_shutdown, 0, "Spin down upon shutdown");
699SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RWTUN,
700           &ada_spindown_suspend, 0, "Spin down upon suspend");
701SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RWTUN,
702           &ada_read_ahead, 0, "Enable disk read-ahead");
703SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RWTUN,
704           &ada_write_cache, 0, "Enable disk write cache");
705
706/*
707 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
708 * to the default timeout, we check to see whether an ordered
709 * tagged transaction is appropriate to prevent simple tag
710 * starvation.  Since we'd like to ensure that there is at least
711 * 1/2 of the timeout length left for a starved transaction to
712 * complete after we've sent an ordered tag, we must poll at least
713 * four times in every timeout period.  This takes care of the worst
714 * case where a starved transaction starts during an interval that
715 * meets the requirement "don't send an ordered tag" test so it takes
716 * us two intervals to determine that a tag must be sent.
717 */
718#ifndef ADA_ORDEREDTAG_INTERVAL
719#define ADA_ORDEREDTAG_INTERVAL 4
720#endif
721
722static struct periph_driver adadriver =
723{
724	adainit, "ada",
725	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
726};
727
728static int adadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
729
730PERIPHDRIVER_DECLARE(ada, adadriver);
731
732static int
733adaopen(struct disk *dp)
734{
735	struct cam_periph *periph;
736	struct ada_softc *softc;
737	int error;
738
739	periph = (struct cam_periph *)dp->d_drv1;
740	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
741		return(ENXIO);
742	}
743
744	cam_periph_lock(periph);
745	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
746		cam_periph_unlock(periph);
747		cam_periph_release(periph);
748		return (error);
749	}
750
751	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
752	    ("adaopen\n"));
753
754	softc = (struct ada_softc *)periph->softc;
755	softc->flags |= ADA_FLAG_OPEN;
756
757	cam_periph_unhold(periph);
758	cam_periph_unlock(periph);
759	return (0);
760}
761
762static int
763adaclose(struct disk *dp)
764{
765	struct	cam_periph *periph;
766	struct	ada_softc *softc;
767	union ccb *ccb;
768	int error;
769
770	periph = (struct cam_periph *)dp->d_drv1;
771	softc = (struct ada_softc *)periph->softc;
772	cam_periph_lock(periph);
773
774	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
775	    ("adaclose\n"));
776
777	/* We only sync the cache if the drive is capable of it. */
778	if ((softc->flags & ADA_FLAG_DIRTY) != 0 &&
779	    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 &&
780	    (periph->flags & CAM_PERIPH_INVALID) == 0 &&
781	    cam_periph_hold(periph, PRIBIO) == 0) {
782
783		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
784		cam_fill_ataio(&ccb->ataio,
785				    1,
786				    adadone,
787				    CAM_DIR_NONE,
788				    0,
789				    NULL,
790				    0,
791				    ada_default_timeout*1000);
792
793		if (softc->flags & ADA_FLAG_CAN_48BIT)
794			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
795		else
796			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
797		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
798		    /*sense_flags*/0, softc->disk->d_devstat);
799
800		if (error != 0)
801			xpt_print(periph->path, "Synchronize cache failed\n");
802		else
803			softc->flags &= ~ADA_FLAG_DIRTY;
804		xpt_release_ccb(ccb);
805		cam_periph_unhold(periph);
806	}
807
808	softc->flags &= ~ADA_FLAG_OPEN;
809
810	while (softc->refcount != 0)
811		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "adaclose", 1);
812	cam_periph_unlock(periph);
813	cam_periph_release(periph);
814	return (0);
815}
816
817static void
818adaschedule(struct cam_periph *periph)
819{
820	struct ada_softc *softc = (struct ada_softc *)periph->softc;
821
822	if (softc->state != ADA_STATE_NORMAL)
823		return;
824
825	cam_iosched_schedule(softc->cam_iosched, periph);
826}
827
828/*
829 * Actually translate the requested transfer into one the physical driver
830 * can understand.  The transfer is described by a buf and will include
831 * only one physical transfer.
832 */
833static void
834adastrategy(struct bio *bp)
835{
836	struct cam_periph *periph;
837	struct ada_softc *softc;
838
839	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
840	softc = (struct ada_softc *)periph->softc;
841
842	cam_periph_lock(periph);
843
844	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastrategy(%p)\n", bp));
845
846	/*
847	 * If the device has been made invalid, error out
848	 */
849	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
850		cam_periph_unlock(periph);
851		biofinish(bp, NULL, ENXIO);
852		return;
853	}
854
855	/*
856	 * Place it in the queue of disk activities for this disk
857	 */
858	cam_iosched_queue_work(softc->cam_iosched, bp);
859
860	/*
861	 * Schedule ourselves for performing the work.
862	 */
863	adaschedule(periph);
864	cam_periph_unlock(periph);
865
866	return;
867}
868
869static int
870adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
871{
872	struct	    cam_periph *periph;
873	struct	    ada_softc *softc;
874	u_int	    secsize;
875	union	    ccb ccb;
876	struct	    disk *dp;
877	uint64_t    lba;
878	uint16_t    count;
879	int	    error = 0;
880
881	dp = arg;
882	periph = dp->d_drv1;
883	softc = (struct ada_softc *)periph->softc;
884	cam_periph_lock(periph);
885	secsize = softc->params.secsize;
886	lba = offset / secsize;
887	count = length / secsize;
888
889	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
890		cam_periph_unlock(periph);
891		return (ENXIO);
892	}
893
894	if (length > 0) {
895		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
896		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
897		cam_fill_ataio(&ccb.ataio,
898		    0,
899		    adadone,
900		    CAM_DIR_OUT,
901		    0,
902		    (u_int8_t *) virtual,
903		    length,
904		    ada_default_timeout*1000);
905		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
906		    (lba + count >= ATA_MAX_28BIT_LBA ||
907		    count >= 256)) {
908			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
909			    0, lba, count);
910		} else {
911			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
912			    0, lba, count);
913		}
914		xpt_polled_action(&ccb);
915
916		error = cam_periph_error(&ccb,
917		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
918		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
919			cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0,
920			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
921		if (error != 0)
922			printf("Aborting dump due to I/O error.\n");
923
924		cam_periph_unlock(periph);
925		return (error);
926	}
927
928	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
929		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
930
931		/*
932		 * Tell the drive to flush its internal cache. if we
933		 * can't flush in 5s we have big problems. No need to
934		 * wait the default 60s to detect problems.
935		 */
936		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
937		cam_fill_ataio(&ccb.ataio,
938				    0,
939				    adadone,
940				    CAM_DIR_NONE,
941				    0,
942				    NULL,
943				    0,
944				    5*1000);
945
946		if (softc->flags & ADA_FLAG_CAN_48BIT)
947			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
948		else
949			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
950		xpt_polled_action(&ccb);
951
952		error = cam_periph_error(&ccb,
953		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
954		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
955			cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0,
956			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
957		if (error != 0)
958			xpt_print(periph->path, "Synchronize cache failed\n");
959	}
960	cam_periph_unlock(periph);
961	return (error);
962}
963
964static void
965adainit(void)
966{
967	cam_status status;
968
969	/*
970	 * Install a global async callback.  This callback will
971	 * receive async callbacks like "new device found".
972	 */
973	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
974
975	if (status != CAM_REQ_CMP) {
976		printf("ada: Failed to attach master async callback "
977		       "due to status 0x%x!\n", status);
978	} else if (ada_send_ordered) {
979
980		/* Register our event handlers */
981		if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend,
982					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
983		    printf("adainit: power event registration failed!\n");
984		if ((EVENTHANDLER_REGISTER(power_resume, adaresume,
985					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
986		    printf("adainit: power event registration failed!\n");
987		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
988					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
989		    printf("adainit: shutdown event registration failed!\n");
990	}
991}
992
993/*
994 * Callback from GEOM, called when it has finished cleaning up its
995 * resources.
996 */
997static void
998adadiskgonecb(struct disk *dp)
999{
1000	struct cam_periph *periph;
1001
1002	periph = (struct cam_periph *)dp->d_drv1;
1003
1004	cam_periph_release(periph);
1005}
1006
1007static void
1008adaoninvalidate(struct cam_periph *periph)
1009{
1010	struct ada_softc *softc;
1011
1012	softc = (struct ada_softc *)periph->softc;
1013
1014	/*
1015	 * De-register any async callbacks.
1016	 */
1017	xpt_register_async(0, adaasync, periph, periph->path);
1018#ifdef CAM_IO_STATS
1019	softc->invalidations++;
1020#endif
1021
1022	/*
1023	 * Return all queued I/O with ENXIO.
1024	 * XXX Handle any transactions queued to the card
1025	 *     with XPT_ABORT_CCB.
1026	 */
1027	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);
1028
1029	disk_gone(softc->disk);
1030}
1031
1032static void
1033adacleanup(struct cam_periph *periph)
1034{
1035	struct ada_softc *softc;
1036
1037	softc = (struct ada_softc *)periph->softc;
1038
1039	cam_periph_unlock(periph);
1040
1041	cam_iosched_fini(softc->cam_iosched);
1042
1043	/*
1044	 * If we can't free the sysctl tree, oh well...
1045	 */
1046	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0) {
1047#ifdef CAM_IO_STATS
1048		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
1049			xpt_print(periph->path,
1050			    "can't remove sysctl stats context\n");
1051#endif
1052		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
1053			xpt_print(periph->path,
1054			    "can't remove sysctl context\n");
1055	}
1056
1057	disk_destroy(softc->disk);
1058	callout_drain(&softc->sendordered_c);
1059	free(softc, M_DEVBUF);
1060	cam_periph_lock(periph);
1061}
1062
1063static void
1064adasetdeletemethod(struct ada_softc *softc)
1065{
1066
1067	if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM)
1068		softc->delete_method = ADA_DELETE_NCQ_DSM_TRIM;
1069	else if (softc->flags & ADA_FLAG_CAN_TRIM)
1070		softc->delete_method = ADA_DELETE_DSM_TRIM;
1071	else if ((softc->flags & ADA_FLAG_CAN_CFA) && !(softc->flags & ADA_FLAG_CAN_48BIT))
1072		softc->delete_method = ADA_DELETE_CFA_ERASE;
1073	else
1074		softc->delete_method = ADA_DELETE_NONE;
1075}
1076
1077static void
1078adaasync(void *callback_arg, u_int32_t code,
1079	struct cam_path *path, void *arg)
1080{
1081	struct ccb_getdev cgd;
1082	struct cam_periph *periph;
1083	struct ada_softc *softc;
1084
1085	periph = (struct cam_periph *)callback_arg;
1086	switch (code) {
1087	case AC_FOUND_DEVICE:
1088	{
1089		struct ccb_getdev *cgd;
1090		cam_status status;
1091
1092		cgd = (struct ccb_getdev *)arg;
1093		if (cgd == NULL)
1094			break;
1095
1096		if (cgd->protocol != PROTO_ATA)
1097			break;
1098
1099		/*
1100		 * Allocate a peripheral instance for
1101		 * this device and start the probe
1102		 * process.
1103		 */
1104		status = cam_periph_alloc(adaregister, adaoninvalidate,
1105					  adacleanup, adastart,
1106					  "ada", CAM_PERIPH_BIO,
1107					  path, adaasync,
1108					  AC_FOUND_DEVICE, cgd);
1109
1110		if (status != CAM_REQ_CMP
1111		 && status != CAM_REQ_INPROG)
1112			printf("adaasync: Unable to attach to new device "
1113				"due to status 0x%x\n", status);
1114		break;
1115	}
1116	case AC_GETDEV_CHANGED:
1117	{
1118		softc = (struct ada_softc *)periph->softc;
1119		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1120		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1121		xpt_action((union ccb *)&cgd);
1122
1123		if ((cgd.ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
1124		    (cgd.inq_flags & SID_DMA))
1125			softc->flags |= ADA_FLAG_CAN_DMA;
1126		else
1127			softc->flags &= ~ADA_FLAG_CAN_DMA;
1128		if (cgd.ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
1129			softc->flags |= ADA_FLAG_CAN_48BIT;
1130			if (cgd.inq_flags & SID_DMA48)
1131				softc->flags |= ADA_FLAG_CAN_DMA48;
1132			else
1133				softc->flags &= ~ADA_FLAG_CAN_DMA48;
1134		} else
1135			softc->flags &= ~(ADA_FLAG_CAN_48BIT |
1136			    ADA_FLAG_CAN_DMA48);
1137		if ((cgd.ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
1138		    (cgd.inq_flags & SID_DMA) && (cgd.inq_flags & SID_CmdQue))
1139			softc->flags |= ADA_FLAG_CAN_NCQ;
1140		else
1141			softc->flags &= ~ADA_FLAG_CAN_NCQ;
1142
1143		if ((cgd.ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
1144		    (cgd.inq_flags & SID_DMA)) {
1145			softc->flags |= ADA_FLAG_CAN_TRIM;
1146			/*
1147			 * If we can do RCVSND_FPDMA_QUEUED commands, we may be able to do
1148			 * NCQ trims, if we support trims at all. We also need support from
1149			 * the sim do do things properly. Perhaps we should look at log 13
1150			 * dword 0 bit 0 and dword 1 bit 0 are set too...
1151			 */
1152			if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
1153			    (softc->flags & ADA_FLAG_PIM_CAN_NCQ_TRIM) != 0 &&
1154			    (cgd.ident_data.satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 &&
1155			    (softc->flags & ADA_FLAG_CAN_TRIM) != 0)
1156				softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
1157			else
1158				softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM;
1159		} else
1160			softc->flags &= ~(ADA_FLAG_CAN_TRIM | ADA_FLAG_CAN_NCQ_TRIM);
1161		adasetdeletemethod(softc);
1162
1163		cam_periph_async(periph, code, path, arg);
1164		break;
1165	}
1166	case AC_ADVINFO_CHANGED:
1167	{
1168		uintptr_t buftype;
1169
1170		buftype = (uintptr_t)arg;
1171		if (buftype == CDAI_TYPE_PHYS_PATH) {
1172			struct ada_softc *softc;
1173
1174			softc = periph->softc;
1175			disk_attr_changed(softc->disk, "GEOM::physpath",
1176					  M_NOWAIT);
1177		}
1178		break;
1179	}
1180	case AC_SENT_BDR:
1181	case AC_BUS_RESET:
1182	{
1183		softc = (struct ada_softc *)periph->softc;
1184		cam_periph_async(periph, code, path, arg);
1185		if (softc->state != ADA_STATE_NORMAL)
1186			break;
1187		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1188		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
1189		xpt_action((union ccb *)&cgd);
1190		if (ADA_RA >= 0 &&
1191		    cgd.ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD)
1192			softc->state = ADA_STATE_RAHEAD;
1193		else if (ADA_WC >= 0 &&
1194		    cgd.ident_data.support.command1 & ATA_SUPPORT_WRITECACHE)
1195			softc->state = ADA_STATE_WCACHE;
1196		else
1197		    break;
1198		if (cam_periph_acquire(periph) != CAM_REQ_CMP)
1199			softc->state = ADA_STATE_NORMAL;
1200		else
1201			xpt_schedule(periph, CAM_PRIORITY_DEV);
1202	}
1203	default:
1204		cam_periph_async(periph, code, path, arg);
1205		break;
1206	}
1207}
1208
1209static void
1210adasysctlinit(void *context, int pending)
1211{
1212	struct cam_periph *periph;
1213	struct ada_softc *softc;
1214	char tmpstr[80], tmpstr2[80];
1215
1216	periph = (struct cam_periph *)context;
1217
1218	/* periph was held for us when this task was enqueued */
1219	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
1220		cam_periph_release(periph);
1221		return;
1222	}
1223
1224	softc = (struct ada_softc *)periph->softc;
1225	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
1226	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
1227
1228	sysctl_ctx_init(&softc->sysctl_ctx);
1229	softc->flags |= ADA_FLAG_SCTX_INIT;
1230	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
1231		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
1232		CTLFLAG_RD, 0, tmpstr);
1233	if (softc->sysctl_tree == NULL) {
1234		printf("adasysctlinit: unable to allocate sysctl tree\n");
1235		cam_periph_release(periph);
1236		return;
1237	}
1238
1239	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1240		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RW,
1241		softc, 0, adadeletemethodsysctl, "A",
1242		"BIO_DELETE execution method");
1243	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1244		OID_AUTO, "read_ahead", CTLFLAG_RW | CTLFLAG_MPSAFE,
1245		&softc->read_ahead, 0, "Enable disk read ahead.");
1246	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1247		OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE,
1248		&softc->write_cache, 0, "Enable disk write cache.");
1249	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1250		OID_AUTO, "unmapped_io", CTLFLAG_RD | CTLFLAG_MPSAFE,
1251		&softc->unmappedio, 0, "Unmapped I/O leaf");
1252	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1253		OID_AUTO, "rotating", CTLFLAG_RD | CTLFLAG_MPSAFE,
1254		&softc->rotating, 0, "Rotating media");
1255#ifdef ADA_TEST_FAILURE
1256	/*
1257	 * Add a 'door bell' sysctl which allows one to set it from userland
1258	 * and cause something bad to happen.  For the moment, we only allow
1259	 * whacking the next read or write.
1260	 */
1261	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1262		OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1263		&softc->force_read_error, 0,
1264		"Force a read error for the next N reads.");
1265	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1266		OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1267		&softc->force_write_error, 0,
1268		"Force a write error for the next N writes.");
1269	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1270		OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1271		&softc->periodic_read_error, 0,
1272		"Force a read error every N reads (don't set too low).");
1273#endif
1274
1275#ifdef CAM_IO_STATS
1276	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
1277		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
1278		CTLFLAG_RD, 0, "Statistics");
1279	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
1280		SYSCTL_CHILDREN(softc->sysctl_stats_tree),
1281		OID_AUTO, "timeouts", CTLFLAG_RD | CTLFLAG_MPSAFE,
1282		&softc->timeouts, 0,
1283		"Device timeouts reported by the SIM");
1284	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
1285		SYSCTL_CHILDREN(softc->sysctl_stats_tree),
1286		OID_AUTO, "errors", CTLFLAG_RD | CTLFLAG_MPSAFE,
1287		&softc->errors, 0,
1288		"Transport errors reported by the SIM.");
1289	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
1290		SYSCTL_CHILDREN(softc->sysctl_stats_tree),
1291		OID_AUTO, "pack_invalidations", CTLFLAG_RD | CTLFLAG_MPSAFE,
1292		&softc->invalidations, 0,
1293		"Device pack invalidations.");
1294#endif
1295
1296	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
1297	    softc->sysctl_tree);
1298
1299	cam_periph_release(periph);
1300}
1301
1302static int
1303adagetattr(struct bio *bp)
1304{
1305	int ret;
1306	struct cam_periph *periph;
1307
1308	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1309	cam_periph_lock(periph);
1310	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1311	    periph->path);
1312	cam_periph_unlock(periph);
1313	if (ret == 0)
1314		bp->bio_completed = bp->bio_length;
1315	return ret;
1316}
1317
1318static int
1319adadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
1320{
1321	char buf[16];
1322	const char *p;
1323	struct ada_softc *softc;
1324	int i, error, value, methods;
1325
1326	softc = (struct ada_softc *)arg1;
1327
1328	value = softc->delete_method;
1329	if (value < 0 || value > ADA_DELETE_MAX)
1330		p = "UNKNOWN";
1331	else
1332		p = ada_delete_method_names[value];
1333	strncpy(buf, p, sizeof(buf));
1334	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
1335	if (error != 0 || req->newptr == NULL)
1336		return (error);
1337	methods = 1 << ADA_DELETE_DISABLE;
1338	if ((softc->flags & ADA_FLAG_CAN_CFA) &&
1339	    !(softc->flags & ADA_FLAG_CAN_48BIT))
1340		methods |= 1 << ADA_DELETE_CFA_ERASE;
1341	if (softc->flags & ADA_FLAG_CAN_TRIM)
1342		methods |= 1 << ADA_DELETE_DSM_TRIM;
1343	if (softc->flags & ADA_FLAG_CAN_NCQ_TRIM)
1344		methods |= 1 << ADA_DELETE_NCQ_DSM_TRIM;
1345	for (i = 0; i <= ADA_DELETE_MAX; i++) {
1346		if (!(methods & (1 << i)) ||
1347		    strcmp(buf, ada_delete_method_names[i]) != 0)
1348			continue;
1349		softc->delete_method = i;
1350		return (0);
1351	}
1352	return (EINVAL);
1353}
1354
1355static cam_status
1356adaregister(struct cam_periph *periph, void *arg)
1357{
1358	struct ada_softc *softc;
1359	struct ccb_pathinq cpi;
1360	struct ccb_getdev *cgd;
1361	char   announce_buf[80];
1362	struct disk_params *dp;
1363	caddr_t match;
1364	u_int maxio;
1365	int quirks;
1366
1367	cgd = (struct ccb_getdev *)arg;
1368	if (cgd == NULL) {
1369		printf("adaregister: no getdev CCB, can't register device\n");
1370		return(CAM_REQ_CMP_ERR);
1371	}
1372
1373	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
1374	    M_NOWAIT|M_ZERO);
1375
1376	if (softc == NULL) {
1377		printf("adaregister: Unable to probe new device. "
1378		    "Unable to allocate softc\n");
1379		return(CAM_REQ_CMP_ERR);
1380	}
1381
1382	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
1383		printf("adaregister: Unable to probe new device. "
1384		       "Unable to allocate iosched memory\n");
1385		return(CAM_REQ_CMP_ERR);
1386	}
1387
1388	if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
1389	    (cgd->inq_flags & SID_DMA))
1390		softc->flags |= ADA_FLAG_CAN_DMA;
1391	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
1392		softc->flags |= ADA_FLAG_CAN_48BIT;
1393		if (cgd->inq_flags & SID_DMA48)
1394			softc->flags |= ADA_FLAG_CAN_DMA48;
1395	}
1396	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
1397		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
1398	if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
1399		softc->flags |= ADA_FLAG_CAN_POWERMGT;
1400	if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
1401	    (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
1402		softc->flags |= ADA_FLAG_CAN_NCQ;
1403	if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
1404	    (cgd->inq_flags & SID_DMA)) {
1405		softc->flags |= ADA_FLAG_CAN_TRIM;
1406		softc->trim_max_ranges = TRIM_MAX_RANGES;
1407		if (cgd->ident_data.max_dsm_blocks != 0) {
1408			softc->trim_max_ranges =
1409			    min(cgd->ident_data.max_dsm_blocks *
1410				ATA_DSM_BLK_RANGES, softc->trim_max_ranges);
1411		}
1412	}
1413	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
1414		softc->flags |= ADA_FLAG_CAN_CFA;
1415
1416	adasetdeletemethod(softc);
1417
1418	periph->softc = softc;
1419
1420	/*
1421	 * See if this device has any quirks.
1422	 */
1423	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
1424			       (caddr_t)ada_quirk_table,
1425			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
1426			       sizeof(*ada_quirk_table), ata_identify_match);
1427	if (match != NULL)
1428		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
1429	else
1430		softc->quirks = ADA_Q_NONE;
1431
1432	bzero(&cpi, sizeof(cpi));
1433	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
1434	cpi.ccb_h.func_code = XPT_PATH_INQ;
1435	xpt_action((union ccb *)&cpi);
1436
1437	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
1438
1439	/*
1440	 * Register this media as a disk
1441	 */
1442	(void)cam_periph_hold(periph, PRIBIO);
1443	cam_periph_unlock(periph);
1444	snprintf(announce_buf, sizeof(announce_buf),
1445	    "kern.cam.ada.%d.quirks", periph->unit_number);
1446	quirks = softc->quirks;
1447	TUNABLE_INT_FETCH(announce_buf, &quirks);
1448	softc->quirks = quirks;
1449	softc->read_ahead = -1;
1450	snprintf(announce_buf, sizeof(announce_buf),
1451	    "kern.cam.ada.%d.read_ahead", periph->unit_number);
1452	TUNABLE_INT_FETCH(announce_buf, &softc->read_ahead);
1453	softc->write_cache = -1;
1454	snprintf(announce_buf, sizeof(announce_buf),
1455	    "kern.cam.ada.%d.write_cache", periph->unit_number);
1456	TUNABLE_INT_FETCH(announce_buf, &softc->write_cache);
1457	/* Disable queue sorting for non-rotational media by default. */
1458	if (cgd->ident_data.media_rotation_rate == ATA_RATE_NON_ROTATING) {
1459		softc->rotating = 0;
1460	} else {
1461		softc->rotating = 1;
1462	}
1463	cam_iosched_set_sort_queue(softc->cam_iosched,  softc->rotating ? -1 : 0);
1464	adagetparams(periph, cgd);
1465	softc->disk = disk_alloc();
1466	softc->disk->d_rotation_rate = cgd->ident_data.media_rotation_rate;
1467	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
1468			  periph->unit_number, softc->params.secsize,
1469			  DEVSTAT_ALL_SUPPORTED,
1470			  DEVSTAT_TYPE_DIRECT |
1471			  XPORT_DEVSTAT_TYPE(cpi.transport),
1472			  DEVSTAT_PRIORITY_DISK);
1473	softc->disk->d_open = adaopen;
1474	softc->disk->d_close = adaclose;
1475	softc->disk->d_strategy = adastrategy;
1476	softc->disk->d_getattr = adagetattr;
1477	softc->disk->d_dump = adadump;
1478	softc->disk->d_gone = adadiskgonecb;
1479	softc->disk->d_name = "ada";
1480	softc->disk->d_drv1 = periph;
1481	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
1482	if (maxio == 0)
1483		maxio = DFLTPHYS;	/* traditional default */
1484	else if (maxio > MAXPHYS)
1485		maxio = MAXPHYS;	/* for safety */
1486	if (softc->flags & ADA_FLAG_CAN_48BIT)
1487		maxio = min(maxio, 65536 * softc->params.secsize);
1488	else					/* 28bit ATA command limit */
1489		maxio = min(maxio, 256 * softc->params.secsize);
1490	softc->disk->d_maxsize = maxio;
1491	softc->disk->d_unit = periph->unit_number;
1492	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION;
1493	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
1494		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
1495	if (softc->flags & ADA_FLAG_CAN_TRIM) {
1496		softc->disk->d_flags |= DISKFLAG_CANDELETE;
1497		softc->disk->d_delmaxsize = softc->params.secsize *
1498					    ATA_DSM_RANGE_MAX *
1499					    softc->trim_max_ranges;
1500	} else if ((softc->flags & ADA_FLAG_CAN_CFA) &&
1501	    !(softc->flags & ADA_FLAG_CAN_48BIT)) {
1502		softc->disk->d_flags |= DISKFLAG_CANDELETE;
1503		softc->disk->d_delmaxsize = 256 * softc->params.secsize;
1504	} else
1505		softc->disk->d_delmaxsize = maxio;
1506	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
1507		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
1508		softc->unmappedio = 1;
1509	}
1510	/*
1511	 * If we can do RCVSND_FPDMA_QUEUED commands, we may be able to do
1512	 * NCQ trims, if we support trims at all. We also need support from
1513	 * the sim do do things properly. Perhaps we should look at log 13
1514	 * dword 0 bit 0 and dword 1 bit 0 are set too...
1515	 */
1516	if (cpi.hba_misc & PIM_NCQ_KLUDGE)
1517		softc->flags |= ADA_FLAG_PIM_CAN_NCQ_TRIM;
1518	if ((softc->quirks & ADA_Q_NCQ_TRIM_BROKEN) == 0 &&
1519	    (softc->flags & ADA_FLAG_PIM_CAN_NCQ_TRIM) != 0 &&
1520	    (cgd->ident_data.satacapabilities2 & ATA_SUPPORT_RCVSND_FPDMA_QUEUED) != 0 &&
1521	    (softc->flags & ADA_FLAG_CAN_TRIM) != 0)
1522		softc->flags |= ADA_FLAG_CAN_NCQ_TRIM;
1523	strlcpy(softc->disk->d_descr, cgd->ident_data.model,
1524	    MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
1525	strlcpy(softc->disk->d_ident, cgd->ident_data.serial,
1526	    MIN(sizeof(softc->disk->d_ident), sizeof(cgd->ident_data.serial)));
1527	softc->disk->d_hba_vendor = cpi.hba_vendor;
1528	softc->disk->d_hba_device = cpi.hba_device;
1529	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
1530	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
1531
1532	softc->disk->d_sectorsize = softc->params.secsize;
1533	softc->disk->d_mediasize = (off_t)softc->params.sectors *
1534	    softc->params.secsize;
1535	if (ata_physical_sector_size(&cgd->ident_data) !=
1536	    softc->params.secsize) {
1537		softc->disk->d_stripesize =
1538		    ata_physical_sector_size(&cgd->ident_data);
1539		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
1540		    ata_logical_sector_offset(&cgd->ident_data)) %
1541		    softc->disk->d_stripesize;
1542	} else if (softc->quirks & ADA_Q_4K) {
1543		softc->disk->d_stripesize = 4096;
1544		softc->disk->d_stripeoffset = 0;
1545	}
1546	softc->disk->d_fwsectors = softc->params.secs_per_track;
1547	softc->disk->d_fwheads = softc->params.heads;
1548	ata_disk_firmware_geom_adjust(softc->disk);
1549	adasetdeletemethod(softc);
1550
1551	/*
1552	 * Acquire a reference to the periph before we register with GEOM.
1553	 * We'll release this reference once GEOM calls us back (via
1554	 * adadiskgonecb()) telling us that our provider has been freed.
1555	 */
1556	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1557		xpt_print(periph->path, "%s: lost periph during "
1558			  "registration!\n", __func__);
1559		cam_periph_lock(periph);
1560		return (CAM_REQ_CMP_ERR);
1561	}
1562	disk_create(softc->disk, DISK_VERSION);
1563	cam_periph_lock(periph);
1564	cam_periph_unhold(periph);
1565
1566	dp = &softc->params;
1567	snprintf(announce_buf, sizeof(announce_buf),
1568	    "%juMB (%ju %u byte sectors)",
1569	    ((uintmax_t)dp->secsize * dp->sectors) / (1024 * 1024),
1570	    (uintmax_t)dp->sectors, dp->secsize);
1571	xpt_announce_periph(periph, announce_buf);
1572	xpt_announce_quirks(periph, softc->quirks, ADA_Q_BIT_STRING);
1573
1574	/*
1575	 * Create our sysctl variables, now that we know
1576	 * we have successfully attached.
1577	 */
1578	if (cam_periph_acquire(periph) == CAM_REQ_CMP)
1579		taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
1580
1581	/*
1582	 * Add async callbacks for bus reset and
1583	 * bus device reset calls.  I don't bother
1584	 * checking if this fails as, in most cases,
1585	 * the system will function just fine without
1586	 * them and the only alternative would be to
1587	 * not attach the device on failure.
1588	 */
1589	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
1590	    AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED,
1591	    adaasync, periph, periph->path);
1592
1593	/*
1594	 * Schedule a periodic event to occasionally send an
1595	 * ordered tag to a device.
1596	 */
1597	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
1598	callout_reset(&softc->sendordered_c,
1599	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
1600	    adasendorderedtag, softc);
1601
1602	if (ADA_RA >= 0 &&
1603	    cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) {
1604		softc->state = ADA_STATE_RAHEAD;
1605	} else if (ADA_WC >= 0 &&
1606	    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
1607		softc->state = ADA_STATE_WCACHE;
1608	} else {
1609		softc->state = ADA_STATE_NORMAL;
1610		return(CAM_REQ_CMP);
1611	}
1612	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
1613		softc->state = ADA_STATE_NORMAL;
1614	else
1615		xpt_schedule(periph, CAM_PRIORITY_DEV);
1616	return(CAM_REQ_CMP);
1617}
1618
1619static int
1620ada_dsmtrim_req_create(struct ada_softc *softc, struct bio *bp, struct trim_request *req)
1621{
1622	uint64_t lastlba = (uint64_t)-1;
1623	int c, lastcount = 0, off, ranges = 0;
1624
1625	bzero(req, sizeof(*req));
1626	TAILQ_INIT(&req->bps);
1627	do {
1628		uint64_t lba = bp->bio_pblkno;
1629		int count = bp->bio_bcount / softc->params.secsize;
1630
1631		/* Try to extend the previous range. */
1632		if (lba == lastlba) {
1633			c = min(count, ATA_DSM_RANGE_MAX - lastcount);
1634			lastcount += c;
1635			off = (ranges - 1) * ATA_DSM_RANGE_SIZE;
1636			req->data[off + 6] = lastcount & 0xff;
1637			req->data[off + 7] =
1638				(lastcount >> 8) & 0xff;
1639			count -= c;
1640			lba += c;
1641		}
1642
1643		while (count > 0) {
1644			c = min(count, ATA_DSM_RANGE_MAX);
1645			off = ranges * ATA_DSM_RANGE_SIZE;
1646			req->data[off + 0] = lba & 0xff;
1647			req->data[off + 1] = (lba >> 8) & 0xff;
1648			req->data[off + 2] = (lba >> 16) & 0xff;
1649			req->data[off + 3] = (lba >> 24) & 0xff;
1650			req->data[off + 4] = (lba >> 32) & 0xff;
1651			req->data[off + 5] = (lba >> 40) & 0xff;
1652			req->data[off + 6] = c & 0xff;
1653			req->data[off + 7] = (c >> 8) & 0xff;
1654			lba += c;
1655			count -= c;
1656			lastcount = c;
1657			ranges++;
1658			/*
1659			 * Its the caller's responsibility to ensure the
1660			 * request will fit so we don't need to check for
1661			 * overrun here
1662			 */
1663		}
1664		lastlba = lba;
1665		TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue);
1666
1667		bp = cam_iosched_next_trim(softc->cam_iosched);
1668		if (bp == NULL)
1669			break;
1670		if (bp->bio_bcount / softc->params.secsize >
1671		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
1672			cam_iosched_put_back_trim(softc->cam_iosched, bp);
1673			break;
1674		}
1675	} while (1);
1676
1677	return (ranges);
1678}
1679
1680static void
1681ada_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
1682{
1683	struct trim_request *req = &softc->trim_req;
1684	int ranges;
1685
1686	ranges = ada_dsmtrim_req_create(softc, bp, req);
1687	cam_fill_ataio(ataio,
1688	    ada_retry_count,
1689	    adadone,
1690	    CAM_DIR_OUT,
1691	    0,
1692	    req->data,
1693	    ((ranges + ATA_DSM_BLK_RANGES - 1) /
1694	    ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
1695	    ada_default_timeout * 1000);
1696	ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
1697	    ATA_DSM_TRIM, 0, (ranges + ATA_DSM_BLK_RANGES -
1698	    1) / ATA_DSM_BLK_RANGES);
1699}
1700
1701static void
1702ada_ncq_dsmtrim(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
1703{
1704	struct trim_request *req = &softc->trim_req;
1705	int ranges;
1706
1707	ranges = ada_dsmtrim_req_create(softc, bp, req);
1708	cam_fill_ataio(ataio,
1709	    ada_retry_count,
1710	    adadone,
1711	    CAM_DIR_OUT,
1712	    0,
1713	    req->data,
1714	    ((ranges + ATA_DSM_BLK_RANGES - 1) /
1715	    ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
1716	    ada_default_timeout * 1000);
1717	ata_ncq_cmd(ataio,
1718	    ATA_SEND_FPDMA_QUEUED,
1719	    0,
1720	    (ranges + ATA_DSM_BLK_RANGES - 1) / ATA_DSM_BLK_RANGES);
1721	ataio->cmd.sector_count_exp = ATA_SFPDMA_DSM;
1722	ataio->cmd.flags |= CAM_ATAIO_AUX_HACK;
1723}
1724
1725static void
1726ada_cfaerase(struct ada_softc *softc, struct bio *bp, struct ccb_ataio *ataio)
1727{
1728	struct trim_request *req = &softc->trim_req;
1729	uint64_t lba = bp->bio_pblkno;
1730	uint16_t count = bp->bio_bcount / softc->params.secsize;
1731
1732	bzero(req, sizeof(*req));
1733	TAILQ_INIT(&req->bps);
1734	TAILQ_INSERT_TAIL(&req->bps, bp, bio_queue);
1735
1736	cam_fill_ataio(ataio,
1737	    ada_retry_count,
1738	    adadone,
1739	    CAM_DIR_NONE,
1740	    0,
1741	    NULL,
1742	    0,
1743	    ada_default_timeout*1000);
1744
1745	if (count >= 256)
1746		count = 0;
1747	ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
1748}
1749
1750static void
1751adastart(struct cam_periph *periph, union ccb *start_ccb)
1752{
1753	struct ada_softc *softc = (struct ada_softc *)periph->softc;
1754	struct ccb_ataio *ataio = &start_ccb->ataio;
1755
1756	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastart\n"));
1757
1758	switch (softc->state) {
1759	case ADA_STATE_NORMAL:
1760	{
1761		struct bio *bp;
1762		u_int8_t tag_code;
1763
1764		bp = cam_iosched_next_bio(softc->cam_iosched);
1765		if (bp == NULL) {
1766			xpt_release_ccb(start_ccb);
1767			break;
1768		}
1769
1770		if ((bp->bio_flags & BIO_ORDERED) != 0 ||
1771		    (bp->bio_cmd != BIO_DELETE && (softc->flags & ADA_FLAG_NEED_OTAG) != 0)) {
1772			softc->flags &= ~ADA_FLAG_NEED_OTAG;
1773			softc->flags |= ADA_FLAG_WAS_OTAG;
1774			tag_code = 0;
1775		} else {
1776			tag_code = 1;
1777		}
1778		switch (bp->bio_cmd) {
1779		case BIO_WRITE:
1780		case BIO_READ:
1781		{
1782			uint64_t lba = bp->bio_pblkno;
1783			uint16_t count = bp->bio_bcount / softc->params.secsize;
1784			void *data_ptr;
1785			int rw_op;
1786
1787			if (bp->bio_cmd == BIO_WRITE) {
1788				softc->flags |= ADA_FLAG_DIRTY;
1789				rw_op = CAM_DIR_OUT;
1790			} else {
1791				rw_op = CAM_DIR_IN;
1792			}
1793
1794			data_ptr = bp->bio_data;
1795			if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
1796				rw_op |= CAM_DATA_BIO;
1797				data_ptr = bp;
1798			}
1799
1800#ifdef ADA_TEST_FAILURE
1801			int fail = 0;
1802
1803			/*
1804			 * Support the failure ioctls.  If the command is a
1805			 * read, and there are pending forced read errors, or
1806			 * if a write and pending write errors, then fail this
1807			 * operation with EIO.  This is useful for testing
1808			 * purposes.  Also, support having every Nth read fail.
1809			 *
1810			 * This is a rather blunt tool.
1811			 */
1812			if (bp->bio_cmd == BIO_READ) {
1813				if (softc->force_read_error) {
1814					softc->force_read_error--;
1815					fail = 1;
1816				}
1817				if (softc->periodic_read_error > 0) {
1818					if (++softc->periodic_read_count >=
1819					    softc->periodic_read_error) {
1820						softc->periodic_read_count = 0;
1821						fail = 1;
1822					}
1823				}
1824			} else {
1825				if (softc->force_write_error) {
1826					softc->force_write_error--;
1827					fail = 1;
1828				}
1829			}
1830			if (fail) {
1831				biofinish(bp, NULL, EIO);
1832				xpt_release_ccb(start_ccb);
1833				adaschedule(periph);
1834				return;
1835			}
1836#endif
1837			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
1838			    round_page(bp->bio_bcount + bp->bio_ma_offset) /
1839			    PAGE_SIZE == bp->bio_ma_n,
1840			    ("Short bio %p", bp));
1841			cam_fill_ataio(ataio,
1842			    ada_retry_count,
1843			    adadone,
1844			    rw_op,
1845			    tag_code,
1846			    data_ptr,
1847			    bp->bio_bcount,
1848			    ada_default_timeout*1000);
1849
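			/*
			 * Pick the command set: NCQ when the device supports
			 * it and the bio may be queued; 48-bit commands when
			 * the transfer reaches past the 28-bit LBA limit or
			 * moves more than 256 sectors; plain 28-bit commands
			 * otherwise, with a count of 256 encoded as 0.
			 */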
1850			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
1851				if (bp->bio_cmd == BIO_READ) {
1852					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
1853					    lba, count);
1854				} else {
1855					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
1856					    lba, count);
1857				}
1858			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
1859			    (lba + count >= ATA_MAX_28BIT_LBA ||
1860			    count > 256)) {
1861				if (softc->flags & ADA_FLAG_CAN_DMA48) {
1862					if (bp->bio_cmd == BIO_READ) {
1863						ata_48bit_cmd(ataio, ATA_READ_DMA48,
1864						    0, lba, count);
1865					} else {
1866						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
1867						    0, lba, count);
1868					}
1869				} else {
1870					if (bp->bio_cmd == BIO_READ) {
1871						ata_48bit_cmd(ataio, ATA_READ_MUL48,
1872						    0, lba, count);
1873					} else {
1874						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
1875						    0, lba, count);
1876					}
1877				}
1878			} else {
1879				if (count == 256)
1880					count = 0;
1881				if (softc->flags & ADA_FLAG_CAN_DMA) {
1882					if (bp->bio_cmd == BIO_READ) {
1883						ata_28bit_cmd(ataio, ATA_READ_DMA,
1884						    0, lba, count);
1885					} else {
1886						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
1887						    0, lba, count);
1888					}
1889				} else {
1890					if (bp->bio_cmd == BIO_READ) {
1891						ata_28bit_cmd(ataio, ATA_READ_MUL,
1892						    0, lba, count);
1893					} else {
1894						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
1895						    0, lba, count);
1896					}
1897				}
1898			}
1899			break;
1900		}
1901		case BIO_DELETE:
1902			switch (softc->delete_method) {
1903			case ADA_DELETE_NCQ_DSM_TRIM:
1904				ada_ncq_dsmtrim(softc, bp, ataio);
1905				break;
1906			case ADA_DELETE_DSM_TRIM:
1907				ada_dsmtrim(softc, bp, ataio);
1908				break;
1909			case ADA_DELETE_CFA_ERASE:
1910				ada_cfaerase(softc, bp, ataio);
1911				break;
1912			default:
1913				biofinish(bp, NULL, EOPNOTSUPP);
1914				xpt_release_ccb(start_ccb);
1915				adaschedule(periph);
1916				return;
1917			}
1918			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
1919			start_ccb->ccb_h.flags |= CAM_UNLOCKED;
1920			cam_iosched_submit_trim(softc->cam_iosched);
1921			goto out;
1922		case BIO_FLUSH:
1923			cam_fill_ataio(ataio,
1924			    1,
1925			    adadone,
1926			    CAM_DIR_NONE,
1927			    0,
1928			    NULL,
1929			    0,
1930			    ada_default_timeout*1000);
1931
1932			if (softc->flags & ADA_FLAG_CAN_48BIT)
1933				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1934			else
1935				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
1936			break;
1937		}
1938		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
1939		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
1940out:
1941		start_ccb->ccb_h.ccb_bp = bp;
1942		softc->outstanding_cmds++;
1943		softc->refcount++;
1944		cam_periph_unlock(periph);
1945		xpt_action(start_ccb);
1946		cam_periph_lock(periph);
1947		softc->refcount--;
1948
1949		/* May have more work to do, so ensure we stay scheduled */
1950		adaschedule(periph);
1951		break;
1952	}
1953	case ADA_STATE_RAHEAD:
1954	case ADA_STATE_WCACHE:
1955	{
1956		cam_fill_ataio(ataio,
1957		    1,
1958		    adadone,
1959		    CAM_DIR_NONE,
1960		    0,
1961		    NULL,
1962		    0,
1963		    ada_default_timeout*1000);
1964
1965		if (softc->state == ADA_STATE_RAHEAD) {
1966			ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_RA ?
1967			    ATA_SF_ENAB_RCACHE : ATA_SF_DIS_RCACHE, 0, 0);
1968			start_ccb->ccb_h.ccb_state = ADA_CCB_RAHEAD;
1969		} else {
1970			ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_WC ?
1971			    ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0);
1972			start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE;
1973		}
1974		start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1975		xpt_action(start_ccb);
1976		break;
1977	}
1978	}
1979}
1980
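/*
 * Completion callback for ada CCBs.  Buffer I/O and TRIM completions run
 * error recovery through adaerror(), update the I/O scheduler, and then
 * biodone() the bio (or every bio batched into the TRIM request).  RAHEAD
 * and WCACHE completions advance the read-ahead / write-cache setup state
 * machine and drop the CAM_DEV_QFREEZE taken when the command was issued.
 */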
1981static void
1982adadone(struct cam_periph *periph, union ccb *done_ccb)
1983{
1984	struct ada_softc *softc;
1985	struct ccb_ataio *ataio;
1986	struct ccb_getdev *cgd;
1987	struct cam_path *path;
1988	int state;
1989
1990	softc = (struct ada_softc *)periph->softc;
1991	ataio = &done_ccb->ataio;
1992	path = done_ccb->ccb_h.path;
1993
1994	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("adadone\n"));
1995
1996	state = ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK;
1997	switch (state) {
1998	case ADA_CCB_BUFFER_IO:
1999	case ADA_CCB_TRIM:
2000	{
2001		struct bio *bp;
2002		int error;
2003
2004		cam_periph_lock(periph);
2005		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
2006		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2007			error = adaerror(done_ccb, 0, 0);
2008			if (error == ERESTART) {
2009				/* A retry was scheduled, so just return. */
2010				cam_periph_unlock(periph);
2011				return;
2012			}
2013			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2014				cam_release_devq(path,
2015						 /*relsim_flags*/0,
2016						 /*reduction*/0,
2017						 /*timeout*/0,
2018						 /*getcount_only*/0);
2019			/*
2020			 * If we get an error on an NCQ DSM TRIM, fall back
2021			 * to a non-NCQ DSM TRIM forever. Please note that if
2022			 * CAN_NCQ_TRIM is set, CAN_TRIM is necessarily set too.
2023			 * However, for this one trim, we treat it as advisory
2024			 * and return success up the stack.
2025			 */
2026			if (state == ADA_CCB_TRIM &&
2027			    error != 0 &&
2028			    (softc->flags & ADA_FLAG_CAN_NCQ_TRIM) != 0) {
2029				softc->flags &= ~ADA_FLAG_CAN_NCQ_TRIM;
2030				error = 0;
2031				adasetdeletemethod(softc);
2032			}
2033		} else {
2034			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
2035				panic("REQ_CMP with QFRZN");
2036			error = 0;
2037		}
2038		bp->bio_error = error;
2039		if (error != 0) {
2040			bp->bio_resid = bp->bio_bcount;
2041			bp->bio_flags |= BIO_ERROR;
2042		} else {
2043			if (state == ADA_CCB_TRIM)
2044				bp->bio_resid = 0;
2045			else
2046				bp->bio_resid = ataio->resid;
2047			if (bp->bio_resid > 0)
2048				bp->bio_flags |= BIO_ERROR;
2049		}
2050		softc->outstanding_cmds--;
2051		if (softc->outstanding_cmds == 0)
2052			softc->flags |= ADA_FLAG_WAS_OTAG;
2053
2054		cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
2055		xpt_release_ccb(done_ccb);
2056		if (state == ADA_CCB_TRIM) {
2057			TAILQ_HEAD(, bio) queue;
2058			struct bio *bp1;
2059
2060			TAILQ_INIT(&queue);
2061			TAILQ_CONCAT(&queue, &softc->trim_req.bps, bio_queue);
2062			/*
2063			 * Normally, the xpt_release_ccb() above would make sure
2064			 * that when we have more work to do, that work would
2065			 * get kicked off. However, the I/O scheduler still counts
2066			 * this trim as in flight until cam_iosched_trim_done()
2067			 * below, which lets other I/O progress while many
2068			 * BIO_DELETE requests are pushed down. We mark the trim
2069			 * done and call adaschedule() again so that we don't stall
2070			 * if there are no other I/Os pending apart from BIO_DELETEs.
2071			 */
2072			cam_iosched_trim_done(softc->cam_iosched);
2073			adaschedule(periph);
2074			cam_periph_unlock(periph);
2075			while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
2076				TAILQ_REMOVE(&queue, bp1, bio_queue);
2077				bp1->bio_error = error;
2078				if (error != 0) {
2079					bp1->bio_flags |= BIO_ERROR;
2080					bp1->bio_resid = bp1->bio_bcount;
2081				} else
2082					bp1->bio_resid = 0;
2083				biodone(bp1);
2084			}
2085		} else {
2086			adaschedule(periph);
2087			cam_periph_unlock(periph);
2088			biodone(bp);
2089		}
2090		return;
2091	}
2092	case ADA_CCB_RAHEAD:
2093	{
2094		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2095			if (adaerror(done_ccb, 0, 0) == ERESTART) {
2096out:
2097				/* Drop freeze taken due to CAM_DEV_QFREEZE */
2098				cam_release_devq(path, 0, 0, 0, FALSE);
2099				return;
2100			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
2101				cam_release_devq(path,
2102				    /*relsim_flags*/0,
2103				    /*reduction*/0,
2104				    /*timeout*/0,
2105				    /*getcount_only*/0);
2106			}
2107		}
2108
2109		/*
2110		 * Since our peripheral may be invalidated by an error
2111		 * above or an external event, we must release our CCB
2112		 * before releasing the reference on the peripheral.
2113		 * The peripheral will only go away once the last reference
2114		 * is removed, and we need it around for the CCB release
2115		 * operation.
2116		 */
2117		cgd = (struct ccb_getdev *)done_ccb;
2118		xpt_setup_ccb(&cgd->ccb_h, path, CAM_PRIORITY_NORMAL);
2119		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
2120		xpt_action((union ccb *)cgd);
2121		if (ADA_WC >= 0 &&
2122		    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
2123			softc->state = ADA_STATE_WCACHE;
2124			xpt_release_ccb(done_ccb);
2125			xpt_schedule(periph, CAM_PRIORITY_DEV);
2126			goto out;
2127		}
2128		softc->state = ADA_STATE_NORMAL;
2129		xpt_release_ccb(done_ccb);
2130		/* Drop freeze taken due to CAM_DEV_QFREEZE */
2131		cam_release_devq(path, 0, 0, 0, FALSE);
2132		adaschedule(periph);
2133		cam_periph_release_locked(periph);
2134		return;
2135	}
2136	case ADA_CCB_WCACHE:
2137	{
2138		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2139			if (adaerror(done_ccb, 0, 0) == ERESTART) {
2140				goto out;
2141			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
2142				cam_release_devq(path,
2143				    /*relsim_flags*/0,
2144				    /*reduction*/0,
2145				    /*timeout*/0,
2146				    /*getcount_only*/0);
2147			}
2148		}
2149
2150		softc->state = ADA_STATE_NORMAL;
2151		/*
2152		 * Since our peripheral may be invalidated by an error
2153		 * above or an external event, we must release our CCB
2154		 * before releasing the reference on the peripheral.
2155		 * The peripheral will only go away once the last reference
2156		 * is removed, and we need it around for the CCB release
2157		 * operation.
2158		 */
2159		xpt_release_ccb(done_ccb);
2160		/* Drop freeze taken due to CAM_DEV_QFREEZE */
2161		cam_release_devq(path, 0, 0, 0, FALSE);
2162		adaschedule(periph);
2163		cam_periph_release_locked(periph);
2164		return;
2165	}
2166	case ADA_CCB_DUMP:
2167		/* No-op.  We're polling */
2168		return;
2169	default:
2170		break;
2171	}
2172	xpt_release_ccb(done_ccb);
2173}
2174
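/*
 * Error callback used by adadone() and cam_periph_runccb(): count the
 * failure for the optional CAM_IO_STATS sysctls, then let the generic
 * cam_periph_error() decide whether to retry or fail the request.
 */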
2175static int
2176adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
2177{
2178	struct ada_softc *softc;
2179	struct cam_periph *periph;
2180
2181	periph = xpt_path_periph(ccb->ccb_h.path);
2182	softc = (struct ada_softc *)periph->softc;
2183
2184	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
2185	case CAM_CMD_TIMEOUT:
2186#ifdef CAM_IO_STATS
2187		softc->timeouts++;
2188#endif
2189		break;
2190	case CAM_REQ_ABORTED:
2191	case CAM_REQ_CMP_ERR:
2192	case CAM_REQ_TERMIO:
2193	case CAM_UNREC_HBA_ERROR:
2194	case CAM_DATA_RUN_ERR:
2195	case CAM_ATA_STATUS_ERROR:
2196#ifdef CAM_IO_STATS
2197		softc->errors++;
2198#endif
2199		break;
2200	default:
2201		break;
2202	}
2203
2204	return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
2205}
2206
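/*
 * Fill in softc->params from the ATA IDENTIFY data: logical sector size,
 * CHS geometry, and capacity, preferring the 48-bit LBA count when the
 * device supports 48-bit addressing, then the 28-bit LBA count, and
 * falling back to the (current) CHS values otherwise.
 */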
2207static void
2208adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
2209{
2210	struct ada_softc *softc = (struct ada_softc *)periph->softc;
2211	struct disk_params *dp = &softc->params;
2212	u_int64_t lbasize48;
2213	u_int32_t lbasize;
2214
2215	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
2216	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
2217		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
2218		dp->heads = cgd->ident_data.current_heads;
2219		dp->secs_per_track = cgd->ident_data.current_sectors;
2220		dp->cylinders = cgd->ident_data.cylinders;
2221		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
2222			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
2223	} else {
2224		dp->heads = cgd->ident_data.heads;
2225		dp->secs_per_track = cgd->ident_data.sectors;
2226		dp->cylinders = cgd->ident_data.cylinders;
2227		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
2228	}
2229	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
2230		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
2231
2232	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
2233	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
2234		dp->sectors = lbasize;
2235
2236	/* use the 48bit LBA size if valid */
2237	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
2238		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
2239		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
2240		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
2241	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
2242	    lbasize48 > ATA_MAX_28BIT_LBA)
2243		dp->sectors = lbasize48;
2244}
2245
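/*
 * Periodic callout.  If ordered tags are enabled and commands are
 * outstanding but none was issued as ordered during the last interval,
 * request that the next I/O be sent untagged so it acts as a barrier.
 */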
2246static void
2247adasendorderedtag(void *arg)
2248{
2249	struct ada_softc *softc = arg;
2250
2251	if (ada_send_ordered) {
2252		if (softc->outstanding_cmds > 0) {
2253			if ((softc->flags & ADA_FLAG_WAS_OTAG) == 0)
2254				softc->flags |= ADA_FLAG_NEED_OTAG;
2255			softc->flags &= ~ADA_FLAG_WAS_OTAG;
2256		}
2257	}
2258	/* Queue us up again */
2259	callout_reset(&softc->sendordered_c,
2260	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
2261	    adasendorderedtag, softc);
2262}
2263
2264/*
2265 * Step through all ADA peripheral drivers, and if the device is still open,
2266 * sync the disk cache to physical media.
2267 */
2268static void
2269adaflush(void)
2270{
2271	struct cam_periph *periph;
2272	struct ada_softc *softc;
2273	union ccb *ccb;
2274	int error;
2275
2276	CAM_PERIPH_FOREACH(periph, &adadriver) {
2277		softc = (struct ada_softc *)periph->softc;
2278		if (SCHEDULER_STOPPED()) {
2279			/* If we panicked with the lock held, do not recurse. */
2280			if (!cam_periph_owned(periph) &&
2281			    (softc->flags & ADA_FLAG_OPEN)) {
2282				adadump(softc->disk, NULL, 0, 0, 0);
2283			}
2284			continue;
2285		}
2286		cam_periph_lock(periph);
2287		/*
2288		 * We only sync the cache if the drive is still open and
2289		 * capable of it.
2290		 */
2291		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
2292		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
2293			cam_periph_unlock(periph);
2294			continue;
2295		}
2296
2297		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
2298		cam_fill_ataio(&ccb->ataio,
2299				    0,
2300				    adadone,
2301				    CAM_DIR_NONE,
2302				    0,
2303				    NULL,
2304				    0,
2305				    ada_default_timeout*1000);
2306		if (softc->flags & ADA_FLAG_CAN_48BIT)
2307			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
2308		else
2309			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
2310
2311		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
2312		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
2313		    softc->disk->d_devstat);
2314		if (error != 0)
2315			xpt_print(periph->path, "Synchronize cache failed\n");
2316		xpt_release_ccb(ccb);
2317		cam_periph_unlock(periph);
2318	}
2319}
2320
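/*
 * Send the given power-management command (STANDBY IMMEDIATE or SLEEP)
 * synchronously to every ada device that advertises power management,
 * optionally freezing the device queue via the flags argument.
 */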
2321static void
2322adaspindown(uint8_t cmd, int flags)
2323{
2324	struct cam_periph *periph;
2325	struct ada_softc *softc;
2326	union ccb *ccb;
2327	int error;
2328
2329	CAM_PERIPH_FOREACH(periph, &adadriver) {
2330		/* If we panicked with the lock held, do not recurse here. */
2331		if (cam_periph_owned(periph))
2332			continue;
2333		cam_periph_lock(periph);
2334		softc = (struct ada_softc *)periph->softc;
2335		/*
2336		 * We only spin down the drive if it is capable of it.
2337		 */
2338		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
2339			cam_periph_unlock(periph);
2340			continue;
2341		}
2342
2343		if (bootverbose)
2344			xpt_print(periph->path, "spin-down\n");
2345
2346		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
2347		cam_fill_ataio(&ccb->ataio,
2348				    0,
2349				    adadone,
2350				    CAM_DIR_NONE | flags,
2351				    0,
2352				    NULL,
2353				    0,
2354				    ada_default_timeout*1000);
2355		ata_28bit_cmd(&ccb->ataio, cmd, 0, 0, 0);
2356
2357		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
2358		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
2359		    softc->disk->d_devstat);
2360		if (error != 0)
2361			xpt_print(periph->path, "Spin-down disk failed\n");
2362		xpt_release_ccb(ccb);
2363		cam_periph_unlock(periph);
2364	}
2365}
2366
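/*
 * Shutdown hook: flush write caches and, if enabled, spin drives down on
 * halt or power-off.
 */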
2367static void
2368adashutdown(void *arg, int howto)
2369{
2370
2371	adaflush();
2372	if (ada_spindown_shutdown != 0 &&
2373	    (howto & (RB_HALT | RB_POWEROFF)) != 0)
2374		adaspindown(ATA_STANDBY_IMMEDIATE, 0);
2375}
2376
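/*
 * Suspend hook: flush write caches and, if enabled, put drives to sleep.
 * CAM_DEV_QFREEZE keeps the device queue frozen until adaresume().
 */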
2377static void
2378adasuspend(void *arg)
2379{
2380
2381	adaflush();
2382	if (ada_spindown_suspend != 0)
2383		adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE);
2384}
2385
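/*
 * Resume hook: release the device-queue freeze taken when the drives were
 * put to sleep in adasuspend().
 */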
2386static void
2387adaresume(void *arg)
2388{
2389	struct cam_periph *periph;
2390	struct ada_softc *softc;
2391
2392	if (ada_spindown_suspend == 0)
2393		return;
2394
2395	CAM_PERIPH_FOREACH(periph, &adadriver) {
2396		cam_periph_lock(periph);
2397		softc = (struct ada_softc *)periph->softc;
2398		/*
2399		 * Only drives capable of power management were put to sleep.
2400		 */
2401		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
2402			cam_periph_unlock(periph);
2403			continue;
2404		}
2405
2406		if (bootverbose)
2407			xpt_print(periph->path, "resume\n");
2408
2409		/*
2410		 * Drop freeze taken due to CAM_DEV_QFREEZE flag set on
2411		 * sleep request.
2412		 */
2413		cam_release_devq(periph->path,
2414			 /*relsim_flags*/0,
2415			 /*openings*/0,
2416			 /*timeout*/0,
2417			 /*getcount_only*/0);
2418
2419		cam_periph_unlock(periph);
2420	}
2421}
2422
2423#endif /* _KERNEL */
2424