/* scsi_da.c, stable/11 revision 350804 */
1/*-
2 * Implementation of SCSI Direct Access Peripheral driver for CAM.
3 *
4 * Copyright (c) 1997 Justin T. Gibbs.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions, and the following disclaimer,
12 *    without modification, immediately at the beginning of the file.
13 * 2. The name of the author may not be used to endorse or promote products
14 *    derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD: stable/11/sys/cam/scsi/scsi_da.c 350804 2019-08-08 22:16:19Z mav $");
31
32#include <sys/param.h>
33
34#ifdef _KERNEL
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/taskqueue.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/conf.h>
43#include <sys/devicestat.h>
44#include <sys/eventhandler.h>
45#include <sys/malloc.h>
46#include <sys/cons.h>
47#include <sys/endian.h>
48#include <sys/proc.h>
49#include <sys/sbuf.h>
50#include <geom/geom.h>
51#include <geom/geom_disk.h>
52#endif /* _KERNEL */
53
54#ifndef _KERNEL
55#include <stdio.h>
56#include <string.h>
57#endif /* _KERNEL */
58
59#include <cam/cam.h>
60#include <cam/cam_ccb.h>
61#include <cam/cam_periph.h>
62#include <cam/cam_xpt_periph.h>
63#include <cam/cam_sim.h>
64#include <cam/cam_iosched.h>
65
66#include <cam/scsi/scsi_message.h>
67#include <cam/scsi/scsi_da.h>
68
69#ifdef _KERNEL
/*
 * Probe state machine states for a da(4) peripheral.
 *
 * Note that there are probe ordering dependencies here.  The order isn't
 * controlled by this enumeration, but by explicit state transitions in
 * dastart() and dadone().  Here are some of the dependencies:
 *
 * 1. RC should come first, before RC16, unless there is evidence that RC16
 *    is supported.
 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
 * 3. The ATA probes should go in this order:
 *    ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
 */
typedef enum {
	DA_STATE_PROBE_WP,		/* write-protect status probe */
	DA_STATE_PROBE_RC,		/* READ CAPACITY(10) */
	DA_STATE_PROBE_RC16,		/* READ CAPACITY(16) */
	DA_STATE_PROBE_LBP,		/* logical block provisioning probe */
	DA_STATE_PROBE_BLK_LIMITS,	/* block limits probe */
	DA_STATE_PROBE_BDC,		/* block device characteristics probe */
	DA_STATE_PROBE_ATA,		/* ATA identify (via SAT) */
	DA_STATE_PROBE_ATA_LOGDIR,	/* ATA log directory */
	DA_STATE_PROBE_ATA_IDDIR,	/* ATA identify-data log directory */
	DA_STATE_PROBE_ATA_SUP,		/* ATA supported capabilities */
	DA_STATE_PROBE_ATA_ZONE,	/* ATA zoned-device information */
	DA_STATE_PROBE_ZONE,		/* SCSI zoned-device information */
	DA_STATE_NORMAL			/* probing done; normal operation */
} da_state;
96
/*
 * Per-device flag bits (da_softc.flags).
 *
 * NOTE(review): bit 0x000010 is unassigned — looks like a historical gap
 * left by a removed flag; confirm before reusing, and do not renumber
 * the existing bits.
 */
typedef enum {
	DA_FLAG_PACK_INVALID	= 0x000001, /* media pack state is invalid */
	DA_FLAG_NEW_PACK	= 0x000002, /* a new media pack was loaded */
	DA_FLAG_PACK_LOCKED	= 0x000004, /* media removal is prevented */
	DA_FLAG_PACK_REMOVABLE	= 0x000008, /* device has removable media */
	DA_FLAG_NEED_OTAG	= 0x000020, /* issue next cmd w/ ordered tag */
	DA_FLAG_WAS_OTAG	= 0x000040, /* last cmd used an ordered tag */
	DA_FLAG_RETRY_UA	= 0x000080, /* retry after unit attention */
	DA_FLAG_OPEN		= 0x000100, /* device is open */
	DA_FLAG_SCTX_INIT	= 0x000200, /* sysctl context initialized */
	DA_FLAG_CAN_RC16	= 0x000400, /* READ CAPACITY(16) usable */
	DA_FLAG_PROBED		= 0x000800, /* initial probing completed */
	DA_FLAG_DIRTY		= 0x001000, /* media written; cache may be dirty */
	DA_FLAG_ANNOUNCED	= 0x002000, /* device has been announced */
	DA_FLAG_CAN_ATA_DMA	= 0x004000, /* ATA passthrough DMA works */
	DA_FLAG_CAN_ATA_LOG	= 0x008000, /* ATA log directory readable */
	DA_FLAG_CAN_ATA_IDLOG	= 0x010000, /* ATA identify log readable */
	DA_FLAG_CAN_ATA_SUPCAP	= 0x020000, /* ATA supported caps readable */
	DA_FLAG_CAN_ATA_ZONE	= 0x040000  /* ATA zone info readable */
} da_flags;
117
/*
 * Device quirk bits, matched from da_quirk_table below.  Bit positions
 * must stay in sync with DA_Q_BIT_STRING.
 */
typedef enum {
	DA_Q_NONE		= 0x00,	/* no quirks */
	DA_Q_NO_SYNC_CACHE	= 0x01,	/* device chokes on SYNCHRONIZE CACHE */
	DA_Q_NO_6_BYTE		= 0x02,	/* 6-byte CDBs don't work; use longer */
	DA_Q_NO_PREVENT		= 0x04,	/* skip PREVENT ALLOW MEDIUM REMOVAL */
	DA_Q_4K			= 0x08,	/* 4K-sector Advanced Format device */
	DA_Q_NO_RC16		= 0x10,	/* READ CAPACITY(16) lies or fails */
	DA_Q_NO_UNMAP		= 0x20,	/* UNMAP is broken (e.g. hangs) */
	DA_Q_RETRY_BUSY		= 0x40,	/* retry commands that return BUSY */
	DA_Q_SMR_DM		= 0x80,	/* treat as drive-managed SMR */
	DA_Q_STRICT_UNMAP	= 0x100, /* UNMAP must honor gran/alignment */
	DA_Q_128KB		= 0x200	/* limit I/O size to 128KB */
} da_quirks;
131
/*
 * printf(9) "%b" description string for da_quirks: the leading \020
 * selects base-16 output for the value, then each \NNN byte is a
 * 1-based bit number followed by that bit's name.  Keep in sync with
 * the da_quirks bit assignments above.
 */
#define DA_Q_BIT_STRING		\
	"\020"			\
	"\001NO_SYNC_CACHE"	\
	"\002NO_6_BYTE"		\
	"\003NO_PREVENT"	\
	"\0044K"		\
	"\005NO_RC16"		\
	"\006NO_UNMAP"		\
	"\007RETRY_BUSY"	\
	"\010SMR_DM"		\
	"\011STRICT_UNMAP"	\
	"\012128KB"
144
/*
 * Per-CCB state stored in the CCB's ccb_state private field.  The low
 * five bits (DA_CCB_TYPE_MASK) identify the kind of command the CCB
 * carries; DA_CCB_RETRY_UA is a flag OR'd on top of the type.  Values
 * 0x08 and 0x09 are currently unassigned.
 */
typedef enum {
	DA_CCB_PROBE_RC		= 0x01,	/* READ CAPACITY(10) probe */
	DA_CCB_PROBE_RC16	= 0x02,	/* READ CAPACITY(16) probe */
	DA_CCB_PROBE_LBP	= 0x03,	/* logical block provisioning probe */
	DA_CCB_PROBE_BLK_LIMITS	= 0x04,	/* block limits probe */
	DA_CCB_PROBE_BDC	= 0x05,	/* block device characteristics probe */
	DA_CCB_PROBE_ATA	= 0x06,	/* ATA identify probe */
	DA_CCB_BUFFER_IO	= 0x07,	/* normal read/write I/O */
	DA_CCB_DUMP		= 0x0A,	/* crash dump I/O */
	DA_CCB_DELETE		= 0x0B,	/* BIO_DELETE (TRIM/UNMAP/WS) */
	DA_CCB_TUR		= 0x0C,	/* TEST UNIT READY */
	DA_CCB_PROBE_ZONE	= 0x0D,	/* SCSI zone probe */
	DA_CCB_PROBE_ATA_LOGDIR	= 0x0E,	/* ATA log directory probe */
	DA_CCB_PROBE_ATA_IDDIR	= 0x0F,	/* ATA identify log dir probe */
	DA_CCB_PROBE_ATA_SUP	= 0x10,	/* ATA supported caps probe */
	DA_CCB_PROBE_ATA_ZONE	= 0x11,	/* ATA zone probe */
	DA_CCB_PROBE_WP		= 0x12,	/* write-protect probe */
	DA_CCB_TYPE_MASK	= 0x1F,	/* mask extracting the type above */
	DA_CCB_RETRY_UA		= 0x20	/* flag: retry after unit attention */
} da_ccb_state;
165
/*
 * BIO_DELETE implementation methods, in order of decreasing preference.
 *
 * Order here is important for method choice.
 *
 * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
 * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted 20% quicker deletes
 * using ATA_TRIM than the corresponding UNMAP results for a real world mysql
 * import taking 5mins.
 *
 * DA_DELETE_MIN/MAX bracket the methods that actually issue commands
 * (NONE and DISABLE are excluded).
 */
typedef enum {
	DA_DELETE_NONE,			/* device has no delete capability */
	DA_DELETE_DISABLE,		/* deletes administratively disabled */
	DA_DELETE_ATA_TRIM,		/* ATA DSM TRIM via passthrough */
	DA_DELETE_UNMAP,		/* SCSI UNMAP */
	DA_DELETE_WS16,			/* WRITE SAME(16) with UNMAP bit */
	DA_DELETE_WS10,			/* WRITE SAME(10) with UNMAP bit */
	DA_DELETE_ZERO,			/* WRITE SAME zeroing, no UNMAP */
	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,
	DA_DELETE_MAX = DA_DELETE_ZERO
} da_delete_methods;
186
/*
 * Zoned block device (SMR) models.
 *
 * For SCSI, host managed drives show up as a separate device type.  For
 * ATA, host managed drives also have a different device signature.
 * XXX KDM figure out the ATA host managed signature.
 */
typedef enum {
	DA_ZONE_NONE		= 0x00,	/* conventional, non-zoned device */
	DA_ZONE_DRIVE_MANAGED	= 0x01,	/* drive hides zones from the host */
	DA_ZONE_HOST_AWARE	= 0x02,	/* zoned, tolerates random writes */
	DA_ZONE_HOST_MANAGED	= 0x03	/* zoned, host must write sequentially */
} da_zone_mode;
198
/*
 * We distinguish between these interface cases in addition to the drive type:
 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
 * o ATA drive behind a SCSI translation layer that does not know about
 *   ZBC/ZAC, and so needs to be managed via ATA passthrough.  In this
 *   case, we would need to share the ATA code with the ada(4) driver.
 * o SCSI drive.
 */
typedef enum {
	DA_ZONE_IF_SCSI,	/* native SCSI zoned device */
	DA_ZONE_IF_ATA_PASS,	/* ATA device driven via ATA passthrough */
	DA_ZONE_IF_ATA_SAT,	/* ATA device behind a ZBC/ZAC-aware SAT */
} da_zone_interface;
212
/*
 * Zone capability and validity flags discovered during probing.  The
 * *_SUP bits record which zone commands the device supports
 * (SUP_MASK collects them); the *_SET bits record which of the
 * optimal/max zone counts in da_softc are valid (SET_MASK collects
 * those).
 */
typedef enum {
	DA_ZONE_FLAG_RZ_SUP		= 0x0001, /* REPORT ZONES supported */
	DA_ZONE_FLAG_OPEN_SUP		= 0x0002, /* OPEN ZONE supported */
	DA_ZONE_FLAG_CLOSE_SUP		= 0x0004, /* CLOSE ZONE supported */
	DA_ZONE_FLAG_FINISH_SUP		= 0x0008, /* FINISH ZONE supported */
	DA_ZONE_FLAG_RWP_SUP		= 0x0010, /* RESET WRITE PTR supported */
	DA_ZONE_FLAG_SUP_MASK		= (DA_ZONE_FLAG_RZ_SUP |
					   DA_ZONE_FLAG_OPEN_SUP |
					   DA_ZONE_FLAG_CLOSE_SUP |
					   DA_ZONE_FLAG_FINISH_SUP |
					   DA_ZONE_FLAG_RWP_SUP),
	DA_ZONE_FLAG_URSWRZ		= 0x0020, /* unrestricted reads of
						     sequential-write zones */
	DA_ZONE_FLAG_OPT_SEQ_SET	= 0x0040, /* optimal_seq_zones valid */
	DA_ZONE_FLAG_OPT_NONSEQ_SET	= 0x0080, /* optimal_nonseq_zones valid */
	DA_ZONE_FLAG_MAX_SEQ_SET	= 0x0100, /* max_seq_zones valid */
	DA_ZONE_FLAG_SET_MASK		= (DA_ZONE_FLAG_OPT_SEQ_SET |
					   DA_ZONE_FLAG_OPT_NONSEQ_SET |
					   DA_ZONE_FLAG_MAX_SEQ_SET)
} da_zone_flags;
232
/*
 * Printable names for the zone command support bits; each entry maps one
 * DA_ZONE_FLAG_*_SUP bit to a human-readable command name.
 */
static struct da_zone_desc {
	da_zone_flags value;	/* the flag bit described */
	const char *desc;	/* printable name for that capability */
} da_zone_desc_table[] = {
	{DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
	{DA_ZONE_FLAG_OPEN_SUP, "Open" },
	{DA_ZONE_FLAG_CLOSE_SUP, "Close" },
	{DA_ZONE_FLAG_FINISH_SUP, "Finish" },
	{DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
};
243
244typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
245			      struct bio *bp);
246static da_delete_func_t da_delete_trim;
247static da_delete_func_t da_delete_unmap;
248static da_delete_func_t da_delete_ws;
249
250static const void * da_delete_functions[] = {
251	NULL,
252	NULL,
253	da_delete_trim,
254	da_delete_unmap,
255	da_delete_ws,
256	da_delete_ws,
257	da_delete_ws
258};
259
/*
 * Short names and human-readable descriptions for each delete method,
 * both indexed by da_delete_methods.  Keep all three in sync.
 */
static const char *da_delete_method_names[] =
    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
static const char *da_delete_method_desc[] =
    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
      "WRITE SAME(10) with UNMAP", "ZERO" };
265
/*
 * Offsets into our private area for storing information: map the CCB's
 * generic peripheral-private fields to da(4)-specific names.
 */
#define ccb_state	ppriv_field0	/* holds a da_ccb_state value */
#define ccb_bp		ppriv_ptr1	/* associated struct bio, if any */
269
/*
 * Cached media geometry and capacity, filled in from probe results.
 */
struct disk_params {
	u_int8_t  heads;	/* Number of heads (fictitious geometry) */
	u_int32_t cylinders;	/* Number of cylinders */
	u_int8_t  secs_per_track; /* Sectors per track */
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int64_t sectors;	/* total number sectors */
	u_int     stripesize;	/* Stripe (physical block) size in bytes */
	u_int     stripeoffset; /* Offset of first full stripe */
};
279
/*
 * SCSI UNMAP parameter-list layout: an 8-byte header followed by 16-byte
 * block descriptors, each covering at most UNMAP_RANGE_MAX blocks.
 * UNMAP_BUF_SIZE sizes the staging buffer in da_softc.
 */
#define UNMAP_RANGE_MAX		0xffffffff
#define UNMAP_HEAD_SIZE		8
#define UNMAP_RANGE_SIZE	16
#define UNMAP_MAX_RANGES	2048 /* Protocol Max is 4095 */
#define UNMAP_BUF_SIZE		((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
				UNMAP_HEAD_SIZE)

/* Maximum block counts addressable by WRITE SAME(10) and (16). */
#define WS10_MAX_BLKS		0xffff
#define WS16_MAX_BLKS		0xffffffff
/* ATA DSM TRIM ranges that fit in the same unmap_buf staging buffer. */
#define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)

/* Work flag bit: a TEST UNIT READY is wanted.  NOTE(review): appears to
 * be a cam_iosched work-flag bit chosen above the low-order flags;
 * confirm against the TUR scheduling code. */
#define DA_WORK_TUR		(1 << 16)
293
/*
 * Per-instance soft state for a da(4) peripheral.  NOTE(review): fields
 * are presumably protected by the periph lock per CAM convention —
 * confirm against dastart()/dadone() before relying on that.
 */
struct da_softc {
	struct   cam_iosched_softc *cam_iosched; /* I/O scheduler state */
	struct	 bio_queue_head delete_run_queue; /* bios making up the
						     delete in flight */
	LIST_HEAD(, ccb_hdr) pending_ccbs;	/* CCBs issued, not completed */
	int	 refcount;		/* Active xpt_action() calls */
	da_state state;			/* probe state machine position */
	da_flags flags;			/* DA_FLAG_* bits */
	da_quirks quirks;		/* DA_Q_* bits from the quirk table */
	int	 minimum_cmd_size;	/* smallest CDB size to use */
	int	 error_inject;		/* error injection control */
	int	 trim_max_ranges;	/* max ranges per ATA TRIM request */
	int	 delete_available;	/* Delete methods possibly available */
	da_zone_mode 			zone_mode;	/* none/DM/HA/HM */
	da_zone_interface		zone_interface;	/* SCSI/SAT/passthru */
	da_zone_flags			zone_flags;	/* zone cmd support */
	struct ata_gp_log_dir		ata_logdir;	/* cached ATA log dir */
	int				valid_logdir_len; /* valid bytes above */
	struct ata_identify_log_pages	ata_iddir;	/* cached ATA id log */
	int				valid_iddir_len; /* valid bytes above */
	uint64_t			optimal_seq_zones; /* zone limits, only */
	uint64_t			optimal_nonseq_zones; /* valid when the */
	uint64_t			max_seq_zones;	/* *_SET flag is set */
	u_int	 		maxio;		/* max I/O size, bytes */
	uint32_t		unmap_max_ranges; /* max ranges per UNMAP */
	uint32_t		unmap_max_lba; /* Max LBAs in UNMAP req */
	uint32_t		unmap_gran;	/* UNMAP granularity, blocks */
	uint32_t		unmap_gran_align; /* UNMAP alignment, blocks */
	uint64_t		ws_max_blks;	/* max blocks per WRITE SAME */
	da_delete_methods	delete_method_pref; /* preferred method */
	da_delete_methods	delete_method;	/* method currently in use */
	da_delete_func_t	*delete_func;	/* handler for delete_method */
	int			unmappedio;	/* unmapped I/O enabled */
	int			rotating;	/* nonzero for spinning media */
	struct	 disk_params params;	/* cached geometry/capacity */
	struct	 disk *disk;		/* GEOM disk(9) instance */
	union	 ccb saved_ccb;		/* CCB saved during recovery */
	struct task		sysctl_task;	/* deferred sysctl setup */
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;	/* ordered-tag timer */
	uint64_t wwpn;			/* World Wide Port Name, if known */
	uint8_t	 unmap_buf[UNMAP_BUF_SIZE]; /* UNMAP/TRIM staging buffer */
	struct scsi_read_capacity_data_long rcaplong; /* last RC16 data */
	struct callout		mediapoll_c;	/* media polling timer */
#ifdef CAM_IO_STATS
	struct sysctl_ctx_list	sysctl_stats_ctx;
	struct sysctl_oid	*sysctl_stats_tree;
	u_int	errors;		/* error count, exported via sysctl */
	u_int	timeouts;	/* timeout count */
	u_int	invalidations;	/* invalidation count */
#endif
};
346
/*
 * Set or clear the "delete method possibly available" bit for
 * delete_method in softc->delete_available.  Wrapped in do { } while (0)
 * so the macro expands to exactly one statement: the previous bare
 * if/else form was vulnerable to the dangling-else problem when used in
 * an unbraced if body.  Arguments are parenthesized against operator-
 * precedence surprises; note `enable' and `delete_method' may each be
 * evaluated once, so side-effecting arguments remain inadvisable.
 */
#define dadeleteflag(softc, delete_method, enable)			\
	do {								\
		if (enable)						\
			(softc)->delete_available |=			\
			    (1 << (delete_method));			\
		else							\
			(softc)->delete_available &=			\
			    ~(1 << (delete_method));			\
	} while (0)
353
/*
 * One quirk-table entry: devices whose inquiry data matches inq_pat get
 * the listed quirk bits applied.
 */
struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;	/* inquiry match pattern */
	da_quirks quirks;			/* quirks applied on match */
};
358
/* Vendor-name strings shared by multiple entries in the quirk table. */
static const char quantum[] = "QUANTUM";
static const char microp[] = "MICROP";
361
362static struct da_quirk_entry da_quirk_table[] =
363{
364	/* SPI, FC devices */
365	{
366		/*
367		 * Fujitsu M2513A MO drives.
368		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
369		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
370		 * Reported by: W.Scholten <whs@xs4all.nl>
371		 */
372		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
373		/*quirks*/ DA_Q_NO_SYNC_CACHE
374	},
375	{
376		/* See above. */
377		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
378		/*quirks*/ DA_Q_NO_SYNC_CACHE
379	},
380	{
381		/*
382		 * This particular Fujitsu drive doesn't like the
383		 * synchronize cache command.
384		 * Reported by: Tom Jackson <toj@gorilla.net>
385		 */
386		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
387		/*quirks*/ DA_Q_NO_SYNC_CACHE
388	},
389	{
390		/*
391		 * This drive doesn't like the synchronize cache command
392		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
393		 * in NetBSD PR kern/6027, August 24, 1998.
394		 */
395		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
396		/*quirks*/ DA_Q_NO_SYNC_CACHE
397	},
398	{
399		/*
400		 * This drive doesn't like the synchronize cache command
401		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
402		 * (PR 8882).
403		 */
404		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
405		/*quirks*/ DA_Q_NO_SYNC_CACHE
406	},
407	{
408		/*
409		 * Doesn't like the synchronize cache command.
410		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
411		 */
412		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
413		/*quirks*/ DA_Q_NO_SYNC_CACHE
414	},
415	{
416		/*
417		 * Doesn't like the synchronize cache command.
418		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
419		 */
420		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
421		/*quirks*/ DA_Q_NO_SYNC_CACHE
422	},
423	{
424		/*
425		 * Doesn't like the synchronize cache command.
426		 */
427		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
428		/*quirks*/ DA_Q_NO_SYNC_CACHE
429	},
430	{
431		/*
432		 * Doesn't like the synchronize cache command.
433		 * Reported by: walter@pelissero.de
434		 */
435		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
436		/*quirks*/ DA_Q_NO_SYNC_CACHE
437	},
438	{
439		/*
440		 * Doesn't work correctly with 6 byte reads/writes.
441		 * Returns illegal request, and points to byte 9 of the
442		 * 6-byte CDB.
443		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
444		 */
445		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
446		/*quirks*/ DA_Q_NO_6_BYTE
447	},
448	{
449		/* See above. */
450		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
451		/*quirks*/ DA_Q_NO_6_BYTE
452	},
453	{
454		/*
455		 * Doesn't like the synchronize cache command.
456		 * Reported by: walter@pelissero.de
457		 */
458		{T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
459		/*quirks*/ DA_Q_NO_SYNC_CACHE
460	},
461	{
462		/*
463		 * The CISS RAID controllers do not support SYNC_CACHE
464		 */
465		{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
466		/*quirks*/ DA_Q_NO_SYNC_CACHE
467	},
468	{
469		/*
470		 * The STEC SSDs sometimes hang on UNMAP.
471		 */
472		{T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"},
473		/*quirks*/ DA_Q_NO_UNMAP
474	},
475	{
476		/*
477		 * VMware returns BUSY status when storage has transient
478		 * connectivity problems, so better wait.
479		 * Also VMware returns odd errors on misaligned UNMAPs.
480		 */
481		{T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"},
482		/*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP
483	},
484	/* USB mass storage devices supported by umass(4) */
485	{
486		/*
487		 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player
488		 * PR: kern/51675
489		 */
490		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"},
491		/*quirks*/ DA_Q_NO_SYNC_CACHE
492	},
493	{
494		/*
495		 * Power Quotient Int. (PQI) USB flash key
496		 * PR: kern/53067
497		 */
498		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*",
499		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
500	},
501 	{
502 		/*
503 		 * Creative Nomad MUVO mp3 player (USB)
504 		 * PR: kern/53094
505 		 */
506 		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
507 		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
508 	},
509	{
510		/*
511		 * Jungsoft NEXDISK USB flash key
512		 * PR: kern/54737
513		 */
514		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"},
515		/*quirks*/ DA_Q_NO_SYNC_CACHE
516	},
517	{
518		/*
519		 * FreeDik USB Mini Data Drive
520		 * PR: kern/54786
521		 */
522		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive",
523		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
524	},
525	{
526		/*
527		 * Sigmatel USB Flash MP3 Player
528		 * PR: kern/57046
529		 */
530		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
531		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
532	},
533	{
534		/*
535		 * Neuros USB Digital Audio Computer
536		 * PR: kern/63645
537		 */
538		{T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.",
539		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
540	},
541	{
542		/*
543		 * SEAGRAND NP-900 MP3 Player
544		 * PR: kern/64563
545		 */
546		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
547		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
548	},
549	{
550		/*
551		 * iRiver iFP MP3 player (with UMS Firmware)
552		 * PR: kern/54881, i386/63941, kern/66124
553		 */
554		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"},
555		/*quirks*/ DA_Q_NO_SYNC_CACHE
556 	},
557	{
558		/*
559		 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01
560		 * PR: kern/70158
561		 */
562		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"},
563		/*quirks*/ DA_Q_NO_SYNC_CACHE
564	},
565	{
566		/*
567		 * ZICPlay USB MP3 Player with FM
568		 * PR: kern/75057
569		 */
570		{T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"},
571		/*quirks*/ DA_Q_NO_SYNC_CACHE
572	},
573	{
574		/*
575		 * TEAC USB floppy mechanisms
576		 */
577		{T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"},
578		/*quirks*/ DA_Q_NO_SYNC_CACHE
579	},
580	{
581		/*
582		 * Kingston DataTraveler II+ USB Pen-Drive.
583		 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org>
584		 */
585		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+",
586		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
587	},
588	{
589		/*
590		 * USB DISK Pro PMAP
591		 * Reported by: jhs
592		 * PR: usb/96381
593		 */
594		{T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"},
595		/*quirks*/ DA_Q_NO_SYNC_CACHE
596	},
597	{
598		/*
599		 * Motorola E398 Mobile Phone (TransFlash memory card).
600		 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl>
601		 * PR: usb/89889
602		 */
603		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone",
604		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
605	},
606	{
607		/*
608		 * Qware BeatZkey! Pro
609		 * PR: usb/79164
610		 */
611		{T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE",
612		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
613	},
614	{
615		/*
616		 * Time DPA20B 1GB MP3 Player
617		 * PR: usb/81846
618		 */
619		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*",
620		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
621	},
622	{
623		/*
624		 * Samsung USB key 128Mb
625		 * PR: usb/90081
626		 */
627		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb",
628		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
629	},
630	{
631		/*
632		 * Kingston DataTraveler 2.0 USB Flash memory.
633		 * PR: usb/89196
634		 */
635		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0",
636		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
637	},
638	{
639		/*
640		 * Creative MUVO Slim mp3 player (USB)
641		 * PR: usb/86131
642		 */
643		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
644		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
645		},
646	{
647		/*
648		 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3)
649		 * PR: usb/80487
650		 */
651		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK",
652		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
653	},
654	{
655		/*
656		 * SanDisk Micro Cruzer 128MB
657		 * PR: usb/75970
658		 */
659		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer",
660		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
661	},
662	{
663		/*
664		 * TOSHIBA TransMemory USB sticks
665		 * PR: kern/94660
666		 */
667		{T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory",
668		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
669	},
670	{
671		/*
672		 * PNY USB 3.0 Flash Drives
673		*/
674		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
675		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
676	},
677	{
678		/*
679		 * PNY USB Flash keys
680		 * PR: usb/75578, usb/72344, usb/65436
681		 */
682		{T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*",
683		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
684	},
685	{
686		/*
687		 * Genesys GL3224
688		 */
689		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
690		"120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16
691	},
692	{
693		/*
694		 * Genesys 6-in-1 Card Reader
695		 * PR: usb/94647
696		 */
697		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
698		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
699	},
700	{
701		/*
702		 * Rekam Digital CAMERA
703		 * PR: usb/98713
704		 */
705		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*",
706		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
707	},
708	{
709		/*
710		 * iRiver H10 MP3 player
711		 * PR: usb/102547
712		 */
713		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*",
714		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
715	},
716	{
717		/*
718		 * iRiver U10 MP3 player
719		 * PR: usb/92306
720		 */
721		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*",
722		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
723	},
724	{
725		/*
726		 * X-Micro Flash Disk
727		 * PR: usb/96901
728		 */
729		{T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk",
730		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
731	},
732	{
733		/*
734		 * EasyMP3 EM732X USB 2.0 Flash MP3 Player
735		 * PR: usb/96546
736		 */
737		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*",
738		"1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
739	},
740	{
741		/*
742		 * Denver MP3 player
743		 * PR: usb/107101
744		 */
745		{T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER",
746		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
747	},
748	{
749		/*
750		 * Philips USB Key Audio KEY013
751		 * PR: usb/68412
752		 */
753		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
754		/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
755	},
756	{
757		/*
758		 * JNC MP3 Player
759		 * PR: usb/94439
760		 */
761		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*",
762		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
763	},
764	{
765		/*
766		 * SAMSUNG MP0402H
767		 * PR: usb/108427
768		 */
769		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"},
770		/*quirks*/ DA_Q_NO_SYNC_CACHE
771	},
772	{
773		/*
774		 * I/O Magic USB flash - Giga Bank
775		 * PR: usb/108810
776		 */
777		{T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"},
778		/*quirks*/ DA_Q_NO_SYNC_CACHE
779	},
780	{
781		/*
782		 * JoyFly 128mb USB Flash Drive
783		 * PR: 96133
784		 */
785		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*",
786		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
787	},
788	{
789		/*
790		 * ChipsBnk usb stick
791		 * PR: 103702
792		 */
793		{T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*",
794		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
795	},
796	{
797		/*
798		 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A
799		 * PR: 129858
800		 */
801		{T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*",
802		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
803	},
804	{
805		/*
806		 * Samsung YP-U3 mp3-player
807		 * PR: 125398
808		 */
809		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3",
810		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
811	},
812	{
813		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*",
814		 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
815	},
816	{
817		/*
818		 * Sony Cyber-Shot DSC cameras
819		 * PR: usb/137035
820		 */
821		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
822		/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
823	},
824	{
825		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3",
826		 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT
827	},
828	{
829		/* At least several Transcent USB sticks lie on RC16. */
830		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*",
831		 "*"}, /*quirks*/ DA_Q_NO_RC16
832	},
833	{
834		/*
835		 * I-O Data USB Flash Disk
836		 * PR: usb/211716
837		 */
838		{T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*",
839		 "*"}, /*quirks*/ DA_Q_NO_RC16
840	},
841	{
842		/*
843		 * SLC CHIPFANCIER USB drives
844		 * PR: usb/234503 (RC10 right, RC16 wrong)
845		 * 16GB, 32GB and 128GB confirmed to have same issue
846		 */
847		{T_DIRECT, SIP_MEDIA_REMOVABLE, "*SLC", "CHIPFANCIER",
848		 "*"}, /*quirks*/ DA_Q_NO_RC16
849       },
850	/* ATA/SATA devices over SAS/USB/... */
851	{
852		/* Sandisk X400 */
853		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SanDisk SD8SB8U1*", "*" },
854		/*quirks*/DA_Q_128KB
855	},
856	{
857		/* Hitachi Advanced Format (4k) drives */
858		{ T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" },
859		/*quirks*/DA_Q_4K
860	},
861	{
862		/* Micron Advanced Format (4k) drives */
863		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" },
864		/*quirks*/DA_Q_4K
865	},
866	{
867		/* Samsung Advanced Format (4k) drives */
868		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" },
869		/*quirks*/DA_Q_4K
870	},
871	{
872		/* Samsung Advanced Format (4k) drives */
873		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" },
874		/*quirks*/DA_Q_4K
875	},
876	{
877		/* Samsung Advanced Format (4k) drives */
878		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" },
879		/*quirks*/DA_Q_4K
880	},
881	{
882		/* Samsung Advanced Format (4k) drives */
883		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" },
884		/*quirks*/DA_Q_4K
885	},
886	{
887		/* Seagate Barracuda Green Advanced Format (4k) drives */
888		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" },
889		/*quirks*/DA_Q_4K
890	},
891	{
892		/* Seagate Barracuda Green Advanced Format (4k) drives */
893		{ T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" },
894		/*quirks*/DA_Q_4K
895	},
896	{
897		/* Seagate Barracuda Green Advanced Format (4k) drives */
898		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" },
899		/*quirks*/DA_Q_4K
900	},
901	{
902		/* Seagate Barracuda Green Advanced Format (4k) drives */
903		{ T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" },
904		/*quirks*/DA_Q_4K
905	},
906	{
907		/* Seagate Barracuda Green Advanced Format (4k) drives */
908		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" },
909		/*quirks*/DA_Q_4K
910	},
911	{
912		/* Seagate Barracuda Green Advanced Format (4k) drives */
913		{ T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" },
914		/*quirks*/DA_Q_4K
915	},
916	{
917		/* Seagate Momentus Advanced Format (4k) drives */
918		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" },
919		/*quirks*/DA_Q_4K
920	},
921	{
922		/* Seagate Momentus Advanced Format (4k) drives */
923		{ T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" },
924		/*quirks*/DA_Q_4K
925	},
926	{
927		/* Seagate Momentus Advanced Format (4k) drives */
928		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" },
929		/*quirks*/DA_Q_4K
930	},
931	{
932		/* Seagate Momentus Advanced Format (4k) drives */
933		{ T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" },
934		/*quirks*/DA_Q_4K
935	},
936	{
937		/* Seagate Momentus Advanced Format (4k) drives */
938		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" },
939		/*quirks*/DA_Q_4K
940	},
941	{
942		/* Seagate Momentus Advanced Format (4k) drives */
943		{ T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" },
944		/*quirks*/DA_Q_4K
945	},
946	{
947		/* Seagate Momentus Advanced Format (4k) drives */
948		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" },
949		/*quirks*/DA_Q_4K
950	},
951	{
952		/* Seagate Momentus Advanced Format (4k) drives */
953		{ T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" },
954		/*quirks*/DA_Q_4K
955	},
956	{
957		/* Seagate Momentus Advanced Format (4k) drives */
958		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" },
959		/*quirks*/DA_Q_4K
960	},
961	{
962		/* Seagate Momentus Advanced Format (4k) drives */
963		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" },
964		/*quirks*/DA_Q_4K
965	},
966	{
967		/* Seagate Momentus Advanced Format (4k) drives */
968		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" },
969		/*quirks*/DA_Q_4K
970	},
971	{
972		/* Seagate Momentus Advanced Format (4k) drives */
973		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" },
974		/*quirks*/DA_Q_4K
975	},
976	{
977		/* Seagate Momentus Advanced Format (4k) drives */
978		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" },
979		/*quirks*/DA_Q_4K
980	},
981	{
982		/* Seagate Momentus Advanced Format (4k) drives */
983		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" },
984		/*quirks*/DA_Q_4K
985	},
986	{
987		/* Seagate Momentus Thin Advanced Format (4k) drives */
988		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" },
989		/*quirks*/DA_Q_4K
990	},
991	{
992		/* Seagate Momentus Thin Advanced Format (4k) drives */
993		{ T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" },
994		/*quirks*/DA_Q_4K
995	},
996	{
997		/* WDC Caviar Green Advanced Format (4k) drives */
998		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" },
999		/*quirks*/DA_Q_4K
1000	},
1001	{
1002		/* WDC Caviar Green Advanced Format (4k) drives */
1003		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" },
1004		/*quirks*/DA_Q_4K
1005	},
1006	{
1007		/* WDC Caviar Green Advanced Format (4k) drives */
1008		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" },
1009		/*quirks*/DA_Q_4K
1010	},
1011	{
1012		/* WDC Caviar Green Advanced Format (4k) drives */
1013		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" },
1014		/*quirks*/DA_Q_4K
1015	},
1016	{
1017		/* WDC Caviar Green Advanced Format (4k) drives */
1018		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" },
1019		/*quirks*/DA_Q_4K
1020	},
1021	{
1022		/* WDC Caviar Green Advanced Format (4k) drives */
1023		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" },
1024		/*quirks*/DA_Q_4K
1025	},
1026	{
1027		/* WDC Caviar Green Advanced Format (4k) drives */
1028		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" },
1029		/*quirks*/DA_Q_4K
1030	},
1031	{
1032		/* WDC Caviar Green Advanced Format (4k) drives */
1033		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" },
1034		/*quirks*/DA_Q_4K
1035	},
1036	{
1037		/* WDC Scorpio Black Advanced Format (4k) drives */
1038		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" },
1039		/*quirks*/DA_Q_4K
1040	},
1041	{
1042		/* WDC Scorpio Black Advanced Format (4k) drives */
1043		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" },
1044		/*quirks*/DA_Q_4K
1045	},
1046	{
1047		/* WDC Scorpio Black Advanced Format (4k) drives */
1048		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" },
1049		/*quirks*/DA_Q_4K
1050	},
1051	{
1052		/* WDC Scorpio Black Advanced Format (4k) drives */
1053		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" },
1054		/*quirks*/DA_Q_4K
1055	},
1056	{
1057		/* WDC Scorpio Blue Advanced Format (4k) drives */
1058		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" },
1059		/*quirks*/DA_Q_4K
1060	},
1061	{
1062		/* WDC Scorpio Blue Advanced Format (4k) drives */
1063		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" },
1064		/*quirks*/DA_Q_4K
1065	},
1066	{
1067		/* WDC Scorpio Blue Advanced Format (4k) drives */
1068		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" },
1069		/*quirks*/DA_Q_4K
1070	},
1071	{
1072		/* WDC Scorpio Blue Advanced Format (4k) drives */
1073		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" },
1074		/*quirks*/DA_Q_4K
1075	},
1076	{
1077		/*
1078		 * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1)
1079		 * PR: usb/97472
1080		 */
1081		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C*", "*"},
1082		/*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE
1083	},
1084	{
1085		/*
1086		 * Olympus digital cameras (D-370)
1087		 * PR: usb/97472
1088		 */
1089		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "D*", "*"},
1090		/*quirks*/ DA_Q_NO_6_BYTE
1091	},
1092	{
1093		/*
1094		 * Olympus digital cameras (E-100RS, E-10).
1095		 * PR: usb/97472
1096		 */
1097		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "E*", "*"},
1098		/*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE
1099	},
1100	{
1101		/*
1102		 * Olympus FE-210 camera
1103		 */
1104		{T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*",
1105		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1106	},
1107	{
1108		/*
1109		 * LG UP3S MP3 player
1110		 */
1111		{T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S",
1112		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1113	},
1114	{
1115		/*
1116		 * Laser MP3-2GA13 MP3 player
1117		 */
1118		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk",
1119		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1120	},
1121	{
1122		/*
1123		 * LaCie external 250GB Hard drive des by Porsche
1124		 * Submitted by: Ben Stuyts <ben@altesco.nl>
1125		 * PR: 121474
1126		 */
1127		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"},
1128		/*quirks*/ DA_Q_NO_SYNC_CACHE
1129	},
1130	/* SATA SSDs */
1131	{
1132		/*
1133		 * Corsair Force 2 SSDs
1134		 * 4k optimised & trim only works in 4k requests + 4k aligned
1135		 */
1136		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" },
1137		/*quirks*/DA_Q_4K
1138	},
1139	{
1140		/*
1141		 * Corsair Force 3 SSDs
1142		 * 4k optimised & trim only works in 4k requests + 4k aligned
1143		 */
1144		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" },
1145		/*quirks*/DA_Q_4K
1146	},
1147        {
1148		/*
1149		 * Corsair Neutron GTX SSDs
1150		 * 4k optimised & trim only works in 4k requests + 4k aligned
1151		 */
1152		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
1153		/*quirks*/DA_Q_4K
1154	},
1155	{
1156		/*
1157		 * Corsair Force GT & GS SSDs
1158		 * 4k optimised & trim only works in 4k requests + 4k aligned
1159		 */
1160		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" },
1161		/*quirks*/DA_Q_4K
1162	},
1163	{
1164		/*
1165		 * Crucial M4 SSDs
1166		 * 4k optimised & trim only works in 4k requests + 4k aligned
1167		 */
1168		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" },
1169		/*quirks*/DA_Q_4K
1170	},
1171	{
1172		/*
1173		 * Crucial RealSSD C300 SSDs
1174		 * 4k optimised
1175		 */
1176		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*",
1177		"*" }, /*quirks*/DA_Q_4K
1178	},
1179	{
1180		/*
1181		 * Intel 320 Series SSDs
1182		 * 4k optimised & trim only works in 4k requests + 4k aligned
1183		 */
1184		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" },
1185		/*quirks*/DA_Q_4K
1186	},
1187	{
1188		/*
1189		 * Intel 330 Series SSDs
1190		 * 4k optimised & trim only works in 4k requests + 4k aligned
1191		 */
1192		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" },
1193		/*quirks*/DA_Q_4K
1194	},
1195	{
1196		/*
1197		 * Intel 510 Series SSDs
1198		 * 4k optimised & trim only works in 4k requests + 4k aligned
1199		 */
1200		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" },
1201		/*quirks*/DA_Q_4K
1202	},
1203	{
1204		/*
1205		 * Intel 520 Series SSDs
1206		 * 4k optimised & trim only works in 4k requests + 4k aligned
1207		 */
1208		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" },
1209		/*quirks*/DA_Q_4K
1210	},
1211	{
1212		/*
1213		 * Intel S3610 Series SSDs
1214		 * 4k optimised & trim only works in 4k requests + 4k aligned
1215		 */
1216		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" },
1217		/*quirks*/DA_Q_4K
1218	},
1219	{
1220		/*
1221		 * Intel X25-M Series SSDs
1222		 * 4k optimised & trim only works in 4k requests + 4k aligned
1223		 */
1224		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" },
1225		/*quirks*/DA_Q_4K
1226	},
1227	{
1228		/*
1229		 * Kingston E100 Series SSDs
1230		 * 4k optimised & trim only works in 4k requests + 4k aligned
1231		 */
1232		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" },
1233		/*quirks*/DA_Q_4K
1234	},
1235	{
1236		/*
1237		 * Kingston HyperX 3k SSDs
1238		 * 4k optimised & trim only works in 4k requests + 4k aligned
1239		 */
1240		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" },
1241		/*quirks*/DA_Q_4K
1242	},
1243	{
1244		/*
1245		 * Marvell SSDs (entry taken from OpenSolaris)
1246		 * 4k optimised & trim only works in 4k requests + 4k aligned
1247		 */
1248		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" },
1249		/*quirks*/DA_Q_4K
1250	},
1251	{
1252		/*
1253		 * OCZ Agility 2 SSDs
1254		 * 4k optimised & trim only works in 4k requests + 4k aligned
1255		 */
1256		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
1257		/*quirks*/DA_Q_4K
1258	},
1259	{
1260		/*
1261		 * OCZ Agility 3 SSDs
1262		 * 4k optimised & trim only works in 4k requests + 4k aligned
1263		 */
1264		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" },
1265		/*quirks*/DA_Q_4K
1266	},
1267	{
1268		/*
1269		 * OCZ Deneva R Series SSDs
1270		 * 4k optimised & trim only works in 4k requests + 4k aligned
1271		 */
1272		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" },
1273		/*quirks*/DA_Q_4K
1274	},
1275	{
1276		/*
1277		 * OCZ Vertex 2 SSDs (inc pro series)
1278		 * 4k optimised & trim only works in 4k requests + 4k aligned
1279		 */
1280		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" },
1281		/*quirks*/DA_Q_4K
1282	},
1283	{
1284		/*
1285		 * OCZ Vertex 3 SSDs
1286		 * 4k optimised & trim only works in 4k requests + 4k aligned
1287		 */
1288		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" },
1289		/*quirks*/DA_Q_4K
1290	},
1291	{
1292		/*
1293		 * OCZ Vertex 4 SSDs
1294		 * 4k optimised & trim only works in 4k requests + 4k aligned
1295		 */
1296		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" },
1297		/*quirks*/DA_Q_4K
1298	},
1299	{
1300		/*
1301		 * Samsung 750 Series SSDs
1302		 * 4k optimised & trim only works in 4k requests + 4k aligned
1303		 */
1304		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" },
1305		/*quirks*/DA_Q_4K
1306	},
1307	{
1308		/*
1309		 * Samsung 830 Series SSDs
1310		 * 4k optimised & trim only works in 4k requests + 4k aligned
1311		 */
1312		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" },
1313		/*quirks*/DA_Q_4K
1314	},
1315	{
1316		/*
1317		 * Samsung 840 SSDs
1318		 * 4k optimised & trim only works in 4k requests + 4k aligned
1319		 */
1320		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" },
1321		/*quirks*/DA_Q_4K
1322	},
1323	{
1324		/*
1325		 * Samsung 845 SSDs
1326		 * 4k optimised & trim only works in 4k requests + 4k aligned
1327		 */
1328		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" },
1329		/*quirks*/DA_Q_4K
1330	},
1331	{
1332		/*
1333		 * Samsung 850 SSDs
1334		 * 4k optimised & trim only works in 4k requests + 4k aligned
1335		 */
1336		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" },
1337		/*quirks*/DA_Q_4K
1338	},
1339	{
1340		/*
1341		 * Samsung 843T Series SSDs (MZ7WD*)
1342		 * Samsung PM851 Series SSDs (MZ7TE*)
1343		 * Samsung PM853T Series SSDs (MZ7GE*)
1344		 * Samsung SM863 Series SSDs (MZ7KM*)
1345		 * 4k optimised
1346		 */
1347		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" },
1348		/*quirks*/DA_Q_4K
1349	},
1350	{
1351		/*
1352		 * Same as for SAMSUNG MZ7* but enable the quirks for SSD
1353		 * starting with MZ7* too
1354		 */
1355		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" },
1356		/*quirks*/DA_Q_4K
1357	},
1358	{
1359		/*
1360		 * SuperTalent TeraDrive CT SSDs
1361		 * 4k optimised & trim only works in 4k requests + 4k aligned
1362		 */
1363		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" },
1364		/*quirks*/DA_Q_4K
1365	},
1366	{
1367		/*
1368		 * XceedIOPS SATA SSDs
1369		 * 4k optimised
1370		 */
1371		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" },
1372		/*quirks*/DA_Q_4K
1373	},
1374	{
1375		/*
1376		 * Hama Innostor USB-Stick
1377		 */
1378		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" },
1379		/*quirks*/DA_Q_NO_RC16
1380	},
1381	{
1382		/*
1383		 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
1384		 * Drive Managed SATA hard drive.  This drive doesn't report
1385		 * in firmware that it is a drive managed SMR drive.
1386		 */
1387		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" },
1388		/*quirks*/DA_Q_SMR_DM
1389	},
1390	{
1391		/*
1392		 * MX-ES USB Drive by Mach Xtreme
1393		 */
1394		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
1395		/*quirks*/DA_Q_NO_RC16
1396	},
1397};
1398
/* GEOM disk entry points. */
static	disk_strategy_t	dastrategy;
static	dumper_t	dadump;
/* Driver-wide init and the async event handler. */
static	periph_init_t	dainit;
static	void		daasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
/* Per-unit sysctl tree construction and handlers. */
static	void		dasysctlinit(void *context, int pending);
static	int		dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
static	int		dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
static	int		dazonemodesysctl(SYSCTL_HANDLER_ARGS);
static	int		dazonesupsysctl(SYSCTL_HANDLER_ARGS);
static	int		dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
/* BIO_DELETE method selection and sizing helpers. */
static	void		dadeletemethodset(struct da_softc *softc,
					  da_delete_methods delete_method);
static	off_t		dadeletemaxsize(struct da_softc *softc,
					da_delete_methods delete_method);
static	void		dadeletemethodchoose(struct da_softc *softc,
					     da_delete_methods default_method);
static	void		daprobedone(struct cam_periph *periph, union ccb *ccb);

/* CAM periph life-cycle and I/O entry points. */
static	periph_ctor_t	daregister;
static	periph_dtor_t	dacleanup;
static	periph_start_t	dastart;
static	periph_oninv_t	daoninvalidate;
static	void		dazonedone(struct cam_periph *periph, union ccb *ccb);
static	void		dadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static void		daprevent(struct cam_periph *periph, int action);
static void		dareprobe(struct cam_periph *periph);
static void		dasetgeom(struct cam_periph *periph, uint32_t block_len,
				  uint64_t maxsector,
				  struct scsi_read_capacity_data_long *rcaplong,
				  size_t rcap_size);
/* Periodic callouts and shutdown hook. */
static timeout_t	dasendorderedtag;
static void		dashutdown(void *arg, int howto);
static timeout_t	damediapoll;
1437
/*
 * Compile-time defaults.  Each value is wrapped in #ifndef so it can be
 * overridden at build time; the variables initialized from them below
 * are further adjustable at runtime via the kern.cam.da.* sysctls.
 */
#ifndef	DA_DEFAULT_POLL_PERIOD
#define	DA_DEFAULT_POLL_PERIOD	3
#endif

#ifndef DA_DEFAULT_TIMEOUT
#define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
#endif

#ifndef DA_DEFAULT_SOFTTIMEOUT
#define DA_DEFAULT_SOFTTIMEOUT	0
#endif

#ifndef	DA_DEFAULT_RETRY
#define	DA_DEFAULT_RETRY	4
#endif

#ifndef	DA_DEFAULT_SEND_ORDERED
#define	DA_DEFAULT_SEND_ORDERED	1
#endif
1457
/* Runtime-tunable driver globals, exported below as kern.cam.da.*. */
static int da_poll_period = DA_DEFAULT_POLL_PERIOD;
static int da_retry_count = DA_DEFAULT_RETRY;
static int da_default_timeout = DA_DEFAULT_TIMEOUT;
static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT;
static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
static int da_disable_wp_detection = 0;

static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
           &da_poll_period, 0, "Media polling period in seconds");
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
           &da_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
           &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
           &da_send_ordered, 0, "Send Ordered Tags");
SYSCTL_INT(_kern_cam_da, OID_AUTO, disable_wp_detection, CTLFLAG_RWTUN,
           &da_disable_wp_detection, 0,
	   "Disable detection of write-protected disks");

/*
 * The soft timeout goes through a handler instead of SYSCTL_INT, and
 * its tunable is fetched separately with TUNABLE_INT64, because the
 * backing variable is an sbintime_t while the user-visible unit is
 * milliseconds (presumably converted in dasysctlsofttimeout() -- the
 * handler body is elsewhere in this file).
 */
SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0, dasysctlsofttimeout, "I",
    "Soft I/O timeout (ms)");
TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout);
1483
1484/*
1485 * DA_ORDEREDTAG_INTERVAL determines how often, relative
1486 * to the default timeout, we check to see whether an ordered
1487 * tagged transaction is appropriate to prevent simple tag
1488 * starvation.  Since we'd like to ensure that there is at least
1489 * 1/2 of the timeout length left for a starved transaction to
1490 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This covers the worst case,
 * where a starved transaction starts during an interval that passes
 * the "don't send an ordered tag" test, so that it takes us two
 * intervals to determine that a tag must be sent.
 */
1495 */
1496#ifndef DA_ORDEREDTAG_INTERVAL
1497#define DA_ORDEREDTAG_INTERVAL 4
1498#endif
1499
/*
 * Periph driver registration: ties dainit and the "da" unit list into
 * CAM's peripheral driver table.
 */
static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(da, dadriver);

/* Malloc type for this driver's allocations. */
static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
1509
/*
 * GEOM disk open entry point.  Takes a periph reference for the
 * duration of the open, kicks off a reprobe to refresh capacity data,
 * and (for removable packs) prevents media removal.  Returns 0 or an
 * errno; ENXIO if the periph is gone or becomes invalid.
 */
static int
daopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return (ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daopen\n"));

	softc = (struct da_softc *)periph->softc;
	dareprobe(periph);

	/* Wait for the disk size update.  */
	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
	    "dareprobe", 0);
	if (error != 0)
		xpt_print(periph->path, "unable to retrieve capacity data\n");

	/* The device may have been invalidated while we slept. */
	if (periph->flags & CAM_PERIPH_INVALID)
		error = ENXIO;

	/* Prevent media removal on removable packs unless quirked off. */
	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
		daprevent(periph, PR_PREVENT);

	if (error == 0) {
		softc->flags &= ~DA_FLAG_PACK_INVALID;
		softc->flags |= DA_FLAG_OPEN;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);

	/* On failure, drop the reference taken by cam_periph_acquire(). */
	if (error != 0)
		cam_periph_release(periph);

	return (error);
}
1561
/*
 * GEOM disk close entry point.  Flushes the write cache if the pack
 * was dirtied, re-allows media removal, waits for outstanding d_open
 * style references to drain, and drops the reference that daopen()
 * took.  Always returns 0.
 */
static int
daclose(struct disk *dp)
{
	struct	cam_periph *periph;
	struct	da_softc *softc;
	union	ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daclose\n"));

	/* If the hold fails we skip the flush/allow but still close. */
	if (cam_periph_hold(periph, PRIBIO) == 0) {

		/* Flush disk cache. */
		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
			    /*cbfcnp*/dadone, MSG_SIMPLE_Q_TAG,
			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
			    5 * 60 * 1000);
			/* Note: flush errors are ignored; DIRTY is cleared
			 * regardless. */
			error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
			    softc->disk->d_devstat);
			softc->flags &= ~DA_FLAG_DIRTY;
			xpt_release_ccb(ccb);
		}

		/* Allow medium removal. */
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
			daprevent(periph, PR_ALLOW);

		cam_periph_unhold(periph);
	}

	/*
	 * If we've got removeable media, mark the blocksize as
	 * unavailable, since it could change when new media is
	 * inserted.
	 */
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;

	softc->flags &= ~DA_FLAG_OPEN;
	/* Wait for any lingering internal references to go away. */
	while (softc->refcount != 0)
		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}
1617
1618static void
1619daschedule(struct cam_periph *periph)
1620{
1621	struct da_softc *softc = (struct da_softc *)periph->softc;
1622
1623	if (softc->state != DA_STATE_NORMAL)
1624		return;
1625
1626	cam_iosched_schedule(softc->cam_iosched, periph);
1627}
1628
1629/*
1630 * Actually translate the requested transfer into one the physical driver
1631 * can understand.  The transfer is described by a buf and will include
1632 * only one physical transfer.
1633 */
1634static void
1635dastrategy(struct bio *bp)
1636{
1637	struct cam_periph *periph;
1638	struct da_softc *softc;
1639
1640	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1641	softc = (struct da_softc *)periph->softc;
1642
1643	cam_periph_lock(periph);
1644
1645	/*
1646	 * If the device has been made invalid, error out
1647	 */
1648	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
1649		cam_periph_unlock(periph);
1650		biofinish(bp, NULL, ENXIO);
1651		return;
1652	}
1653
1654	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));
1655
1656	/*
1657	 * Zone commands must be ordered, because they can depend on the
1658	 * effects of previously issued commands, and they may affect
1659	 * commands after them.
1660	 */
1661	if (bp->bio_cmd == BIO_ZONE)
1662		bp->bio_flags |= BIO_ORDERED;
1663
1664	/*
1665	 * Place it in the queue of disk activities for this disk
1666	 */
1667	cam_iosched_queue_work(softc->cam_iosched, bp);
1668
1669	/*
1670	 * Schedule ourselves for performing the work.
1671	 */
1672	daschedule(periph);
1673	cam_periph_unlock(periph);
1674
1675	return;
1676}
1677
/*
 * Kernel crash-dump entry point.  The system may be wedged, so all
 * I/O is issued via polled CCBs (xpt_polled_action) with no retries.
 * A call with length > 0 writes one chunk of the dump; a zero-length
 * call only synchronizes the drive's cache (unless quirked off).
 */
static int
dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct	    cam_periph *periph;
	struct	    da_softc *softc;
	u_int	    secsize;
	struct	    ccb_scsiio csio;
	struct	    disk *dp;
	int	    error = 0;

	dp = arg;
	periph = dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

	/* Write one chunk of the dump. */
	if (length > 0) {
		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/0,
				dadone,
				MSG_ORDERED_Q_TAG,
				/*read*/SCSI_RW_WRITE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				offset / secsize,
				length / secsize,
				/*data_ptr*/(u_int8_t *) virtual,
				/*dxfer_len*/length,
				/*sense_len*/SSD_FULL_SIZE,
				da_default_timeout * 1000);
		xpt_polled_action((union ccb *)&csio);

		error = cam_periph_error((union ccb *)&csio,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		/* Release any device queue freeze so later I/O can run. */
		if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
		if (error != 0)
			printf("Aborting dump due to I/O error.\n");
		cam_periph_unlock(periph);
		return (error);
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {

		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/0,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 1000);
		xpt_polled_action((union ccb *)&csio);

		error = cam_periph_error((union ccb *)&csio,
		    0, SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR, NULL);
		if ((csio.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(csio.ccb_h.path, /*relsim_flags*/0,
			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
	}
	cam_periph_unlock(periph);
	return (error);
}
1756
1757static int
1758dagetattr(struct bio *bp)
1759{
1760	int ret;
1761	struct cam_periph *periph;
1762
1763	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1764	cam_periph_lock(periph);
1765	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1766	    periph->path);
1767	cam_periph_unlock(periph);
1768	if (ret == 0)
1769		bp->bio_completed = bp->bio_length;
1770	return ret;
1771}
1772
1773static void
1774dainit(void)
1775{
1776	cam_status status;
1777
1778	/*
1779	 * Install a global async callback.  This callback will
1780	 * receive async callbacks like "new device found".
1781	 */
1782	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
1783
1784	if (status != CAM_REQ_CMP) {
1785		printf("da: Failed to attach master async callback "
1786		       "due to status 0x%x!\n", status);
1787	} else if (da_send_ordered) {
1788
1789		/* Register our shutdown event handler */
1790		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
1791					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
1792		    printf("dainit: shutdown event registration failed!\n");
1793	}
1794}
1795
1796/*
1797 * Callback from GEOM, called when it has finished cleaning up its
1798 * resources.
1799 */
1800static void
1801dadiskgonecb(struct disk *dp)
1802{
1803	struct cam_periph *periph;
1804
1805	periph = (struct cam_periph *)dp->d_drv1;
1806	cam_periph_release(periph);
1807}
1808
/*
 * Periph invalidation hook: the underlying device is going away.
 * Stop receiving async events, mark the pack invalid, fail all queued
 * I/O, and start GEOM teardown (dadiskgonecb() fires when GEOM is
 * done).  Called with the periph lock held by CAM.
 */
static void
daoninvalidate(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, daasync, periph, periph->path);

	softc->flags |= DA_FLAG_PACK_INVALID;
#ifdef CAM_IO_STATS
	softc->invalidations++;
#endif

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);

	/*
	 * Tell GEOM that we've gone away, we'll get a callback when it is
	 * done cleaning up its resources.
	 */
	disk_gone(softc->disk);
}
1839
/*
 * Final periph destructor: frees everything daregister() allocated.
 * The periph lock must be dropped around the sysctl/disk/callout
 * teardown, which can sleep; CAM expects it held again on return.
 */
static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_unlock(periph);

	cam_iosched_fini(softc->cam_iosched);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
#ifdef CAM_IO_STATS
		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl stats context\n");
#endif
		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl context\n");
	}

	/* Stop the callouts before freeing the softc they point into. */
	callout_drain(&softc->mediapoll_c);
	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}
1871
/*
 * Asynchronous event callback.  Registered globally for
 * AC_FOUND_DEVICE (see dainit(), callback_arg == NULL registration
 * path) and per-periph for the other codes, where callback_arg is the
 * owning periph.
 */
static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		/* Only claim SCSI direct-access-class devices. */
		if (cgd->protocol != PROTO_SCSI)
			break;
		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
			break;
		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		    && SID_TYPE(&cgd->inq_data) != T_RBC
		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL
		    && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		/* Return (not break): skip the generic handler below. */
		return;
	}
	case AC_ADVINFO_CHANGED:
	{
		uintptr_t buftype;

		buftype = (uintptr_t)arg;
		if (buftype == CDAI_TYPE_PHYS_PATH) {
			struct da_softc *softc;

			softc = periph->softc;
			disk_attr_changed(softc->disk, "GEOM::physpath",
					  M_NOWAIT);
		}
		break;
	}
	case AC_UNIT_ATTENTION:
	{
		union ccb *ccb;
		int error_code, sense_key, asc, ascq;

		softc = (struct da_softc *)periph->softc;
		ccb = (union ccb *)arg;

		/*
		 * Handle all UNIT ATTENTIONs except our own,
		 * as they will be handled by daerror().
		 */
		if (xpt_path_periph(ccb->ccb_h.path) != periph &&
		    scsi_extract_sense_ccb(ccb,
		     &error_code, &sense_key, &asc, &ascq)) {
			if (asc == 0x2A && ascq == 0x09) {
				/* 2A/09: capacity data has changed. */
				xpt_print(ccb->ccb_h.path,
				    "Capacity data has changed\n");
				softc->flags &= ~DA_FLAG_PROBED;
				dareprobe(periph);
			} else if (asc == 0x28 && ascq == 0x00) {
				/* 28/00: not ready to ready (media change). */
				softc->flags &= ~DA_FLAG_PROBED;
				disk_media_changed(softc->disk, M_NOWAIT);
			} else if (asc == 0x3F && ascq == 0x03) {
				/* 3F/03: INQUIRY data has changed. */
				xpt_print(ccb->ccb_h.path,
				    "INQUIRY data has changed\n");
				softc->flags &= ~DA_FLAG_PROBED;
				dareprobe(periph);
			}
		}
		/*
		 * NOTE(review): cam_periph_async() is called here AND
		 * again after the switch, so unit attentions reach it
		 * twice.  Looks redundant but possibly harmless --
		 * confirm against cam_periph_async()'s handling before
		 * changing.
		 */
		cam_periph_async(periph, code, path, arg);
		break;
	}
	case AC_SCSI_AEN:
		/* Schedule a TEST UNIT READY if one isn't pending. */
		softc = (struct da_softc *)periph->softc;
		if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
			if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
				cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
				daschedule(periph);
			}
		}
		/* FALLTHROUGH */
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct ccb_hdr *ccbh;

		softc = (struct da_softc *)periph->softc;
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		break;
	}
	case AC_INQ_CHANGED:
		softc = (struct da_softc *)periph->softc;
		softc->flags &= ~DA_FLAG_PROBED;
		dareprobe(periph);
		break;
	default:
		break;
	}
	/* Let the generic periph code see the event too. */
	cam_periph_async(periph, code, path, arg);
}
1998
/*
 * Task callback that builds the per-unit sysctl tree
 * (kern.cam.da.<unit>.*).  Runs without the periph lock; the periph
 * reference taken when the task was enqueued is released on every
 * exit path.
 */
static void
dasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	char tmpstr[32], tmpstr2[16];
	struct ccb_trans_settings cts;

	periph = (struct cam_periph *)context;
	/*
	 * periph was held for us when this task was enqueued
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		cam_periph_release(periph);
		return;
	}

	softc = (struct da_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	/* Lets dacleanup() know the context needs freeing. */
	softc->flags |= DA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("dasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	/*
	 * Now register the sysctl handler, so the user can change the value on
	 * the fly.
	 */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_method", CTLTYPE_STRING | CTLFLAG_RWTUN,
		softc, 0, dadeletemethodsysctl, "A",
		"BIO_DELETE execution method");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_max", CTLTYPE_U64 | CTLFLAG_RW,
		softc, 0, dadeletemaxsysctl, "Q",
		"Maximum BIO_DELETE size");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
		"Minimum CDB size");

	/* Zoned (SMR) drive information. */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_mode", CTLTYPE_STRING | CTLFLAG_RD,
		softc, 0, dazonemodesysctl, "A",
		"Zone Mode");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_support", CTLTYPE_STRING | CTLFLAG_RD,
		softc, 0, dazonesupsysctl, "A",
		"Zone Support");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
		"Optimal Number of Open Sequential Write Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_nonseq_zones", CTLFLAG_RD,
		&softc->optimal_nonseq_zones,
		"Optimal Number of Non-Sequentially Written Sequential Write "
		"Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
		"Maximum Number of Open Sequential Write Required Zones");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "error_inject",
		       CTLFLAG_RW,
		       &softc->error_inject,
		       0,
		       "error_inject leaf");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "unmapped_io",
		       CTLFLAG_RD,
		       &softc->unmappedio,
		       0,
		       "Unmapped I/O leaf");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "rotating",
		       CTLFLAG_RD,
		       &softc->rotating,
		       0,
		       "Rotating media");

	/*
	 * Add some addressing info.
	 */
	memset(&cts, 0, sizeof (cts));
	xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cam_periph_lock(periph);
	xpt_action((union ccb *)&cts);
	cam_periph_unlock(periph);
	if (cts.ccb_h.status != CAM_REQ_CMP) {
		cam_periph_release(periph);
		return;
	}
	/* On Fibre Channel transports, expose the WWPN if valid. */
	if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWPN) {
			softc->wwpn = fc->wwpn;
			SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
			    SYSCTL_CHILDREN(softc->sysctl_tree),
			    OID_AUTO, "wwpn", CTLFLAG_RD,
			    &softc->wwpn, "World Wide Port Name");
		}
	}

#ifdef CAM_IO_STATS
	/*
	 * Now add some useful stats.
	 * XXX These should live in cam_periph and be common to all periphs
	 */
	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
	    CTLFLAG_RD, 0, "Statistics");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "errors",
		       CTLFLAG_RD,
		       &softc->errors,
		       0,
		       "Transport errors reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "timeouts",
		       CTLFLAG_RD,
		       &softc->timeouts,
		       0,
		       "Device timeouts reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "pack_invalidations",
		       CTLFLAG_RD,
		       &softc->invalidations,
		       0,
		       "Device pack invalidations");
#endif

	/* Let the I/O scheduler add its own knobs under our tree. */
	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
	    softc->sysctl_tree);

	cam_periph_release(periph);
}
2162
2163static int
2164dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
2165{
2166	int error;
2167	uint64_t value;
2168	struct da_softc *softc;
2169
2170	softc = (struct da_softc *)arg1;
2171
2172	value = softc->disk->d_delmaxsize;
2173	error = sysctl_handle_64(oidp, &value, 0, req);
2174	if ((error != 0) || (req->newptr == NULL))
2175		return (error);
2176
2177	/* only accept values smaller than the calculated value */
2178	if (value > dadeletemaxsize(softc, softc->delete_method)) {
2179		return (EINVAL);
2180	}
2181	softc->disk->d_delmaxsize = value;
2182
2183	return (0);
2184}
2185
2186static int
2187dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
2188{
2189	int error, value;
2190
2191	value = *(int *)arg1;
2192
2193	error = sysctl_handle_int(oidp, &value, 0, req);
2194
2195	if ((error != 0)
2196	 || (req->newptr == NULL))
2197		return (error);
2198
2199	/*
2200	 * Acceptable values here are 6, 10, 12 or 16.
2201	 */
2202	if (value < 6)
2203		value = 6;
2204	else if ((value > 6)
2205	      && (value <= 10))
2206		value = 10;
2207	else if ((value > 10)
2208	      && (value <= 12))
2209		value = 12;
2210	else if (value > 12)
2211		value = 16;
2212
2213	*(int *)arg1 = value;
2214
2215	return (0);
2216}
2217
2218static int
2219dasysctlsofttimeout(SYSCTL_HANDLER_ARGS)
2220{
2221	sbintime_t value;
2222	int error;
2223
2224	value = da_default_softtimeout / SBT_1MS;
2225
2226	error = sysctl_handle_int(oidp, (int *)&value, 0, req);
2227	if ((error != 0) || (req->newptr == NULL))
2228		return (error);
2229
2230	/* XXX Should clip this to a reasonable level */
2231	if (value > da_default_timeout * 1000)
2232		return (EINVAL);
2233
2234	da_default_softtimeout = value * SBT_1MS;
2235	return (0);
2236}
2237
2238static void
2239dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
2240{
2241
2242	softc->delete_method = delete_method;
2243	softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
2244	softc->delete_func = da_delete_functions[delete_method];
2245
2246	if (softc->delete_method > DA_DELETE_DISABLE)
2247		softc->disk->d_flags |= DISKFLAG_CANDELETE;
2248	else
2249		softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
2250}
2251
2252static off_t
2253dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
2254{
2255	off_t sectors;
2256
2257	switch(delete_method) {
2258	case DA_DELETE_UNMAP:
2259		sectors = (off_t)softc->unmap_max_lba;
2260		break;
2261	case DA_DELETE_ATA_TRIM:
2262		sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
2263		break;
2264	case DA_DELETE_WS16:
2265		sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
2266		break;
2267	case DA_DELETE_ZERO:
2268	case DA_DELETE_WS10:
2269		sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
2270		break;
2271	default:
2272		return 0;
2273	}
2274
2275	return (off_t)softc->params.secsize *
2276	    omin(sectors, softc->params.sectors);
2277}
2278
/*
 * Complete the probe sequence for a da(4) peripheral: select the best
 * available BIO_DELETE method, optionally announce capabilities on the
 * console, and transition the softc into DA_STATE_NORMAL so regular
 * I/O can be scheduled.
 */
static void
daprobedone(struct cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	/* Pick the preferred delete method from what probing discovered. */
	dadeletemethodchoose(softc, DA_DELETE_NONE);

	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		char buf[80];
		int i, sep;

		/*
		 * Build a list of all available delete methods; "(*)"
		 * marks the one currently selected.
		 */
		snprintf(buf, sizeof(buf), "Delete methods: <");
		sep = 0;
		for (i = 0; i <= DA_DELETE_MAX; i++) {
			if ((softc->delete_available & (1 << i)) == 0 &&
			    i != softc->delete_method)
				continue;
			if (sep)
				strlcat(buf, ",", sizeof(buf));
			strlcat(buf, da_delete_method_names[i],
			    sizeof(buf));
			if (i == softc->delete_method)
				strlcat(buf, "(*)", sizeof(buf));
			sep = 1;
		}
		strlcat(buf, ">", sizeof(buf));
		printf("%s%d: %s\n", periph->periph_name,
		    periph->unit_number, buf);
	}
	if ((softc->disk->d_flags & DISKFLAG_WRITE_PROTECT) != 0 &&
	    (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		printf("%s%d: Write Protected\n", periph->periph_name,
		    periph->unit_number);
	}

	/*
	 * Since our peripheral may be invalidated by an error
	 * above or an external event, we must release our CCB
	 * before releasing the probe lock on the peripheral.
	 * The peripheral will only go away once the last lock
	 * is removed, and we need it around for the CCB release
	 * operation.
	 */
	xpt_release_ccb(ccb);
	softc->state = DA_STATE_NORMAL;
	softc->flags |= DA_FLAG_PROBED;
	daschedule(periph);
	/*
	 * Wake any thread sleeping on the media size channel.
	 * NOTE(review): presumably an open path waits here until probing
	 * determines the media size — confirm against the sleeper.
	 */
	wakeup(&softc->disk->d_mediasize);
	if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		/* First completion: drop the probe hold taken at register. */
		softc->flags |= DA_FLAG_ANNOUNCED;
		cam_periph_unhold(periph);
	} else
		cam_periph_release_locked(periph);
}
2335
2336static void
2337dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
2338{
2339	int i, methods;
2340
2341	/* If available, prefer the method requested by user. */
2342	i = softc->delete_method_pref;
2343	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2344	if (methods & (1 << i)) {
2345		dadeletemethodset(softc, i);
2346		return;
2347	}
2348
2349	/* Use the pre-defined order to choose the best performing delete. */
2350	for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
2351		if (i == DA_DELETE_ZERO)
2352			continue;
2353		if (softc->delete_available & (1 << i)) {
2354			dadeletemethodset(softc, i);
2355			return;
2356		}
2357	}
2358
2359	/* Fallback to default. */
2360	dadeletemethodset(softc, default_method);
2361}
2362
2363static int
2364dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
2365{
2366	char buf[16];
2367	const char *p;
2368	struct da_softc *softc;
2369	int i, error, methods, value;
2370
2371	softc = (struct da_softc *)arg1;
2372
2373	value = softc->delete_method;
2374	if (value < 0 || value > DA_DELETE_MAX)
2375		p = "UNKNOWN";
2376	else
2377		p = da_delete_method_names[value];
2378	strncpy(buf, p, sizeof(buf));
2379	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2380	if (error != 0 || req->newptr == NULL)
2381		return (error);
2382	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2383	for (i = 0; i <= DA_DELETE_MAX; i++) {
2384		if (strcmp(buf, da_delete_method_names[i]) == 0)
2385			break;
2386	}
2387	if (i > DA_DELETE_MAX)
2388		return (EINVAL);
2389	softc->delete_method_pref = i;
2390	dadeletemethodchoose(softc, DA_DELETE_NONE);
2391	return (0);
2392}
2393
2394static int
2395dazonemodesysctl(SYSCTL_HANDLER_ARGS)
2396{
2397	char tmpbuf[40];
2398	struct da_softc *softc;
2399	int error;
2400
2401	softc = (struct da_softc *)arg1;
2402
2403	switch (softc->zone_mode) {
2404	case DA_ZONE_DRIVE_MANAGED:
2405		snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
2406		break;
2407	case DA_ZONE_HOST_AWARE:
2408		snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
2409		break;
2410	case DA_ZONE_HOST_MANAGED:
2411		snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
2412		break;
2413	case DA_ZONE_NONE:
2414	default:
2415		snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
2416		break;
2417	}
2418
2419	error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
2420
2421	return (error);
2422}
2423
2424static int
2425dazonesupsysctl(SYSCTL_HANDLER_ARGS)
2426{
2427	char tmpbuf[180];
2428	struct da_softc *softc;
2429	struct sbuf sb;
2430	int error, first;
2431	unsigned int i;
2432
2433	softc = (struct da_softc *)arg1;
2434
2435	error = 0;
2436	first = 1;
2437	sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
2438
2439	for (i = 0; i < sizeof(da_zone_desc_table) /
2440	     sizeof(da_zone_desc_table[0]); i++) {
2441		if (softc->zone_flags & da_zone_desc_table[i].value) {
2442			if (first == 0)
2443				sbuf_printf(&sb, ", ");
2444			else
2445				first = 0;
2446			sbuf_cat(&sb, da_zone_desc_table[i].desc);
2447		}
2448	}
2449
2450	if (first == 1)
2451		sbuf_printf(&sb, "None");
2452
2453	sbuf_finish(&sb);
2454
2455	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
2456
2457	return (error);
2458}
2459
/*
 * CAM peripheral registration callback for da(4).  Allocates and
 * initializes the softc, applies quirk and zoning classification,
 * creates the GEOM disk, registers async event callbacks, and kicks
 * off the probe state machine (starting at DA_STATE_PROBE_WP).
 * Returns CAM_REQ_CMP on success, CAM_REQ_CMP_ERR on any failure.
 */
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	struct da_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char tmpstr[80];
	caddr_t match;

	cgd = (struct ccb_getdev *)arg;
	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate iosched memory\n");
		free(softc, M_DEVBUF);
		return(CAM_REQ_CMP_ERR);
	}

	/*
	 * Seed softc defaults; the probe state machine refines these
	 * (unmap/trim limits, rotation rate, etc.) as data arrives.
	 */
	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE_WP;
	bioq_init(&softc->delete_run_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	softc->unmap_max_ranges = UNMAP_MAX_RANGES;
	softc->unmap_max_lba = UNMAP_RANGE_MAX;
	softc->unmap_gran = 0;
	softc->unmap_gran_align = 0;
	softc->ws_max_blks = WS16_MAX_BLKS;
	softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
	softc->rotating = 1;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       nitems(da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/* Check if the SIM does not want 6 byte commands */
	xpt_path_inq(&cpi, periph->path);
	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
		softc->quirks |= DA_Q_NO_6_BYTE;

	/*
	 * Classify zoned (SMR) behavior: a T_ZBC_HM device type means
	 * host-managed; a DA_Q_SMR_DM quirk marks drive-managed drives
	 * that are otherwise indistinguishable from regular disks.
	 */
	if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
		softc->zone_mode = DA_ZONE_HOST_MANAGED;
	else if (softc->quirks & DA_Q_SMR_DM)
		softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
	else
		softc->zone_mode = DA_ZONE_NONE;

	/*
	 * For zoned devices, pick how zone commands will be issued:
	 * via SAT translation, raw ATA passthrough, or native SCSI ZBC,
	 * based on which VPD pages the device advertises.
	 */
	if (softc->zone_mode != DA_ZONE_NONE) {
		if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
			if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
				softc->zone_interface = DA_ZONE_IF_ATA_SAT;
			else
				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
		} else
			softc->zone_interface = DA_ZONE_IF_SCSI;
	}

	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);

	/*
	 * Take an exclusive refcount on the periph while dastart is called
	 * to finish the probe.  The reference will be dropped in dadone at
	 * the end of probe.
	 */
	(void)cam_periph_hold(periph, PRIBIO);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
	callout_reset(&softc->sendordered_c,
	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
	    dasendorderedtag, softc);

	/* The periph lock is dropped across GEOM registration below. */
	cam_periph_unlock(periph);
	/*
	 * RBC devices don't have to support READ(6), only READ(10).
	 */
	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Load the user's default, if any.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
		 periph->unit_number);
	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);

	/*
	 * 6, 10, 12 and 16 are the currently permissible values.
	 * (This mirrors the clamping done in dacmdsizesysctl().)
	 */
	if (softc->minimum_cmd_size < 6)
		softc->minimum_cmd_size = 6;
	else if ((softc->minimum_cmd_size > 6)
	      && (softc->minimum_cmd_size <= 10))
		softc->minimum_cmd_size = 10;
	else if ((softc->minimum_cmd_size > 10)
	      && (softc->minimum_cmd_size <= 12))
		softc->minimum_cmd_size = 12;
	else if (softc->minimum_cmd_size > 12)
		softc->minimum_cmd_size = 16;

	/* Predict whether device may support READ CAPACITY(16). */
	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
	    (softc->quirks & DA_Q_NO_RC16) == 0) {
		softc->flags |= DA_FLAG_CAN_RC16;
	}

	/*
	 * Register this media as a disk.
	 */
	softc->disk = disk_alloc();
	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  SID_TYPE(&cgd->inq_data) |
			  XPORT_DEVSTAT_TYPE(cpi.transport),
			  DEVSTAT_PRIORITY_DISK);
	softc->disk->d_open = daopen;
	softc->disk->d_close = daclose;
	softc->disk->d_strategy = dastrategy;
	softc->disk->d_dump = dadump;
	softc->disk->d_getattr = dagetattr;
	softc->disk->d_gone = dadiskgonecb;
	softc->disk->d_name = "da";
	softc->disk->d_drv1 = periph;
	/* Bound the maximum I/O size by the SIM's limit and MAXPHYS. */
	if (cpi.maxio == 0)
		softc->maxio = DFLTPHYS;	/* traditional default */
	else if (cpi.maxio > MAXPHYS)
		softc->maxio = MAXPHYS;		/* for safety */
	else
		softc->maxio = cpi.maxio;
	if (softc->quirks & DA_Q_128KB)
		softc->maxio = min(softc->maxio, 128 * 1024);
	softc->disk->d_maxsize = softc->maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
		softc->unmappedio = 1;
		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
	}
	/* Build the "<vendor> <product>" description string. */
	cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
	    sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
	strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
	cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
	    cgd->inq_data.product, sizeof(cgd->inq_data.product),
	    sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
	softc->disk->d_hba_vendor = cpi.hba_vendor;
	softc->disk->d_hba_device = cpi.hba_device;
	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
	softc->disk->d_hba_subdevice = cpi.hba_subdevice;

	/*
	 * Acquire a reference to the periph before we register with GEOM.
	 * We'll release this reference once GEOM calls us back (via
	 * dadiskgonecb()) telling us that our provider has been freed.
	 */
	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		xpt_print(periph->path, "%s: lost periph during "
			  "registration!\n", __func__);
		cam_periph_lock(periph);
		return (CAM_REQ_CMP_ERR);
	}

	disk_create(softc->disk, DISK_VERSION);
	cam_periph_lock(periph);

	/*
	 * Add async callbacks for events of interest.
	 * I don't bother checking if this fails as,
	 * in most cases, the system will function just
	 * fine without them and the only alternative
	 * would be to not attach the device on failure.
	 */
	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
	    AC_INQ_CHANGED, daasync, periph, periph->path);

	/*
	 * Emit an attribute changed notification just in case
	 * physical path information arrived before our async
	 * event handler was registered, but after anyone attaching
	 * to our disk device polled it.
	 */
	disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT);

	/*
	 * Schedule a periodic media polling events.
	 */
	callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
	    (cgd->inq_flags & SID_AEN) == 0 &&
	    da_poll_period != 0)
		callout_reset(&softc->mediapoll_c, da_poll_period * hz,
		    damediapoll, periph);

	/* Start the probe state machine (dastart will run next). */
	xpt_schedule(periph, CAM_PRIORITY_DEV);

	return(CAM_REQ_CMP);
}
2689
2690static int
2691da_zone_bio_to_scsi(int disk_zone_cmd)
2692{
2693	switch (disk_zone_cmd) {
2694	case DISK_ZONE_OPEN:
2695		return ZBC_OUT_SA_OPEN;
2696	case DISK_ZONE_CLOSE:
2697		return ZBC_OUT_SA_CLOSE;
2698	case DISK_ZONE_FINISH:
2699		return ZBC_OUT_SA_FINISH;
2700	case DISK_ZONE_RWP:
2701		return ZBC_OUT_SA_RWP;
2702	}
2703
2704	return -1;
2705}
2706
2707static int
2708da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
2709	    int *queue_ccb)
2710{
2711	struct da_softc *softc;
2712	int error;
2713
2714	error = 0;
2715
2716	if (bp->bio_cmd != BIO_ZONE) {
2717		error = EINVAL;
2718		goto bailout;
2719	}
2720
2721	softc = periph->softc;
2722
2723	switch (bp->bio_zone.zone_cmd) {
2724	case DISK_ZONE_OPEN:
2725	case DISK_ZONE_CLOSE:
2726	case DISK_ZONE_FINISH:
2727	case DISK_ZONE_RWP: {
2728		int zone_flags;
2729		int zone_sa;
2730		uint64_t lba;
2731
2732		zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
2733		if (zone_sa == -1) {
2734			xpt_print(periph->path, "Cannot translate zone "
2735			    "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd);
2736			error = EINVAL;
2737			goto bailout;
2738		}
2739
2740		zone_flags = 0;
2741		lba = bp->bio_zone.zone_params.rwp.id;
2742
2743		if (bp->bio_zone.zone_params.rwp.flags &
2744		    DISK_ZONE_RWP_FLAG_ALL)
2745			zone_flags |= ZBC_OUT_ALL;
2746
2747		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
2748			scsi_zbc_out(&ccb->csio,
2749				     /*retries*/ da_retry_count,
2750				     /*cbfcnp*/ dadone,
2751				     /*tag_action*/ MSG_SIMPLE_Q_TAG,
2752				     /*service_action*/ zone_sa,
2753				     /*zone_id*/ lba,
2754				     /*zone_flags*/ zone_flags,
2755				     /*data_ptr*/ NULL,
2756				     /*dxfer_len*/ 0,
2757				     /*sense_len*/ SSD_FULL_SIZE,
2758				     /*timeout*/ da_default_timeout * 1000);
2759		} else {
2760			/*
2761			 * Note that in this case, even though we can
2762			 * technically use NCQ, we don't bother for several
2763			 * reasons:
2764			 * 1. It hasn't been tested on a SAT layer that
2765			 *    supports it.  This is new as of SAT-4.
2766			 * 2. Even when there is a SAT layer that supports
2767			 *    it, that SAT layer will also probably support
2768			 *    ZBC -> ZAC translation, since they are both
2769			 *    in the SAT-4 spec.
2770			 * 3. Translation will likely be preferable to ATA
2771			 *    passthrough.  LSI / Avago at least single
2772			 *    steps ATA passthrough commands in the HBA,
2773			 *    regardless of protocol, so unless that
2774			 *    changes, there is a performance penalty for
2775			 *    doing ATA passthrough no matter whether
2776			 *    you're using NCQ/FPDMA, DMA or PIO.
2777			 * 4. It requires a 32-byte CDB, which at least at
2778			 *    this point in CAM requires a CDB pointer, which
2779			 *    would require us to allocate an additional bit
2780			 *    of storage separate from the CCB.
2781			 */
2782			error = scsi_ata_zac_mgmt_out(&ccb->csio,
2783			    /*retries*/ da_retry_count,
2784			    /*cbfcnp*/ dadone,
2785			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
2786			    /*use_ncq*/ 0,
2787			    /*zm_action*/ zone_sa,
2788			    /*zone_id*/ lba,
2789			    /*zone_flags*/ zone_flags,
2790			    /*data_ptr*/ NULL,
2791			    /*dxfer_len*/ 0,
2792			    /*cdb_storage*/ NULL,
2793			    /*cdb_storage_len*/ 0,
2794			    /*sense_len*/ SSD_FULL_SIZE,
2795			    /*timeout*/ da_default_timeout * 1000);
2796			if (error != 0) {
2797				error = EINVAL;
2798				xpt_print(periph->path,
2799				    "scsi_ata_zac_mgmt_out() returned an "
2800				    "error!");
2801				goto bailout;
2802			}
2803		}
2804		*queue_ccb = 1;
2805
2806		break;
2807	}
2808	case DISK_ZONE_REPORT_ZONES: {
2809		uint8_t *rz_ptr;
2810		uint32_t num_entries, alloc_size;
2811		struct disk_zone_report *rep;
2812
2813		rep = &bp->bio_zone.zone_params.report;
2814
2815		num_entries = rep->entries_allocated;
2816		if (num_entries == 0) {
2817			xpt_print(periph->path, "No entries allocated for "
2818			    "Report Zones request\n");
2819			error = EINVAL;
2820			goto bailout;
2821		}
2822		alloc_size = sizeof(struct scsi_report_zones_hdr) +
2823		    (sizeof(struct scsi_report_zones_desc) * num_entries);
2824		alloc_size = min(alloc_size, softc->disk->d_maxsize);
2825		rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
2826		if (rz_ptr == NULL) {
2827			xpt_print(periph->path, "Unable to allocate memory "
2828			   "for Report Zones request\n");
2829			error = ENOMEM;
2830			goto bailout;
2831		}
2832
2833		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
2834			scsi_zbc_in(&ccb->csio,
2835				    /*retries*/ da_retry_count,
2836				    /*cbcfnp*/ dadone,
2837				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
2838				    /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
2839				    /*zone_start_lba*/ rep->starting_id,
2840				    /*zone_options*/ rep->rep_options,
2841				    /*data_ptr*/ rz_ptr,
2842				    /*dxfer_len*/ alloc_size,
2843				    /*sense_len*/ SSD_FULL_SIZE,
2844				    /*timeout*/ da_default_timeout * 1000);
2845		} else {
2846			/*
2847			 * Note that in this case, even though we can
2848			 * technically use NCQ, we don't bother for several
2849			 * reasons:
2850			 * 1. It hasn't been tested on a SAT layer that
2851			 *    supports it.  This is new as of SAT-4.
2852			 * 2. Even when there is a SAT layer that supports
2853			 *    it, that SAT layer will also probably support
2854			 *    ZBC -> ZAC translation, since they are both
2855			 *    in the SAT-4 spec.
2856			 * 3. Translation will likely be preferable to ATA
2857			 *    passthrough.  LSI / Avago at least single
2858			 *    steps ATA passthrough commands in the HBA,
2859			 *    regardless of protocol, so unless that
2860			 *    changes, there is a performance penalty for
2861			 *    doing ATA passthrough no matter whether
2862			 *    you're using NCQ/FPDMA, DMA or PIO.
2863			 * 4. It requires a 32-byte CDB, which at least at
2864			 *    this point in CAM requires a CDB pointer, which
2865			 *    would require us to allocate an additional bit
2866			 *    of storage separate from the CCB.
2867			 */
2868			error = scsi_ata_zac_mgmt_in(&ccb->csio,
2869			    /*retries*/ da_retry_count,
2870			    /*cbcfnp*/ dadone,
2871			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
2872			    /*use_ncq*/ 0,
2873			    /*zm_action*/ ATA_ZM_REPORT_ZONES,
2874			    /*zone_id*/ rep->starting_id,
2875			    /*zone_flags*/ rep->rep_options,
2876			    /*data_ptr*/ rz_ptr,
2877			    /*dxfer_len*/ alloc_size,
2878			    /*cdb_storage*/ NULL,
2879			    /*cdb_storage_len*/ 0,
2880			    /*sense_len*/ SSD_FULL_SIZE,
2881			    /*timeout*/ da_default_timeout * 1000);
2882			if (error != 0) {
2883				error = EINVAL;
2884				xpt_print(periph->path,
2885				    "scsi_ata_zac_mgmt_in() returned an "
2886				    "error!");
2887				goto bailout;
2888			}
2889		}
2890
2891		/*
2892		 * For BIO_ZONE, this isn't normally needed.  However, it
2893		 * is used by devstat_end_transaction_bio() to determine
2894		 * how much data was transferred.
2895		 */
2896		/*
2897		 * XXX KDM we have a problem.  But I'm not sure how to fix
2898		 * it.  devstat uses bio_bcount - bio_resid to calculate
2899		 * the amount of data transferred.   The GEOM disk code
2900		 * uses bio_length - bio_resid to calculate the amount of
2901		 * data in bio_completed.  We have different structure
2902		 * sizes above and below the ada(4) driver.  So, if we
2903		 * use the sizes above, the amount transferred won't be
2904		 * quite accurate for devstat.  If we use different sizes
2905		 * for bio_bcount and bio_length (above and below
2906		 * respectively), then the residual needs to match one or
2907		 * the other.  Everything is calculated after the bio
2908		 * leaves the driver, so changing the values around isn't
2909		 * really an option.  For now, just set the count to the
2910		 * passed in length.  This means that the calculations
2911		 * above (e.g. bio_completed) will be correct, but the
2912		 * amount of data reported to devstat will be slightly
2913		 * under or overstated.
2914		 */
2915		bp->bio_bcount = bp->bio_length;
2916
2917		*queue_ccb = 1;
2918
2919		break;
2920	}
2921	case DISK_ZONE_GET_PARAMS: {
2922		struct disk_zone_disk_params *params;
2923
2924		params = &bp->bio_zone.zone_params.disk_params;
2925		bzero(params, sizeof(*params));
2926
2927		switch (softc->zone_mode) {
2928		case DA_ZONE_DRIVE_MANAGED:
2929			params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
2930			break;
2931		case DA_ZONE_HOST_AWARE:
2932			params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
2933			break;
2934		case DA_ZONE_HOST_MANAGED:
2935			params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
2936			break;
2937		default:
2938		case DA_ZONE_NONE:
2939			params->zone_mode = DISK_ZONE_MODE_NONE;
2940			break;
2941		}
2942
2943		if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
2944			params->flags |= DISK_ZONE_DISK_URSWRZ;
2945
2946		if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
2947			params->optimal_seq_zones = softc->optimal_seq_zones;
2948			params->flags |= DISK_ZONE_OPT_SEQ_SET;
2949		}
2950
2951		if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
2952			params->optimal_nonseq_zones =
2953			    softc->optimal_nonseq_zones;
2954			params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
2955		}
2956
2957		if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
2958			params->max_seq_zones = softc->max_seq_zones;
2959			params->flags |= DISK_ZONE_MAX_SEQ_SET;
2960		}
2961		if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
2962			params->flags |= DISK_ZONE_RZ_SUP;
2963
2964		if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
2965			params->flags |= DISK_ZONE_OPEN_SUP;
2966
2967		if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
2968			params->flags |= DISK_ZONE_CLOSE_SUP;
2969
2970		if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
2971			params->flags |= DISK_ZONE_FINISH_SUP;
2972
2973		if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
2974			params->flags |= DISK_ZONE_RWP_SUP;
2975		break;
2976	}
2977	default:
2978		break;
2979	}
2980bailout:
2981	return (error);
2982}
2983
2984static void
2985dastart(struct cam_periph *periph, union ccb *start_ccb)
2986{
2987	struct da_softc *softc;
2988
2989	softc = (struct da_softc *)periph->softc;
2990
2991	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n"));
2992
2993skipstate:
2994	switch (softc->state) {
2995	case DA_STATE_NORMAL:
2996	{
2997		struct bio *bp;
2998		uint8_t tag_code;
2999
3000more:
3001		bp = cam_iosched_next_bio(softc->cam_iosched);
3002		if (bp == NULL) {
3003			if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
3004				cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR);
3005				scsi_test_unit_ready(&start_ccb->csio,
3006				     /*retries*/ da_retry_count,
3007				     dadone,
3008				     MSG_SIMPLE_Q_TAG,
3009				     SSD_FULL_SIZE,
3010				     da_default_timeout * 1000);
3011				start_ccb->ccb_h.ccb_bp = NULL;
3012				start_ccb->ccb_h.ccb_state = DA_CCB_TUR;
3013				xpt_action(start_ccb);
3014			} else
3015				xpt_release_ccb(start_ccb);
3016			break;
3017		}
3018
3019		if (bp->bio_cmd == BIO_DELETE) {
3020			if (softc->delete_func != NULL) {
3021				softc->delete_func(periph, start_ccb, bp);
3022				goto out;
3023			} else {
3024				/* Not sure this is possible, but failsafe by lying and saying "sure, done." */
3025				biofinish(bp, NULL, 0);
3026				goto more;
3027			}
3028		}
3029
3030		if (cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR)) {
3031			cam_iosched_clr_work_flags(softc->cam_iosched, DA_WORK_TUR);
3032			cam_periph_release_locked(periph);
3033		}
3034
3035		if ((bp->bio_flags & BIO_ORDERED) != 0 ||
3036		    (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
3037			softc->flags &= ~DA_FLAG_NEED_OTAG;
3038			softc->flags |= DA_FLAG_WAS_OTAG;
3039			tag_code = MSG_ORDERED_Q_TAG;
3040		} else {
3041			tag_code = MSG_SIMPLE_Q_TAG;
3042		}
3043
3044		switch (bp->bio_cmd) {
3045		case BIO_WRITE:
3046		case BIO_READ:
3047		{
3048			void *data_ptr;
3049			int rw_op;
3050
3051			if (bp->bio_cmd == BIO_WRITE) {
3052				softc->flags |= DA_FLAG_DIRTY;
3053				rw_op = SCSI_RW_WRITE;
3054			} else {
3055				rw_op = SCSI_RW_READ;
3056			}
3057
3058			data_ptr = bp->bio_data;
3059			if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
3060				rw_op |= SCSI_RW_BIO;
3061				data_ptr = bp;
3062			}
3063
3064			scsi_read_write(&start_ccb->csio,
3065					/*retries*/da_retry_count,
3066					/*cbfcnp*/dadone,
3067					/*tag_action*/tag_code,
3068					rw_op,
3069					/*byte2*/0,
3070					softc->minimum_cmd_size,
3071					/*lba*/bp->bio_pblkno,
3072					/*block_count*/bp->bio_bcount /
3073					softc->params.secsize,
3074					data_ptr,
3075					/*dxfer_len*/ bp->bio_bcount,
3076					/*sense_len*/SSD_FULL_SIZE,
3077					da_default_timeout * 1000);
3078			break;
3079		}
3080		case BIO_FLUSH:
3081			/*
3082			 * If we don't support sync cache, or the disk
3083			 * isn't dirty, FLUSH is a no-op.  Use the
3084			 * allocated * CCB for the next bio if one is
3085			 * available.
3086			 */
3087			if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 ||
3088			    (softc->flags & DA_FLAG_DIRTY) == 0) {
3089				biodone(bp);
3090				goto skipstate;
3091			}
3092
3093			/*
3094			 * BIO_FLUSH doesn't currently communicate
3095			 * range data, so we synchronize the cache
3096			 * over the whole disk.
3097			 */
3098			scsi_synchronize_cache(&start_ccb->csio,
3099					       /*retries*/1,
3100					       /*cbfcnp*/dadone,
3101					       /*tag_action*/tag_code,
3102					       /*begin_lba*/0,
3103					       /*lb_count*/0,
3104					       SSD_FULL_SIZE,
3105					       da_default_timeout*1000);
3106			/*
3107			 * Clear the dirty flag before sending the command.
3108			 * Either this sync cache will be successful, or it
3109			 * will fail after a retry.  If it fails, it is
3110			 * unlikely to be successful if retried later, so
3111			 * we'll save ourselves time by just marking the
3112			 * device clean.
3113			 */
3114			softc->flags &= ~DA_FLAG_DIRTY;
3115			break;
3116		case BIO_ZONE: {
3117			int error, queue_ccb;
3118
3119			queue_ccb = 0;
3120
3121			error = da_zone_cmd(periph, start_ccb, bp,&queue_ccb);
3122			if ((error != 0)
3123			 || (queue_ccb == 0)) {
3124				biofinish(bp, NULL, error);
3125				xpt_release_ccb(start_ccb);
3126				return;
3127			}
3128			break;
3129		}
3130		}
3131		start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
3132		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
3133		start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout);
3134
3135out:
3136		LIST_INSERT_HEAD(&softc->pending_ccbs,
3137				 &start_ccb->ccb_h, periph_links.le);
3138
3139		/* We expect a unit attention from this device */
3140		if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
3141			start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
3142			softc->flags &= ~DA_FLAG_RETRY_UA;
3143		}
3144
3145		start_ccb->ccb_h.ccb_bp = bp;
3146		softc->refcount++;
3147		cam_periph_unlock(periph);
3148		xpt_action(start_ccb);
3149		cam_periph_lock(periph);
3150		softc->refcount--;
3151
3152		/* May have more work to do, so ensure we stay scheduled */
3153		daschedule(periph);
3154		break;
3155	}
3156	case DA_STATE_PROBE_WP:
3157	{
3158		void  *mode_buf;
3159		int    mode_buf_len;
3160
3161		if (da_disable_wp_detection) {
3162			if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
3163				softc->state = DA_STATE_PROBE_RC16;
3164			else
3165				softc->state = DA_STATE_PROBE_RC;
3166			goto skipstate;
3167		}
3168		mode_buf_len = 192;
3169		mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT);
3170		if (mode_buf == NULL) {
3171			xpt_print(periph->path, "Unable to send mode sense - "
3172			    "malloc failure\n");
3173			if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
3174				softc->state = DA_STATE_PROBE_RC16;
3175			else
3176				softc->state = DA_STATE_PROBE_RC;
3177			goto skipstate;
3178		}
3179		scsi_mode_sense_len(&start_ccb->csio,
3180				    /*retries*/ da_retry_count,
3181				    /*cbfcnp*/ dadone,
3182				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3183				    /*dbd*/ FALSE,
3184				    /*pc*/ SMS_PAGE_CTRL_CURRENT,
3185				    /*page*/ SMS_ALL_PAGES_PAGE,
3186				    /*param_buf*/ mode_buf,
3187				    /*param_len*/ mode_buf_len,
3188				    /*minimum_cmd_size*/ softc->minimum_cmd_size,
3189				    /*sense_len*/ SSD_FULL_SIZE,
3190				    /*timeout*/ da_default_timeout * 1000);
3191		start_ccb->ccb_h.ccb_bp = NULL;
3192		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP;
3193		xpt_action(start_ccb);
3194		break;
3195	}
3196	case DA_STATE_PROBE_RC:
3197	{
3198		struct scsi_read_capacity_data *rcap;
3199
3200		rcap = (struct scsi_read_capacity_data *)
3201		    malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO);
3202		if (rcap == NULL) {
3203			printf("dastart: Couldn't malloc read_capacity data\n");
3204			/* da_free_periph??? */
3205			break;
3206		}
3207		scsi_read_capacity(&start_ccb->csio,
3208				   /*retries*/da_retry_count,
3209				   dadone,
3210				   MSG_SIMPLE_Q_TAG,
3211				   rcap,
3212				   SSD_FULL_SIZE,
3213				   /*timeout*/5000);
3214		start_ccb->ccb_h.ccb_bp = NULL;
3215		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC;
3216		xpt_action(start_ccb);
3217		break;
3218	}
3219	case DA_STATE_PROBE_RC16:
3220	{
3221		struct scsi_read_capacity_data_long *rcaplong;
3222
3223		rcaplong = (struct scsi_read_capacity_data_long *)
3224			malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO);
3225		if (rcaplong == NULL) {
3226			printf("dastart: Couldn't malloc read_capacity data\n");
3227			/* da_free_periph??? */
3228			break;
3229		}
3230		scsi_read_capacity_16(&start_ccb->csio,
3231				      /*retries*/ da_retry_count,
3232				      /*cbfcnp*/ dadone,
3233				      /*tag_action*/ MSG_SIMPLE_Q_TAG,
3234				      /*lba*/ 0,
3235				      /*reladr*/ 0,
3236				      /*pmi*/ 0,
3237				      /*rcap_buf*/ (uint8_t *)rcaplong,
3238				      /*rcap_buf_len*/ sizeof(*rcaplong),
3239				      /*sense_len*/ SSD_FULL_SIZE,
3240				      /*timeout*/ da_default_timeout * 1000);
3241		start_ccb->ccb_h.ccb_bp = NULL;
3242		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16;
3243		xpt_action(start_ccb);
3244		break;
3245	}
3246	case DA_STATE_PROBE_LBP:
3247	{
3248		struct scsi_vpd_logical_block_prov *lbp;
3249
3250		if (!scsi_vpd_supported_page(periph, SVPD_LBP)) {
3251			/*
3252			 * If we get here we don't support any SBC-3 delete
3253			 * methods with UNMAP as the Logical Block Provisioning
3254			 * VPD page support is required for devices which
3255			 * support it according to T10/1799-D Revision 31
3256			 * however older revisions of the spec don't mandate
3257			 * this so we currently don't remove these methods
3258			 * from the available set.
3259			 */
3260			softc->state = DA_STATE_PROBE_BLK_LIMITS;
3261			goto skipstate;
3262		}
3263
3264		lbp = (struct scsi_vpd_logical_block_prov *)
3265			malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO);
3266
3267		if (lbp == NULL) {
3268			printf("dastart: Couldn't malloc lbp data\n");
3269			/* da_free_periph??? */
3270			break;
3271		}
3272
3273		scsi_inquiry(&start_ccb->csio,
3274			     /*retries*/da_retry_count,
3275			     /*cbfcnp*/dadone,
3276			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3277			     /*inq_buf*/(u_int8_t *)lbp,
3278			     /*inq_len*/sizeof(*lbp),
3279			     /*evpd*/TRUE,
3280			     /*page_code*/SVPD_LBP,
3281			     /*sense_len*/SSD_MIN_SIZE,
3282			     /*timeout*/da_default_timeout * 1000);
3283		start_ccb->ccb_h.ccb_bp = NULL;
3284		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP;
3285		xpt_action(start_ccb);
3286		break;
3287	}
3288	case DA_STATE_PROBE_BLK_LIMITS:
3289	{
3290		struct scsi_vpd_block_limits *block_limits;
3291
3292		if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) {
3293			/* Not supported skip to next probe */
3294			softc->state = DA_STATE_PROBE_BDC;
3295			goto skipstate;
3296		}
3297
3298		block_limits = (struct scsi_vpd_block_limits *)
3299			malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO);
3300
3301		if (block_limits == NULL) {
3302			printf("dastart: Couldn't malloc block_limits data\n");
3303			/* da_free_periph??? */
3304			break;
3305		}
3306
3307		scsi_inquiry(&start_ccb->csio,
3308			     /*retries*/da_retry_count,
3309			     /*cbfcnp*/dadone,
3310			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3311			     /*inq_buf*/(u_int8_t *)block_limits,
3312			     /*inq_len*/sizeof(*block_limits),
3313			     /*evpd*/TRUE,
3314			     /*page_code*/SVPD_BLOCK_LIMITS,
3315			     /*sense_len*/SSD_MIN_SIZE,
3316			     /*timeout*/da_default_timeout * 1000);
3317		start_ccb->ccb_h.ccb_bp = NULL;
3318		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS;
3319		xpt_action(start_ccb);
3320		break;
3321	}
3322	case DA_STATE_PROBE_BDC:
3323	{
3324		struct scsi_vpd_block_characteristics *bdc;
3325
3326		if (!scsi_vpd_supported_page(periph, SVPD_BDC)) {
3327			softc->state = DA_STATE_PROBE_ATA;
3328			goto skipstate;
3329		}
3330
3331		bdc = (struct scsi_vpd_block_characteristics *)
3332			malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3333
3334		if (bdc == NULL) {
3335			printf("dastart: Couldn't malloc bdc data\n");
3336			/* da_free_periph??? */
3337			break;
3338		}
3339
3340		scsi_inquiry(&start_ccb->csio,
3341			     /*retries*/da_retry_count,
3342			     /*cbfcnp*/dadone,
3343			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3344			     /*inq_buf*/(u_int8_t *)bdc,
3345			     /*inq_len*/sizeof(*bdc),
3346			     /*evpd*/TRUE,
3347			     /*page_code*/SVPD_BDC,
3348			     /*sense_len*/SSD_MIN_SIZE,
3349			     /*timeout*/da_default_timeout * 1000);
3350		start_ccb->ccb_h.ccb_bp = NULL;
3351		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC;
3352		xpt_action(start_ccb);
3353		break;
3354	}
3355	case DA_STATE_PROBE_ATA:
3356	{
3357		struct ata_params *ata_params;
3358
3359		if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
3360			if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
3361			 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
3362				/*
3363				 * Note that if the ATA VPD page isn't
3364				 * supported, we aren't talking to an ATA
3365				 * device anyway.  Support for that VPD
3366				 * page is mandatory for SCSI to ATA (SAT)
3367				 * translation layers.
3368				 */
3369				softc->state = DA_STATE_PROBE_ZONE;
3370				goto skipstate;
3371			}
3372			daprobedone(periph, start_ccb);
3373			break;
3374		}
3375
3376		ata_params = (struct ata_params*)
3377			malloc(sizeof(*ata_params), M_SCSIDA,M_NOWAIT|M_ZERO);
3378
3379		if (ata_params == NULL) {
3380			xpt_print(periph->path, "Couldn't malloc ata_params "
3381			    "data\n");
3382			/* da_free_periph??? */
3383			break;
3384		}
3385
3386		scsi_ata_identify(&start_ccb->csio,
3387				  /*retries*/da_retry_count,
3388				  /*cbfcnp*/dadone,
3389                                  /*tag_action*/MSG_SIMPLE_Q_TAG,
3390				  /*data_ptr*/(u_int8_t *)ata_params,
3391				  /*dxfer_len*/sizeof(*ata_params),
3392				  /*sense_len*/SSD_FULL_SIZE,
3393				  /*timeout*/da_default_timeout * 1000);
3394		start_ccb->ccb_h.ccb_bp = NULL;
3395		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA;
3396		xpt_action(start_ccb);
3397		break;
3398	}
3399	case DA_STATE_PROBE_ATA_LOGDIR:
3400	{
3401		struct ata_gp_log_dir *log_dir;
3402		int retval;
3403
3404		retval = 0;
3405
3406		if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) {
3407			/*
3408			 * If we don't have log support, not much point in
3409			 * trying to probe zone support.
3410			 */
3411			daprobedone(periph, start_ccb);
3412			break;
3413		}
3414
3415		/*
3416		 * If we have an ATA device (the SCSI ATA Information VPD
3417		 * page should be present and the ATA identify should have
3418		 * succeeded) and it supports logs, ask for the log directory.
3419		 */
3420
3421		log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO);
3422		if (log_dir == NULL) {
3423			xpt_print(periph->path, "Couldn't malloc log_dir "
3424			    "data\n");
3425			daprobedone(periph, start_ccb);
3426			break;
3427		}
3428
3429		retval = scsi_ata_read_log(&start_ccb->csio,
3430		    /*retries*/ da_retry_count,
3431		    /*cbfcnp*/ dadone,
3432		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3433		    /*log_address*/ ATA_LOG_DIRECTORY,
3434		    /*page_number*/ 0,
3435		    /*block_count*/ 1,
3436		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3437				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3438		    /*data_ptr*/ (uint8_t *)log_dir,
3439		    /*dxfer_len*/ sizeof(*log_dir),
3440		    /*sense_len*/ SSD_FULL_SIZE,
3441		    /*timeout*/ da_default_timeout * 1000);
3442
3443		if (retval != 0) {
3444			xpt_print(periph->path, "scsi_ata_read_log() failed!");
3445			free(log_dir, M_SCSIDA);
3446			daprobedone(periph, start_ccb);
3447			break;
3448		}
3449		start_ccb->ccb_h.ccb_bp = NULL;
3450		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR;
3451		xpt_action(start_ccb);
3452		break;
3453	}
3454	case DA_STATE_PROBE_ATA_IDDIR:
3455	{
3456		struct ata_identify_log_pages *id_dir;
3457		int retval;
3458
3459		retval = 0;
3460
3461		/*
3462		 * Check here to see whether the Identify Device log is
3463		 * supported in the directory of logs.  If so, continue
3464		 * with requesting the log of identify device pages.
3465		 */
3466		if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) {
3467			daprobedone(periph, start_ccb);
3468			break;
3469		}
3470
3471		id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO);
3472		if (id_dir == NULL) {
3473			xpt_print(periph->path, "Couldn't malloc id_dir "
3474			    "data\n");
3475			daprobedone(periph, start_ccb);
3476			break;
3477		}
3478
3479		retval = scsi_ata_read_log(&start_ccb->csio,
3480		    /*retries*/ da_retry_count,
3481		    /*cbfcnp*/ dadone,
3482		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3483		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3484		    /*page_number*/ ATA_IDL_PAGE_LIST,
3485		    /*block_count*/ 1,
3486		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3487				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3488		    /*data_ptr*/ (uint8_t *)id_dir,
3489		    /*dxfer_len*/ sizeof(*id_dir),
3490		    /*sense_len*/ SSD_FULL_SIZE,
3491		    /*timeout*/ da_default_timeout * 1000);
3492
3493		if (retval != 0) {
3494			xpt_print(periph->path, "scsi_ata_read_log() failed!");
3495			free(id_dir, M_SCSIDA);
3496			daprobedone(periph, start_ccb);
3497			break;
3498		}
3499		start_ccb->ccb_h.ccb_bp = NULL;
3500		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR;
3501		xpt_action(start_ccb);
3502		break;
3503	}
3504	case DA_STATE_PROBE_ATA_SUP:
3505	{
3506		struct ata_identify_log_sup_cap *sup_cap;
3507		int retval;
3508
3509		retval = 0;
3510
3511		/*
3512		 * Check here to see whether the Supported Capabilities log
3513		 * is in the list of Identify Device logs.
3514		 */
3515		if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) {
3516			daprobedone(periph, start_ccb);
3517			break;
3518		}
3519
3520		sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO);
3521		if (sup_cap == NULL) {
3522			xpt_print(periph->path, "Couldn't malloc sup_cap "
3523			    "data\n");
3524			daprobedone(periph, start_ccb);
3525			break;
3526		}
3527
3528		retval = scsi_ata_read_log(&start_ccb->csio,
3529		    /*retries*/ da_retry_count,
3530		    /*cbfcnp*/ dadone,
3531		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3532		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3533		    /*page_number*/ ATA_IDL_SUP_CAP,
3534		    /*block_count*/ 1,
3535		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3536				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3537		    /*data_ptr*/ (uint8_t *)sup_cap,
3538		    /*dxfer_len*/ sizeof(*sup_cap),
3539		    /*sense_len*/ SSD_FULL_SIZE,
3540		    /*timeout*/ da_default_timeout * 1000);
3541
3542		if (retval != 0) {
3543			xpt_print(periph->path, "scsi_ata_read_log() failed!");
3544			free(sup_cap, M_SCSIDA);
3545			daprobedone(periph, start_ccb);
3546			break;
3547
3548		}
3549
3550		start_ccb->ccb_h.ccb_bp = NULL;
3551		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
3552		xpt_action(start_ccb);
3553		break;
3554	}
3555	case DA_STATE_PROBE_ATA_ZONE:
3556	{
3557		struct ata_zoned_info_log *ata_zone;
3558		int retval;
3559
3560		retval = 0;
3561
3562		/*
3563		 * Check here to see whether the zoned device information
3564		 * page is supported.  If so, continue on to request it.
3565		 * If not, skip to DA_STATE_PROBE_LOG or done.
3566		 */
3567		if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
3568			daprobedone(periph, start_ccb);
3569			break;
3570		}
3571		ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
3572				  M_NOWAIT|M_ZERO);
3573		if (ata_zone == NULL) {
3574			xpt_print(periph->path, "Couldn't malloc ata_zone "
3575			    "data\n");
3576			daprobedone(periph, start_ccb);
3577			break;
3578		}
3579
3580		retval = scsi_ata_read_log(&start_ccb->csio,
3581		    /*retries*/ da_retry_count,
3582		    /*cbfcnp*/ dadone,
3583		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3584		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3585		    /*page_number*/ ATA_IDL_ZDI,
3586		    /*block_count*/ 1,
3587		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3588				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3589		    /*data_ptr*/ (uint8_t *)ata_zone,
3590		    /*dxfer_len*/ sizeof(*ata_zone),
3591		    /*sense_len*/ SSD_FULL_SIZE,
3592		    /*timeout*/ da_default_timeout * 1000);
3593
3594		if (retval != 0) {
3595			xpt_print(periph->path, "scsi_ata_read_log() failed!");
3596			free(ata_zone, M_SCSIDA);
3597			daprobedone(periph, start_ccb);
3598			break;
3599		}
3600		start_ccb->ccb_h.ccb_bp = NULL;
3601		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;
3602		xpt_action(start_ccb);
3603
3604		break;
3605	}
3606	case DA_STATE_PROBE_ZONE:
3607	{
3608		struct scsi_vpd_zoned_bdc *bdc;
3609
3610		/*
3611		 * Note that this page will be supported for SCSI protocol
3612		 * devices that support ZBC (SMR devices), as well as ATA
3613		 * protocol devices that are behind a SAT (SCSI to ATA
3614		 * Translation) layer that supports converting ZBC commands
3615		 * to their ZAC equivalents.
3616		 */
3617		if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
3618			daprobedone(periph, start_ccb);
3619			break;
3620		}
3621		bdc = (struct scsi_vpd_zoned_bdc *)
3622			malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3623
3624		if (bdc == NULL) {
3625			xpt_release_ccb(start_ccb);
3626			xpt_print(periph->path, "Couldn't malloc zone VPD "
3627			    "data\n");
3628			break;
3629		}
3630		scsi_inquiry(&start_ccb->csio,
3631			     /*retries*/da_retry_count,
3632			     /*cbfcnp*/dadone,
3633			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3634			     /*inq_buf*/(u_int8_t *)bdc,
3635			     /*inq_len*/sizeof(*bdc),
3636			     /*evpd*/TRUE,
3637			     /*page_code*/SVPD_ZONED_BDC,
3638			     /*sense_len*/SSD_FULL_SIZE,
3639			     /*timeout*/da_default_timeout * 1000);
3640		start_ccb->ccb_h.ccb_bp = NULL;
3641		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
3642		xpt_action(start_ccb);
3643		break;
3644	}
3645	}
3646}
3647
3648/*
3649 * In each of the methods below, while its the caller's
3650 * responsibility to ensure the request will fit into a
3651 * single device request, we might have changed the delete
3652 * method due to the device incorrectly advertising either
3653 * its supported methods or limits.
3654 *
3655 * To prevent this causing further issues we validate the
3656 * against the methods limits, and warn which would
3657 * otherwise be unnecessary.
3658 */
3659static void
3660da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3661{
3662	struct da_softc *softc = (struct da_softc *)periph->softc;;
3663	struct bio *bp1;
3664	uint8_t *buf = softc->unmap_buf;
3665	struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE];
3666	uint64_t lba, lastlba = (uint64_t)-1;
3667	uint64_t totalcount = 0;
3668	uint64_t count;
3669	uint32_t c, lastcount = 0, ranges = 0;
3670
3671	/*
3672	 * Currently this doesn't take the UNMAP
3673	 * Granularity and Granularity Alignment
3674	 * fields into account.
3675	 *
3676	 * This could result in both unoptimal unmap
3677	 * requests as as well as UNMAP calls unmapping
3678	 * fewer LBA's than requested.
3679	 */
3680
3681	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
3682	bp1 = bp;
3683	do {
3684		/*
3685		 * Note: ada and da are different in how they store the
3686		 * pending bp's in a trim. ada stores all of them in the
3687		 * trim_req.bps. da stores all but the first one in the
3688		 * delete_run_queue. ada then completes all the bps in
3689		 * its adadone() loop. da completes all the bps in the
3690		 * delete_run_queue in dadone, and relies on the biodone
3691		 * after to complete. This should be reconciled since there's
3692		 * no real reason to do it differently. XXX
3693		 */
3694		if (bp1 != bp)
3695			bioq_insert_tail(&softc->delete_run_queue, bp1);
3696		lba = bp1->bio_pblkno;
3697		count = bp1->bio_bcount / softc->params.secsize;
3698
3699		/* Try to extend the previous range. */
3700		if (lba == lastlba) {
3701			c = omin(count, UNMAP_RANGE_MAX - lastcount);
3702			lastlba += c;
3703			lastcount += c;
3704			scsi_ulto4b(lastcount, d[ranges - 1].length);
3705			count -= c;
3706			lba += c;
3707			totalcount += c;
3708		} else if ((softc->quirks & DA_Q_STRICT_UNMAP) &&
3709		    softc->unmap_gran != 0) {
3710			/* Align length of the previous range. */
3711			if ((c = lastcount % softc->unmap_gran) != 0) {
3712				if (lastcount <= c) {
3713					totalcount -= lastcount;
3714					lastlba = (uint64_t)-1;
3715					lastcount = 0;
3716					ranges--;
3717				} else {
3718					totalcount -= c;
3719					lastlba -= c;
3720					lastcount -= c;
3721					scsi_ulto4b(lastcount, d[ranges - 1].length);
3722				}
3723			}
3724			/* Align beginning of the new range. */
3725			c = (lba - softc->unmap_gran_align) % softc->unmap_gran;
3726			if (c != 0) {
3727				c = softc->unmap_gran - c;
3728				if (count <= c) {
3729					count = 0;
3730				} else {
3731					lba += c;
3732					count -= c;
3733				}
3734			}
3735		}
3736
3737		while (count > 0) {
3738			c = omin(count, UNMAP_RANGE_MAX);
3739			if (totalcount + c > softc->unmap_max_lba ||
3740			    ranges >= softc->unmap_max_ranges) {
3741				xpt_print(periph->path,
3742				    "%s issuing short delete %ld > %ld"
3743				    "|| %d >= %d",
3744				    da_delete_method_desc[softc->delete_method],
3745				    totalcount + c, softc->unmap_max_lba,
3746				    ranges, softc->unmap_max_ranges);
3747				break;
3748			}
3749			scsi_u64to8b(lba, d[ranges].lba);
3750			scsi_ulto4b(c, d[ranges].length);
3751			lba += c;
3752			totalcount += c;
3753			ranges++;
3754			count -= c;
3755			lastlba = lba;
3756			lastcount = c;
3757		}
3758		bp1 = cam_iosched_next_trim(softc->cam_iosched);
3759		if (bp1 == NULL)
3760			break;
3761		if (ranges >= softc->unmap_max_ranges ||
3762		    totalcount + bp1->bio_bcount /
3763		    softc->params.secsize > softc->unmap_max_lba) {
3764			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3765			break;
3766		}
3767	} while (1);
3768
3769	/* Align length of the last range. */
3770	if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 &&
3771	    (c = lastcount % softc->unmap_gran) != 0) {
3772		if (lastcount <= c)
3773			ranges--;
3774		else
3775			scsi_ulto4b(lastcount - c, d[ranges - 1].length);
3776	}
3777
3778	scsi_ulto2b(ranges * 16 + 6, &buf[0]);
3779	scsi_ulto2b(ranges * 16, &buf[2]);
3780
3781	scsi_unmap(&ccb->csio,
3782		   /*retries*/da_retry_count,
3783		   /*cbfcnp*/dadone,
3784		   /*tag_action*/MSG_SIMPLE_Q_TAG,
3785		   /*byte2*/0,
3786		   /*data_ptr*/ buf,
3787		   /*dxfer_len*/ ranges * 16 + 8,
3788		   /*sense_len*/SSD_FULL_SIZE,
3789		   da_default_timeout * 1000);
3790	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3791	ccb->ccb_h.flags |= CAM_UNLOCKED;
3792	cam_iosched_submit_trim(softc->cam_iosched);
3793}
3794
3795static void
3796da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3797{
3798	struct da_softc *softc = (struct da_softc *)periph->softc;
3799	struct bio *bp1;
3800	uint8_t *buf = softc->unmap_buf;
3801	uint64_t lastlba = (uint64_t)-1;
3802	uint64_t count;
3803	uint64_t lba;
3804	uint32_t lastcount = 0, c, requestcount;
3805	int ranges = 0, off, block_count;
3806
3807	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
3808	bp1 = bp;
3809	do {
3810		if (bp1 != bp)//XXX imp XXX
3811			bioq_insert_tail(&softc->delete_run_queue, bp1);
3812		lba = bp1->bio_pblkno;
3813		count = bp1->bio_bcount / softc->params.secsize;
3814		requestcount = count;
3815
3816		/* Try to extend the previous range. */
3817		if (lba == lastlba) {
3818			c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
3819			lastcount += c;
3820			off = (ranges - 1) * 8;
3821			buf[off + 6] = lastcount & 0xff;
3822			buf[off + 7] = (lastcount >> 8) & 0xff;
3823			count -= c;
3824			lba += c;
3825		}
3826
3827		while (count > 0) {
3828			c = omin(count, ATA_DSM_RANGE_MAX);
3829			off = ranges * 8;
3830
3831			buf[off + 0] = lba & 0xff;
3832			buf[off + 1] = (lba >> 8) & 0xff;
3833			buf[off + 2] = (lba >> 16) & 0xff;
3834			buf[off + 3] = (lba >> 24) & 0xff;
3835			buf[off + 4] = (lba >> 32) & 0xff;
3836			buf[off + 5] = (lba >> 40) & 0xff;
3837			buf[off + 6] = c & 0xff;
3838			buf[off + 7] = (c >> 8) & 0xff;
3839			lba += c;
3840			ranges++;
3841			count -= c;
3842			lastcount = c;
3843			if (count != 0 && ranges == softc->trim_max_ranges) {
3844				xpt_print(periph->path,
3845				    "%s issuing short delete %ld > %ld\n",
3846				    da_delete_method_desc[softc->delete_method],
3847				    requestcount,
3848				    (softc->trim_max_ranges - ranges) *
3849				    ATA_DSM_RANGE_MAX);
3850				break;
3851			}
3852		}
3853		lastlba = lba;
3854		bp1 = cam_iosched_next_trim(softc->cam_iosched);
3855		if (bp1 == NULL)
3856			break;
3857		if (bp1->bio_bcount / softc->params.secsize >
3858		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
3859			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3860			break;
3861		}
3862	} while (1);
3863
3864	block_count = howmany(ranges, ATA_DSM_BLK_RANGES);
3865	scsi_ata_trim(&ccb->csio,
3866		      /*retries*/da_retry_count,
3867		      /*cbfcnp*/dadone,
3868		      /*tag_action*/MSG_SIMPLE_Q_TAG,
3869		      block_count,
3870		      /*data_ptr*/buf,
3871		      /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
3872		      /*sense_len*/SSD_FULL_SIZE,
3873		      da_default_timeout * 1000);
3874	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3875	ccb->ccb_h.flags |= CAM_UNLOCKED;
3876	cam_iosched_submit_trim(softc->cam_iosched);
3877}
3878
3879/*
3880 * We calculate ws_max_blks here based off d_delmaxsize instead
3881 * of using softc->ws_max_blks as it is absolute max for the
3882 * device not the protocol max which may well be lower.
3883 */
3884static void
3885da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3886{
3887	struct da_softc *softc;
3888	struct bio *bp1;
3889	uint64_t ws_max_blks;
3890	uint64_t lba;
3891	uint64_t count; /* forward compat with WS32 */
3892
3893	softc = (struct da_softc *)periph->softc;
3894	ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
3895	lba = bp->bio_pblkno;
3896	count = 0;
3897	bp1 = bp;
3898	do {
3899		if (bp1 != bp)//XXX imp XXX
3900			bioq_insert_tail(&softc->delete_run_queue, bp1);
3901		count += bp1->bio_bcount / softc->params.secsize;
3902		if (count > ws_max_blks) {
3903			xpt_print(periph->path,
3904			    "%s issuing short delete %ld > %ld\n",
3905			    da_delete_method_desc[softc->delete_method],
3906			    count, ws_max_blks);
3907			count = omin(count, ws_max_blks);
3908			break;
3909		}
3910		bp1 = cam_iosched_next_trim(softc->cam_iosched);
3911		if (bp1 == NULL)
3912			break;
3913		if (lba + count != bp1->bio_pblkno ||
3914		    count + bp1->bio_bcount /
3915		    softc->params.secsize > ws_max_blks) {
3916			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
3917			break;
3918		}
3919	} while (1);
3920
3921	scsi_write_same(&ccb->csio,
3922			/*retries*/da_retry_count,
3923			/*cbfcnp*/dadone,
3924			/*tag_action*/MSG_SIMPLE_Q_TAG,
3925			/*byte2*/softc->delete_method ==
3926			    DA_DELETE_ZERO ? 0 : SWS_UNMAP,
3927			softc->delete_method == DA_DELETE_WS16 ? 16 : 10,
3928			/*lba*/lba,
3929			/*block_count*/count,
3930			/*data_ptr*/ __DECONST(void *, zero_region),
3931			/*dxfer_len*/ softc->params.secsize,
3932			/*sense_len*/SSD_FULL_SIZE,
3933			da_default_timeout * 1000);
3934	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
3935	ccb->ccb_h.flags |= CAM_UNLOCKED;
3936	cam_iosched_submit_trim(softc->cam_iosched);
3937}
3938
3939static int
3940cmd6workaround(union ccb *ccb)
3941{
3942	struct scsi_rw_6 cmd6;
3943	struct scsi_rw_10 *cmd10;
3944	struct da_softc *softc;
3945	u_int8_t *cdb;
3946	struct bio *bp;
3947	int frozen;
3948
3949	cdb = ccb->csio.cdb_io.cdb_bytes;
3950	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
3951
3952	if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) {
3953		da_delete_methods old_method = softc->delete_method;
3954
3955		/*
3956		 * Typically there are two reasons for failure here
3957		 * 1. Delete method was detected as supported but isn't
3958		 * 2. Delete failed due to invalid params e.g. too big
3959		 *
3960		 * While we will attempt to choose an alternative delete method
3961		 * this may result in short deletes if the existing delete
3962		 * requests from geom are big for the new method chosen.
3963		 *
3964		 * This method assumes that the error which triggered this
3965		 * will not retry the io otherwise a panic will occur
3966		 */
3967		dadeleteflag(softc, old_method, 0);
3968		dadeletemethodchoose(softc, DA_DELETE_DISABLE);
3969		if (softc->delete_method == DA_DELETE_DISABLE)
3970			xpt_print(ccb->ccb_h.path,
3971				  "%s failed, disabling BIO_DELETE\n",
3972				  da_delete_method_desc[old_method]);
3973		else
3974			xpt_print(ccb->ccb_h.path,
3975				  "%s failed, switching to %s BIO_DELETE\n",
3976				  da_delete_method_desc[old_method],
3977				  da_delete_method_desc[softc->delete_method]);
3978
3979		while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
3980			cam_iosched_queue_work(softc->cam_iosched, bp);
3981		cam_iosched_queue_work(softc->cam_iosched,
3982		    (struct bio *)ccb->ccb_h.ccb_bp);
3983		ccb->ccb_h.ccb_bp = NULL;
3984		return (0);
3985	}
3986
3987	/* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
3988	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
3989	    (*cdb == PREVENT_ALLOW) &&
3990	    (softc->quirks & DA_Q_NO_PREVENT) == 0) {
3991		if (bootverbose)
3992			xpt_print(ccb->ccb_h.path,
3993			    "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
3994		softc->quirks |= DA_Q_NO_PREVENT;
3995		return (0);
3996	}
3997
3998	/* Detect unsupported SYNCHRONIZE CACHE(10). */
3999	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
4000	    (*cdb == SYNCHRONIZE_CACHE) &&
4001	    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
4002		if (bootverbose)
4003			xpt_print(ccb->ccb_h.path,
4004			    "SYNCHRONIZE CACHE(10) not supported.\n");
4005		softc->quirks |= DA_Q_NO_SYNC_CACHE;
4006		softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
4007		return (0);
4008	}
4009
4010	/* Translation only possible if CDB is an array and cmd is R/W6 */
4011	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
4012	    (*cdb != READ_6 && *cdb != WRITE_6))
4013		return 0;
4014
4015	xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
4016	    "increasing minimum_cmd_size to 10.\n");
4017 	softc->minimum_cmd_size = 10;
4018
4019	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
4020	cmd10 = (struct scsi_rw_10 *)cdb;
4021	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
4022	cmd10->byte2 = 0;
4023	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
4024	cmd10->reserved = 0;
4025	scsi_ulto2b(cmd6.length, cmd10->length);
4026	cmd10->control = cmd6.control;
4027	ccb->csio.cdb_len = sizeof(*cmd10);
4028
4029	/* Requeue request, unfreezing queue if necessary */
4030	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
4031 	ccb->ccb_h.status = CAM_REQUEUE_REQ;
4032	xpt_action(ccb);
4033	if (frozen) {
4034		cam_release_devq(ccb->ccb_h.path,
4035				 /*relsim_flags*/0,
4036				 /*reduction*/0,
4037				 /*timeout*/0,
4038				 /*getcount_only*/0);
4039	}
4040	return (ERESTART);
4041}
4042
4043static void
4044dazonedone(struct cam_periph *periph, union ccb *ccb)
4045{
4046	struct da_softc *softc;
4047	struct bio *bp;
4048
4049	softc = periph->softc;
4050	bp = (struct bio *)ccb->ccb_h.ccb_bp;
4051
4052	switch (bp->bio_zone.zone_cmd) {
4053	case DISK_ZONE_OPEN:
4054	case DISK_ZONE_CLOSE:
4055	case DISK_ZONE_FINISH:
4056	case DISK_ZONE_RWP:
4057		break;
4058	case DISK_ZONE_REPORT_ZONES: {
4059		uint32_t avail_len;
4060		struct disk_zone_report *rep;
4061		struct scsi_report_zones_hdr *hdr;
4062		struct scsi_report_zones_desc *desc;
4063		struct disk_zone_rep_entry *entry;
4064		uint32_t num_alloced, hdr_len, num_avail;
4065		uint32_t num_to_fill, i;
4066		int ata;
4067
4068		rep = &bp->bio_zone.zone_params.report;
4069		avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
4070		/*
4071		 * Note that bio_resid isn't normally used for zone
4072		 * commands, but it is used by devstat_end_transaction_bio()
4073		 * to determine how much data was transferred.  Because
4074		 * the size of the SCSI/ATA data structures is different
4075		 * than the size of the BIO interface structures, the
4076		 * amount of data actually transferred from the drive will
4077		 * be different than the amount of data transferred to
4078		 * the user.
4079		 */
4080		bp->bio_resid = ccb->csio.resid;
4081		num_alloced = rep->entries_allocated;
4082		hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
4083		if (avail_len < sizeof(*hdr)) {
4084			/*
4085			 * Is there a better error than EIO here?  We asked
4086			 * for at least the header, and we got less than
4087			 * that.
4088			 */
4089			bp->bio_error = EIO;
4090			bp->bio_flags |= BIO_ERROR;
4091			bp->bio_resid = bp->bio_bcount;
4092			break;
4093		}
4094
4095		if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
4096			ata = 1;
4097		else
4098			ata = 0;
4099
4100		hdr_len = ata ? le32dec(hdr->length) :
4101				scsi_4btoul(hdr->length);
4102		if (hdr_len > 0)
4103			rep->entries_available = hdr_len / sizeof(*desc);
4104		else
4105			rep->entries_available = 0;
4106		/*
4107		 * NOTE: using the same values for the BIO version of the
4108		 * same field as the SCSI/ATA values.  This means we could
4109		 * get some additional values that aren't defined in bio.h
4110		 * if more values of the same field are defined later.
4111		 */
4112		rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
4113		rep->header.maximum_lba = ata ?  le64dec(hdr->maximum_lba) :
4114					  scsi_8btou64(hdr->maximum_lba);
4115		/*
4116		 * If the drive reports no entries that match the query,
4117		 * we're done.
4118		 */
4119		if (hdr_len == 0) {
4120			rep->entries_filled = 0;
4121			break;
4122		}
4123
4124		num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
4125				hdr_len / sizeof(*desc));
4126		/*
4127		 * If the drive didn't return any data, then we're done.
4128		 */
4129		if (num_avail == 0) {
4130			rep->entries_filled = 0;
4131			break;
4132		}
4133
4134		num_to_fill = min(num_avail, rep->entries_allocated);
4135		/*
4136		 * If the user didn't allocate any entries for us to fill,
4137		 * we're done.
4138		 */
4139		if (num_to_fill == 0) {
4140			rep->entries_filled = 0;
4141			break;
4142		}
4143
4144		for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
4145		     i < num_to_fill; i++, desc++, entry++) {
4146			/*
4147			 * NOTE: we're mapping the values here directly
4148			 * from the SCSI/ATA bit definitions to the bio.h
4149			 * definitions.  There is also a warning in
4150			 * disk_zone.h, but the impact is that if
4151			 * additional values are added in the SCSI/ATA
4152			 * specs these will be visible to consumers of
4153			 * this interface.
4154			 */
4155			entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
4156			entry->zone_condition =
4157			    (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
4158			    SRZ_ZONE_COND_SHIFT;
4159			entry->zone_flags |= desc->zone_flags &
4160			    (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
4161			entry->zone_length =
4162			    ata ? le64dec(desc->zone_length) :
4163				  scsi_8btou64(desc->zone_length);
4164			entry->zone_start_lba =
4165			    ata ? le64dec(desc->zone_start_lba) :
4166				  scsi_8btou64(desc->zone_start_lba);
4167			entry->write_pointer_lba =
4168			    ata ? le64dec(desc->write_pointer_lba) :
4169				  scsi_8btou64(desc->write_pointer_lba);
4170		}
4171		rep->entries_filled = num_to_fill;
4172		break;
4173	}
4174	case DISK_ZONE_GET_PARAMS:
4175	default:
4176		/*
4177		 * In theory we should not get a GET_PARAMS bio, since it
4178		 * should be handled without queueing the command to the
4179		 * drive.
4180		 */
4181		panic("%s: Invalid zone command %d", __func__,
4182		    bp->bio_zone.zone_cmd);
4183		break;
4184	}
4185
4186	if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
4187		free(ccb->csio.data_ptr, M_SCSIDA);
4188}
4189
4190static void
4191dadone(struct cam_periph *periph, union ccb *done_ccb)
4192{
4193	struct da_softc *softc;
4194	struct ccb_scsiio *csio;
4195	u_int32_t  priority;
4196	da_ccb_state state;
4197
4198	softc = (struct da_softc *)periph->softc;
4199	priority = done_ccb->ccb_h.pinfo.priority;
4200
4201	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));
4202
4203	csio = &done_ccb->csio;
4204	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
4205	switch (state) {
4206	case DA_CCB_BUFFER_IO:
4207	case DA_CCB_DELETE:
4208	{
4209		struct bio *bp, *bp1;
4210
4211		cam_periph_lock(periph);
4212		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4213		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
4214			int error;
4215			int sf;
4216
4217			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
4218				sf = SF_RETRY_UA;
4219			else
4220				sf = 0;
4221
4222			error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
4223			if (error == ERESTART) {
4224				/*
4225				 * A retry was scheduled, so
4226				 * just return.
4227				 */
4228				cam_periph_unlock(periph);
4229				return;
4230			}
4231			bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4232			if (error != 0) {
4233				int queued_error;
4234
4235				/*
4236				 * return all queued I/O with EIO, so that
4237				 * the client can retry these I/Os in the
4238				 * proper order should it attempt to recover.
4239				 */
4240				queued_error = EIO;
4241
4242				if (error == ENXIO
4243				 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) {
4244					/*
4245					 * Catastrophic error.  Mark our pack as
4246					 * invalid.
4247					 */
4248					/*
4249					 * XXX See if this is really a media
4250					 * XXX change first?
4251					 */
4252					xpt_print(periph->path,
4253					    "Invalidating pack\n");
4254					softc->flags |= DA_FLAG_PACK_INVALID;
4255#ifdef CAM_IO_STATS
4256					softc->invalidations++;
4257#endif
4258					queued_error = ENXIO;
4259				}
4260				cam_iosched_flush(softc->cam_iosched, NULL,
4261					   queued_error);
4262				if (bp != NULL) {
4263					bp->bio_error = error;
4264					bp->bio_resid = bp->bio_bcount;
4265					bp->bio_flags |= BIO_ERROR;
4266				}
4267			} else if (bp != NULL) {
4268				if (state == DA_CCB_DELETE)
4269					bp->bio_resid = 0;
4270				else
4271					bp->bio_resid = csio->resid;
4272				bp->bio_error = 0;
4273				if (bp->bio_resid != 0)
4274					bp->bio_flags |= BIO_ERROR;
4275			}
4276			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4277				cam_release_devq(done_ccb->ccb_h.path,
4278						 /*relsim_flags*/0,
4279						 /*reduction*/0,
4280						 /*timeout*/0,
4281						 /*getcount_only*/0);
4282		} else if (bp != NULL) {
4283			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4284				panic("REQ_CMP with QFRZN");
4285			if (bp->bio_cmd == BIO_ZONE)
4286				dazonedone(periph, done_ccb);
4287			else if (state == DA_CCB_DELETE)
4288				bp->bio_resid = 0;
4289			else
4290				bp->bio_resid = csio->resid;
4291			if ((csio->resid > 0)
4292			 && (bp->bio_cmd != BIO_ZONE))
4293				bp->bio_flags |= BIO_ERROR;
4294			if (softc->error_inject != 0) {
4295				bp->bio_error = softc->error_inject;
4296				bp->bio_resid = bp->bio_bcount;
4297				bp->bio_flags |= BIO_ERROR;
4298				softc->error_inject = 0;
4299			}
4300		}
4301
4302		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
4303		if (LIST_EMPTY(&softc->pending_ccbs))
4304			softc->flags |= DA_FLAG_WAS_OTAG;
4305
4306		cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
4307		xpt_release_ccb(done_ccb);
4308		if (state == DA_CCB_DELETE) {
4309			TAILQ_HEAD(, bio) queue;
4310
4311			TAILQ_INIT(&queue);
4312			TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
4313			softc->delete_run_queue.insert_point = NULL;
4314			/*
4315			 * Normally, the xpt_release_ccb() above would make sure
4316			 * that when we have more work to do, that work would
4317			 * get kicked off. However, we specifically keep
4318			 * delete_running set to 0 before the call above to
4319			 * allow other I/O to progress when many BIO_DELETE
4320			 * requests are pushed down. We set delete_running to 0
4321			 * and call daschedule again so that we don't stall if
4322			 * there are no other I/Os pending apart from BIO_DELETEs.
4323			 */
4324			cam_iosched_trim_done(softc->cam_iosched);
4325			daschedule(periph);
4326			cam_periph_unlock(periph);
4327			while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
4328				TAILQ_REMOVE(&queue, bp1, bio_queue);
4329				bp1->bio_error = bp->bio_error;
4330				if (bp->bio_flags & BIO_ERROR) {
4331					bp1->bio_flags |= BIO_ERROR;
4332					bp1->bio_resid = bp1->bio_bcount;
4333				} else
4334					bp1->bio_resid = 0;
4335				biodone(bp1);
4336			}
4337		} else {
4338			daschedule(periph);
4339			cam_periph_unlock(periph);
4340		}
4341		if (bp != NULL)
4342			biodone(bp);
4343		return;
4344	}
4345	case DA_CCB_PROBE_WP:
4346	{
4347		struct scsi_mode_header_6 *mode_hdr6;
4348		struct scsi_mode_header_10 *mode_hdr10;
4349		uint8_t dev_spec;
4350
4351		if (softc->minimum_cmd_size > 6) {
4352			mode_hdr10 = (struct scsi_mode_header_10 *)csio->data_ptr;
4353			dev_spec = mode_hdr10->dev_spec;
4354		} else {
4355			mode_hdr6 = (struct scsi_mode_header_6 *)csio->data_ptr;
4356			dev_spec = mode_hdr6->dev_spec;
4357		}
4358		if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
4359			if ((dev_spec & 0x80) != 0)
4360				softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT;
4361			else
4362				softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT;
4363		} else {
4364			int error;
4365
4366			error = daerror(done_ccb, CAM_RETRY_SELTO,
4367					SF_RETRY_UA|SF_NO_PRINT);
4368			if (error == ERESTART)
4369				return;
4370			else if (error != 0) {
4371				if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4372					/* Don't wedge this device's queue */
4373					cam_release_devq(done_ccb->ccb_h.path,
4374							 /*relsim_flags*/0,
4375							 /*reduction*/0,
4376							 /*timeout*/0,
4377							 /*getcount_only*/0);
4378				}
4379			}
4380		}
4381
4382		free(csio->data_ptr, M_SCSIDA);
4383		xpt_release_ccb(done_ccb);
4384		if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
4385			softc->state = DA_STATE_PROBE_RC16;
4386		else
4387			softc->state = DA_STATE_PROBE_RC;
4388		xpt_schedule(periph, priority);
4389		return;
4390	}
4391	case DA_CCB_PROBE_RC:
4392	case DA_CCB_PROBE_RC16:
4393	{
4394		struct	   scsi_read_capacity_data *rdcap;
4395		struct     scsi_read_capacity_data_long *rcaplong;
4396		char	   announce_buf[80];
4397		int	   lbp;
4398
4399		lbp = 0;
4400		rdcap = NULL;
4401		rcaplong = NULL;
4402		if (state == DA_CCB_PROBE_RC)
4403			rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
4404		else
4405			rcaplong = (struct scsi_read_capacity_data_long *)
4406				csio->data_ptr;
4407
4408		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4409			struct disk_params *dp;
4410			uint32_t block_size;
4411			uint64_t maxsector;
4412			u_int lalba;	/* Lowest aligned LBA. */
4413
4414			if (state == DA_CCB_PROBE_RC) {
4415				block_size = scsi_4btoul(rdcap->length);
4416				maxsector = scsi_4btoul(rdcap->addr);
4417				lalba = 0;
4418
4419				/*
4420				 * According to SBC-2, if the standard 10
4421				 * byte READ CAPACITY command returns 2^32,
4422				 * we should issue the 16 byte version of
4423				 * the command, since the device in question
4424				 * has more sectors than can be represented
4425				 * with the short version of the command.
4426				 */
4427				if (maxsector == 0xffffffff) {
4428					free(rdcap, M_SCSIDA);
4429					xpt_release_ccb(done_ccb);
4430					softc->state = DA_STATE_PROBE_RC16;
4431					xpt_schedule(periph, priority);
4432					return;
4433				}
4434			} else {
4435				block_size = scsi_4btoul(rcaplong->length);
4436				maxsector = scsi_8btou64(rcaplong->addr);
4437				lalba = scsi_2btoul(rcaplong->lalba_lbp);
4438			}
4439
4440			/*
4441			 * Because GEOM code just will panic us if we
4442			 * give them an 'illegal' value we'll avoid that
4443			 * here.
4444			 */
4445			if (block_size == 0) {
4446				block_size = 512;
4447				if (maxsector == 0)
4448					maxsector = -1;
4449			}
4450			if (block_size >= MAXPHYS) {
4451				xpt_print(periph->path,
4452				    "unsupportable block size %ju\n",
4453				    (uintmax_t) block_size);
4454				announce_buf[0] = '\0';
4455				cam_periph_invalidate(periph);
4456			} else {
4457				/*
4458				 * We pass rcaplong into dasetgeom(),
4459				 * because it will only use it if it is
4460				 * non-NULL.
4461				 */
4462				dasetgeom(periph, block_size, maxsector,
4463					  rcaplong, sizeof(*rcaplong));
4464				lbp = (lalba & SRC16_LBPME_A);
4465				dp = &softc->params;
4466				snprintf(announce_buf, sizeof(announce_buf),
4467				    "%juMB (%ju %u byte sectors)",
4468				    ((uintmax_t)dp->secsize * dp->sectors) /
4469				     (1024 * 1024),
4470				    (uintmax_t)dp->sectors, dp->secsize);
4471			}
4472		} else {
4473			int	error;
4474
4475			announce_buf[0] = '\0';
4476
4477			/*
4478			 * Retry any UNIT ATTENTION type errors.  They
4479			 * are expected at boot.
4480			 */
4481			error = daerror(done_ccb, CAM_RETRY_SELTO,
4482					SF_RETRY_UA|SF_NO_PRINT);
4483			if (error == ERESTART) {
4484				/*
4485				 * A retry was scheduled, so
4486				 * just return.
4487				 */
4488				return;
4489			} else if (error != 0) {
4490				int asc, ascq;
4491				int sense_key, error_code;
4492				int have_sense;
4493				cam_status status;
4494				struct ccb_getdev cgd;
4495
4496				/* Don't wedge this device's queue */
4497				status = done_ccb->ccb_h.status;
4498				if ((status & CAM_DEV_QFRZN) != 0)
4499					cam_release_devq(done_ccb->ccb_h.path,
4500							 /*relsim_flags*/0,
4501							 /*reduction*/0,
4502							 /*timeout*/0,
4503							 /*getcount_only*/0);
4504
4505
4506				xpt_setup_ccb(&cgd.ccb_h,
4507					      done_ccb->ccb_h.path,
4508					      CAM_PRIORITY_NORMAL);
4509				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
4510				xpt_action((union ccb *)&cgd);
4511
4512				if (scsi_extract_sense_ccb(done_ccb,
4513				    &error_code, &sense_key, &asc, &ascq))
4514					have_sense = TRUE;
4515				else
4516					have_sense = FALSE;
4517
4518				/*
4519				 * If we tried READ CAPACITY(16) and failed,
4520				 * fallback to READ CAPACITY(10).
4521				 */
4522				if ((state == DA_CCB_PROBE_RC16) &&
4523				    (softc->flags & DA_FLAG_CAN_RC16) &&
4524				    (((csio->ccb_h.status & CAM_STATUS_MASK) ==
4525					CAM_REQ_INVALID) ||
4526				     ((have_sense) &&
4527				      (error_code == SSD_CURRENT_ERROR ||
4528				       error_code == SSD_DESC_CURRENT_ERROR) &&
4529				      (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
4530					softc->flags &= ~DA_FLAG_CAN_RC16;
4531					free(rdcap, M_SCSIDA);
4532					xpt_release_ccb(done_ccb);
4533					softc->state = DA_STATE_PROBE_RC;
4534					xpt_schedule(periph, priority);
4535					return;
4536				}
4537
4538				/*
4539				 * Attach to anything that claims to be a
4540				 * direct access or optical disk device,
4541				 * as long as it doesn't return a "Logical
4542				 * unit not supported" (0x25) error.
4543				 */
4544				if ((have_sense) && (asc != 0x25)
4545				 && (error_code == SSD_CURRENT_ERROR
4546				  || error_code == SSD_DESC_CURRENT_ERROR)) {
4547					const char *sense_key_desc;
4548					const char *asc_desc;
4549
4550					dasetgeom(periph, 512, -1, NULL, 0);
4551					scsi_sense_desc(sense_key, asc, ascq,
4552							&cgd.inq_data,
4553							&sense_key_desc,
4554							&asc_desc);
4555					snprintf(announce_buf,
4556					    sizeof(announce_buf),
4557						"Attempt to query device "
4558						"size failed: %s, %s",
4559						sense_key_desc,
4560						asc_desc);
4561				} else {
4562					if (have_sense)
4563						scsi_sense_print(
4564							&done_ccb->csio);
4565					else {
4566						xpt_print(periph->path,
4567						    "got CAM status %#x\n",
4568						    done_ccb->ccb_h.status);
4569					}
4570
4571					xpt_print(periph->path, "fatal error, "
4572					    "failed to attach to device\n");
4573
4574					/*
4575					 * Free up resources.
4576					 */
4577					cam_periph_invalidate(periph);
4578				}
4579			}
4580		}
4581		free(csio->data_ptr, M_SCSIDA);
4582		if (announce_buf[0] != '\0' &&
4583		    ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
4584			/*
4585			 * Create our sysctl variables, now that we know
4586			 * we have successfully attached.
4587			 */
4588			/* increase the refcount */
4589			if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
4590				taskqueue_enqueue(taskqueue_thread,
4591						  &softc->sysctl_task);
4592				xpt_announce_periph(periph, announce_buf);
4593				xpt_announce_quirks(periph, softc->quirks,
4594				    DA_Q_BIT_STRING);
4595			} else {
4596				xpt_print(periph->path, "fatal error, "
4597				    "could not acquire reference count\n");
4598			}
4599		}
4600
4601		/* We already probed the device. */
4602		if (softc->flags & DA_FLAG_PROBED) {
4603			daprobedone(periph, done_ccb);
4604			return;
4605		}
4606
4607		/* Ensure re-probe doesn't see old delete. */
4608		softc->delete_available = 0;
4609		dadeleteflag(softc, DA_DELETE_ZERO, 1);
4610		if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
4611			/*
4612			 * Based on older SBC-3 spec revisions
4613			 * any of the UNMAP methods "may" be
4614			 * available via LBP given this flag so
4615			 * we flag all of them as available and
4616			 * then remove those which further
4617			 * probes confirm aren't available
4618			 * later.
4619			 *
4620			 * We could also check readcap(16) p_type
4621			 * flag to exclude one or more invalid
4622			 * write same (X) types here
4623			 */
4624			dadeleteflag(softc, DA_DELETE_WS16, 1);
4625			dadeleteflag(softc, DA_DELETE_WS10, 1);
4626			dadeleteflag(softc, DA_DELETE_UNMAP, 1);
4627
4628			xpt_release_ccb(done_ccb);
4629			softc->state = DA_STATE_PROBE_LBP;
4630			xpt_schedule(periph, priority);
4631			return;
4632		}
4633
4634		xpt_release_ccb(done_ccb);
4635		softc->state = DA_STATE_PROBE_BDC;
4636		xpt_schedule(periph, priority);
4637		return;
4638	}
4639	case DA_CCB_PROBE_LBP:
4640	{
4641		struct scsi_vpd_logical_block_prov *lbp;
4642
4643		lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr;
4644
4645		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4646			/*
4647			 * T10/1799-D Revision 31 states at least one of these
4648			 * must be supported but we don't currently enforce this.
4649			 */
4650			dadeleteflag(softc, DA_DELETE_WS16,
4651				     (lbp->flags & SVPD_LBP_WS16));
4652			dadeleteflag(softc, DA_DELETE_WS10,
4653				     (lbp->flags & SVPD_LBP_WS10));
4654			dadeleteflag(softc, DA_DELETE_UNMAP,
4655				     (lbp->flags & SVPD_LBP_UNMAP));
4656		} else {
4657			int error;
4658			error = daerror(done_ccb, CAM_RETRY_SELTO,
4659					SF_RETRY_UA|SF_NO_PRINT);
4660			if (error == ERESTART)
4661				return;
4662			else if (error != 0) {
4663				if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4664					/* Don't wedge this device's queue */
4665					cam_release_devq(done_ccb->ccb_h.path,
4666							 /*relsim_flags*/0,
4667							 /*reduction*/0,
4668							 /*timeout*/0,
4669							 /*getcount_only*/0);
4670				}
4671
4672				/*
4673				 * Failure indicates we don't support any SBC-3
4674				 * delete methods with UNMAP
4675				 */
4676			}
4677		}
4678
4679		free(lbp, M_SCSIDA);
4680		xpt_release_ccb(done_ccb);
4681		softc->state = DA_STATE_PROBE_BLK_LIMITS;
4682		xpt_schedule(periph, priority);
4683		return;
4684	}
4685	case DA_CCB_PROBE_BLK_LIMITS:
4686	{
4687		struct scsi_vpd_block_limits *block_limits;
4688
4689		block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;
4690
4691		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4692			uint32_t max_txfer_len = scsi_4btoul(
4693				block_limits->max_txfer_len);
4694			uint32_t max_unmap_lba_cnt = scsi_4btoul(
4695				block_limits->max_unmap_lba_cnt);
4696			uint32_t max_unmap_blk_cnt = scsi_4btoul(
4697				block_limits->max_unmap_blk_cnt);
4698			uint32_t unmap_gran = scsi_4btoul(
4699				block_limits->opt_unmap_grain);
4700			uint32_t unmap_gran_align = scsi_4btoul(
4701				block_limits->unmap_grain_align);
4702			uint64_t ws_max_blks = scsi_8btou64(
4703				block_limits->max_write_same_length);
4704
4705			if (max_txfer_len != 0) {
4706				softc->disk->d_maxsize = MIN(softc->maxio,
4707				    (off_t)max_txfer_len * softc->params.secsize);
4708			}
4709
4710			/*
4711			 * We should already support UNMAP but we check lba
4712			 * and block count to be sure
4713			 */
4714			if (max_unmap_lba_cnt != 0x00L &&
4715			    max_unmap_blk_cnt != 0x00L) {
4716				softc->unmap_max_lba = max_unmap_lba_cnt;
4717				softc->unmap_max_ranges = min(max_unmap_blk_cnt,
4718					UNMAP_MAX_RANGES);
4719				if (unmap_gran > 1) {
4720					softc->unmap_gran = unmap_gran;
4721					if (unmap_gran_align & 0x80000000) {
4722						softc->unmap_gran_align =
4723						    unmap_gran_align &
4724						    0x7fffffff;
4725					}
4726				}
4727			} else {
4728				/*
4729				 * Unexpected UNMAP limits which means the
4730				 * device doesn't actually support UNMAP
4731				 */
4732				dadeleteflag(softc, DA_DELETE_UNMAP, 0);
4733			}
4734
4735			if (ws_max_blks != 0x00L)
4736				softc->ws_max_blks = ws_max_blks;
4737		} else {
4738			int error;
4739			error = daerror(done_ccb, CAM_RETRY_SELTO,
4740					SF_RETRY_UA|SF_NO_PRINT);
4741			if (error == ERESTART)
4742				return;
4743			else if (error != 0) {
4744				if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4745					/* Don't wedge this device's queue */
4746					cam_release_devq(done_ccb->ccb_h.path,
4747							 /*relsim_flags*/0,
4748							 /*reduction*/0,
4749							 /*timeout*/0,
4750							 /*getcount_only*/0);
4751				}
4752
4753				/*
4754				 * Failure here doesn't mean UNMAP is not
4755				 * supported as this is an optional page.
4756				 */
4757				softc->unmap_max_lba = 1;
4758				softc->unmap_max_ranges = 1;
4759			}
4760		}
4761
4762		free(block_limits, M_SCSIDA);
4763		xpt_release_ccb(done_ccb);
4764		softc->state = DA_STATE_PROBE_BDC;
4765		xpt_schedule(periph, priority);
4766		return;
4767	}
4768	case DA_CCB_PROBE_BDC:
4769	{
4770		struct scsi_vpd_block_device_characteristics *bdc;
4771
4772		bdc = (struct scsi_vpd_block_device_characteristics *)
4773		    csio->data_ptr;
4774
4775		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4776			uint32_t valid_len;
4777
4778			/*
4779			 * Disable queue sorting for non-rotational media
4780			 * by default.
4781			 */
4782			u_int16_t old_rate = softc->disk->d_rotation_rate;
4783
4784			valid_len = csio->dxfer_len - csio->resid;
4785			if (SBDC_IS_PRESENT(bdc, valid_len,
4786			    medium_rotation_rate)) {
4787				softc->disk->d_rotation_rate =
4788					scsi_2btoul(bdc->medium_rotation_rate);
4789				if (softc->disk->d_rotation_rate ==
4790				    SVPD_BDC_RATE_NON_ROTATING) {
4791					cam_iosched_set_sort_queue(
4792					    softc->cam_iosched, 0);
4793					softc->rotating = 0;
4794				}
4795				if (softc->disk->d_rotation_rate != old_rate) {
4796					disk_attr_changed(softc->disk,
4797					    "GEOM::rotation_rate", M_NOWAIT);
4798				}
4799			}
4800			if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
4801			 && (softc->zone_mode == DA_ZONE_NONE)) {
4802				int ata_proto;
4803
4804				if (scsi_vpd_supported_page(periph,
4805				    SVPD_ATA_INFORMATION))
4806					ata_proto = 1;
4807				else
4808					ata_proto = 0;
4809
4810				/*
4811				 * The Zoned field will only be set for
4812				 * Drive Managed and Host Aware drives.  If
4813				 * they are Host Managed, the device type
4814				 * in the standard INQUIRY data should be
4815				 * set to T_ZBC_HM (0x14).
4816				 */
4817				if ((bdc->flags & SVPD_ZBC_MASK) ==
4818				     SVPD_HAW_ZBC) {
4819					softc->zone_mode = DA_ZONE_HOST_AWARE;
4820					softc->zone_interface = (ata_proto) ?
4821					   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
4822				} else if ((bdc->flags & SVPD_ZBC_MASK) ==
4823				     SVPD_DM_ZBC) {
4824					softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
4825					softc->zone_interface = (ata_proto) ?
4826					   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
4827				} else if ((bdc->flags & SVPD_ZBC_MASK) !=
4828					  SVPD_ZBC_NR) {
4829					xpt_print(periph->path, "Unknown zoned "
4830					    "type %#x",
4831					    bdc->flags & SVPD_ZBC_MASK);
4832				}
4833			}
4834		} else {
4835			int error;
4836			error = daerror(done_ccb, CAM_RETRY_SELTO,
4837					SF_RETRY_UA|SF_NO_PRINT);
4838			if (error == ERESTART)
4839				return;
4840			else if (error != 0) {
4841				if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4842					/* Don't wedge this device's queue */
4843					cam_release_devq(done_ccb->ccb_h.path,
4844							 /*relsim_flags*/0,
4845							 /*reduction*/0,
4846							 /*timeout*/0,
4847							 /*getcount_only*/0);
4848				}
4849			}
4850		}
4851
4852		free(bdc, M_SCSIDA);
4853		xpt_release_ccb(done_ccb);
4854		softc->state = DA_STATE_PROBE_ATA;
4855		xpt_schedule(periph, priority);
4856		return;
4857	}
4858	case DA_CCB_PROBE_ATA:
4859	{
4860		int i;
4861		struct ata_params *ata_params;
4862		int continue_probe;
4863		int error;
4864		int16_t *ptr;
4865
4866		ata_params = (struct ata_params *)csio->data_ptr;
4867		ptr = (uint16_t *)ata_params;
4868		continue_probe = 0;
4869		error = 0;
4870
4871		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4872			uint16_t old_rate;
4873
4874			for (i = 0; i < sizeof(*ata_params) / 2; i++)
4875				ptr[i] = le16toh(ptr[i]);
4876			if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
4877			    (softc->quirks & DA_Q_NO_UNMAP) == 0) {
4878				dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
4879				if (ata_params->max_dsm_blocks != 0)
4880					softc->trim_max_ranges = min(
4881					  softc->trim_max_ranges,
4882					  ata_params->max_dsm_blocks *
4883					  ATA_DSM_BLK_RANGES);
4884			}
4885			/*
4886			 * Disable queue sorting for non-rotational media
4887			 * by default.
4888			 */
4889			old_rate = softc->disk->d_rotation_rate;
4890			softc->disk->d_rotation_rate =
4891			    ata_params->media_rotation_rate;
4892			if (softc->disk->d_rotation_rate ==
4893			    ATA_RATE_NON_ROTATING) {
4894				cam_iosched_set_sort_queue(softc->cam_iosched, 0);
4895				softc->rotating = 0;
4896			}
4897			if (softc->disk->d_rotation_rate != old_rate) {
4898				disk_attr_changed(softc->disk,
4899				    "GEOM::rotation_rate", M_NOWAIT);
4900			}
4901
4902			if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
4903				softc->flags |= DA_FLAG_CAN_ATA_DMA;
4904
4905			if (ata_params->support.extension &
4906			    ATA_SUPPORT_GENLOG)
4907				softc->flags |= DA_FLAG_CAN_ATA_LOG;
4908
4909			/*
4910			 * At this point, if we have a SATA host aware drive,
4911			 * we communicate via ATA passthrough unless the
4912			 * SAT layer supports ZBC -> ZAC translation.  In
4913			 * that case,
4914			 */
4915			/*
4916			 * XXX KDM figure out how to detect a host managed
4917			 * SATA drive.
4918			 */
4919			if (softc->zone_mode == DA_ZONE_NONE) {
4920				/*
4921				 * Note that we don't override the zone
4922				 * mode or interface if it has already been
4923				 * set.  This is because it has either been
4924				 * set as a quirk, or when we probed the
4925				 * SCSI Block Device Characteristics page,
4926				 * the zoned field was set.  The latter
4927				 * means that the SAT layer supports ZBC to
4928				 * ZAC translation, and we would prefer to
4929				 * use that if it is available.
4930				 */
4931				if ((ata_params->support3 &
4932				    ATA_SUPPORT_ZONE_MASK) ==
4933				    ATA_SUPPORT_ZONE_HOST_AWARE) {
4934					softc->zone_mode = DA_ZONE_HOST_AWARE;
4935					softc->zone_interface =
4936					    DA_ZONE_IF_ATA_PASS;
4937				} else if ((ata_params->support3 &
4938					    ATA_SUPPORT_ZONE_MASK) ==
4939					    ATA_SUPPORT_ZONE_DEV_MANAGED) {
4940					softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
4941					softc->zone_interface =
4942					    DA_ZONE_IF_ATA_PASS;
4943				}
4944			}
4945
4946		} else {
4947			error = daerror(done_ccb, CAM_RETRY_SELTO,
4948					SF_RETRY_UA|SF_NO_PRINT);
4949			if (error == ERESTART)
4950				return;
4951			else if (error != 0) {
4952				if ((done_ccb->ccb_h.status &
4953				     CAM_DEV_QFRZN) != 0) {
4954					/* Don't wedge this device's queue */
4955					cam_release_devq(done_ccb->ccb_h.path,
4956							 /*relsim_flags*/0,
4957							 /*reduction*/0,
4958							 /*timeout*/0,
4959							 /*getcount_only*/0);
4960				}
4961			}
4962		}
4963
4964		free(ata_params, M_SCSIDA);
4965		if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
4966		 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
4967			/*
4968			 * If the ATA IDENTIFY failed, we could be talking
4969			 * to a SCSI drive, although that seems unlikely,
4970			 * since the drive did report that it supported the
4971			 * ATA Information VPD page.  If the ATA IDENTIFY
4972			 * succeeded, and the SAT layer doesn't support
4973			 * ZBC -> ZAC translation, continue on to get the
4974			 * directory of ATA logs, and complete the rest of
4975			 * the ZAC probe.  If the SAT layer does support
4976			 * ZBC -> ZAC translation, we want to use that,
4977			 * and we'll probe the SCSI Zoned Block Device
4978			 * Characteristics VPD page next.
4979			 */
4980			if ((error == 0)
4981			 && (softc->flags & DA_FLAG_CAN_ATA_LOG)
4982			 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
4983				softc->state = DA_STATE_PROBE_ATA_LOGDIR;
4984			else
4985				softc->state = DA_STATE_PROBE_ZONE;
4986			continue_probe = 1;
4987		}
4988		if (continue_probe != 0) {
4989			xpt_release_ccb(done_ccb);
4990			xpt_schedule(periph, priority);
4991			return;
4992		} else
4993			daprobedone(periph, done_ccb);
4994		return;
4995	}
4996	case DA_CCB_PROBE_ATA_LOGDIR:
4997	{
4998		int error;
4999
5000		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5001			error = 0;
5002			softc->valid_logdir_len = 0;
5003			bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
5004			softc->valid_logdir_len =
5005				csio->dxfer_len - csio->resid;
5006			if (softc->valid_logdir_len > 0)
5007				bcopy(csio->data_ptr, &softc->ata_logdir,
5008				    min(softc->valid_logdir_len,
5009					sizeof(softc->ata_logdir)));
5010			/*
5011			 * Figure out whether the Identify Device log is
5012			 * supported.  The General Purpose log directory
5013			 * has a header, and lists the number of pages
5014			 * available for each GP log identified by the
5015			 * offset into the list.
5016			 */
5017			if ((softc->valid_logdir_len >=
5018			    ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
5019			 && (le16dec(softc->ata_logdir.header) ==
5020			     ATA_GP_LOG_DIR_VERSION)
5021			 && (le16dec(&softc->ata_logdir.num_pages[
5022			     (ATA_IDENTIFY_DATA_LOG *
5023			     sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
5024				softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
5025			} else {
5026				softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
5027			}
5028		} else {
5029			error = daerror(done_ccb, CAM_RETRY_SELTO,
5030					SF_RETRY_UA|SF_NO_PRINT);
5031			if (error == ERESTART)
5032				return;
5033			else if (error != 0) {
5034				/*
5035				 * If we can't get the ATA log directory,
5036				 * then ATA logs are effectively not
5037				 * supported even if the bit is set in the
5038				 * identify data.
5039				 */
5040				softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
5041						  DA_FLAG_CAN_ATA_IDLOG);
5042				if ((done_ccb->ccb_h.status &
5043				     CAM_DEV_QFRZN) != 0) {
5044					/* Don't wedge this device's queue */
5045					cam_release_devq(done_ccb->ccb_h.path,
5046							 /*relsim_flags*/0,
5047							 /*reduction*/0,
5048							 /*timeout*/0,
5049							 /*getcount_only*/0);
5050				}
5051			}
5052		}
5053
5054		free(csio->data_ptr, M_SCSIDA);
5055
5056		if ((error == 0)
5057		 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
5058			softc->state = DA_STATE_PROBE_ATA_IDDIR;
5059			xpt_release_ccb(done_ccb);
5060			xpt_schedule(periph, priority);
5061			return;
5062		}
5063		daprobedone(periph, done_ccb);
5064		return;
5065	}
5066	case DA_CCB_PROBE_ATA_IDDIR:
5067	{
5068		int error;
5069
5070		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5071			off_t entries_offset, max_entries;
5072			error = 0;
5073
5074			softc->valid_iddir_len = 0;
5075			bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
5076			softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
5077					  DA_FLAG_CAN_ATA_ZONE);
5078			softc->valid_iddir_len =
5079				csio->dxfer_len - csio->resid;
5080			if (softc->valid_iddir_len > 0)
5081				bcopy(csio->data_ptr, &softc->ata_iddir,
5082				    min(softc->valid_iddir_len,
5083					sizeof(softc->ata_iddir)));
5084
5085			entries_offset =
5086			    __offsetof(struct ata_identify_log_pages,entries);
5087			max_entries = softc->valid_iddir_len - entries_offset;
5088			if ((softc->valid_iddir_len > (entries_offset + 1))
5089			 && (le64dec(softc->ata_iddir.header) ==
5090			     ATA_IDLOG_REVISION)
5091			 && (softc->ata_iddir.entry_count > 0)) {
5092				int num_entries, i;
5093
5094				num_entries = softc->ata_iddir.entry_count;
5095				num_entries = min(num_entries,
5096				   softc->valid_iddir_len - entries_offset);
5097				for (i = 0; i < num_entries &&
5098				     i < max_entries; i++) {
5099					if (softc->ata_iddir.entries[i] ==
5100					    ATA_IDL_SUP_CAP)
5101						softc->flags |=
5102						    DA_FLAG_CAN_ATA_SUPCAP;
5103					else if (softc->ata_iddir.entries[i]==
5104						 ATA_IDL_ZDI)
5105						softc->flags |=
5106						    DA_FLAG_CAN_ATA_ZONE;
5107
5108					if ((softc->flags &
5109					     DA_FLAG_CAN_ATA_SUPCAP)
5110					 && (softc->flags &
5111					     DA_FLAG_CAN_ATA_ZONE))
5112						break;
5113				}
5114			}
5115		} else {
5116			error = daerror(done_ccb, CAM_RETRY_SELTO,
5117					SF_RETRY_UA|SF_NO_PRINT);
5118			if (error == ERESTART)
5119				return;
5120			else if (error != 0) {
5121				/*
5122				 * If we can't get the ATA Identify Data log
5123				 * directory, then it effectively isn't
5124				 * supported even if the ATA Log directory
5125				 * has a non-zero number of pages present for
5126				 * this log.
5127				 */
5128				softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
5129				if ((done_ccb->ccb_h.status &
5130				     CAM_DEV_QFRZN) != 0) {
5131					/* Don't wedge this device's queue */
5132					cam_release_devq(done_ccb->ccb_h.path,
5133							 /*relsim_flags*/0,
5134							 /*reduction*/0,
5135							 /*timeout*/0,
5136							 /*getcount_only*/0);
5137				}
5138			}
5139		}
5140
5141		free(csio->data_ptr, M_SCSIDA);
5142
5143		if ((error == 0)
5144		 && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
5145			softc->state = DA_STATE_PROBE_ATA_SUP;
5146			xpt_release_ccb(done_ccb);
5147			xpt_schedule(periph, priority);
5148			return;
5149		}
5150		daprobedone(periph, done_ccb);
5151		return;
5152	}
5153	case DA_CCB_PROBE_ATA_SUP:
5154	{
5155		int error;
5156
5157		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5158			uint32_t valid_len;
5159			size_t needed_size;
5160			struct ata_identify_log_sup_cap *sup_cap;
5161			error = 0;
5162
5163			sup_cap = (struct ata_identify_log_sup_cap *)
5164			    csio->data_ptr;
5165			valid_len = csio->dxfer_len - csio->resid;
5166			needed_size =
5167			    __offsetof(struct ata_identify_log_sup_cap,
5168			    sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
5169			if (valid_len >= needed_size) {
5170				uint64_t zoned, zac_cap;
5171
5172				zoned = le64dec(sup_cap->zoned_cap);
5173				if (zoned & ATA_ZONED_VALID) {
5174					/*
5175					 * This should have already been
5176					 * set, because this is also in the
5177					 * ATA identify data.
5178					 */
5179					if ((zoned & ATA_ZONED_MASK) ==
5180					    ATA_SUPPORT_ZONE_HOST_AWARE)
5181						softc->zone_mode =
5182						    DA_ZONE_HOST_AWARE;
5183					else if ((zoned & ATA_ZONED_MASK) ==
5184					    ATA_SUPPORT_ZONE_DEV_MANAGED)
5185						softc->zone_mode =
5186						    DA_ZONE_DRIVE_MANAGED;
5187				}
5188
5189				zac_cap = le64dec(sup_cap->sup_zac_cap);
5190				if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
5191					if (zac_cap & ATA_REPORT_ZONES_SUP)
5192						softc->zone_flags |=
5193						    DA_ZONE_FLAG_RZ_SUP;
5194					if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
5195						softc->zone_flags |=
5196						    DA_ZONE_FLAG_OPEN_SUP;
5197					if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
5198						softc->zone_flags |=
5199						    DA_ZONE_FLAG_CLOSE_SUP;
5200					if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
5201						softc->zone_flags |=
5202						    DA_ZONE_FLAG_FINISH_SUP;
5203					if (zac_cap & ATA_ND_RWP_SUP)
5204						softc->zone_flags |=
5205						    DA_ZONE_FLAG_RWP_SUP;
5206				} else {
5207					/*
5208					 * This field was introduced in
5209					 * ACS-4, r08 on April 28th, 2015.
5210					 * If the drive firmware was written
5211					 * to an earlier spec, it won't have
5212					 * the field.  So, assume all
5213					 * commands are supported.
5214					 */
5215					softc->zone_flags |=
5216					    DA_ZONE_FLAG_SUP_MASK;
5217				}
5218
5219			}
5220		} else {
5221			error = daerror(done_ccb, CAM_RETRY_SELTO,
5222					SF_RETRY_UA|SF_NO_PRINT);
5223			if (error == ERESTART)
5224				return;
5225			else if (error != 0) {
5226				/*
5227				 * If we can't get the ATA Identify Data
5228				 * Supported Capabilities page, clear the
5229				 * flag...
5230				 */
5231				softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
5232				/*
5233				 * And clear zone capabilities.
5234				 */
5235				softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
5236				if ((done_ccb->ccb_h.status &
5237				     CAM_DEV_QFRZN) != 0) {
5238					/* Don't wedge this device's queue */
5239					cam_release_devq(done_ccb->ccb_h.path,
5240							 /*relsim_flags*/0,
5241							 /*reduction*/0,
5242							 /*timeout*/0,
5243							 /*getcount_only*/0);
5244				}
5245			}
5246		}
5247
5248		free(csio->data_ptr, M_SCSIDA);
5249
5250		if ((error == 0)
5251		 && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
5252			softc->state = DA_STATE_PROBE_ATA_ZONE;
5253			xpt_release_ccb(done_ccb);
5254			xpt_schedule(periph, priority);
5255			return;
5256		}
5257		daprobedone(periph, done_ccb);
5258		return;
5259	}
5260	case DA_CCB_PROBE_ATA_ZONE:
5261	{
5262		int error;
5263
5264		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5265			struct ata_zoned_info_log *zi_log;
5266			uint32_t valid_len;
5267			size_t needed_size;
5268
5269			zi_log = (struct ata_zoned_info_log *)csio->data_ptr;
5270
5271			valid_len = csio->dxfer_len - csio->resid;
5272			needed_size = __offsetof(struct ata_zoned_info_log,
5273			    version_info) + 1 + sizeof(zi_log->version_info);
5274			if (valid_len >= needed_size) {
5275				uint64_t tmpvar;
5276
5277				tmpvar = le64dec(zi_log->zoned_cap);
5278				if (tmpvar & ATA_ZDI_CAP_VALID) {
5279					if (tmpvar & ATA_ZDI_CAP_URSWRZ)
5280						softc->zone_flags |=
5281						    DA_ZONE_FLAG_URSWRZ;
5282					else
5283						softc->zone_flags &=
5284						    ~DA_ZONE_FLAG_URSWRZ;
5285				}
5286				tmpvar = le64dec(zi_log->optimal_seq_zones);
5287				if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
5288					softc->zone_flags |=
5289					    DA_ZONE_FLAG_OPT_SEQ_SET;
5290					softc->optimal_seq_zones = (tmpvar &
5291					    ATA_ZDI_OPT_SEQ_MASK);
5292				} else {
5293					softc->zone_flags &=
5294					    ~DA_ZONE_FLAG_OPT_SEQ_SET;
5295					softc->optimal_seq_zones = 0;
5296				}
5297
5298				tmpvar =le64dec(zi_log->optimal_nonseq_zones);
5299				if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
5300					softc->zone_flags |=
5301					    DA_ZONE_FLAG_OPT_NONSEQ_SET;
5302					softc->optimal_nonseq_zones =
5303					    (tmpvar & ATA_ZDI_OPT_NS_MASK);
5304				} else {
5305					softc->zone_flags &=
5306					    ~DA_ZONE_FLAG_OPT_NONSEQ_SET;
5307					softc->optimal_nonseq_zones = 0;
5308				}
5309
5310				tmpvar = le64dec(zi_log->max_seq_req_zones);
5311				if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
5312					softc->zone_flags |=
5313					    DA_ZONE_FLAG_MAX_SEQ_SET;
5314					softc->max_seq_zones =
5315					    (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
5316				} else {
5317					softc->zone_flags &=
5318					    ~DA_ZONE_FLAG_MAX_SEQ_SET;
5319					softc->max_seq_zones = 0;
5320				}
5321			}
5322		} else {
5323			error = daerror(done_ccb, CAM_RETRY_SELTO,
5324					SF_RETRY_UA|SF_NO_PRINT);
5325			if (error == ERESTART)
5326				return;
5327			else if (error != 0) {
5328				softc->flags &= ~DA_FLAG_CAN_ATA_ZONE;
5329				softc->flags &= ~DA_ZONE_FLAG_SET_MASK;
5330
5331				if ((done_ccb->ccb_h.status &
5332				     CAM_DEV_QFRZN) != 0) {
5333					/* Don't wedge this device's queue */
5334					cam_release_devq(done_ccb->ccb_h.path,
5335							 /*relsim_flags*/0,
5336							 /*reduction*/0,
5337							 /*timeout*/0,
5338							 /*getcount_only*/0);
5339				}
5340			}
5341
5342		}
5343		free(csio->data_ptr, M_SCSIDA);
5344
5345		daprobedone(periph, done_ccb);
5346		return;
5347	}
5348	case DA_CCB_PROBE_ZONE:
5349	{
5350		int error;
5351
5352		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5353			uint32_t valid_len;
5354			size_t needed_len;
5355			struct scsi_vpd_zoned_bdc *zoned_bdc;
5356
5357			error = 0;
5358			zoned_bdc = (struct scsi_vpd_zoned_bdc *)
5359				csio->data_ptr;
5360			valid_len = csio->dxfer_len - csio->resid;
5361			needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
5362			    max_seq_req_zones) + 1 +
5363			    sizeof(zoned_bdc->max_seq_req_zones);
5364			if ((valid_len >= needed_len)
5365			 && (scsi_2btoul(zoned_bdc->page_length) >=
5366			     SVPD_ZBDC_PL)) {
5367				if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ)
5368					softc->zone_flags |=
5369					    DA_ZONE_FLAG_URSWRZ;
5370				else
5371					softc->zone_flags &=
5372					    ~DA_ZONE_FLAG_URSWRZ;
5373				softc->optimal_seq_zones =
5374				    scsi_4btoul(zoned_bdc->optimal_seq_zones);
5375				softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
5376				softc->optimal_nonseq_zones = scsi_4btoul(
5377				    zoned_bdc->optimal_nonseq_zones);
5378				softc->zone_flags |=
5379				    DA_ZONE_FLAG_OPT_NONSEQ_SET;
5380				softc->max_seq_zones =
5381				    scsi_4btoul(zoned_bdc->max_seq_req_zones);
5382				softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
5383			}
5384			/*
5385			 * All of the zone commands are mandatory for SCSI
5386			 * devices.
5387			 *
5388			 * XXX KDM this is valid as of September 2015.
5389			 * Re-check this assumption once the SAT spec is
5390			 * updated to support SCSI ZBC to ATA ZAC mapping.
5391			 * Since ATA allows zone commands to be reported
5392			 * as supported or not, this may not necessarily
5393			 * be true for an ATA device behind a SAT (SCSI to
5394			 * ATA Translation) layer.
5395			 */
5396			softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
5397		} else {
5398			error = daerror(done_ccb, CAM_RETRY_SELTO,
5399					SF_RETRY_UA|SF_NO_PRINT);
5400			if (error == ERESTART)
5401				return;
5402			else if (error != 0) {
5403				if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5404					/* Don't wedge this device's queue */
5405					cam_release_devq(done_ccb->ccb_h.path,
5406							 /*relsim_flags*/0,
5407							 /*reduction*/0,
5408							 /*timeout*/0,
5409							 /*getcount_only*/0);
5410				}
5411			}
5412		}
5413
5414		free(csio->data_ptr, M_SCSIDA);
5415
5416		daprobedone(periph, done_ccb);
5417		return;
5418	}
5419	case DA_CCB_DUMP:
5420		/* No-op.  We're polling */
5421		return;
5422	case DA_CCB_TUR:
5423	{
5424		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5425
5426			if (daerror(done_ccb, CAM_RETRY_SELTO,
5427			    SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) ==
5428			    ERESTART)
5429				return;
5430			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5431				cam_release_devq(done_ccb->ccb_h.path,
5432						 /*relsim_flags*/0,
5433						 /*reduction*/0,
5434						 /*timeout*/0,
5435						 /*getcount_only*/0);
5436		}
5437		xpt_release_ccb(done_ccb);
5438		cam_periph_release_locked(periph);
5439		return;
5440	}
5441	default:
5442		break;
5443	}
5444	xpt_release_ccb(done_ccb);
5445}
5446
5447static void
5448dareprobe(struct cam_periph *periph)
5449{
5450	struct da_softc	  *softc;
5451	cam_status status;
5452
5453	softc = (struct da_softc *)periph->softc;
5454
5455	/* Probe in progress; don't interfere. */
5456	if (softc->state != DA_STATE_NORMAL)
5457		return;
5458
5459	status = cam_periph_acquire(periph);
5460	KASSERT(status == CAM_REQ_CMP,
5461	    ("dareprobe: cam_periph_acquire failed"));
5462
5463	softc->state = DA_STATE_PROBE_WP;
5464	xpt_schedule(periph, CAM_PRIORITY_DEV);
5465}
5466
/*
 * Error recovery callback for da(4) CCBs.  Examines the failed CCB,
 * applies da-specific workarounds and unit-attention handling, then
 * hands the CCB to the generic cam_periph_error() machinery.
 *
 * Returns ERESTART when the command was transformed and reissued
 * (the READ(6)/WRITE(6) -> 10 byte CDB upgrade); otherwise returns
 * the disposition chosen by cam_periph_error().
 */
static int
daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct da_softc	  *softc;
	struct cam_periph *periph;
	int error, error_code, sense_key, asc, ascq;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	/*
	 * Automatically detect devices that do not support
	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
	 */
	error = 0;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
		error = cmd6workaround(ccb);
	} else if (scsi_extract_sense_ccb(ccb,
	    &error_code, &sense_key, &asc, &ascq)) {
		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
			error = cmd6workaround(ccb);
		/*
		 * If the target replied with CAPACITY DATA HAS CHANGED UA,
		 * query the capacity and notify upper layers.
		 */
		else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x2A && ascq == 0x09) {
			xpt_print(periph->path, "Capacity data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			/* dareprobe() already announced it; stay quiet. */
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x28 && ascq == 0x00) {
			/* Medium may have changed: tell GEOM to revalidate. */
			softc->flags &= ~DA_FLAG_PROBED;
			disk_media_changed(softc->disk, M_NOWAIT);
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x3F && ascq == 0x03) {
			xpt_print(periph->path, "INQUIRY data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_NOT_READY &&
		    asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			/* Medium not present: invalidate the pack once. */
			softc->flags |= DA_FLAG_PACK_INVALID;
			disk_media_gone(softc->disk, M_NOWAIT);
		}
	}
	if (error == ERESTART)
		return (ERESTART);

#ifdef CAM_IO_STATS
	/* Accumulate per-device error statistics. */
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		softc->timeouts++;
		break;
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
		softc->errors++;
		break;
	default:
		break;
	}
#endif

	/*
	 * XXX
	 * Until we have a better way of doing pack validation,
	 * don't treat UAs as errors.
	 */
	sense_flags |= SF_RETRY_UA;

	if (softc->quirks & DA_Q_RETRY_BUSY)
		sense_flags |= SF_RETRY_BUSY;
	return(cam_periph_error(ccb, cam_flags, sense_flags,
				&softc->saved_ccb));
}
5546
5547static void
5548damediapoll(void *arg)
5549{
5550	struct cam_periph *periph = arg;
5551	struct da_softc *softc = periph->softc;
5552
5553	if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
5554	    LIST_EMPTY(&softc->pending_ccbs)) {
5555		if (cam_periph_acquire(periph) == CAM_REQ_CMP) {
5556			cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
5557			daschedule(periph);
5558		}
5559	}
5560	/* Queue us up again */
5561	if (da_poll_period != 0)
5562		callout_schedule(&softc->mediapoll_c, da_poll_period * hz);
5563}
5564
5565static void
5566daprevent(struct cam_periph *periph, int action)
5567{
5568	struct	da_softc *softc;
5569	union	ccb *ccb;
5570	int	error;
5571
5572	softc = (struct da_softc *)periph->softc;
5573
5574	if (((action == PR_ALLOW)
5575	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
5576	 || ((action == PR_PREVENT)
5577	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
5578		return;
5579	}
5580
5581	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
5582
5583	scsi_prevent(&ccb->csio,
5584		     /*retries*/1,
5585		     /*cbcfp*/dadone,
5586		     MSG_SIMPLE_Q_TAG,
5587		     action,
5588		     SSD_FULL_SIZE,
5589		     5000);
5590
5591	error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
5592	    SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);
5593
5594	if (error == 0) {
5595		if (action == PR_ALLOW)
5596			softc->flags &= ~DA_FLAG_PACK_LOCKED;
5597		else
5598			softc->flags |= DA_FLAG_PACK_LOCKED;
5599	}
5600
5601	xpt_release_ccb(ccb);
5602}
5603
/*
 * Record the disk's parameters from a READ CAPACITY result and push
 * them out to the disk(9) layer.
 *
 * block_len/maxsector come from READ CAPACITY (10 or 16); rcaplong,
 * when non-NULL, is the raw READ CAPACITY(16) data (rcap_len bytes)
 * used to derive physical-block/stripe geometry and to update the EDT.
 */
static void
dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
	  struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
{
	struct ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;
	u_int lbppbe, lalba;
	int error;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = block_len;
	dp->sectors = maxsector + 1;
	if (rcaplong != NULL) {
		/* Logical blocks per physical block exponent, and the
		 * lowest aligned LBA, from READ CAPACITY(16). */
		lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
		lalba = scsi_2btoul(rcaplong->lalba_lbp);
		lalba &= SRC16_LALBA_A;
	} else {
		lbppbe = 0;
		lalba = 0;
	}

	/*
	 * Choose a stripe size/offset: prefer the device-reported
	 * physical block geometry, then the 4K quirk, then the UNMAP
	 * granularity, else none.
	 */
	if (lbppbe > 0) {
		dp->stripesize = block_len << lbppbe;
		dp->stripeoffset = (dp->stripesize - block_len * lalba) %
		    dp->stripesize;
	} else if (softc->quirks & DA_Q_4K) {
		dp->stripesize = 4096;
		dp->stripeoffset = 0;
	} else if (softc->unmap_gran != 0) {
		dp->stripesize = block_len * softc->unmap_gran;
		dp->stripeoffset = (dp->stripesize - block_len *
		    softc->unmap_gran_align) % dp->stripesize;
	} else {
		dp->stripesize = 0;
		dp->stripeoffset = 0;
	}
	/*
	 * Have the controller provide us with a geometry
	 * for this disk.  The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb*)&ccg);
	if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/*
		 * We don't know what went wrong here- but just pick
		 * a geometry so we don't have nasty things like divide
		 * by zero.
		 */
		dp->heads = 255;
		dp->secs_per_track = 255;
		dp->cylinders = dp->sectors / (255 * 255);
		if (dp->cylinders == 0) {
			dp->cylinders = 1;
		}
	} else {
		dp->heads = ccg.heads;
		dp->secs_per_track = ccg.secs_per_track;
		dp->cylinders = ccg.cylinders;
	}

	/*
	 * If the user supplied a read capacity buffer, and if it is
	 * different than the previous buffer, update the data in the EDT.
	 * If it's the same, we don't bother.  This avoids sending an
	 * update every time someone opens this device.
	 */
	if ((rcaplong != NULL)
	 && (bcmp(rcaplong, &softc->rcaplong,
		  min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
		struct ccb_dev_advinfo cdai;

		xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_STORE;
		cdai.bufsiz = rcap_len;
		cdai.buf = (uint8_t *)rcaplong;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
		if (cdai.ccb_h.status != CAM_REQ_CMP) {
			xpt_print(periph->path, "%s: failed to set read "
				  "capacity advinfo\n", __func__);
			/* Use cam_error_print() to decode the status */
			cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
					CAM_EPF_ALL);
		} else {
			/* Cache the data so future opens can skip the store. */
			bcopy(rcaplong, &softc->rcaplong,
			      min(sizeof(softc->rcaplong), rcap_len));
		}
	}

	/* Publish the final parameters to disk(9) and devstat. */
	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
	softc->disk->d_stripesize = softc->params.stripesize;
	softc->disk->d_stripeoffset = softc->params.stripeoffset;
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;
	softc->disk->d_devstat->block_size = softc->params.secsize;
	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;

	error = disk_resize(softc->disk, M_NOWAIT);
	if (error != 0)
		xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
}
5723
5724static void
5725dasendorderedtag(void *arg)
5726{
5727	struct da_softc *softc = arg;
5728
5729	if (da_send_ordered) {
5730		if (!LIST_EMPTY(&softc->pending_ccbs)) {
5731			if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
5732				softc->flags |= DA_FLAG_NEED_OTAG;
5733			softc->flags &= ~DA_FLAG_WAS_OTAG;
5734		}
5735	}
5736	/* Queue us up again */
5737	callout_reset(&softc->sendordered_c,
5738	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
5739	    dasendorderedtag, softc);
5740}
5741
5742/*
5743 * Step through all DA peripheral drivers, and if the device is still open,
5744 * sync the disk cache to physical media.
5745 */
5746static void
5747dashutdown(void * arg, int howto)
5748{
5749	struct cam_periph *periph;
5750	struct da_softc *softc;
5751	union ccb *ccb;
5752	int error;
5753
5754	CAM_PERIPH_FOREACH(periph, &dadriver) {
5755		softc = (struct da_softc *)periph->softc;
5756		if (SCHEDULER_STOPPED()) {
5757			/* If we paniced with the lock held, do not recurse. */
5758			if (!cam_periph_owned(periph) &&
5759			    (softc->flags & DA_FLAG_OPEN)) {
5760				dadump(softc->disk, NULL, 0, 0, 0);
5761			}
5762			continue;
5763		}
5764		cam_periph_lock(periph);
5765
5766		/*
5767		 * We only sync the cache if the drive is still open, and
5768		 * if the drive is capable of it..
5769		 */
5770		if (((softc->flags & DA_FLAG_OPEN) == 0)
5771		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
5772			cam_periph_unlock(periph);
5773			continue;
5774		}
5775
5776		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
5777		scsi_synchronize_cache(&ccb->csio,
5778				       /*retries*/0,
5779				       /*cbfcnp*/dadone,
5780				       MSG_SIMPLE_Q_TAG,
5781				       /*begin_lba*/0, /* whole disk */
5782				       /*lb_count*/0,
5783				       SSD_FULL_SIZE,
5784				       60 * 60 * 1000);
5785
5786		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
5787		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
5788		    softc->disk->d_devstat);
5789		if (error != 0)
5790			xpt_print(periph->path, "Synchronize cache failed\n");
5791		xpt_release_ccb(ccb);
5792		cam_periph_unlock(periph);
5793	}
5794}
5795
5796#else /* !_KERNEL */
5797
5798/*
5799 * XXX These are only left out of the kernel build to silence warnings.  If,
5800 * for some reason these functions are used in the kernel, the ifdefs should
5801 * be moved so they are included both in the kernel and userland.
5802 */
5803void
5804scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
5805		 void (*cbfcnp)(struct cam_periph *, union ccb *),
5806		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
5807		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
5808		 u_int32_t timeout)
5809{
5810	struct scsi_format_unit *scsi_cmd;
5811
5812	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
5813	scsi_cmd->opcode = FORMAT_UNIT;
5814	scsi_cmd->byte2 = byte2;
5815	scsi_ulto2b(ileave, scsi_cmd->interleave);
5816
5817	cam_fill_csio(csio,
5818		      retries,
5819		      cbfcnp,
5820		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5821		      tag_action,
5822		      data_ptr,
5823		      dxfer_len,
5824		      sense_len,
5825		      sizeof(*scsi_cmd),
5826		      timeout);
5827}
5828
5829void
5830scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
5831		  void (*cbfcnp)(struct cam_periph *, union ccb *),
5832		  uint8_t tag_action, uint8_t list_format,
5833		  uint32_t addr_desc_index, uint8_t *data_ptr,
5834		  uint32_t dxfer_len, int minimum_cmd_size,
5835		  uint8_t sense_len, uint32_t timeout)
5836{
5837	uint8_t cdb_len;
5838
5839	/*
5840	 * These conditions allow using the 10 byte command.  Otherwise we
5841	 * need to use the 12 byte command.
5842	 */
5843	if ((minimum_cmd_size <= 10)
5844	 && (addr_desc_index == 0)
5845	 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
5846		struct scsi_read_defect_data_10 *cdb10;
5847
5848		cdb10 = (struct scsi_read_defect_data_10 *)
5849			&csio->cdb_io.cdb_bytes;
5850
5851		cdb_len = sizeof(*cdb10);
5852		bzero(cdb10, cdb_len);
5853                cdb10->opcode = READ_DEFECT_DATA_10;
5854                cdb10->format = list_format;
5855                scsi_ulto2b(dxfer_len, cdb10->alloc_length);
5856	} else {
5857		struct scsi_read_defect_data_12 *cdb12;
5858
5859		cdb12 = (struct scsi_read_defect_data_12 *)
5860			&csio->cdb_io.cdb_bytes;
5861
5862		cdb_len = sizeof(*cdb12);
5863		bzero(cdb12, cdb_len);
5864                cdb12->opcode = READ_DEFECT_DATA_12;
5865                cdb12->format = list_format;
5866                scsi_ulto4b(dxfer_len, cdb12->alloc_length);
5867		scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
5868	}
5869
5870	cam_fill_csio(csio,
5871		      retries,
5872		      cbfcnp,
5873		      /*flags*/ CAM_DIR_IN,
5874		      tag_action,
5875		      data_ptr,
5876		      dxfer_len,
5877		      sense_len,
5878		      cdb_len,
5879		      timeout);
5880}
5881
5882void
5883scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
5884	      void (*cbfcnp)(struct cam_periph *, union ccb *),
5885	      u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
5886	      u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
5887	      u_int32_t timeout)
5888{
5889	struct scsi_sanitize *scsi_cmd;
5890
5891	scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
5892	scsi_cmd->opcode = SANITIZE;
5893	scsi_cmd->byte2 = byte2;
5894	scsi_cmd->control = control;
5895	scsi_ulto2b(dxfer_len, scsi_cmd->length);
5896
5897	cam_fill_csio(csio,
5898		      retries,
5899		      cbfcnp,
5900		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5901		      tag_action,
5902		      data_ptr,
5903		      dxfer_len,
5904		      sense_len,
5905		      sizeof(*scsi_cmd),
5906		      timeout);
5907}
5908
5909#endif /* _KERNEL */
5910
5911void
5912scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
5913	     void (*cbfcnp)(struct cam_periph *, union ccb *),
5914	     uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
5915	     uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
5916	     uint8_t sense_len, uint32_t timeout)
5917{
5918	struct scsi_zbc_out *scsi_cmd;
5919
5920	scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
5921	scsi_cmd->opcode = ZBC_OUT;
5922	scsi_cmd->service_action = service_action;
5923	scsi_u64to8b(zone_id, scsi_cmd->zone_id);
5924	scsi_cmd->zone_flags = zone_flags;
5925
5926	cam_fill_csio(csio,
5927		      retries,
5928		      cbfcnp,
5929		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
5930		      tag_action,
5931		      data_ptr,
5932		      dxfer_len,
5933		      sense_len,
5934		      sizeof(*scsi_cmd),
5935		      timeout);
5936}
5937
5938void
5939scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
5940	    void (*cbfcnp)(struct cam_periph *, union ccb *),
5941	    uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
5942	    uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
5943	    uint8_t sense_len, uint32_t timeout)
5944{
5945	struct scsi_zbc_in *scsi_cmd;
5946
5947	scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
5948	scsi_cmd->opcode = ZBC_IN;
5949	scsi_cmd->service_action = service_action;
5950	scsi_ulto4b(dxfer_len, scsi_cmd->length);
5951	scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
5952	scsi_cmd->zone_options = zone_options;
5953
5954	cam_fill_csio(csio,
5955		      retries,
5956		      cbfcnp,
5957		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
5958		      tag_action,
5959		      data_ptr,
5960		      dxfer_len,
5961		      sense_len,
5962		      sizeof(*scsi_cmd),
5963		      timeout);
5964
5965}
5966
/*
 * Build a ZAC MANAGEMENT OUT command for an ATA device behind a SAT
 * layer, using either the non-NCQ ZAC MANAGEMENT OUT command or the
 * NCQ encoding (NCQ NON-DATA / SEND FPDMA QUEUED) when use_ncq is set.
 * The actual CDB is constructed by scsi_ata_pass().
 *
 * Returns 0 on success, or 1 if the requested transfer length cannot
 * be encoded in the NCQ FEATURE field.
 *
 * NOTE(review): the sense_len parameter is not passed through;
 * SSD_FULL_SIZE is always handed to scsi_ata_pass() -- confirm whether
 * that is intentional.
 */
int
scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
		      void (*cbfcnp)(struct cam_periph *, union ccb *),
		      uint8_t tag_action, int use_ncq,
		      uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
		      uint8_t *data_ptr, uint32_t dxfer_len,
		      uint8_t *cdb_storage, size_t cdb_storage_len,
		      uint8_t sense_len, uint32_t timeout)
{
	uint8_t command_out, protocol, ata_flags;
	uint16_t features_out;
	uint32_t sectors_out, auxiliary;
	int retval;

	retval = 0;

	if (use_ncq == 0) {
		command_out = ATA_ZAC_MANAGEMENT_OUT;
		/* Zone action in the low nibble, flags in the high byte. */
		features_out = (zm_action & 0xf) | (zone_flags << 8);
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			protocol = AP_PROTO_NON_DATA;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
			sectors_out = 0;
		} else {
			protocol = AP_PROTO_DMA;
			ata_flags |= AP_FLAG_TLEN_SECT_CNT |
				     AP_FLAG_TDIR_TO_DEV;
			sectors_out = ((dxfer_len >> 9) & 0xffff);
		}
		auxiliary = 0;
	} else {
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			command_out = ATA_NCQ_NON_DATA;
			features_out = ATA_NCQ_ZAC_MGMT_OUT;
			/*
			 * We're assuming the SCSI to ATA translation layer
			 * will set the NCQ tag number in the tag field.
			 * That isn't clear from the SAT-4 spec (as of rev 05).
			 */
			sectors_out = 0;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
		} else {
			command_out = ATA_SEND_FPDMA_QUEUED;
			/*
			 * Note that we're defaulting to normal priority,
			 * and assuming that the SCSI to ATA translation
			 * layer will insert the NCQ tag number in the tag
			 * field.  That isn't clear in the SAT-4 spec (as
			 * of rev 05).
			 */
			sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;

			ata_flags |= AP_FLAG_TLEN_FEAT |
				     AP_FLAG_TDIR_TO_DEV;

			/*
			 * For SEND FPDMA QUEUED, the transfer length is
			 * encoded in the FEATURE register, and 0 means
			 * that 65536 512 byte blocks are to be transferred.
			 * In practice, it seems unlikely that we'll see
			 * a transfer that large, and it may confuse the
			 * SAT layer, because generally that means that
			 * 0 bytes should be transferred.
			 */
			if (dxfer_len == (65536 * 512)) {
				features_out = 0;
			} else if (dxfer_len <= (65535 * 512)) {
				features_out = ((dxfer_len >> 9) & 0xffff);
			} else {
				/* The transfer is too big. */
				retval = 1;
				goto bailout;
			}

		}

		/* For NCQ the action/flags travel in the AUXILIARY field. */
		auxiliary = (zm_action & 0xf) | (zone_flags << 8);
		protocol = AP_PROTO_FPDMA;
	}

	protocol |= AP_EXTEND;

	retval = scsi_ata_pass(csio,
	    retries,
	    cbfcnp,
	    /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
	    tag_action,
	    /*protocol*/ protocol,
	    /*ata_flags*/ ata_flags,
	    /*features*/ features_out,
	    /*sector_count*/ sectors_out,
	    /*lba*/ zone_id,
	    /*command*/ command_out,
	    /*device*/ 0,
	    /*icc*/ 0,
	    /*auxiliary*/ auxiliary,
	    /*control*/ 0,
	    /*data_ptr*/ data_ptr,
	    /*dxfer_len*/ dxfer_len,
	    /*cdb_storage*/ cdb_storage,
	    /*cdb_storage_len*/ cdb_storage_len,
	    /*minimum_cmd_size*/ 0,
	    /*sense_len*/ SSD_FULL_SIZE,
	    /*timeout*/ timeout);

bailout:

	return (retval);
}
6078
6079int
6080scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
6081		     void (*cbfcnp)(struct cam_periph *, union ccb *),
6082		     uint8_t tag_action, int use_ncq,
6083		     uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
6084		     uint8_t *data_ptr, uint32_t dxfer_len,
6085		     uint8_t *cdb_storage, size_t cdb_storage_len,
6086		     uint8_t sense_len, uint32_t timeout)
6087{
6088	uint8_t command_out, protocol;
6089	uint16_t features_out, sectors_out;
6090	uint32_t auxiliary;
6091	int ata_flags;
6092	int retval;
6093
6094	retval = 0;
6095	ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS;
6096
6097	if (use_ncq == 0) {
6098		command_out = ATA_ZAC_MANAGEMENT_IN;
6099		/* XXX KDM put a macro here */
6100		features_out = (zm_action & 0xf) | (zone_flags << 8);
6101		sectors_out = dxfer_len >> 9; /* XXX KDM macro */
6102		protocol = AP_PROTO_DMA;
6103		ata_flags |= AP_FLAG_TLEN_SECT_CNT;
6104		auxiliary = 0;
6105	} else {
6106		ata_flags |= AP_FLAG_TLEN_FEAT;
6107
6108		command_out = ATA_RECV_FPDMA_QUEUED;
6109		sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
6110
6111		/*
6112		 * For RECEIVE FPDMA QUEUED, the transfer length is
6113		 * encoded in the FEATURE register, and 0 means
6114		 * that 65536 512 byte blocks are to be tranferred.
6115		 * In practice, it seems unlikely that we'll see
6116		 * a transfer that large, and it may confuse the
6117		 * the SAT layer, because generally that means that
6118		 * 0 bytes should be transferred.
6119		 */
6120		if (dxfer_len == (65536 * 512)) {
6121			features_out = 0;
6122		} else if (dxfer_len <= (65535 * 512)) {
6123			features_out = ((dxfer_len >> 9) & 0xffff);
6124		} else {
6125			/* The transfer is too big. */
6126			retval = 1;
6127			goto bailout;
6128		}
6129		auxiliary = (zm_action & 0xf) | (zone_flags << 8),
6130		protocol = AP_PROTO_FPDMA;
6131	}
6132
6133	protocol |= AP_EXTEND;
6134
6135	retval = scsi_ata_pass(csio,
6136	    retries,
6137	    cbfcnp,
6138	    /*flags*/ CAM_DIR_IN,
6139	    tag_action,
6140	    /*protocol*/ protocol,
6141	    /*ata_flags*/ ata_flags,
6142	    /*features*/ features_out,
6143	    /*sector_count*/ sectors_out,
6144	    /*lba*/ zone_id,
6145	    /*command*/ command_out,
6146	    /*device*/ 0,
6147	    /*icc*/ 0,
6148	    /*auxiliary*/ auxiliary,
6149	    /*control*/ 0,
6150	    /*data_ptr*/ data_ptr,
6151	    /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */
6152	    /*cdb_storage*/ cdb_storage,
6153	    /*cdb_storage_len*/ cdb_storage_len,
6154	    /*minimum_cmd_size*/ 0,
6155	    /*sense_len*/ SSD_FULL_SIZE,
6156	    /*timeout*/ timeout);
6157
6158bailout:
6159	return (retval);
6160}
6161