1/*-
2 * Implementation of SCSI Direct Access Peripheral driver for CAM.
3 *
4 * SPDX-License-Identifier: BSD-2-Clause
5 *
6 * Copyright (c) 1997 Justin T. Gibbs.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions, and the following disclaimer,
14 *    without modification, immediately at the beginning of the file.
15 * 2. The name of the author may not be used to endorse or promote products
16 *    derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31#include <sys/param.h>
32
33#ifdef _KERNEL
34#include "opt_da.h"
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/taskqueue.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/conf.h>
43#include <sys/devicestat.h>
44#include <sys/eventhandler.h>
45#include <sys/malloc.h>
46#include <sys/cons.h>
47#include <sys/endian.h>
48#include <sys/proc.h>
49#include <sys/reboot.h>
50#include <sys/sbuf.h>
51#include <geom/geom.h>
52#include <geom/geom_disk.h>
53#include <machine/atomic.h>
54#endif /* _KERNEL */
55
56#ifndef _KERNEL
57#include <stdio.h>
58#include <string.h>
59#endif /* _KERNEL */
60
61#include <cam/cam.h>
62#include <cam/cam_ccb.h>
63#include <cam/cam_periph.h>
64#include <cam/cam_xpt_periph.h>
65#ifdef _KERNEL
66#include <cam/cam_xpt_internal.h>
67#endif /* _KERNEL */
68#include <cam/cam_sim.h>
69#include <cam/cam_iosched.h>
70
71#include <cam/scsi/scsi_message.h>
72#include <cam/scsi/scsi_da.h>
73
74#ifdef _KERNEL
75/*
76 * Note that there are probe ordering dependencies here.  The order isn't
77 * controlled by this enumeration, but by explicit state transitions in
78 * dastart() and dadone().  Here are some of the dependencies:
79 *
80 * 1. RC should come first, before RC16, unless there is evidence that RC16
81 *    is supported.
82 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
83 * 3. The ATA probes should go in this order:
84 *    ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
85 */
typedef enum {
	DA_STATE_PROBE_WP,	/* Check write-protect status */
	DA_STATE_PROBE_RC,	/* READ CAPACITY(10) */
	DA_STATE_PROBE_RC16,	/* READ CAPACITY(16) */
	DA_STATE_PROBE_LBP,	/* Logical block provisioning support */
	DA_STATE_PROBE_BLK_LIMITS,	/* Block limits */
	DA_STATE_PROBE_BDC,	/* Block device characteristics */
	DA_STATE_PROBE_ATA,	/* ATA passthrough probe */
	DA_STATE_PROBE_ATA_LOGDIR,	/* ATA log directory */
	DA_STATE_PROBE_ATA_IDDIR,	/* ATA identify-device log directory */
	DA_STATE_PROBE_ATA_SUP,	/* ATA supported capabilities */
	DA_STATE_PROBE_ATA_ZONE,	/* ATA zoned-device information */
	DA_STATE_PROBE_ZONE,	/* SCSI zoned-device information */
	DA_STATE_NORMAL		/* Probing done; normal I/O operation */
} da_state;
101
/*
 * Per-device status bits kept in da_softc->flags.  Printable with the
 * DA_FLAG_STRING %b bit string below; keep the two lists in sync.
 */
typedef enum {
	DA_FLAG_PACK_INVALID	= 0x000001,	/* Media pack no longer valid */
	DA_FLAG_NEW_PACK	= 0x000002,	/* New media pack present */
	DA_FLAG_PACK_LOCKED	= 0x000004,	/* Media removal prevented */
	DA_FLAG_PACK_REMOVABLE	= 0x000008,	/* Device has removable media */
	DA_FLAG_ROTATING	= 0x000010,	/* Rotating (non-SSD) media */
	DA_FLAG_NEED_OTAG	= 0x000020,	/* Next I/O gets an ordered tag */
	DA_FLAG_WAS_OTAG	= 0x000040,	/* Ordered tag was just issued */
	DA_FLAG_RETRY_UA	= 0x000080,	/* Retry after unit attention */
	DA_FLAG_OPEN		= 0x000100,	/* Device is open */
	DA_FLAG_SCTX_INIT	= 0x000200,	/* Sysctl context initialized */
	DA_FLAG_CAN_RC16	= 0x000400,	/* READ CAPACITY(16) works */
	DA_FLAG_PROBED		= 0x000800,	/* Initial probing completed */
	DA_FLAG_DIRTY		= 0x001000,	/* Media written; cache unsynced */
	DA_FLAG_ANNOUNCED	= 0x002000,	/* Attach announcement printed */
	DA_FLAG_CAN_ATA_DMA	= 0x004000,	/* ATA passthrough DMA usable */
	DA_FLAG_CAN_ATA_LOG	= 0x008000,	/* ATA log commands usable */
	DA_FLAG_CAN_ATA_IDLOG	= 0x010000,	/* ATA identify log usable */
	DA_FLAG_CAN_ATA_SUPCAP	= 0x020000,	/* ATA supported-caps page usable */
	DA_FLAG_CAN_ATA_ZONE	= 0x040000,	/* ATA zone commands usable */
	DA_FLAG_TUR_PENDING	= 0x080000,	/* TEST UNIT READY outstanding */
	DA_FLAG_UNMAPPEDIO	= 0x100000	/* Unmapped (VA-less) I/O capable */
} da_flags;
/*
 * %b-style bit string for formatting da_flags: the leading "\020" is the
 * radix (16, i.e. hex output), then each "\NNN<name>" pair labels bit NNN
 * (1-based).  Names must match the DA_FLAG_* definitions above; the entry
 * for bit 022 previously read "SUPACP", a transposition of SUPCAP.
 */
#define DA_FLAG_STRING		\
	"\020"			\
	"\001PACK_INVALID"	\
	"\002NEW_PACK"		\
	"\003PACK_LOCKED"	\
	"\004PACK_REMOVABLE"	\
	"\005ROTATING"		\
	"\006NEED_OTAG"		\
	"\007WAS_OTAG"		\
	"\010RETRY_UA"		\
	"\011OPEN"		\
	"\012SCTX_INIT"		\
	"\013CAN_RC16"		\
	"\014PROBED"		\
	"\015DIRTY"		\
	"\016ANNOUNCED"		\
	"\017CAN_ATA_DMA"	\
	"\020CAN_ATA_LOG"	\
	"\021CAN_ATA_IDLOG"	\
	"\022CAN_ATA_SUPCAP"	\
	"\023CAN_ATA_ZONE"	\
	"\024TUR_PENDING"	\
	"\025UNMAPPEDIO"
148
/*
 * Device quirk bits, applied from matches in da_quirk_table (and possibly
 * other probe-time decisions).  Printable with DA_Q_BIT_STRING below.
 */
typedef enum {
	DA_Q_NONE		= 0x00,	/* No quirks */
	DA_Q_NO_SYNC_CACHE	= 0x01,	/* Don't send SYNCHRONIZE CACHE */
	DA_Q_NO_6_BYTE		= 0x02,	/* Don't use 6-byte CDBs */
	DA_Q_NO_PREVENT		= 0x04,	/* Don't send PREVENT/ALLOW */
	DA_Q_4K			= 0x08,	/* 4K-sector ("Advanced Format") */
	DA_Q_NO_RC16		= 0x10,	/* READ CAPACITY(16) is broken */
	DA_Q_NO_UNMAP		= 0x20,	/* UNMAP is broken or hangs */
	DA_Q_RETRY_BUSY		= 0x40,	/* Retry on BUSY status */
	DA_Q_SMR_DM		= 0x80,	/* Drive-managed SMR device */
	DA_Q_STRICT_UNMAP	= 0x100,	/* UNMAP must honor alignment */
	DA_Q_128KB		= 0x200	/* Limit I/O size to 128KB */
} da_quirks;
162
/*
 * %b-style bit string for formatting da_quirks ("\020" = hex radix,
 * then 1-based bit numbers).  Keep in sync with the DA_Q_* list above.
 */
#define DA_Q_BIT_STRING		\
	"\020"			\
	"\001NO_SYNC_CACHE"	\
	"\002NO_6_BYTE"		\
	"\003NO_PREVENT"	\
	"\0044K"		\
	"\005NO_RC16"		\
	"\006NO_UNMAP"		\
	"\007RETRY_BUSY"	\
	"\010SMR_DM"		\
	"\011STRICT_UNMAP"	\
	"\012128KB"
175
/*
 * Per-CCB state stored in the CCB's private area (the ccb_state macro,
 * aka ppriv_field0).  The low bits covered by DA_CCB_TYPE_MASK identify
 * what kind of command the CCB carries; DA_CCB_RETRY_UA is a modifier
 * bit OR'd on top.  Values 0x08-0x09 are currently unassigned.
 */
typedef enum {
	DA_CCB_PROBE_RC		= 0x01,
	DA_CCB_PROBE_RC16	= 0x02,
	DA_CCB_PROBE_LBP	= 0x03,
	DA_CCB_PROBE_BLK_LIMITS	= 0x04,
	DA_CCB_PROBE_BDC	= 0x05,
	DA_CCB_PROBE_ATA	= 0x06,
	DA_CCB_BUFFER_IO	= 0x07,
	DA_CCB_DUMP		= 0x0A,
	DA_CCB_DELETE		= 0x0B,
	DA_CCB_TUR		= 0x0C,
	DA_CCB_PROBE_ZONE	= 0x0D,
	DA_CCB_PROBE_ATA_LOGDIR	= 0x0E,
	DA_CCB_PROBE_ATA_IDDIR	= 0x0F,
	DA_CCB_PROBE_ATA_SUP	= 0x10,
	DA_CCB_PROBE_ATA_ZONE	= 0x11,
	DA_CCB_PROBE_WP		= 0x12,
	DA_CCB_TYPE_MASK	= 0x1F,	/* Extracts the command type */
	DA_CCB_RETRY_UA		= 0x20	/* Retry after unit attention */
} da_ccb_state;
196
197/*
198 * Order here is important for method choice
199 *
200 * We prefer ATA_TRIM as tests run against a Sandforce 2281 SSD attached to
201 * LSI 2008 (mps) controller (FW: v12, Drv: v14) resulted 20% quicker deletes
202 * using ATA_TRIM than the corresponding UNMAP results for a real world mysql
203 * import taking 5mins.
204 *
205 */
typedef enum {
	DA_DELETE_NONE,		/* No delete support detected */
	DA_DELETE_DISABLE,	/* Deletes administratively disabled */
	DA_DELETE_ATA_TRIM,	/* ATA DSM TRIM via passthrough */
	DA_DELETE_UNMAP,	/* SCSI UNMAP */
	DA_DELETE_WS16,		/* WRITE SAME(16) with UNMAP bit */
	DA_DELETE_WS10,		/* WRITE SAME(10) with UNMAP bit */
	DA_DELETE_ZERO,		/* WRITE SAME of zeroes */
	/* MIN/MAX bracket the real methods (exclude NONE and DISABLE). */
	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,
	DA_DELETE_MAX = DA_DELETE_ZERO
} da_delete_methods;
217
218/*
219 * For SCSI, host managed drives show up as a separate device type.  For
220 * ATA, host managed drives also have a different device signature.
221 * XXX KDM figure out the ATA host managed signature.
222 */
/* Zoned-block-device model reported by (or inferred for) the drive. */
typedef enum {
	DA_ZONE_NONE		= 0x00,	/* Conventional, not zoned */
	DA_ZONE_DRIVE_MANAGED	= 0x01,	/* Zones hidden by the drive */
	DA_ZONE_HOST_AWARE	= 0x02,	/* Host-aware zoned device */
	DA_ZONE_HOST_MANAGED	= 0x03	/* Host-managed zoned device */
} da_zone_mode;
229
230/*
231 * We distinguish between these interface cases in addition to the drive type:
232 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
233 * o ATA drive behind a SCSI translation layer that does not know about
234 *   ZBC/ZAC, and so needs to be managed via ATA passthrough.  In this
235 *   case, we would need to share the ATA code with the ada(4) driver.
236 * o SCSI drive.
237 */
/* How zone commands reach the device; see the interface cases above. */
typedef enum {
	DA_ZONE_IF_SCSI,	/* Native SCSI (ZBC) commands */
	DA_ZONE_IF_ATA_PASS,	/* ATA ZAC via SCSI/ATA passthrough */
	DA_ZONE_IF_ATA_SAT,	/* ATA drive behind a ZBC-aware SAT layer */
} da_zone_interface;
243
/*
 * Zone capability/state flags.  The *_SUP bits record which zone
 * operations the device supports (DA_ZONE_FLAG_SUP_MASK covers them
 * all); the *_SET bits record which of the optimal/max zone counts
 * below have been learned from the device (DA_ZONE_FLAG_SET_MASK).
 */
typedef enum {
	DA_ZONE_FLAG_RZ_SUP		= 0x0001,	/* REPORT ZONES */
	DA_ZONE_FLAG_OPEN_SUP		= 0x0002,	/* OPEN ZONE */
	DA_ZONE_FLAG_CLOSE_SUP		= 0x0004,	/* CLOSE ZONE */
	DA_ZONE_FLAG_FINISH_SUP		= 0x0008,	/* FINISH ZONE */
	DA_ZONE_FLAG_RWP_SUP		= 0x0010,	/* RESET WRITE POINTER */
	DA_ZONE_FLAG_SUP_MASK		= (DA_ZONE_FLAG_RZ_SUP |
					   DA_ZONE_FLAG_OPEN_SUP |
					   DA_ZONE_FLAG_CLOSE_SUP |
					   DA_ZONE_FLAG_FINISH_SUP |
					   DA_ZONE_FLAG_RWP_SUP),
	DA_ZONE_FLAG_URSWRZ		= 0x0020,	/* Unrestricted reads */
	DA_ZONE_FLAG_OPT_SEQ_SET	= 0x0040,
	DA_ZONE_FLAG_OPT_NONSEQ_SET	= 0x0080,
	DA_ZONE_FLAG_MAX_SEQ_SET	= 0x0100,
	DA_ZONE_FLAG_SET_MASK		= (DA_ZONE_FLAG_OPT_SEQ_SET |
					   DA_ZONE_FLAG_OPT_NONSEQ_SET |
					   DA_ZONE_FLAG_MAX_SEQ_SET)
} da_zone_flags;
263
/*
 * Human-readable descriptions of the zone capability (*_SUP) flags,
 * for use when reporting zone support.
 */
static struct da_zone_desc {
	da_zone_flags value;	/* Flag bit being described */
	const char *desc;	/* Description of the operation */
} da_zone_desc_table[] = {
	{DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
	{DA_ZONE_FLAG_OPEN_SUP, "Open" },
	{DA_ZONE_FLAG_CLOSE_SUP, "Close" },
	{DA_ZONE_FLAG_FINISH_SUP, "Finish" },
	{DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
};
274
/*
 * Signature of the per-method delete (TRIM/UNMAP/WRITE SAME) CCB
 * builders.
 */
typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
			      struct bio *bp);
static da_delete_func_t da_delete_trim;
static da_delete_func_t da_delete_unmap;
static da_delete_func_t da_delete_ws;

/*
 * Delete-method dispatch table, indexed by da_delete_methods.  NONE and
 * DISABLE have no implementation; WS16, WS10 and ZERO all share
 * da_delete_ws.
 * NOTE(review): storing function pointers in a "const void *" array is
 * not strictly portable ISO C (object vs. function pointers), though it
 * works on the platforms this code targets.
 */
static const void * da_delete_functions[] = {
	NULL,
	NULL,
	da_delete_trim,
	da_delete_unmap,
	da_delete_ws,
	da_delete_ws,
	da_delete_ws
};
290
/* Short names and descriptions of each method, indexed by da_delete_methods. */
static const char *da_delete_method_names[] =
    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
static const char *da_delete_method_desc[] =
    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
      "WRITE SAME(10) with UNMAP", "ZERO" };
296
/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0	/* da_ccb_state for this CCB */
#define ccb_bp		ppriv_ptr1	/* Associated struct bio, if any */
300
/* Disk geometry and capacity, as probed or synthesized. */
struct disk_params {
	uint8_t  heads;		/* Number of heads */
	uint32_t cylinders;	/* Number of cylinders */
	uint8_t  secs_per_track;	/* Sectors per track */
	uint32_t secsize;	/* Number of bytes/sector */
	uint64_t sectors;	/* total number sectors */
	u_int     stripesize;	/* Physical stripe (e.g. 4K sector) size */
	u_int     stripeoffset;	/* Offset of LBA 0 within a stripe */
};
310
/* Sizing for the UNMAP parameter list kept in da_softc->unmap_buf. */
#define UNMAP_RANGE_MAX		0xffffffff	/* Max blocks in one range */
#define UNMAP_HEAD_SIZE		8	/* Parameter list header bytes */
#define UNMAP_RANGE_SIZE	16	/* Bytes per block descriptor */
#define UNMAP_MAX_RANGES	2048 /* Protocol Max is 4095 */
#define UNMAP_BUF_SIZE		((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
				UNMAP_HEAD_SIZE)

#define WS10_MAX_BLKS		0xffff		/* WRITE SAME(10) block limit */
#define WS16_MAX_BLKS		0xffffffff	/* WRITE SAME(16) block limit */
/* TRIM ranges that fit in the same buffer when used for ATA DSM TRIM. */
#define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)

/* Extra-work flag bit: periodic TEST UNIT READY (media poll) needed. */
#define DA_WORK_TUR		(1 << 16)
324
/*
 * Reference-count token types; per-token counts are kept in
 * da_softc->ref_flags[], indexed by these values.
 */
typedef enum {
	DA_REF_OPEN = 1,	/* Device open */
	DA_REF_OPEN_HOLD,	/* Hold taken during open */
	DA_REF_CLOSE_HOLD,	/* Hold taken during close */
	DA_REF_TUR,		/* TEST UNIT READY outstanding */
	DA_REF_GEOM,		/* GEOM layer reference */
	DA_REF_SYSCTL,		/* Sysctl handler reference */
	DA_REF_REPROBE,		/* Reprobe in progress */
	DA_REF_MAX		/* KEEP LAST */
} da_ref_token;
335
/*
 * Per-device instance state for the da driver, hung off the periph.
 */
struct da_softc {
	struct   cam_iosched_softc *cam_iosched;	/* I/O scheduler state */
	struct	 bio_queue_head delete_run_queue;	/* BIOs in current delete */
	LIST_HEAD(, ccb_hdr) pending_ccbs;	/* CCBs outstanding */
	int	 refcount;		/* Active xpt_action() calls */
	da_state state;			/* Probe state machine position */
	da_flags flags;			/* DA_FLAG_* status bits */
	da_quirks quirks;		/* DA_Q_* quirk bits */
	int	 minimum_cmd_size;	/* Smallest CDB size to use */
	int	 mode_page;		/* Mode page to use for this device */
	int	 error_inject;		/* Error-injection control */
	int	 trim_max_ranges;	/* Max ranges per ATA TRIM */
	int	 delete_available;	/* Delete methods possibly available */
	da_zone_mode			zone_mode;	/* Zoned-device model */
	da_zone_interface		zone_interface;	/* Zone command path */
	da_zone_flags			zone_flags;	/* Zone caps/state */
	struct ata_gp_log_dir		ata_logdir;	/* ATA log directory */
	int				valid_logdir_len;
	struct ata_identify_log_pages	ata_iddir;	/* ATA id log pages */
	int				valid_iddir_len;
	uint64_t			optimal_seq_zones;
	uint64_t			optimal_nonseq_zones;
	uint64_t			max_seq_zones;
	u_int			maxio;		/* Maximum I/O size */
	uint32_t		unmap_max_ranges;	/* Max UNMAP descriptors */
	uint32_t		unmap_max_lba; /* Max LBAs in UNMAP req */
	uint32_t		unmap_gran;	/* UNMAP granularity */
	uint32_t		unmap_gran_align;	/* UNMAP alignment */
	uint64_t		ws_max_blks;	/* WRITE SAME block limit */
	uint64_t		trim_count;	/* Completed delete requests */
	uint64_t		trim_ranges;	/* Ranges in those deletes */
	uint64_t		trim_lbas;	/* LBAs in those deletes */
	da_delete_methods	delete_method_pref;	/* Preferred method */
	da_delete_methods	delete_method;		/* Method in use */
	da_delete_func_t	*delete_func;	/* Issuer for delete_method */
	int			p_type;		/* SCSI peripheral type */
	struct	 disk_params params;	/* Geometry and capacity */
	struct	 disk *disk;		/* GEOM disk */
	struct task		sysctl_task;	/* Deferred sysctl setup */
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;	/* Ordered-tag timer */
	uint64_t wwpn;		/* World wide port name, if known */
	uint8_t	 unmap_buf[UNMAP_BUF_SIZE];	/* UNMAP/TRIM parameter list */
	struct scsi_read_capacity_data_long rcaplong;	/* Last RC16 data */
	struct callout		mediapoll_c;	/* Media polling timer */
	int			ref_flags[DA_REF_MAX];	/* Per-token refcounts */
#ifdef CAM_IO_STATS
	struct sysctl_ctx_list	sysctl_stats_ctx;
	struct sysctl_oid	*sysctl_stats_tree;
	u_int	errors;		/* I/Os completed with error */
	u_int	timeouts;	/* I/Os that timed out */
	u_int	invalidations;	/* Device invalidations seen */
#endif
#define DA_ANNOUNCETMP_SZ 160
	char			announce_temp[DA_ANNOUNCETMP_SZ];
#define DA_ANNOUNCE_SZ 400
	char			announcebuf[DA_ANNOUNCE_SZ];
};
395
/*
 * Set (enable != 0) or clear the availability bit for delete_method in
 * softc->delete_available.  Wrapped in do { } while (0) so the macro is
 * a single statement: the previous bare if/else form mis-paired with a
 * surrounding unbraced if/else at the call site (dangling-else hazard).
 * Arguments are parenthesized against operator-precedence surprises.
 */
#define dadeleteflag(softc, delete_method, enable)			\
	do {								\
		if (enable) {						\
			(softc)->delete_available |=			\
			    (1 << (delete_method));			\
		} else {						\
			(softc)->delete_available &=			\
			    ~(1 << (delete_method));			\
		}							\
	} while (0)
402
403static uma_zone_t da_ccb_zone;
404
/* Quirk table entry: an inquiry match pattern and the quirks to apply. */
struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;	/* Vendor/product/rev match */
	da_quirks quirks;	/* DA_Q_* bits applied on match */
};
409
/* Vendor strings shared by multiple quirk table entries below. */
static const char quantum[] = "QUANTUM";
static const char microp[] = "MICROP";
412
413static struct da_quirk_entry da_quirk_table[] =
414{
415	/* SPI, FC devices */
416	{
417		/*
418		 * Fujitsu M2513A MO drives.
419		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
420		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
421		 * Reported by: W.Scholten <whs@xs4all.nl>
422		 */
423		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
424		/*quirks*/ DA_Q_NO_SYNC_CACHE
425	},
426	{
427		/* See above. */
428		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
429		/*quirks*/ DA_Q_NO_SYNC_CACHE
430	},
431	{
432		/*
433		 * This particular Fujitsu drive doesn't like the
434		 * synchronize cache command.
435		 * Reported by: Tom Jackson <toj@gorilla.net>
436		 */
437		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
438		/*quirks*/ DA_Q_NO_SYNC_CACHE
439	},
440	{
441		/*
442		 * This drive doesn't like the synchronize cache command
443		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
444		 * in NetBSD PR kern/6027, August 24, 1998.
445		 */
446		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
447		/*quirks*/ DA_Q_NO_SYNC_CACHE
448	},
449	{
450		/*
451		 * This drive doesn't like the synchronize cache command
452		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
453		 * (PR 8882).
454		 */
455		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
456		/*quirks*/ DA_Q_NO_SYNC_CACHE
457	},
458	{
459		/*
460		 * Doesn't like the synchronize cache command.
461		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
462		 */
463		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
464		/*quirks*/ DA_Q_NO_SYNC_CACHE
465	},
466	{
467		/*
468		 * Doesn't like the synchronize cache command.
469		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
470		 */
471		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
472		/*quirks*/ DA_Q_NO_SYNC_CACHE
473	},
474	{
475		/*
476		 * Doesn't like the synchronize cache command.
477		 */
478		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
479		/*quirks*/ DA_Q_NO_SYNC_CACHE
480	},
481	{
482		/*
483		 * Doesn't like the synchronize cache command.
484		 * Reported by: walter@pelissero.de
485		 */
486		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
487		/*quirks*/ DA_Q_NO_SYNC_CACHE
488	},
489	{
490		/*
491		 * Doesn't work correctly with 6 byte reads/writes.
492		 * Returns illegal request, and points to byte 9 of the
493		 * 6-byte CDB.
494		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
495		 */
496		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
497		/*quirks*/ DA_Q_NO_6_BYTE
498	},
499	{
500		/* See above. */
501		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
502		/*quirks*/ DA_Q_NO_6_BYTE
503	},
504	{
505		/*
506		 * Doesn't like the synchronize cache command.
507		 * Reported by: walter@pelissero.de
508		 */
509		{T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
510		/*quirks*/ DA_Q_NO_SYNC_CACHE
511	},
512	{
513		/*
514		 * The CISS RAID controllers do not support SYNC_CACHE
515		 */
516		{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
517		/*quirks*/ DA_Q_NO_SYNC_CACHE
518	},
519	{
520		/*
521		 * The STEC SSDs sometimes hang on UNMAP.
522		 */
523		{T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"},
524		/*quirks*/ DA_Q_NO_UNMAP
525	},
526	{
527		/*
528		 * VMware returns BUSY status when storage has transient
529		 * connectivity problems, so better wait.
530		 * Also VMware returns odd errors on misaligned UNMAPs.
531		 */
532		{T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"},
533		/*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP
534	},
535	/* USB mass storage devices supported by umass(4) */
536	{
537		/*
538		 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player
539		 * PR: kern/51675
540		 */
541		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"},
542		/*quirks*/ DA_Q_NO_SYNC_CACHE
543	},
544	{
545		/*
546		 * Power Quotient Int. (PQI) USB flash key
547		 * PR: kern/53067
548		 */
549		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*",
550		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
551	},
552	{
553		/*
554		 * Creative Nomad MUVO mp3 player (USB)
555		 * PR: kern/53094
556		 */
557		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
558		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
559	},
560	{
561		/*
562		 * Jungsoft NEXDISK USB flash key
563		 * PR: kern/54737
564		 */
565		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"},
566		/*quirks*/ DA_Q_NO_SYNC_CACHE
567	},
568	{
569		/*
570		 * FreeDik USB Mini Data Drive
571		 * PR: kern/54786
572		 */
573		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive",
574		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
575	},
576	{
577		/*
578		 * Sigmatel USB Flash MP3 Player
579		 * PR: kern/57046
580		 */
581		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
582		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
583	},
584	{
585		/*
586		 * Neuros USB Digital Audio Computer
587		 * PR: kern/63645
588		 */
589		{T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.",
590		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
591	},
592	{
593		/*
594		 * SEAGRAND NP-900 MP3 Player
595		 * PR: kern/64563
596		 */
597		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
598		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
599	},
600	{
601		/*
602		 * iRiver iFP MP3 player (with UMS Firmware)
603		 * PR: kern/54881, i386/63941, kern/66124
604		 */
605		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"},
606		/*quirks*/ DA_Q_NO_SYNC_CACHE
607	},
608	{
609		/*
610		 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01
611		 * PR: kern/70158
612		 */
613		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"},
614		/*quirks*/ DA_Q_NO_SYNC_CACHE
615	},
616	{
617		/*
618		 * ZICPlay USB MP3 Player with FM
619		 * PR: kern/75057
620		 */
621		{T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"},
622		/*quirks*/ DA_Q_NO_SYNC_CACHE
623	},
624	{
625		/*
626		 * TEAC USB floppy mechanisms
627		 */
628		{T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"},
629		/*quirks*/ DA_Q_NO_SYNC_CACHE
630	},
631	{
632		/*
633		 * Kingston DataTraveler II+ USB Pen-Drive.
634		 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org>
635		 */
636		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+",
637		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
638	},
639	{
640		/*
641		 * USB DISK Pro PMAP
642		 * Reported by: jhs
643		 * PR: usb/96381
644		 */
645		{T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"},
646		/*quirks*/ DA_Q_NO_SYNC_CACHE
647	},
648	{
649		/*
650		 * Motorola E398 Mobile Phone (TransFlash memory card).
651		 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl>
652		 * PR: usb/89889
653		 */
654		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone",
655		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
656	},
657	{
658		/*
659		 * Qware BeatZkey! Pro
660		 * PR: usb/79164
661		 */
662		{T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE",
663		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
664	},
665	{
666		/*
667		 * Time DPA20B 1GB MP3 Player
668		 * PR: usb/81846
669		 */
670		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*",
671		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
672	},
673	{
674		/*
675		 * Samsung USB key 128Mb
676		 * PR: usb/90081
677		 */
678		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb",
679		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
680	},
681	{
682		/*
683		 * Kingston DataTraveler 2.0 USB Flash memory.
684		 * PR: usb/89196
685		 */
686		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0",
687		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
688	},
689	{
690		/*
691		 * Creative MUVO Slim mp3 player (USB)
692		 * PR: usb/86131
693		 */
694		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
695		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
696		},
697	{
698		/*
699		 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3)
700		 * PR: usb/80487
701		 */
702		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK",
703		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
704	},
705	{
706		/*
707		 * SanDisk Micro Cruzer 128MB
708		 * PR: usb/75970
709		 */
710		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer",
711		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
712	},
713	{
714		/*
715		 * TOSHIBA TransMemory USB sticks
716		 * PR: kern/94660
717		 */
718		{T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory",
719		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
720	},
721	{
722		/*
723		 * PNY USB 3.0 Flash Drives
724		*/
725		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
726		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
727	},
728	{
729		/*
730		 * PNY USB Flash keys
731		 * PR: usb/75578, usb/72344, usb/65436
732		 */
733		{T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*",
734		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
735	},
736	{
737		/*
738		 * Genesys GL3224
739		 */
740		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
741		"120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16
742	},
743	{
744		/*
745		 * Genesys 6-in-1 Card Reader
746		 * PR: usb/94647
747		 */
748		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
749		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
750	},
751	{
752		/*
753		 * Rekam Digital CAMERA
754		 * PR: usb/98713
755		 */
756		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*",
757		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
758	},
759	{
760		/*
761		 * iRiver H10 MP3 player
762		 * PR: usb/102547
763		 */
764		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*",
765		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
766	},
767	{
768		/*
769		 * iRiver U10 MP3 player
770		 * PR: usb/92306
771		 */
772		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*",
773		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
774	},
775	{
776		/*
777		 * X-Micro Flash Disk
778		 * PR: usb/96901
779		 */
780		{T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk",
781		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
782	},
783	{
784		/*
785		 * EasyMP3 EM732X USB 2.0 Flash MP3 Player
786		 * PR: usb/96546
787		 */
788		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*",
789		"1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
790	},
791	{
792		/*
793		 * Denver MP3 player
794		 * PR: usb/107101
795		 */
796		{T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER",
797		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
798	},
799	{
800		/*
801		 * Philips USB Key Audio KEY013
802		 * PR: usb/68412
803		 */
804		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
805		/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
806	},
807	{
808		/*
809		 * JNC MP3 Player
810		 * PR: usb/94439
811		 */
812		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*",
813		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
814	},
815	{
816		/*
817		 * SAMSUNG MP0402H
818		 * PR: usb/108427
819		 */
820		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"},
821		/*quirks*/ DA_Q_NO_SYNC_CACHE
822	},
823	{
824		/*
825		 * I/O Magic USB flash - Giga Bank
826		 * PR: usb/108810
827		 */
828		{T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"},
829		/*quirks*/ DA_Q_NO_SYNC_CACHE
830	},
831	{
832		/*
833		 * JoyFly 128mb USB Flash Drive
834		 * PR: 96133
835		 */
836		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*",
837		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
838	},
839	{
840		/*
841		 * ChipsBnk usb stick
842		 * PR: 103702
843		 */
844		{T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*",
845		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
846	},
847	{
848		/*
849		 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A
850		 * PR: 129858
851		 */
852		{T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*",
853		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
854	},
855	{
856		/*
857		 * Samsung YP-U3 mp3-player
858		 * PR: 125398
859		 */
860		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3",
861		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
862	},
863	{
864		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*",
865		 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
866	},
867	{
868		/*
869		 * Sony Cyber-Shot DSC cameras
870		 * PR: usb/137035
871		 */
872		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
873		/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
874	},
875	{
876		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3",
877		 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT
878	},
879	{
		/* At least several Transcend USB sticks lie on RC16. */
881		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*",
882		 "*"}, /*quirks*/ DA_Q_NO_RC16
883	},
884	{
885		/*
886		 * I-O Data USB Flash Disk
887		 * PR: usb/211716
888		 */
889		{T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*",
890		 "*"}, /*quirks*/ DA_Q_NO_RC16
891	},
892	{
893		/*
894		 * SLC CHIPFANCIER USB drives
895		 * PR: usb/234503 (RC10 right, RC16 wrong)
896		 * 16GB, 32GB and 128GB confirmed to have same issue
897		 */
898		{T_DIRECT, SIP_MEDIA_REMOVABLE, "*SLC", "CHIPFANCIER",
899		 "*"}, /*quirks*/ DA_Q_NO_RC16
900       },
901	/* ATA/SATA devices over SAS/USB/... */
902	{
903		/* Sandisk X400 */
904		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SanDisk SD8SB8U1*", "*" },
905		/*quirks*/DA_Q_128KB
906	},
907	{
908		/* Hitachi Advanced Format (4k) drives */
909		{ T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" },
910		/*quirks*/DA_Q_4K
911	},
912	{
913		/* Micron Advanced Format (4k) drives */
914		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" },
915		/*quirks*/DA_Q_4K
916	},
917	{
918		/* Samsung Advanced Format (4k) drives */
919		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" },
920		/*quirks*/DA_Q_4K
921	},
922	{
923		/* Samsung Advanced Format (4k) drives */
924		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" },
925		/*quirks*/DA_Q_4K
926	},
927	{
928		/* Samsung Advanced Format (4k) drives */
929		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" },
930		/*quirks*/DA_Q_4K
931	},
932	{
933		/* Samsung Advanced Format (4k) drives */
934		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" },
935		/*quirks*/DA_Q_4K
936	},
937	{
938		/* Seagate Barracuda Green Advanced Format (4k) drives */
939		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" },
940		/*quirks*/DA_Q_4K
941	},
942	{
943		/* Seagate Barracuda Green Advanced Format (4k) drives */
944		{ T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" },
945		/*quirks*/DA_Q_4K
946	},
947	{
948		/* Seagate Barracuda Green Advanced Format (4k) drives */
949		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" },
950		/*quirks*/DA_Q_4K
951	},
952	{
953		/* Seagate Barracuda Green Advanced Format (4k) drives */
954		{ T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" },
955		/*quirks*/DA_Q_4K
956	},
957	{
958		/* Seagate Barracuda Green Advanced Format (4k) drives */
959		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" },
960		/*quirks*/DA_Q_4K
961	},
962	{
963		/* Seagate Barracuda Green Advanced Format (4k) drives */
964		{ T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" },
965		/*quirks*/DA_Q_4K
966	},
967	{
968		/* Seagate Momentus Advanced Format (4k) drives */
969		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" },
970		/*quirks*/DA_Q_4K
971	},
972	{
973		/* Seagate Momentus Advanced Format (4k) drives */
974		{ T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" },
975		/*quirks*/DA_Q_4K
976	},
977	{
978		/* Seagate Momentus Advanced Format (4k) drives */
979		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" },
980		/*quirks*/DA_Q_4K
981	},
982	{
983		/* Seagate Momentus Advanced Format (4k) drives */
984		{ T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" },
985		/*quirks*/DA_Q_4K
986	},
987	{
988		/* Seagate Momentus Advanced Format (4k) drives */
989		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" },
990		/*quirks*/DA_Q_4K
991	},
992	{
993		/* Seagate Momentus Advanced Format (4k) drives */
994		{ T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" },
995		/*quirks*/DA_Q_4K
996	},
997	{
998		/* Seagate Momentus Advanced Format (4k) drives */
999		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" },
1000		/*quirks*/DA_Q_4K
1001	},
1002	{
1003		/* Seagate Momentus Advanced Format (4k) drives */
1004		{ T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" },
1005		/*quirks*/DA_Q_4K
1006	},
1007	{
1008		/* Seagate Momentus Advanced Format (4k) drives */
1009		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" },
1010		/*quirks*/DA_Q_4K
1011	},
1012	{
1013		/* Seagate Momentus Advanced Format (4k) drives */
1014		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" },
1015		/*quirks*/DA_Q_4K
1016	},
1017	{
1018		/* Seagate Momentus Advanced Format (4k) drives */
1019		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" },
1020		/*quirks*/DA_Q_4K
1021	},
1022	{
1023		/* Seagate Momentus Advanced Format (4k) drives */
1024		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" },
1025		/*quirks*/DA_Q_4K
1026	},
1027	{
1028		/* Seagate Momentus Advanced Format (4k) drives */
1029		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" },
1030		/*quirks*/DA_Q_4K
1031	},
1032	{
1033		/* Seagate Momentus Advanced Format (4k) drives */
1034		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" },
1035		/*quirks*/DA_Q_4K
1036	},
1037	{
1038		/* Seagate Momentus Thin Advanced Format (4k) drives */
1039		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" },
1040		/*quirks*/DA_Q_4K
1041	},
1042	{
1043		/* Seagate Momentus Thin Advanced Format (4k) drives */
1044		{ T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" },
1045		/*quirks*/DA_Q_4K
1046	},
1047	{
1048		/* WDC Caviar Green Advanced Format (4k) drives */
1049		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" },
1050		/*quirks*/DA_Q_4K
1051	},
1052	{
1053		/* WDC Caviar Green Advanced Format (4k) drives */
1054		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" },
1055		/*quirks*/DA_Q_4K
1056	},
1057	{
1058		/* WDC Caviar Green Advanced Format (4k) drives */
1059		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" },
1060		/*quirks*/DA_Q_4K
1061	},
1062	{
1063		/* WDC Caviar Green Advanced Format (4k) drives */
1064		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" },
1065		/*quirks*/DA_Q_4K
1066	},
1067	{
1068		/* WDC Caviar Green Advanced Format (4k) drives */
1069		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" },
1070		/*quirks*/DA_Q_4K
1071	},
1072	{
1073		/* WDC Caviar Green Advanced Format (4k) drives */
1074		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" },
1075		/*quirks*/DA_Q_4K
1076	},
1077	{
1078		/* WDC Caviar Green Advanced Format (4k) drives */
1079		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" },
1080		/*quirks*/DA_Q_4K
1081	},
1082	{
1083		/* WDC Caviar Green Advanced Format (4k) drives */
1084		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" },
1085		/*quirks*/DA_Q_4K
1086	},
1087	{
1088		/* WDC Scorpio Black Advanced Format (4k) drives */
1089		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" },
1090		/*quirks*/DA_Q_4K
1091	},
1092	{
1093		/* WDC Scorpio Black Advanced Format (4k) drives */
1094		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" },
1095		/*quirks*/DA_Q_4K
1096	},
1097	{
1098		/* WDC Scorpio Black Advanced Format (4k) drives */
1099		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" },
1100		/*quirks*/DA_Q_4K
1101	},
1102	{
1103		/* WDC Scorpio Black Advanced Format (4k) drives */
1104		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" },
1105		/*quirks*/DA_Q_4K
1106	},
1107	{
1108		/* WDC Scorpio Blue Advanced Format (4k) drives */
1109		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" },
1110		/*quirks*/DA_Q_4K
1111	},
1112	{
1113		/* WDC Scorpio Blue Advanced Format (4k) drives */
1114		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" },
1115		/*quirks*/DA_Q_4K
1116	},
1117	{
1118		/* WDC Scorpio Blue Advanced Format (4k) drives */
1119		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" },
1120		/*quirks*/DA_Q_4K
1121	},
1122	{
1123		/* WDC Scorpio Blue Advanced Format (4k) drives */
1124		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" },
1125		/*quirks*/DA_Q_4K
1126	},
1127	{
1128		/*
1129		 * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1)
1130		 * PR: usb/97472
1131		 */
1132		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C*", "*"},
1133		/*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE
1134	},
1135	{
1136		/*
1137		 * Olympus digital cameras (D-370)
1138		 * PR: usb/97472
1139		 */
1140		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "D*", "*"},
1141		/*quirks*/ DA_Q_NO_6_BYTE
1142	},
1143	{
1144		/*
1145		 * Olympus digital cameras (E-100RS, E-10).
1146		 * PR: usb/97472
1147		 */
1148		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "E*", "*"},
1149		/*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE
1150	},
1151	{
1152		/*
1153		 * Olympus FE-210 camera
1154		 */
1155		{T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*",
1156		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1157	},
1158	{
1159		/*
1160		* Pentax Digital Camera
1161		* PR: usb/93389
1162		*/
1163		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PENTAX", "DIGITAL CAMERA",
1164		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1165	},
1166	{
1167		/*
1168		 * LG UP3S MP3 player
1169		 */
1170		{T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S",
1171		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1172	},
1173	{
1174		/*
1175		 * Laser MP3-2GA13 MP3 player
1176		 */
1177		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk",
1178		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1179	},
1180	{
1181		/*
1182		 * LaCie external 250GB Hard drive des by Porsche
1183		 * Submitted by: Ben Stuyts <ben@altesco.nl>
1184		 * PR: 121474
1185		 */
1186		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"},
1187		/*quirks*/ DA_Q_NO_SYNC_CACHE
1188	},
1189	/* SATA SSDs */
1190	{
1191		/*
1192		 * Corsair Force 2 SSDs
1193		 * 4k optimised & trim only works in 4k requests + 4k aligned
1194		 */
1195		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" },
1196		/*quirks*/DA_Q_4K
1197	},
1198	{
1199		/*
1200		 * Corsair Force 3 SSDs
1201		 * 4k optimised & trim only works in 4k requests + 4k aligned
1202		 */
1203		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" },
1204		/*quirks*/DA_Q_4K
1205	},
1206        {
1207		/*
1208		 * Corsair Neutron GTX SSDs
1209		 * 4k optimised & trim only works in 4k requests + 4k aligned
1210		 */
1211		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
1212		/*quirks*/DA_Q_4K
1213	},
1214	{
1215		/*
1216		 * Corsair Force GT & GS SSDs
1217		 * 4k optimised & trim only works in 4k requests + 4k aligned
1218		 */
1219		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" },
1220		/*quirks*/DA_Q_4K
1221	},
1222	{
1223		/*
1224		 * Crucial M4 SSDs
1225		 * 4k optimised & trim only works in 4k requests + 4k aligned
1226		 */
1227		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" },
1228		/*quirks*/DA_Q_4K
1229	},
1230	{
1231		/*
1232		 * Crucial RealSSD C300 SSDs
1233		 * 4k optimised
1234		 */
1235		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*",
1236		"*" }, /*quirks*/DA_Q_4K
1237	},
1238	{
1239		/*
1240		 * Intel 320 Series SSDs
1241		 * 4k optimised & trim only works in 4k requests + 4k aligned
1242		 */
1243		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" },
1244		/*quirks*/DA_Q_4K
1245	},
1246	{
1247		/*
1248		 * Intel 330 Series SSDs
1249		 * 4k optimised & trim only works in 4k requests + 4k aligned
1250		 */
1251		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" },
1252		/*quirks*/DA_Q_4K
1253	},
1254	{
1255		/*
1256		 * Intel 510 Series SSDs
1257		 * 4k optimised & trim only works in 4k requests + 4k aligned
1258		 */
1259		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" },
1260		/*quirks*/DA_Q_4K
1261	},
1262	{
1263		/*
1264		 * Intel 520 Series SSDs
1265		 * 4k optimised & trim only works in 4k requests + 4k aligned
1266		 */
1267		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" },
1268		/*quirks*/DA_Q_4K
1269	},
1270	{
1271		/*
1272		 * Intel S3610 Series SSDs
1273		 * 4k optimised & trim only works in 4k requests + 4k aligned
1274		 */
1275		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" },
1276		/*quirks*/DA_Q_4K
1277	},
1278	{
1279		/*
1280		 * Intel X25-M Series SSDs
1281		 * 4k optimised & trim only works in 4k requests + 4k aligned
1282		 */
1283		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" },
1284		/*quirks*/DA_Q_4K
1285	},
1286	{
1287		/*
1288		 * Kingston E100 Series SSDs
1289		 * 4k optimised & trim only works in 4k requests + 4k aligned
1290		 */
1291		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" },
1292		/*quirks*/DA_Q_4K
1293	},
1294	{
1295		/*
1296		 * Kingston HyperX 3k SSDs
1297		 * 4k optimised & trim only works in 4k requests + 4k aligned
1298		 */
1299		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" },
1300		/*quirks*/DA_Q_4K
1301	},
1302	{
1303		/*
1304		 * Marvell SSDs (entry taken from OpenSolaris)
1305		 * 4k optimised & trim only works in 4k requests + 4k aligned
1306		 */
1307		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" },
1308		/*quirks*/DA_Q_4K
1309	},
1310	{
1311		/*
1312		 * OCZ Agility 2 SSDs
1313		 * 4k optimised & trim only works in 4k requests + 4k aligned
1314		 */
1315		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
1316		/*quirks*/DA_Q_4K
1317	},
1318	{
1319		/*
1320		 * OCZ Agility 3 SSDs
1321		 * 4k optimised & trim only works in 4k requests + 4k aligned
1322		 */
1323		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" },
1324		/*quirks*/DA_Q_4K
1325	},
1326	{
1327		/*
1328		 * OCZ Deneva R Series SSDs
1329		 * 4k optimised & trim only works in 4k requests + 4k aligned
1330		 */
1331		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" },
1332		/*quirks*/DA_Q_4K
1333	},
1334	{
1335		/*
1336		 * OCZ Vertex 2 SSDs (inc pro series)
1337		 * 4k optimised & trim only works in 4k requests + 4k aligned
1338		 */
1339		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" },
1340		/*quirks*/DA_Q_4K
1341	},
1342	{
1343		/*
1344		 * OCZ Vertex 3 SSDs
1345		 * 4k optimised & trim only works in 4k requests + 4k aligned
1346		 */
1347		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" },
1348		/*quirks*/DA_Q_4K
1349	},
1350	{
1351		/*
1352		 * OCZ Vertex 4 SSDs
1353		 * 4k optimised & trim only works in 4k requests + 4k aligned
1354		 */
1355		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" },
1356		/*quirks*/DA_Q_4K
1357	},
1358	{
1359		/*
1360		 * Samsung 750 Series SSDs
1361		 * 4k optimised & trim only works in 4k requests + 4k aligned
1362		 */
1363		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" },
1364		/*quirks*/DA_Q_4K
1365	},
1366	{
1367		/*
1368		 * Samsung 830 Series SSDs
1369		 * 4k optimised & trim only works in 4k requests + 4k aligned
1370		 */
1371		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" },
1372		/*quirks*/DA_Q_4K
1373	},
1374	{
1375		/*
1376		 * Samsung 840 SSDs
1377		 * 4k optimised & trim only works in 4k requests + 4k aligned
1378		 */
1379		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" },
1380		/*quirks*/DA_Q_4K
1381	},
1382	{
1383		/*
1384		 * Samsung 845 SSDs
1385		 * 4k optimised & trim only works in 4k requests + 4k aligned
1386		 */
1387		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" },
1388		/*quirks*/DA_Q_4K
1389	},
1390	{
1391		/*
1392		 * Samsung 850 SSDs
1393		 * 4k optimised & trim only works in 4k requests + 4k aligned
1394		 */
1395		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" },
1396		/*quirks*/DA_Q_4K
1397	},
1398	{
1399		/*
1400		 * Samsung 860 SSDs
1401		 * 4k optimised & trim only works in 4k requests + 4k aligned
1402		 */
1403		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 860*", "*" },
1404		/*quirks*/DA_Q_4K
1405	},
1406	{
1407		/*
1408		 * Samsung 870 SSDs
1409		 * 4k optimised & trim only works in 4k requests + 4k aligned
1410		 */
1411		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 870*", "*" },
1412		/*quirks*/DA_Q_4K
1413	},
1414	{
1415		/*
1416		 * Samsung 843T Series SSDs (MZ7WD*)
1417		 * Samsung PM851 Series SSDs (MZ7TE*)
1418		 * Samsung PM853T Series SSDs (MZ7GE*)
1419		 * Samsung SM863 Series SSDs (MZ7KM*)
1420		 * 4k optimised
1421		 */
1422		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" },
1423		/*quirks*/DA_Q_4K
1424	},
1425	{
1426		/*
1427		 * Same as for SAMSUNG MZ7* but enable the quirks for SSD
1428		 * starting with MZ7* too
1429		 */
1430		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" },
1431		/*quirks*/DA_Q_4K
1432	},
1433	{
1434		/*
1435                 * Same as above but enable the quirks for SSD SAMSUNG MZ7*
1436                 * connected via SATA-to-SAS interposer and because of this
1437                 * starting without "ATA"
1438		 */
1439		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MZ7*", "*" },
1440		/*quirks*/DA_Q_4K
1441	},
1442	{
1443		/*
1444		 * SuperTalent TeraDrive CT SSDs
1445		 * 4k optimised & trim only works in 4k requests + 4k aligned
1446		 */
1447		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" },
1448		/*quirks*/DA_Q_4K
1449	},
1450	{
1451		/*
1452		 * XceedIOPS SATA SSDs
1453		 * 4k optimised
1454		 */
1455		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" },
1456		/*quirks*/DA_Q_4K
1457	},
1458	{
1459		/*
1460		 * Hama Innostor USB-Stick
1461		 */
1462		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" },
1463		/*quirks*/DA_Q_NO_RC16
1464	},
1465	{
1466		/*
1467		 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
1468		 * Drive Managed SATA hard drive.  This drive doesn't report
1469		 * in firmware that it is a drive managed SMR drive.
1470		 */
1471		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" },
1472		/*quirks*/DA_Q_SMR_DM
1473	},
1474	{
1475		/*
1476		 * MX-ES USB Drive by Mach Xtreme
1477		 */
1478		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
1479		/*quirks*/DA_Q_NO_RC16
1480	},
1481};
1482
/* GEOM disk methods and periph-driver lifecycle hooks. */
static	disk_strategy_t	dastrategy;
static	dumper_t	dadump;
static	periph_init_t	dainit;
/* Async event callback; registered in dainit() for AC_FOUND_DEVICE. */
static	void		daasync(void *callback_arg, uint32_t code,
				struct cam_path *path, void *arg);
/* sysctl tree setup and handlers for the per-unit kern.cam.da.* nodes. */
static	void		dasysctlinit(void *context, int pending);
static	int		dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
static	int		dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
static	int		dabitsysctl(SYSCTL_HANDLER_ARGS);
static	int		daflagssysctl(SYSCTL_HANDLER_ARGS);
static	int		dazonemodesysctl(SYSCTL_HANDLER_ARGS);
static	int		dazonesupsysctl(SYSCTL_HANDLER_ARGS);
static	int		dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
/* Delete-method (BIO_DELETE) selection and sizing helpers. */
static	void		dadeletemethodset(struct da_softc *softc,
					  da_delete_methods delete_method);
static	off_t		dadeletemaxsize(struct da_softc *softc,
					da_delete_methods delete_method);
static	void		dadeletemethodchoose(struct da_softc *softc,
					     da_delete_methods default_method);
static	void		daprobedone(struct cam_periph *periph, union ccb *ccb);

static	periph_ctor_t	daregister;
static	periph_dtor_t	dacleanup;
static	periph_start_t	dastart;
static	periph_oninv_t	daoninvalidate;
static	void		dazonedone(struct cam_periph *periph, union ccb *ccb);
/*
 * CCB completion handlers.  The dadone_probe* functions are the steps of
 * the multi-stage device probe state machine.
 */
static	void		dadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static void		dadone_probewp(struct cam_periph *periph,
				       union ccb *done_ccb);
static void		dadone_proberc(struct cam_periph *periph,
				       union ccb *done_ccb);
static void		dadone_probelbp(struct cam_periph *periph,
					union ccb *done_ccb);
static void		dadone_probeblklimits(struct cam_periph *periph,
					      union ccb *done_ccb);
static void		dadone_probebdc(struct cam_periph *periph,
					union ccb *done_ccb);
static void		dadone_probeata(struct cam_periph *periph,
					union ccb *done_ccb);
static void		dadone_probeatalogdir(struct cam_periph *periph,
					      union ccb *done_ccb);
static void		dadone_probeataiddir(struct cam_periph *periph,
					     union ccb *done_ccb);
static void		dadone_probeatasup(struct cam_periph *periph,
					   union ccb *done_ccb);
static void		dadone_probeatazone(struct cam_periph *periph,
					    union ccb *done_ccb);
static void		dadone_probezone(struct cam_periph *periph,
					 union ccb *done_ccb);
static void		dadone_tur(struct cam_periph *periph,
				   union ccb *done_ccb);
/* Error recovery, reprobe, and geometry bookkeeping helpers. */
static  int		daerror(union ccb *ccb, uint32_t cam_flags,
				uint32_t sense_flags);
static void		daprevent(struct cam_periph *periph, int action);
static void		dareprobe(struct cam_periph *periph);
static void		dasetgeom(struct cam_periph *periph, uint32_t block_len,
				  uint64_t maxsector,
				  struct scsi_read_capacity_data_long *rcaplong,
				  size_t rcap_size);
static callout_func_t	dasendorderedtag;
static void		dashutdown(void *arg, int howto);
static callout_func_t	damediapoll;
1547
/*
 * Build-time-overridable defaults for the runtime tunables below.
 */
#ifndef	DA_DEFAULT_POLL_PERIOD
#define	DA_DEFAULT_POLL_PERIOD	3
#endif

#ifndef DA_DEFAULT_TIMEOUT
#define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
#endif

#ifndef DA_DEFAULT_SOFTTIMEOUT
#define DA_DEFAULT_SOFTTIMEOUT	0
#endif

#ifndef	DA_DEFAULT_RETRY
#define	DA_DEFAULT_RETRY	4
#endif

#ifndef	DA_DEFAULT_SEND_ORDERED
#define	DA_DEFAULT_SEND_ORDERED	1
#endif

/* Runtime knobs, exported through the kern.cam.da.* sysctls below. */
static int da_poll_period = DA_DEFAULT_POLL_PERIOD;
static int da_retry_count = DA_DEFAULT_RETRY;
static int da_default_timeout = DA_DEFAULT_TIMEOUT;
static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT;
static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
static int da_disable_wp_detection = 0;
static int da_enable_biospeedup = 1;
static int da_enable_uma_ccbs = 1;

static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
           &da_poll_period, 0, "Media polling period in seconds");
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
           &da_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
           &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
           &da_send_ordered, 0, "Send Ordered Tags");
SYSCTL_INT(_kern_cam_da, OID_AUTO, disable_wp_detection, CTLFLAG_RWTUN,
           &da_disable_wp_detection, 0,
	   "Disable detection of write-protected disks");
SYSCTL_INT(_kern_cam_da, OID_AUTO, enable_biospeedup, CTLFLAG_RDTUN,
	    &da_enable_biospeedup, 0, "Enable BIO_SPEEDUP processing");
SYSCTL_INT(_kern_cam_da, OID_AUTO, enable_uma_ccbs, CTLFLAG_RWTUN,
	    &da_enable_uma_ccbs, 0, "Use UMA for CCBs");

/*
 * The soft timeout is an sbintime_t, so it needs a proc handler (with a
 * matching TUNABLE_INT64 for loader-time setting) rather than SYSCTL_INT.
 */
SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    dasysctlsofttimeout, "I",
    "Soft I/O timeout (ms)");
TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout);

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * meets the requirement "don't send an ordered tag" test so it takes
 * us two intervals to determine that a tag must be sent.
 */
#ifndef DA_ORDEREDTAG_INTERVAL
#define DA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(da, dadriver);

static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
1626
1627/*
1628 * This driver takes out references / holds in well defined pairs, never
1629 * recursively. These macros / inline functions enforce those rules. They
1630 * are only enabled with DA_TRACK_REFS or INVARIANTS. If DA_TRACK_REFS is
1631 * defined to be 2 or larger, the tracking also includes debug printfs.
1632 */
1633#if defined(DA_TRACK_REFS) || defined(INVARIANTS)
1634
1635#ifndef DA_TRACK_REFS
1636#define DA_TRACK_REFS 1
1637#endif
1638
1639#if DA_TRACK_REFS > 1
1640static const char *da_ref_text[] = {
1641	"bogus",
1642	"open",
1643	"open hold",
1644	"close hold",
1645	"reprobe hold",
1646	"Test Unit Ready",
1647	"Geom",
1648	"sysctl",
1649	"reprobe",
1650	"max -- also bogus"
1651};
1652
1653#define DA_PERIPH_PRINT(periph, msg, args...)		\
1654	CAM_PERIPH_PRINT(periph, msg, ##args)
1655#else
1656#define DA_PERIPH_PRINT(periph, msg, args...)
1657#endif
1658
1659static inline void
1660token_sanity(da_ref_token token)
1661{
1662	if ((unsigned)token >= DA_REF_MAX)
1663		panic("Bad token value passed in %d\n", token);
1664}
1665
1666static inline int
1667da_periph_hold(struct cam_periph *periph, int priority, da_ref_token token)
1668{
1669	int err = cam_periph_hold(periph, priority);
1670
1671	token_sanity(token);
1672	DA_PERIPH_PRINT(periph, "Holding device %s (%d): %d\n",
1673	    da_ref_text[token], token, err);
1674	if (err == 0) {
1675		int cnt;
1676		struct da_softc *softc = periph->softc;
1677
1678		cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
1679		if (cnt != 0)
1680			panic("Re-holding for reason %d, cnt = %d", token, cnt);
1681	}
1682	return (err);
1683}
1684
1685static inline void
1686da_periph_unhold(struct cam_periph *periph, da_ref_token token)
1687{
1688	int cnt;
1689	struct da_softc *softc = periph->softc;
1690
1691	token_sanity(token);
1692	DA_PERIPH_PRINT(periph, "Unholding device %s (%d)\n",
1693	    da_ref_text[token], token);
1694	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
1695	if (cnt != 1)
1696		panic("Unholding %d with cnt = %d", token, cnt);
1697	cam_periph_unhold(periph);
1698}
1699
1700static inline int
1701da_periph_acquire(struct cam_periph *periph, da_ref_token token)
1702{
1703	int err = cam_periph_acquire(periph);
1704
1705	token_sanity(token);
1706	DA_PERIPH_PRINT(periph, "acquiring device %s (%d): %d\n",
1707	    da_ref_text[token], token, err);
1708	if (err == 0) {
1709		int cnt;
1710		struct da_softc *softc = periph->softc;
1711
1712		cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
1713		if (cnt != 0)
1714			panic("Re-refing for reason %d, cnt = %d", token, cnt);
1715	}
1716	return (err);
1717}
1718
1719static inline void
1720da_periph_release(struct cam_periph *periph, da_ref_token token)
1721{
1722	int cnt;
1723	struct da_softc *softc = periph->softc;
1724
1725	token_sanity(token);
1726	DA_PERIPH_PRINT(periph, "releasing device %s (%d)\n",
1727	    da_ref_text[token], token);
1728	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
1729	if (cnt != 1)
1730		panic("Releasing %d with cnt = %d", token, cnt);
1731	cam_periph_release(periph);
1732}
1733
1734static inline void
1735da_periph_release_locked(struct cam_periph *periph, da_ref_token token)
1736{
1737	int cnt;
1738	struct da_softc *softc = periph->softc;
1739
1740	token_sanity(token);
1741	DA_PERIPH_PRINT(periph, "releasing device (locked) %s (%d)\n",
1742	    da_ref_text[token], token);
1743	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
1744	if (cnt != 1)
1745		panic("releasing (locked) %d with cnt = %d", token, cnt);
1746	cam_periph_release_locked(periph);
1747}
1748
/*
 * Poison the raw cam_periph_*() names so any direct use later in this
 * file fails to compile; the tracked da_periph_*() wrappers above must
 * be used instead.
 */
#define cam_periph_hold POISON
#define cam_periph_unhold POISON
#define cam_periph_acquire POISON
#define cam_periph_release POISON
#define cam_periph_release_locked POISON

#else
/* No reference tracking: the wrappers collapse to the raw CAM calls. */
#define	da_periph_hold(periph, prio, token)	cam_periph_hold((periph), (prio))
#define da_periph_unhold(periph, token)		cam_periph_unhold((periph))
#define da_periph_acquire(periph, token)	cam_periph_acquire((periph))
#define da_periph_release(periph, token)	cam_periph_release((periph))
#define da_periph_release_locked(periph, token)	cam_periph_release_locked((periph))
#endif
1762
/*
 * GEOM open method.  Takes a DA_REF_OPEN reference on the periph that
 * persists until daclose() (or is dropped below on error), reprobes the
 * device to refresh capacity data, and prevents medium removal where the
 * device supports it.  Returns 0 on success or an errno value.
 */
static int
daopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (da_periph_acquire(periph, DA_REF_OPEN) != 0) {
		return (ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD)) != 0) {
		cam_periph_unlock(periph);
		da_periph_release(periph, DA_REF_OPEN);
		return (error);
	}

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daopen\n"));

	softc = (struct da_softc *)periph->softc;
	dareprobe(periph);

	/* Wait for the disk size update.  */
	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
	    "dareprobe", 0);
	if (error != 0)
		xpt_print(periph->path, "unable to retrieve capacity data\n");

	/* The periph may have been invalidated while we slept. */
	if (periph->flags & CAM_PERIPH_INVALID)
		error = ENXIO;

	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
		daprevent(periph, PR_PREVENT);

	/* Only mark the pack valid/open once everything above succeeded. */
	if (error == 0) {
		softc->flags &= ~DA_FLAG_PACK_INVALID;
		softc->flags |= DA_FLAG_OPEN;
	}

	da_periph_unhold(periph, DA_REF_OPEN_HOLD);
	cam_periph_unlock(periph);

	/* On failure daclose() will never run; drop the open ref now. */
	if (error != 0)
		da_periph_release(periph, DA_REF_OPEN);

	return (error);
}
1814
/*
 * GEOM close method.  Best-effort flush of a dirty write cache, re-allow
 * medium removal, wait for softc->refcount to drain, and drop the
 * DA_REF_OPEN reference taken in daopen().  Always returns 0.
 */
static int
daclose(struct disk *dp)
{
	struct	cam_periph *periph;
	struct	da_softc *softc;
	union	ccb *ccb;

	periph = (struct cam_periph *)dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	cam_periph_lock(periph);
	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
	    ("daclose\n"));

	/*
	 * If the hold fails we skip the cache flush and prevent/allow
	 * handling but still tear down the open state below.
	 */
	if (da_periph_hold(periph, PRIBIO, DA_REF_CLOSE_HOLD) == 0) {
		/* Flush disk cache. */
		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
			    /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG,
			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
			    5 * 60 * 1000);
			cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
			    softc->disk->d_devstat);
			/* Cleared even on error: the flush is best-effort. */
			softc->flags &= ~DA_FLAG_DIRTY;
			xpt_release_ccb(ccb);
		}

		/* Allow medium removal. */
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
			daprevent(periph, PR_ALLOW);

		da_periph_unhold(periph, DA_REF_CLOSE_HOLD);
	}

	/*
	 * If we've got removable media, mark the blocksize as
	 * unavailable, since it could change when new media is
	 * inserted.
	 */
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;

	/* Wait for softc->refcount to drain before releasing the periph. */
	softc->flags &= ~DA_FLAG_OPEN;
	while (softc->refcount != 0)
		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
	cam_periph_unlock(periph);
	da_periph_release(periph, DA_REF_OPEN);
	return (0);
}
1868
1869static void
1870daschedule(struct cam_periph *periph)
1871{
1872	struct da_softc *softc = (struct da_softc *)periph->softc;
1873
1874	if (softc->state != DA_STATE_NORMAL)
1875		return;
1876
1877	cam_iosched_schedule(softc->cam_iosched, periph);
1878}
1879
1880/*
1881 * Actually translate the requested transfer into one the physical driver
1882 * can understand.  The transfer is described by a buf and will include
1883 * only one physical transfer.
1884 */
1885static void
1886dastrategy(struct bio *bp)
1887{
1888	struct cam_periph *periph;
1889	struct da_softc *softc;
1890
1891	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1892	softc = (struct da_softc *)periph->softc;
1893
1894	cam_periph_lock(periph);
1895
1896	/*
1897	 * If the device has been made invalid, error out
1898	 */
1899	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
1900		cam_periph_unlock(periph);
1901		biofinish(bp, NULL, ENXIO);
1902		return;
1903	}
1904
1905	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));
1906
1907	/*
1908	 * Zone commands must be ordered, because they can depend on the
1909	 * effects of previously issued commands, and they may affect
1910	 * commands after them.
1911	 */
1912	if (bp->bio_cmd == BIO_ZONE)
1913		bp->bio_flags |= BIO_ORDERED;
1914
1915	/*
1916	 * Place it in the queue of disk activities for this disk
1917	 */
1918	cam_iosched_queue_work(softc->cam_iosched, bp);
1919
1920	/*
1921	 * Schedule ourselves for performing the work.
1922	 */
1923	daschedule(periph);
1924	cam_periph_unlock(periph);
1925
1926	return;
1927}
1928
/*
 * Kernel crash-dump method.  Runs in the dump path, so no locks are
 * taken, the CCB is built on the stack, and commands run synchronously
 * with no recovery or retries.  A non-zero length writes the given
 * buffer at the given media offset; a zero length is the end-of-dump
 * signal and synchronizes the drive cache instead.
 */
static int
dadump(void *arg, void *virtual, off_t offset, size_t length)
{
	struct	    cam_periph *periph;
	struct	    da_softc *softc;
	u_int	    secsize;
	struct	    ccb_scsiio csio;
	struct	    disk *dp;
	int	    error = 0;

	dp = arg;
	periph = dp->d_drv1;
	softc = (struct da_softc *)periph->softc;
	secsize = softc->params.secsize;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
		return (ENXIO);

	memset(&csio, 0, sizeof(csio));
	if (length > 0) {
		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/0,
				/*cbfcnp*/NULL,
				MSG_ORDERED_Q_TAG,
				/*read*/SCSI_RW_WRITE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				offset / secsize,
				length / secsize,
				/*data_ptr*/(uint8_t *) virtual,
				/*dxfer_len*/length,
				/*sense_len*/SSD_FULL_SIZE,
				da_default_timeout * 1000);
		error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		if (error != 0)
			printf("Aborting dump due to I/O error.\n");
		return (error);
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/0,
				       /*cbfcnp*/NULL,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 1000);
		error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
		if (error != 0)
			xpt_print(periph->path, "Synchronize cache failed\n");
	}
	return (error);
}
1992
1993static int
1994dagetattr(struct bio *bp)
1995{
1996	int ret;
1997	struct cam_periph *periph;
1998
1999	if (g_handleattr_int(bp, "GEOM::canspeedup", da_enable_biospeedup))
2000		return (EJUSTRETURN);
2001
2002	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
2003	cam_periph_lock(periph);
2004	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
2005	    periph->path);
2006	cam_periph_unlock(periph);
2007	if (ret == 0)
2008		bp->bio_completed = bp->bio_length;
2009	return ret;
2010}
2011
/*
 * Driver-wide initialization, run once at periph-driver registration.
 * Creates the UMA zone used for CCB allocation, registers the global
 * AC_FOUND_DEVICE async callback that instantiates da periphs, and, if
 * ordered tags are enabled, registers dashutdown() for shutdown_post_sync.
 */
static void
dainit(void)
{
	cam_status status;

	da_ccb_zone = uma_zcreate("da_ccb",
	    sizeof(struct ccb_scsiio), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("da: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (da_send_ordered) {
		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
		    printf("dainit: shutdown event registration failed!\n");
	}
}
2037
2038/*
2039 * Callback from GEOM, called when it has finished cleaning up its
2040 * resources.
2041 */
2042static void
2043dadiskgonecb(struct disk *dp)
2044{
2045	struct cam_periph *periph;
2046
2047	periph = (struct cam_periph *)dp->d_drv1;
2048	da_periph_release(periph, DA_REF_GEOM);
2049}
2050
/*
 * Periph invalidation hook, called with the periph lock held when the
 * device is going away.  Stops new work: deregisters our async
 * callbacks, marks the pack invalid, fails all queued bios with ENXIO,
 * and asks GEOM to tear down the disk (dadiskgonecb() runs when done).
 */
static void
daoninvalidate(struct cam_periph *periph)
{
	struct da_softc *softc;

	cam_periph_assert(periph, MA_OWNED);
	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, daasync, periph, periph->path);

	softc->flags |= DA_FLAG_PACK_INVALID;
#ifdef CAM_IO_STATS
	softc->invalidations++;
#endif

	/*
	 * Return all queued I/O with ENXIO. Transactions may be queued up here
	 * for retry (since we are called while there's other transactions
	 * pending). Any requests in the hardware will drain before dacleanup
	 * is called.
	 */
	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);

	/*
	 * Tell GEOM that we've gone away, we'll get a callback when it is
	 * done cleaning up its resources.
	 */
	disk_gone(softc->disk);
}
2083
/*
 * Final periph destructor.  Entered with the periph lock held; the lock
 * is dropped while the sleep-capable teardown (sysctl trees, callouts,
 * GEOM disk, softc) runs and re-taken before returning to the caller.
 */
static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_unlock(periph);

	cam_iosched_fini(softc->cam_iosched);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
#ifdef CAM_IO_STATS
		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl stats context\n");
#endif
		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
			xpt_print(periph->path,
			    "can't remove sysctl context\n");
	}

	callout_drain(&softc->mediapoll_c);
	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}
2115
/*
 * Asynchronous CAM event handler for the da driver.  Registered globally
 * for AC_FOUND_DEVICE (via cam_periph_alloc) and per-path for the other
 * event classes in daregister().  Dispatches on the event code and, for
 * all cases except AC_FOUND_DEVICE, forwards the event to
 * cam_periph_async() at the end.  Locking differs per event; see the
 * per-case comments.
 */
static void
daasync(void *callback_arg, uint32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:	/* callback to create periph, no locking yet */
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		/* Only attach to connected SCSI direct-access-style LUNs. */
		if (cgd->protocol != PROTO_SCSI)
			break;
		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
			break;
		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		    && SID_TYPE(&cgd->inq_data) != T_RBC
		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL
		    && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		/* Return (not break): there is no periph to forward this to. */
		return;
	}
	case AC_ADVINFO_CHANGED:	/* Doesn't touch periph */
	{
		uintptr_t buftype;

		buftype = (uintptr_t)arg;
		if (buftype == CDAI_TYPE_PHYS_PATH) {
			struct da_softc *softc;

			/* Tell GEOM the physical path attribute changed. */
			softc = periph->softc;
			disk_attr_changed(softc->disk, "GEOM::physpath",
					  M_NOWAIT);
		}
		break;
	}
	case AC_UNIT_ATTENTION:		/* Called for this path: periph locked */
	{
		union ccb *ccb;
		int error_code, sense_key, asc, ascq;

		softc = (struct da_softc *)periph->softc;
		ccb = (union ccb *)arg;

		/*
		 * Handle all UNIT ATTENTIONs except our own, as they will be
		 * handled by daerror().
		 */
		if (xpt_path_periph(ccb->ccb_h.path) != periph &&
		    scsi_extract_sense_ccb(ccb,
		     &error_code, &sense_key, &asc, &ascq)) {
			if (asc == 0x2A && ascq == 0x09) {
				/* ASC/ASCQ 2A/09: capacity data has changed. */
				xpt_print(ccb->ccb_h.path,
				    "Capacity data has changed\n");
				cam_periph_assert(periph, MA_OWNED);
				softc->flags &= ~DA_FLAG_PROBED;
				dareprobe(periph);
			} else if (asc == 0x28 && ascq == 0x00) {
				/*
				 * ASC/ASCQ 28/00: presumably the medium may
				 * have changed; notify GEOM.
				 */
				cam_periph_assert(periph, MA_OWNED);
				softc->flags &= ~DA_FLAG_PROBED;
				disk_media_changed(softc->disk, M_NOWAIT);
			} else if (asc == 0x3F && ascq == 0x03) {
				/* ASC/ASCQ 3F/03: INQUIRY data has changed. */
				xpt_print(ccb->ccb_h.path,
				    "INQUIRY data has changed\n");
				cam_periph_assert(periph, MA_OWNED);
				softc->flags &= ~DA_FLAG_PROBED;
				dareprobe(periph);
			}
		}
		break;
	}
	case AC_SCSI_AEN:		/* Called for this path: periph locked */
		/*
		 * Appears to be currently unused for SCSI devices, only ata SIMs
		 * generate this.
		 */
		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		/* Queue a TEST UNIT READY unless one is already outstanding. */
		if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
		    (softc->flags & DA_FLAG_TUR_PENDING) == 0) {
			if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
				cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
				daschedule(periph);
			}
		}
		/* FALLTHROUGH */
	case AC_SENT_BDR:		/* Called for this path: periph locked */
	case AC_BUS_RESET:		/* Called for this path: periph locked */
	{
		struct ccb_hdr *ccbh;

		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		break;
	}
	case AC_INQ_CHANGED:		/* Called for this path: periph locked */
		cam_periph_assert(periph, MA_OWNED);
		softc = (struct da_softc *)periph->softc;
		/* Force a full reprobe to pick up the new INQUIRY data. */
		softc->flags &= ~DA_FLAG_PROBED;
		dareprobe(periph);
		break;
	default:
		break;
	}
	/* Let the common periph layer see the event as well. */
	cam_periph_async(periph, code, path, arg);
}
2252
/*
 * Taskqueue handler that builds the per-unit sysctl tree
 * (kern.cam.da.<unit>.*).  A DA_REF_SYSCTL reference was taken on the
 * periph when this task was enqueued; every exit path below releases it.
 * Runs without the periph lock held; the lock is taken briefly where the
 * softc flags or the XPT are touched.
 */
static void
dasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	char tmpstr[32], tmpstr2[16];
	struct ccb_trans_settings cts;

	periph = (struct cam_periph *)context;
	/*
	 * periph was held for us when this task was enqueued
	 */
	if (periph->flags & CAM_PERIPH_INVALID) {
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}

	softc = (struct da_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	cam_periph_lock(periph);
	/* Record that the context exists so dacleanup() knows to free it. */
	softc->flags |= DA_FLAG_SCTX_INIT;
	cam_periph_unlock(periph);
	softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
		CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr, "device_index");
	if (softc->sysctl_tree == NULL) {
		printf("dasysctlinit: unable to allocate sysctl tree\n");
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}

	/*
	 * Now register the sysctl handler, so the user can change the value on
	 * the fly.
	 */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_method",
		CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
		softc, 0, dadeletemethodsysctl, "A",
		"BIO_DELETE execution method");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "delete_max",
		CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
		softc, 0, dadeletemaxsysctl, "Q",
		"Maximum BIO_DELETE size");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "minimum_cmd_size",
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
		"Minimum CDB size");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_count", CTLFLAG_RD, &softc->trim_count,
		"Total number of unmap/dsm commands sent");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_ranges", CTLFLAG_RD, &softc->trim_ranges,
		"Total number of ranges in unmap/dsm commands");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"trim_lbas", CTLFLAG_RD, &softc->trim_lbas,
		"Total lbas in the unmap/dsm commands sent");

	/* Zoned (SMR) device information. */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_mode",
		CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
		softc, 0, dazonemodesysctl, "A",
		"Zone Mode");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "zone_support",
		CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
		softc, 0, dazonesupsysctl, "A",
		"Zone Support");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
		"Optimal Number of Open Sequential Write Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"optimal_nonseq_zones", CTLFLAG_RD,
		&softc->optimal_nonseq_zones,
		"Optimal Number of Non-Sequentially Written Sequential Write "
		"Preferred Zones");
	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
		"max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
		"Maximum Number of Open Sequential Write Required Zones");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "error_inject",
		       CTLFLAG_RW,
		       &softc->error_inject,
		       0,
		       "error_inject leaf");

	SYSCTL_ADD_INT(&softc->sysctl_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_tree),
		       OID_AUTO,
		       "p_type",
		       CTLFLAG_RD,
		       &softc->p_type,
		       0,
		       "DIF protection type");

	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "flags", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    softc, 0, daflagssysctl, "A",
	    "Flags for drive");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "rotating", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &softc->flags, (u_int)DA_FLAG_ROTATING, dabitsysctl, "I",
	    "Rotating media *DEPRECATED* gone in FreeBSD 15");
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
	    OID_AUTO, "unmapped_io", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
	    &softc->flags, (u_int)DA_FLAG_UNMAPPEDIO, dabitsysctl, "I",
	    "Unmapped I/O support *DEPRECATED* gone in FreeBSD 15");

#ifdef CAM_TEST_FAILURE
	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
		periph, 0, cam_periph_invalidate_sysctl, "I",
		"Write 1 to invalidate the drive immediately");
#endif

	/*
	 * Add some addressing info.
	 */
	memset(&cts, 0, sizeof (cts));
	xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
	cts.type = CTS_TYPE_CURRENT_SETTINGS;
	cam_periph_lock(periph);
	xpt_action((union ccb *)&cts);
	cam_periph_unlock(periph);
	if (cts.ccb_h.status != CAM_REQ_CMP) {
		da_periph_release(periph, DA_REF_SYSCTL);
		return;
	}
	/* For Fibre Channel transports, expose the World Wide Port Name. */
	if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
		struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
		if (fc->valid & CTS_FC_VALID_WWPN) {
			softc->wwpn = fc->wwpn;
			SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
			    SYSCTL_CHILDREN(softc->sysctl_tree),
			    OID_AUTO, "wwpn", CTLFLAG_RD,
			    &softc->wwpn, "World Wide Port Name");
		}
	}

#ifdef CAM_IO_STATS
	/*
	 * Now add some useful stats.
	 * XXX These should live in cam_periph and be common to all periphs
	 */
	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Statistics");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "errors",
		       CTLFLAG_RD,
		       &softc->errors,
		       0,
		       "Transport errors reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "timeouts",
		       CTLFLAG_RD,
		       &softc->timeouts,
		       0,
		       "Device timeouts reported by the SIM");
	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
		       OID_AUTO,
		       "pack_invalidations",
		       CTLFLAG_RD,
		       &softc->invalidations,
		       0,
		       "Device pack invalidations");
#endif

	/* Let the I/O scheduler add its own knobs under our tree. */
	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
	    softc->sysctl_tree);

	da_periph_release(periph, DA_REF_SYSCTL);
}
2446
2447static int
2448dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
2449{
2450	int error;
2451	uint64_t value;
2452	struct da_softc *softc;
2453
2454	softc = (struct da_softc *)arg1;
2455
2456	value = softc->disk->d_delmaxsize;
2457	error = sysctl_handle_64(oidp, &value, 0, req);
2458	if ((error != 0) || (req->newptr == NULL))
2459		return (error);
2460
2461	/* only accept values smaller than the calculated value */
2462	if (value > dadeletemaxsize(softc, softc->delete_method)) {
2463		return (EINVAL);
2464	}
2465	softc->disk->d_delmaxsize = value;
2466
2467	return (0);
2468}
2469
2470static int
2471dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
2472{
2473	int error, value;
2474
2475	value = *(int *)arg1;
2476
2477	error = sysctl_handle_int(oidp, &value, 0, req);
2478
2479	if ((error != 0)
2480	 || (req->newptr == NULL))
2481		return (error);
2482
2483	/*
2484	 * Acceptable values here are 6, 10, 12 or 16.
2485	 */
2486	if (value < 6)
2487		value = 6;
2488	else if ((value > 6)
2489	      && (value <= 10))
2490		value = 10;
2491	else if ((value > 10)
2492	      && (value <= 12))
2493		value = 12;
2494	else if (value > 12)
2495		value = 16;
2496
2497	*(int *)arg1 = value;
2498
2499	return (0);
2500}
2501
2502static int
2503dasysctlsofttimeout(SYSCTL_HANDLER_ARGS)
2504{
2505	sbintime_t value;
2506	int error;
2507
2508	value = da_default_softtimeout / SBT_1MS;
2509
2510	error = sysctl_handle_int(oidp, (int *)&value, 0, req);
2511	if ((error != 0) || (req->newptr == NULL))
2512		return (error);
2513
2514	/* XXX Should clip this to a reasonable level */
2515	if (value > da_default_timeout * 1000)
2516		return (EINVAL);
2517
2518	da_default_softtimeout = value * SBT_1MS;
2519	return (0);
2520}
2521
2522static void
2523dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
2524{
2525
2526	softc->delete_method = delete_method;
2527	softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
2528	softc->delete_func = da_delete_functions[delete_method];
2529
2530	if (softc->delete_method > DA_DELETE_DISABLE)
2531		softc->disk->d_flags |= DISKFLAG_CANDELETE;
2532	else
2533		softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
2534}
2535
2536static off_t
2537dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
2538{
2539	off_t sectors;
2540
2541	switch(delete_method) {
2542	case DA_DELETE_UNMAP:
2543		sectors = (off_t)softc->unmap_max_lba;
2544		break;
2545	case DA_DELETE_ATA_TRIM:
2546		sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
2547		break;
2548	case DA_DELETE_WS16:
2549		sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
2550		break;
2551	case DA_DELETE_ZERO:
2552	case DA_DELETE_WS10:
2553		sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
2554		break;
2555	default:
2556		return 0;
2557	}
2558
2559	return (off_t)softc->params.secsize *
2560	    omin(sectors, softc->params.sectors);
2561}
2562
/*
 * Complete the probe state machine: pick the BIO_DELETE method, print
 * one-time announcements, release the probe CCB, create the GEOM disk
 * on the first pass, and drop the probe-time references.  Called with
 * the periph lock held.
 */
static void
daprobedone(struct cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	/* Select the best delete method now that probing filled in limits. */
	dadeletemethodchoose(softc, DA_DELETE_NONE);

	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		char buf[80];
		int i, sep;

		/* Build a "<m1,m2(*)>" list; (*) marks the active method. */
		snprintf(buf, sizeof(buf), "Delete methods: <");
		sep = 0;
		for (i = 0; i <= DA_DELETE_MAX; i++) {
			if ((softc->delete_available & (1 << i)) == 0 &&
			    i != softc->delete_method)
				continue;
			if (sep)
				strlcat(buf, ",", sizeof(buf));
			strlcat(buf, da_delete_method_names[i],
			    sizeof(buf));
			if (i == softc->delete_method)
				strlcat(buf, "(*)", sizeof(buf));
			sep = 1;
		}
		strlcat(buf, ">", sizeof(buf));
		printf("%s%d: %s\n", periph->periph_name,
		    periph->unit_number, buf);
	}
	if ((softc->disk->d_flags & DISKFLAG_WRITE_PROTECT) != 0 &&
	    (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		printf("%s%d: Write Protected\n", periph->periph_name,
		    periph->unit_number);
	}

	/*
	 * Since our peripheral may be invalidated by an error
	 * above or an external event, we must release our CCB
	 * before releasing the probe lock on the peripheral.
	 * The peripheral will only go away once the last lock
	 * is removed, and we need it around for the CCB release
	 * operation.
	 */
	xpt_release_ccb(ccb);
	softc->state = DA_STATE_NORMAL;
	softc->flags |= DA_FLAG_PROBED;
	daschedule(periph);
	/* Wake anyone sleeping on the media size becoming valid. */
	wakeup(&softc->disk->d_mediasize);
	if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
		softc->flags |= DA_FLAG_ANNOUNCED;

		/*
		 * We'll release this reference once GEOM calls us back via
		 * dadiskgonecb(), telling us that our provider has been freed.
		 */
		if (da_periph_acquire(periph, DA_REF_GEOM) == 0)
			disk_create(softc->disk, DISK_VERSION);

		cam_periph_release_boot(periph);
	}
	da_periph_release_locked(periph, DA_REF_REPROBE);
}
2629
2630static void
2631dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
2632{
2633	int i, methods;
2634
2635	/* If available, prefer the method requested by user. */
2636	i = softc->delete_method_pref;
2637	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2638	if (methods & (1 << i)) {
2639		dadeletemethodset(softc, i);
2640		return;
2641	}
2642
2643	/* Use the pre-defined order to choose the best performing delete. */
2644	for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
2645		if (i == DA_DELETE_ZERO)
2646			continue;
2647		if (softc->delete_available & (1 << i)) {
2648			dadeletemethodset(softc, i);
2649			return;
2650		}
2651	}
2652
2653	/* Fallback to default. */
2654	dadeletemethodset(softc, default_method);
2655}
2656
2657static int
2658dabitsysctl(SYSCTL_HANDLER_ARGS)
2659{
2660	u_int *flags = arg1;
2661	u_int test = arg2;
2662	int tmpout, error;
2663
2664	tmpout = !!(*flags & test);
2665	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
2666	if (error || !req->newptr)
2667		return (error);
2668
2669	return (EPERM);
2670}
2671
2672static int
2673daflagssysctl(SYSCTL_HANDLER_ARGS)
2674{
2675	struct sbuf sbuf;
2676	struct da_softc *softc = arg1;
2677	int error;
2678
2679	sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
2680	if (softc->flags != 0)
2681		sbuf_printf(&sbuf, "0x%b", (unsigned)softc->flags, DA_FLAG_STRING);
2682	else
2683		sbuf_putc(&sbuf, '0');
2684	error = sbuf_finish(&sbuf);
2685	sbuf_delete(&sbuf);
2686
2687	return (error);
2688}
2689
2690static int
2691dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
2692{
2693	char buf[16];
2694	const char *p;
2695	struct da_softc *softc;
2696	int i, error, value;
2697
2698	softc = (struct da_softc *)arg1;
2699
2700	value = softc->delete_method;
2701	if (value < 0 || value > DA_DELETE_MAX)
2702		p = "UNKNOWN";
2703	else
2704		p = da_delete_method_names[value];
2705	strncpy(buf, p, sizeof(buf));
2706	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2707	if (error != 0 || req->newptr == NULL)
2708		return (error);
2709	for (i = 0; i <= DA_DELETE_MAX; i++) {
2710		if (strcmp(buf, da_delete_method_names[i]) == 0)
2711			break;
2712	}
2713	if (i > DA_DELETE_MAX)
2714		return (EINVAL);
2715	softc->delete_method_pref = i;
2716	dadeletemethodchoose(softc, DA_DELETE_NONE);
2717	return (0);
2718}
2719
2720static int
2721dazonemodesysctl(SYSCTL_HANDLER_ARGS)
2722{
2723	char tmpbuf[40];
2724	struct da_softc *softc;
2725	int error;
2726
2727	softc = (struct da_softc *)arg1;
2728
2729	switch (softc->zone_mode) {
2730	case DA_ZONE_DRIVE_MANAGED:
2731		snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
2732		break;
2733	case DA_ZONE_HOST_AWARE:
2734		snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
2735		break;
2736	case DA_ZONE_HOST_MANAGED:
2737		snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
2738		break;
2739	case DA_ZONE_NONE:
2740	default:
2741		snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
2742		break;
2743	}
2744
2745	error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
2746
2747	return (error);
2748}
2749
2750static int
2751dazonesupsysctl(SYSCTL_HANDLER_ARGS)
2752{
2753	struct da_softc *softc;
2754	struct sbuf sb;
2755	int error, first;
2756	unsigned int i;
2757
2758	softc = (struct da_softc *)arg1;
2759
2760	first = 1;
2761	sbuf_new_for_sysctl(&sb, NULL, 0, req);
2762
2763	for (i = 0; i < sizeof(da_zone_desc_table) /
2764	     sizeof(da_zone_desc_table[0]); i++) {
2765		if (softc->zone_flags & da_zone_desc_table[i].value) {
2766			if (first == 0)
2767				sbuf_cat(&sb, ", ");
2768			else
2769				first = 0;
2770			sbuf_cat(&sb, da_zone_desc_table[i].desc);
2771		}
2772	}
2773
2774	if (first == 1)
2775		sbuf_cat(&sb, "None");
2776
2777	error = sbuf_finish(&sb);
2778	sbuf_delete(&sb);
2779	return (error);
2780}
2781
/*
 * CAM registration entry point for the da driver.  Allocates and fills
 * in the softc, applies quirk-table and tunable overrides, creates the
 * disk(9) object, registers async callbacks and periodic callouts, and
 * kicks off the probe state machine (DA_STATE_PROBE_WP) by scheduling
 * the periph.  Returns CAM_REQ_CMP on success or CAM_REQ_CMP_ERR on any
 * allocation failure.
 */
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	struct da_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char tmpstr[80];
	caddr_t match;
	int quirks;

	cgd = (struct ccb_getdev *)arg;
	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate iosched memory\n");
		free(softc, M_DEVBUF);
		return(CAM_REQ_CMP_ERR);
	}

	/* Initial softc state; probing starts with the write-protect check. */
	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE_WP;
	bioq_init(&softc->delete_run_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	softc->unmap_max_ranges = UNMAP_MAX_RANGES;
	softc->unmap_max_lba = UNMAP_RANGE_MAX;
	softc->unmap_gran = 0;
	softc->unmap_gran_align = 0;
	softc->ws_max_blks = WS16_MAX_BLKS;
	softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
	/* Assume rotating media until probing proves otherwise. */
	softc->flags |= DA_FLAG_ROTATING;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       nitems(da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/* Check if the SIM does not want 6 byte commands */
	xpt_path_inq(&cpi, periph->path);
	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
		softc->quirks |= DA_Q_NO_6_BYTE;

	/* Override quirks if tunable is set */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.quirks",
		 periph->unit_number);
	quirks = softc->quirks;
	TUNABLE_INT_FETCH(tmpstr, &quirks);
	softc->quirks = quirks;

	/* Classify the device's zone (SMR) model. */
	if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
		softc->zone_mode = DA_ZONE_HOST_MANAGED;
	else if (softc->quirks & DA_Q_SMR_DM)
		softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
	else
		softc->zone_mode = DA_ZONE_NONE;

	if (softc->zone_mode != DA_ZONE_NONE) {
		/* ATA-behind-SAT devices advertise the ATA Information VPD. */
		if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
			if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
				softc->zone_interface = DA_ZONE_IF_ATA_SAT;
			else
				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
		} else
			softc->zone_interface = DA_ZONE_IF_SCSI;
	}

	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);

	/*
	 * Let XPT know we can use UMA-allocated CCBs.
	 */
	if (da_enable_uma_ccbs) {
		KASSERT(da_ccb_zone != NULL,
		    ("%s: NULL da_ccb_zone", __func__));
		periph->ccb_zone = da_ccb_zone;
	}

	/*
	 * Take a reference on the periph while dastart is called to finish the
	 * probe.  The reference will be dropped in dadone at the end of probe.
	 */
	(void)da_periph_acquire(periph, DA_REF_REPROBE);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
	callout_reset_sbt(&softc->sendordered_c,
	    SBT_1S / DA_ORDEREDTAG_INTERVAL * da_default_timeout, 0,
	    dasendorderedtag, periph, C_PREL(1));

	/*
	 * Drop the periph lock for the setup below; NOTE(review): presumably
	 * because disk_alloc()/devstat_new_entry() may sleep — confirm.
	 */
	cam_periph_unlock(periph);
	/*
	 * RBC devices don't have to support READ(6), only READ(10).
	 */
	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Load the user's default, if any.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
		 periph->unit_number);
	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);

	/*
	 * 6, 10, 12 and 16 are the currently permissible values.
	 */
	if (softc->minimum_cmd_size > 12)
		softc->minimum_cmd_size = 16;
	else if (softc->minimum_cmd_size > 10)
		softc->minimum_cmd_size = 12;
	else if (softc->minimum_cmd_size > 6)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/* On first PROBE_WP request all more pages, then adjust. */
	softc->mode_page = SMS_ALL_PAGES_PAGE;

	/* Predict whether device may support READ CAPACITY(16). */
	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
	    (softc->quirks & DA_Q_NO_RC16) == 0) {
		softc->flags |= DA_FLAG_CAN_RC16;
	}

	/*
	 * Register this media as a disk.
	 */
	softc->disk = disk_alloc();
	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  SID_TYPE(&cgd->inq_data) |
			  XPORT_DEVSTAT_TYPE(cpi.transport),
			  DEVSTAT_PRIORITY_DISK);
	softc->disk->d_open = daopen;
	softc->disk->d_close = daclose;
	softc->disk->d_strategy = dastrategy;
	/* Only pollable SIMs can service crash dumps. */
	if (cam_sim_pollable(periph->sim))
		softc->disk->d_dump = dadump;
	softc->disk->d_getattr = dagetattr;
	softc->disk->d_gone = dadiskgonecb;
	softc->disk->d_name = "da";
	softc->disk->d_drv1 = periph;
	/* Clamp the maximum I/O size between the SIM's limit and maxphys. */
	if (cpi.maxio == 0)
		softc->maxio = DFLTPHYS;	/* traditional default */
	else if (cpi.maxio > maxphys)
		softc->maxio = maxphys;		/* for safety */
	else
		softc->maxio = cpi.maxio;
	if (softc->quirks & DA_Q_128KB)
		softc->maxio = min(softc->maxio, 128 * 1024);
	softc->disk->d_maxsize = softc->maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
		softc->flags |= DA_FLAG_UNMAPPEDIO;
		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
	}
	/* Description is "<vendor> <product>" from the INQUIRY data. */
	cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
	    sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
	strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
	cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
	    cgd->inq_data.product, sizeof(cgd->inq_data.product),
	    sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
	softc->disk->d_hba_vendor = cpi.hba_vendor;
	softc->disk->d_hba_device = cpi.hba_device;
	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
	snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment),
	    "%s%d", cpi.dev_name, cpi.unit_number);
	cam_periph_lock(periph);

	/*
	 * Add async callbacks for events of interest.
	 * I don't bother checking if this fails as,
	 * in most cases, the system will function just
	 * fine without them and the only alternative
	 * would be to not attach the device on failure.
	 */
	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
	    AC_INQ_CHANGED, daasync, periph, periph->path);

	/*
	 * Schedule a periodic media polling events.
	 */
	callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
	/* Poll only removable media that cannot report AENs itself. */
	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
	    (cgd->inq_flags & SID_AEN) == 0 &&
	    da_poll_period != 0) {
		callout_reset_sbt(&softc->mediapoll_c, da_poll_period * SBT_1S,
		    0, damediapoll, periph, C_PREL(1));
	}

	/* Released after probe when disk_create() call pass it to GEOM. */
	cam_periph_hold_boot(periph);

	xpt_schedule(periph, CAM_PRIORITY_DEV);
	return(CAM_REQ_CMP);
}
3012
3013static int
3014da_zone_bio_to_scsi(int disk_zone_cmd)
3015{
3016	switch (disk_zone_cmd) {
3017	case DISK_ZONE_OPEN:
3018		return ZBC_OUT_SA_OPEN;
3019	case DISK_ZONE_CLOSE:
3020		return ZBC_OUT_SA_CLOSE;
3021	case DISK_ZONE_FINISH:
3022		return ZBC_OUT_SA_FINISH;
3023	case DISK_ZONE_RWP:
3024		return ZBC_OUT_SA_RWP;
3025	}
3026
3027	return -1;
3028}
3029
3030static int
3031da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
3032	    int *queue_ccb)
3033{
3034	struct da_softc *softc;
3035	int error;
3036
3037	error = 0;
3038
3039	if (bp->bio_cmd != BIO_ZONE) {
3040		error = EINVAL;
3041		goto bailout;
3042	}
3043
3044	softc = periph->softc;
3045
3046	switch (bp->bio_zone.zone_cmd) {
3047	case DISK_ZONE_OPEN:
3048	case DISK_ZONE_CLOSE:
3049	case DISK_ZONE_FINISH:
3050	case DISK_ZONE_RWP: {
3051		int zone_flags;
3052		int zone_sa;
3053		uint64_t lba;
3054
3055		zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
3056		if (zone_sa == -1) {
3057			xpt_print(periph->path, "Cannot translate zone "
3058			    "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd);
3059			error = EINVAL;
3060			goto bailout;
3061		}
3062
3063		zone_flags = 0;
3064		lba = bp->bio_zone.zone_params.rwp.id;
3065
3066		if (bp->bio_zone.zone_params.rwp.flags &
3067		    DISK_ZONE_RWP_FLAG_ALL)
3068			zone_flags |= ZBC_OUT_ALL;
3069
3070		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
3071			scsi_zbc_out(&ccb->csio,
3072				     /*retries*/ da_retry_count,
3073				     /*cbfcnp*/ dadone,
3074				     /*tag_action*/ MSG_SIMPLE_Q_TAG,
3075				     /*service_action*/ zone_sa,
3076				     /*zone_id*/ lba,
3077				     /*zone_flags*/ zone_flags,
3078				     /*data_ptr*/ NULL,
3079				     /*dxfer_len*/ 0,
3080				     /*sense_len*/ SSD_FULL_SIZE,
3081				     /*timeout*/ da_default_timeout * 1000);
3082		} else {
3083			/*
3084			 * Note that in this case, even though we can
3085			 * technically use NCQ, we don't bother for several
3086			 * reasons:
3087			 * 1. It hasn't been tested on a SAT layer that
3088			 *    supports it.  This is new as of SAT-4.
3089			 * 2. Even when there is a SAT layer that supports
3090			 *    it, that SAT layer will also probably support
3091			 *    ZBC -> ZAC translation, since they are both
3092			 *    in the SAT-4 spec.
3093			 * 3. Translation will likely be preferable to ATA
3094			 *    passthrough.  LSI / Avago at least single
3095			 *    steps ATA passthrough commands in the HBA,
3096			 *    regardless of protocol, so unless that
3097			 *    changes, there is a performance penalty for
3098			 *    doing ATA passthrough no matter whether
3099			 *    you're using NCQ/FPDMA, DMA or PIO.
3100			 * 4. It requires a 32-byte CDB, which at least at
3101			 *    this point in CAM requires a CDB pointer, which
3102			 *    would require us to allocate an additional bit
3103			 *    of storage separate from the CCB.
3104			 */
3105			error = scsi_ata_zac_mgmt_out(&ccb->csio,
3106			    /*retries*/ da_retry_count,
3107			    /*cbfcnp*/ dadone,
3108			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3109			    /*use_ncq*/ 0,
3110			    /*zm_action*/ zone_sa,
3111			    /*zone_id*/ lba,
3112			    /*zone_flags*/ zone_flags,
3113			    /*data_ptr*/ NULL,
3114			    /*dxfer_len*/ 0,
3115			    /*cdb_storage*/ NULL,
3116			    /*cdb_storage_len*/ 0,
3117			    /*sense_len*/ SSD_FULL_SIZE,
3118			    /*timeout*/ da_default_timeout * 1000);
3119			if (error != 0) {
3120				error = EINVAL;
3121				xpt_print(periph->path,
3122				    "scsi_ata_zac_mgmt_out() returned an "
3123				    "error!");
3124				goto bailout;
3125			}
3126		}
3127		*queue_ccb = 1;
3128
3129		break;
3130	}
3131	case DISK_ZONE_REPORT_ZONES: {
3132		uint8_t *rz_ptr;
3133		uint32_t num_entries, alloc_size;
3134		struct disk_zone_report *rep;
3135
3136		rep = &bp->bio_zone.zone_params.report;
3137
3138		num_entries = rep->entries_allocated;
3139		if (num_entries == 0) {
3140			xpt_print(periph->path, "No entries allocated for "
3141			    "Report Zones request\n");
3142			error = EINVAL;
3143			goto bailout;
3144		}
3145		alloc_size = sizeof(struct scsi_report_zones_hdr) +
3146		    (sizeof(struct scsi_report_zones_desc) * num_entries);
3147		alloc_size = min(alloc_size, softc->disk->d_maxsize);
3148		rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
3149		if (rz_ptr == NULL) {
3150			xpt_print(periph->path, "Unable to allocate memory "
3151			   "for Report Zones request\n");
3152			error = ENOMEM;
3153			goto bailout;
3154		}
3155
3156		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
3157			scsi_zbc_in(&ccb->csio,
3158				    /*retries*/ da_retry_count,
3159				    /*cbcfnp*/ dadone,
3160				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3161				    /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
3162				    /*zone_start_lba*/ rep->starting_id,
3163				    /*zone_options*/ rep->rep_options,
3164				    /*data_ptr*/ rz_ptr,
3165				    /*dxfer_len*/ alloc_size,
3166				    /*sense_len*/ SSD_FULL_SIZE,
3167				    /*timeout*/ da_default_timeout * 1000);
3168		} else {
3169			/*
3170			 * Note that in this case, even though we can
3171			 * technically use NCQ, we don't bother for several
3172			 * reasons:
3173			 * 1. It hasn't been tested on a SAT layer that
3174			 *    supports it.  This is new as of SAT-4.
3175			 * 2. Even when there is a SAT layer that supports
3176			 *    it, that SAT layer will also probably support
3177			 *    ZBC -> ZAC translation, since they are both
3178			 *    in the SAT-4 spec.
3179			 * 3. Translation will likely be preferable to ATA
3180			 *    passthrough.  LSI / Avago at least single
3181			 *    steps ATA passthrough commands in the HBA,
3182			 *    regardless of protocol, so unless that
3183			 *    changes, there is a performance penalty for
3184			 *    doing ATA passthrough no matter whether
3185			 *    you're using NCQ/FPDMA, DMA or PIO.
3186			 * 4. It requires a 32-byte CDB, which at least at
3187			 *    this point in CAM requires a CDB pointer, which
3188			 *    would require us to allocate an additional bit
3189			 *    of storage separate from the CCB.
3190			 */
3191			error = scsi_ata_zac_mgmt_in(&ccb->csio,
3192			    /*retries*/ da_retry_count,
3193			    /*cbcfnp*/ dadone,
3194			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3195			    /*use_ncq*/ 0,
3196			    /*zm_action*/ ATA_ZM_REPORT_ZONES,
3197			    /*zone_id*/ rep->starting_id,
3198			    /*zone_flags*/ rep->rep_options,
3199			    /*data_ptr*/ rz_ptr,
3200			    /*dxfer_len*/ alloc_size,
3201			    /*cdb_storage*/ NULL,
3202			    /*cdb_storage_len*/ 0,
3203			    /*sense_len*/ SSD_FULL_SIZE,
3204			    /*timeout*/ da_default_timeout * 1000);
3205			if (error != 0) {
3206				error = EINVAL;
3207				xpt_print(periph->path,
3208				    "scsi_ata_zac_mgmt_in() returned an "
3209				    "error!");
3210				goto bailout;
3211			}
3212		}
3213
3214		/*
3215		 * For BIO_ZONE, this isn't normally needed.  However, it
3216		 * is used by devstat_end_transaction_bio() to determine
3217		 * how much data was transferred.
3218		 */
3219		/*
3220		 * XXX KDM we have a problem.  But I'm not sure how to fix
3221		 * it.  devstat uses bio_bcount - bio_resid to calculate
3222		 * the amount of data transferred.   The GEOM disk code
3223		 * uses bio_length - bio_resid to calculate the amount of
3224		 * data in bio_completed.  We have different structure
3225		 * sizes above and below the ada(4) driver.  So, if we
3226		 * use the sizes above, the amount transferred won't be
3227		 * quite accurate for devstat.  If we use different sizes
3228		 * for bio_bcount and bio_length (above and below
3229		 * respectively), then the residual needs to match one or
3230		 * the other.  Everything is calculated after the bio
3231		 * leaves the driver, so changing the values around isn't
3232		 * really an option.  For now, just set the count to the
3233		 * passed in length.  This means that the calculations
3234		 * above (e.g. bio_completed) will be correct, but the
3235		 * amount of data reported to devstat will be slightly
3236		 * under or overstated.
3237		 */
3238		bp->bio_bcount = bp->bio_length;
3239
3240		*queue_ccb = 1;
3241
3242		break;
3243	}
3244	case DISK_ZONE_GET_PARAMS: {
3245		struct disk_zone_disk_params *params;
3246
3247		params = &bp->bio_zone.zone_params.disk_params;
3248		bzero(params, sizeof(*params));
3249
3250		switch (softc->zone_mode) {
3251		case DA_ZONE_DRIVE_MANAGED:
3252			params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
3253			break;
3254		case DA_ZONE_HOST_AWARE:
3255			params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
3256			break;
3257		case DA_ZONE_HOST_MANAGED:
3258			params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
3259			break;
3260		default:
3261		case DA_ZONE_NONE:
3262			params->zone_mode = DISK_ZONE_MODE_NONE;
3263			break;
3264		}
3265
3266		if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
3267			params->flags |= DISK_ZONE_DISK_URSWRZ;
3268
3269		if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
3270			params->optimal_seq_zones = softc->optimal_seq_zones;
3271			params->flags |= DISK_ZONE_OPT_SEQ_SET;
3272		}
3273
3274		if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
3275			params->optimal_nonseq_zones =
3276			    softc->optimal_nonseq_zones;
3277			params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
3278		}
3279
3280		if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
3281			params->max_seq_zones = softc->max_seq_zones;
3282			params->flags |= DISK_ZONE_MAX_SEQ_SET;
3283		}
3284		if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
3285			params->flags |= DISK_ZONE_RZ_SUP;
3286
3287		if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
3288			params->flags |= DISK_ZONE_OPEN_SUP;
3289
3290		if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
3291			params->flags |= DISK_ZONE_CLOSE_SUP;
3292
3293		if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
3294			params->flags |= DISK_ZONE_FINISH_SUP;
3295
3296		if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
3297			params->flags |= DISK_ZONE_RWP_SUP;
3298		break;
3299	}
3300	default:
3301		break;
3302	}
3303bailout:
3304	return (error);
3305}
3306
/*
 * dastart():  CAM peripheral "start" callback for da(4).
 *
 * Called with the periph lock held whenever a CCB (start_ccb) has been
 * allocated for this peripheral.  Dispatches on softc->state: in
 * DA_STATE_NORMAL it pulls the next bio from the I/O scheduler and
 * translates it into a SCSI command; in the DA_STATE_PROBE_* states it
 * issues the next command in the attach-time probe sequence.  Each state
 * fills in start_ccb, tags it with a DA_CCB_* completion state, and hands
 * it to the transport via xpt_action(); the matching dadone* callback
 * advances the state machine.
 */
static void
dastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct da_softc *softc;

	cam_periph_assert(periph, MA_OWNED);
	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n"));

	/*
	 * Re-entered (without allocating a new CCB) whenever a state handler
	 * changes softc->state and wants the new state run immediately.
	 */
skipstate:
	switch (softc->state) {
	case DA_STATE_NORMAL:
	{
		struct bio *bp;
		uint8_t tag_code;

more:
		bp = cam_iosched_next_bio(softc->cam_iosched);
		if (bp == NULL) {
			/*
			 * No I/O is queued.  If the scheduler has flagged a
			 * deferred TEST UNIT READY, use this CCB for it;
			 * otherwise give the CCB back.
			 */
			if (cam_iosched_has_work_flags(softc->cam_iosched,
			    DA_WORK_TUR)) {
				softc->flags |= DA_FLAG_TUR_PENDING;
				cam_iosched_clr_work_flags(softc->cam_iosched,
				    DA_WORK_TUR);
				scsi_test_unit_ready(&start_ccb->csio,
				     /*retries*/ da_retry_count,
				     dadone_tur,
				     MSG_SIMPLE_Q_TAG,
				     SSD_FULL_SIZE,
				     da_default_timeout * 1000);
				start_ccb->ccb_h.ccb_bp = NULL;
				start_ccb->ccb_h.ccb_state = DA_CCB_TUR;
				xpt_action(start_ccb);
			} else
				xpt_release_ccb(start_ccb);
			break;
		}

		if (bp->bio_cmd == BIO_DELETE) {
			if (softc->delete_func != NULL) {
				/*
				 * The delete method (UNMAP / ATA TRIM / WS)
				 * builds and queues the CCB itself; skip the
				 * common queueing below via the "out" label.
				 */
				softc->delete_func(periph, start_ccb, bp);
				goto out;
			} else {
				/*
				 * Not sure this is possible, but failsafe by
				 * lying and saying "sure, done."
				 */
				biofinish(bp, NULL, 0);
				goto more;
			}
		}

		/*
		 * We have real I/O to do, so a pending TUR is no longer
		 * needed; drop the work flag and its periph reference.
		 */
		if (cam_iosched_has_work_flags(softc->cam_iosched,
		    DA_WORK_TUR)) {
			cam_iosched_clr_work_flags(softc->cam_iosched,
			    DA_WORK_TUR);
			da_periph_release_locked(periph, DA_REF_TUR);
		}

		/*
		 * Use an ordered tag when the bio demands ordering or when
		 * we've decided one is periodically needed (DA_FLAG_NEED_OTAG);
		 * otherwise a simple tag suffices.
		 */
		if ((bp->bio_flags & BIO_ORDERED) != 0 ||
		    (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
			softc->flags &= ~DA_FLAG_NEED_OTAG;
			softc->flags |= DA_FLAG_WAS_OTAG;
			tag_code = MSG_ORDERED_Q_TAG;
		} else {
			tag_code = MSG_SIMPLE_Q_TAG;
		}

		switch (bp->bio_cmd) {
		case BIO_WRITE:
		case BIO_READ:
		{
			void *data_ptr;
			int rw_op;

			biotrack(bp, __func__);

			if (bp->bio_cmd == BIO_WRITE) {
				/* Remember the media has been written to. */
				softc->flags |= DA_FLAG_DIRTY;
				rw_op = SCSI_RW_WRITE;
			} else {
				rw_op = SCSI_RW_READ;
			}

			/*
			 * For unmapped or vlist bios, pass the bio itself and
			 * let the transport map the data.
			 */
			data_ptr = bp->bio_data;
			if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
				rw_op |= SCSI_RW_BIO;
				data_ptr = bp;
			}

			scsi_read_write(&start_ccb->csio,
					/*retries*/da_retry_count,
					/*cbfcnp*/dadone,
					/*tag_action*/tag_code,
					rw_op,
					/*byte2*/0,
					softc->minimum_cmd_size,
					/*lba*/bp->bio_pblkno,
					/*block_count*/bp->bio_bcount /
					softc->params.secsize,
					data_ptr,
					/*dxfer_len*/ bp->bio_bcount,
					/*sense_len*/SSD_FULL_SIZE,
					da_default_timeout * 1000);
#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
			start_ccb->csio.bio = bp;
#endif
			break;
		}
		case BIO_FLUSH:
			/*
			 * If we don't support sync cache, or the disk
			 * isn't dirty, FLUSH is a no-op.  Use the
			 * allocated CCB for the next bio if one is
			 * available.
			 */
			if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 ||
			    (softc->flags & DA_FLAG_DIRTY) == 0) {
				biodone(bp);
				goto skipstate;
			}

			/*
			 * BIO_FLUSH doesn't currently communicate
			 * range data, so we synchronize the cache
			 * over the whole disk.
			 */
			scsi_synchronize_cache(&start_ccb->csio,
					       /*retries*/1,
					       /*cbfcnp*/dadone,
					       /*tag_action*/tag_code,
					       /*begin_lba*/0,
					       /*lb_count*/0,
					       SSD_FULL_SIZE,
					       da_default_timeout*1000);
			/*
			 * Clear the dirty flag before sending the command.
			 * Either this sync cache will be successful, or it
			 * will fail after a retry.  If it fails, it is
			 * unlikely to be successful if retried later, so
			 * we'll save ourselves time by just marking the
			 * device clean.
			 */
			softc->flags &= ~DA_FLAG_DIRTY;
			break;
		case BIO_ZONE: {
			int error, queue_ccb;

			queue_ccb = 0;

			/*
			 * da_zone_cmd() fills in the CCB and sets queue_ccb
			 * when there is actually a command to send; some zone
			 * subcommands (e.g. GET_PARAMS) complete locally.
			 */
			error = da_zone_cmd(periph, start_ccb, bp, &queue_ccb);
			if ((error != 0)
			 || (queue_ccb == 0)) {
				/*
				 * g_io_deliver will recurisvely call start
				 * routine for ENOMEM, so drop the periph
				 * lock to allow that recursion.
				 */
				if (error == ENOMEM)
					cam_periph_unlock(periph);
				biofinish(bp, NULL, error);
				if (error == ENOMEM)
					cam_periph_lock(periph);
				xpt_release_ccb(start_ccb);
				return;
			}
			break;
		}
		default:
			biofinish(bp, NULL, EOPNOTSUPP);
			xpt_release_ccb(start_ccb);
			return;
		}
		start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
		start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout);

		/*
		 * Common I/O submission path (also entered from delete_func):
		 * track the CCB, attach the bio, and send it with the periph
		 * lock dropped (the CCB is marked CAM_UNLOCKED above).
		 */
out:
		LIST_INSERT_HEAD(&softc->pending_ccbs,
				 &start_ccb->ccb_h, periph_links.le);

		/* We expect a unit attention from this device */
		if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
			start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
			softc->flags &= ~DA_FLAG_RETRY_UA;
		}

		start_ccb->ccb_h.ccb_bp = bp;
		/* refcount keeps the softc alive while the CCB is in flight */
		softc->refcount++;
		cam_periph_unlock(periph);
		xpt_action(start_ccb);
		cam_periph_lock(periph);

		/* May have more work to do, so ensure we stay scheduled */
		daschedule(periph);
		break;
	}
	case DA_STATE_PROBE_WP:
	{
		/* Probe write-protect status via MODE SENSE. */
		void  *mode_buf;
		int    mode_buf_len;

		if (da_disable_wp_detection || softc->mode_page < 0) {
			/* WP detection disabled; skip ahead to read capacity. */
			if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
				softc->state = DA_STATE_PROBE_RC16;
			else
				softc->state = DA_STATE_PROBE_RC;
			goto skipstate;
		}
		mode_buf_len = 192;
		mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT);
		if (mode_buf == NULL) {
			xpt_print(periph->path, "Unable to send mode sense - "
			    "malloc failure\n");
			if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
				softc->state = DA_STATE_PROBE_RC16;
			else
				softc->state = DA_STATE_PROBE_RC;
			goto skipstate;
		}
		scsi_mode_sense_len(&start_ccb->csio,
				    /*retries*/ da_retry_count,
				    /*cbfcnp*/ dadone_probewp,
				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
				    /*dbd*/ FALSE,
				    /*pc*/ SMS_PAGE_CTRL_CURRENT,
				    /*page*/ softc->mode_page,
				    /*param_buf*/ mode_buf,
				    /*param_len*/ mode_buf_len,
				    /*minimum_cmd_size*/ softc->minimum_cmd_size,
				    /*sense_len*/ SSD_FULL_SIZE,
				    /*timeout*/ da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_RC:
	{
		/* Probe media size via READ CAPACITY (10). */
		struct scsi_read_capacity_data *rcap;

		rcap = (struct scsi_read_capacity_data *)
		    malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO);
		if (rcap == NULL) {
			printf("dastart: Couldn't malloc read_capacity data\n");
			/* da_free_periph??? */
			break;
		}
		scsi_read_capacity(&start_ccb->csio,
				   /*retries*/da_retry_count,
				   dadone_proberc,
				   MSG_SIMPLE_Q_TAG,
				   rcap,
				   SSD_FULL_SIZE,
				   /*timeout*/5000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_RC16:
	{
		/* Probe media size via READ CAPACITY (16). */
		struct scsi_read_capacity_data_long *rcaplong;

		rcaplong = (struct scsi_read_capacity_data_long *)
			malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO);
		if (rcaplong == NULL) {
			printf("dastart: Couldn't malloc read_capacity data\n");
			/* da_free_periph??? */
			break;
		}
		scsi_read_capacity_16(&start_ccb->csio,
				      /*retries*/ da_retry_count,
				      /*cbfcnp*/ dadone_proberc,
				      /*tag_action*/ MSG_SIMPLE_Q_TAG,
				      /*lba*/ 0,
				      /*reladr*/ 0,
				      /*pmi*/ 0,
				      /*rcap_buf*/ (uint8_t *)rcaplong,
				      /*rcap_buf_len*/ sizeof(*rcaplong),
				      /*sense_len*/ SSD_FULL_SIZE,
				      /*timeout*/ da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_LBP:
	{
		/* Probe Logical Block Provisioning (UNMAP support) VPD page. */
		struct scsi_vpd_logical_block_prov *lbp;

		if (!scsi_vpd_supported_page(periph, SVPD_LBP)) {
			/*
			 * If we get here we don't support any SBC-3 delete
			 * methods with UNMAP as the Logical Block Provisioning
			 * VPD page support is required for devices which
			 * support it according to T10/1799-D Revision 31
			 * however older revisions of the spec don't mandate
			 * this so we currently don't remove these methods
			 * from the available set.
			 */
			softc->state = DA_STATE_PROBE_BLK_LIMITS;
			goto skipstate;
		}

		lbp = (struct scsi_vpd_logical_block_prov *)
			malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO);

		if (lbp == NULL) {
			printf("dastart: Couldn't malloc lbp data\n");
			/* da_free_periph??? */
			break;
		}

		scsi_inquiry(&start_ccb->csio,
			     /*retries*/da_retry_count,
			     /*cbfcnp*/dadone_probelbp,
			     /*tag_action*/MSG_SIMPLE_Q_TAG,
			     /*inq_buf*/(uint8_t *)lbp,
			     /*inq_len*/sizeof(*lbp),
			     /*evpd*/TRUE,
			     /*page_code*/SVPD_LBP,
			     /*sense_len*/SSD_MIN_SIZE,
			     /*timeout*/da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_BLK_LIMITS:
	{
		/* Probe the Block Limits VPD page (transfer/unmap limits). */
		struct scsi_vpd_block_limits *block_limits;

		if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) {
			/* Not supported skip to next probe */
			softc->state = DA_STATE_PROBE_BDC;
			goto skipstate;
		}

		block_limits = (struct scsi_vpd_block_limits *)
			malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO);

		if (block_limits == NULL) {
			printf("dastart: Couldn't malloc block_limits data\n");
			/* da_free_periph??? */
			break;
		}

		scsi_inquiry(&start_ccb->csio,
			     /*retries*/da_retry_count,
			     /*cbfcnp*/dadone_probeblklimits,
			     /*tag_action*/MSG_SIMPLE_Q_TAG,
			     /*inq_buf*/(uint8_t *)block_limits,
			     /*inq_len*/sizeof(*block_limits),
			     /*evpd*/TRUE,
			     /*page_code*/SVPD_BLOCK_LIMITS,
			     /*sense_len*/SSD_MIN_SIZE,
			     /*timeout*/da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_BDC:
	{
		/* Probe the Block Device Characteristics VPD page. */
		struct scsi_vpd_block_device_characteristics *bdc;

		if (!scsi_vpd_supported_page(periph, SVPD_BDC)) {
			softc->state = DA_STATE_PROBE_ATA;
			goto skipstate;
		}

		bdc = (struct scsi_vpd_block_device_characteristics *)
			malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);

		if (bdc == NULL) {
			printf("dastart: Couldn't malloc bdc data\n");
			/* da_free_periph??? */
			break;
		}

		scsi_inquiry(&start_ccb->csio,
			     /*retries*/da_retry_count,
			     /*cbfcnp*/dadone_probebdc,
			     /*tag_action*/MSG_SIMPLE_Q_TAG,
			     /*inq_buf*/(uint8_t *)bdc,
			     /*inq_len*/sizeof(*bdc),
			     /*evpd*/TRUE,
			     /*page_code*/SVPD_BDC,
			     /*sense_len*/SSD_MIN_SIZE,
			     /*timeout*/da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_ATA:
	{
		/* Issue ATA IDENTIFY through the SAT layer, if available. */
		struct ata_params *ata_params;

		if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
			if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
			 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
				/*
				 * Note that if the ATA VPD page isn't
				 * supported, we aren't talking to an ATA
				 * device anyway.  Support for that VPD
				 * page is mandatory for SCSI to ATA (SAT)
				 * translation layers.
				 */
				softc->state = DA_STATE_PROBE_ZONE;
				goto skipstate;
			}
			daprobedone(periph, start_ccb);
			break;
		}

		ata_params = &periph->path->device->ident_data;

		scsi_ata_identify(&start_ccb->csio,
				  /*retries*/da_retry_count,
				  /*cbfcnp*/dadone_probeata,
                                  /*tag_action*/MSG_SIMPLE_Q_TAG,
				  /*data_ptr*/(uint8_t *)ata_params,
				  /*dxfer_len*/sizeof(*ata_params),
				  /*sense_len*/SSD_FULL_SIZE,
				  /*timeout*/da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_ATA_LOGDIR:
	{
		/* Read the ATA log directory (via SAT READ LOG). */
		struct ata_gp_log_dir *log_dir;
		int retval;

		retval = 0;

		if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) {
			/*
			 * If we don't have log support, not much point in
			 * trying to probe zone support.
			 */
			daprobedone(periph, start_ccb);
			break;
		}

		/*
		 * If we have an ATA device (the SCSI ATA Information VPD
		 * page should be present and the ATA identify should have
		 * succeeded) and it supports logs, ask for the log directory.
		 */

		log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO);
		if (log_dir == NULL) {
			xpt_print(periph->path, "Couldn't malloc log_dir "
			    "data\n");
			daprobedone(periph, start_ccb);
			break;
		}

		retval = scsi_ata_read_log(&start_ccb->csio,
		    /*retries*/ da_retry_count,
		    /*cbfcnp*/ dadone_probeatalogdir,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*log_address*/ ATA_LOG_DIRECTORY,
		    /*page_number*/ 0,
		    /*block_count*/ 1,
		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
		    /*data_ptr*/ (uint8_t *)log_dir,
		    /*dxfer_len*/ sizeof(*log_dir),
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ da_default_timeout * 1000);

		if (retval != 0) {
			xpt_print(periph->path, "scsi_ata_read_log() failed!");
			free(log_dir, M_SCSIDA);
			daprobedone(periph, start_ccb);
			break;
		}
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_ATA_IDDIR:
	{
		/* Read the list of ATA Identify Device Data log pages. */
		struct ata_identify_log_pages *id_dir;
		int retval;

		retval = 0;

		/*
		 * Check here to see whether the Identify Device log is
		 * supported in the directory of logs.  If so, continue
		 * with requesting the log of identify device pages.
		 */
		if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) {
			daprobedone(periph, start_ccb);
			break;
		}

		id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO);
		if (id_dir == NULL) {
			xpt_print(periph->path, "Couldn't malloc id_dir "
			    "data\n");
			daprobedone(periph, start_ccb);
			break;
		}

		retval = scsi_ata_read_log(&start_ccb->csio,
		    /*retries*/ da_retry_count,
		    /*cbfcnp*/ dadone_probeataiddir,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
		    /*page_number*/ ATA_IDL_PAGE_LIST,
		    /*block_count*/ 1,
		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
		    /*data_ptr*/ (uint8_t *)id_dir,
		    /*dxfer_len*/ sizeof(*id_dir),
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ da_default_timeout * 1000);

		if (retval != 0) {
			xpt_print(periph->path, "scsi_ata_read_log() failed!");
			free(id_dir, M_SCSIDA);
			daprobedone(periph, start_ccb);
			break;
		}
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_ATA_SUP:
	{
		/* Read the ATA Supported Capabilities identify log page. */
		struct ata_identify_log_sup_cap *sup_cap;
		int retval;

		retval = 0;

		/*
		 * Check here to see whether the Supported Capabilities log
		 * is in the list of Identify Device logs.
		 */
		if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) {
			daprobedone(periph, start_ccb);
			break;
		}

		sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO);
		if (sup_cap == NULL) {
			xpt_print(periph->path, "Couldn't malloc sup_cap "
			    "data\n");
			daprobedone(periph, start_ccb);
			break;
		}

		retval = scsi_ata_read_log(&start_ccb->csio,
		    /*retries*/ da_retry_count,
		    /*cbfcnp*/ dadone_probeatasup,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
		    /*page_number*/ ATA_IDL_SUP_CAP,
		    /*block_count*/ 1,
		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
		    /*data_ptr*/ (uint8_t *)sup_cap,
		    /*dxfer_len*/ sizeof(*sup_cap),
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ da_default_timeout * 1000);

		if (retval != 0) {
			xpt_print(periph->path, "scsi_ata_read_log() failed!");
			free(sup_cap, M_SCSIDA);
			daprobedone(periph, start_ccb);
			break;
		}

		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE_ATA_ZONE:
	{
		/* Read the ATA Zoned Device Information identify log page. */
		struct ata_zoned_info_log *ata_zone;
		int retval;

		retval = 0;

		/*
		 * Check here to see whether the zoned device information
		 * page is supported.  If so, continue on to request it.
		 * If not, skip to DA_STATE_PROBE_LOG or done.
		 */
		if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
			daprobedone(periph, start_ccb);
			break;
		}
		ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
				  M_NOWAIT|M_ZERO);
		if (ata_zone == NULL) {
			xpt_print(periph->path, "Couldn't malloc ata_zone "
			    "data\n");
			daprobedone(periph, start_ccb);
			break;
		}

		retval = scsi_ata_read_log(&start_ccb->csio,
		    /*retries*/ da_retry_count,
		    /*cbfcnp*/ dadone_probeatazone,
		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
		    /*page_number*/ ATA_IDL_ZDI,
		    /*block_count*/ 1,
		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
		    /*data_ptr*/ (uint8_t *)ata_zone,
		    /*dxfer_len*/ sizeof(*ata_zone),
		    /*sense_len*/ SSD_FULL_SIZE,
		    /*timeout*/ da_default_timeout * 1000);

		if (retval != 0) {
			xpt_print(periph->path, "scsi_ata_read_log() failed!");
			free(ata_zone, M_SCSIDA);
			daprobedone(periph, start_ccb);
			break;
		}
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;
		xpt_action(start_ccb);

		break;
	}
	case DA_STATE_PROBE_ZONE:
	{
		/* Probe the SCSI Zoned Block Device Characteristics VPD page. */
		struct scsi_vpd_zoned_bdc *bdc;

		/*
		 * Note that this page will be supported for SCSI protocol
		 * devices that support ZBC (SMR devices), as well as ATA
		 * protocol devices that are behind a SAT (SCSI to ATA
		 * Translation) layer that supports converting ZBC commands
		 * to their ZAC equivalents.
		 */
		if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
			daprobedone(periph, start_ccb);
			break;
		}
		bdc = (struct scsi_vpd_zoned_bdc *)
			malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);

		if (bdc == NULL) {
			xpt_release_ccb(start_ccb);
			xpt_print(periph->path, "Couldn't malloc zone VPD "
			    "data\n");
			break;
		}
		scsi_inquiry(&start_ccb->csio,
			     /*retries*/da_retry_count,
			     /*cbfcnp*/dadone_probezone,
			     /*tag_action*/MSG_SIMPLE_Q_TAG,
			     /*inq_buf*/(uint8_t *)bdc,
			     /*inq_len*/sizeof(*bdc),
			     /*evpd*/TRUE,
			     /*page_code*/SVPD_ZONED_BDC,
			     /*sense_len*/SSD_FULL_SIZE,
			     /*timeout*/da_default_timeout * 1000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
		xpt_action(start_ccb);
		break;
	}
	}
}
3987
3988/*
3989 * In each of the methods below, while its the caller's
3990 * responsibility to ensure the request will fit into a
3991 * single device request, we might have changed the delete
3992 * method due to the device incorrectly advertising either
3993 * its supported methods or limits.
3994 *
3995 * To prevent this causing further issues we validate the
3996 * against the methods limits, and warn which would
3997 * otherwise be unnecessary.
3998 */
3999static void
4000da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
4001{
4002	struct da_softc *softc = (struct da_softc *)periph->softc;
4003	struct bio *bp1;
4004	uint8_t *buf = softc->unmap_buf;
4005	struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE];
4006	uint64_t lba, lastlba = (uint64_t)-1;
4007	uint64_t totalcount = 0;
4008	uint64_t count;
4009	uint32_t c, lastcount = 0, ranges = 0;
4010
4011	/*
4012	 * Currently this doesn't take the UNMAP
4013	 * Granularity and Granularity Alignment
4014	 * fields into account.
4015	 *
4016	 * This could result in both unoptimal unmap
4017	 * requests as as well as UNMAP calls unmapping
4018	 * fewer LBA's than requested.
4019	 */
4020
4021	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
4022	bp1 = bp;
4023	do {
4024		/*
4025		 * Note: ada and da are different in how they store the
4026		 * pending bp's in a trim. ada stores all of them in the
4027		 * trim_req.bps. da stores all but the first one in the
4028		 * delete_run_queue. ada then completes all the bps in
4029		 * its adadone() loop. da completes all the bps in the
4030		 * delete_run_queue in dadone, and relies on the biodone
4031		 * after to complete. This should be reconciled since there's
4032		 * no real reason to do it differently. XXX
4033		 */
4034		if (bp1 != bp)
4035			bioq_insert_tail(&softc->delete_run_queue, bp1);
4036		lba = bp1->bio_pblkno;
4037		count = bp1->bio_bcount / softc->params.secsize;
4038
4039		/* Try to extend the previous range. */
4040		if (lba == lastlba) {
4041			c = omin(count, UNMAP_RANGE_MAX - lastcount);
4042			lastlba += c;
4043			lastcount += c;
4044			scsi_ulto4b(lastcount, d[ranges - 1].length);
4045			count -= c;
4046			lba += c;
4047			totalcount += c;
4048		} else if ((softc->quirks & DA_Q_STRICT_UNMAP) &&
4049		    softc->unmap_gran != 0) {
4050			/* Align length of the previous range. */
4051			if ((c = lastcount % softc->unmap_gran) != 0) {
4052				if (lastcount <= c) {
4053					totalcount -= lastcount;
4054					lastlba = (uint64_t)-1;
4055					lastcount = 0;
4056					ranges--;
4057				} else {
4058					totalcount -= c;
4059					lastlba -= c;
4060					lastcount -= c;
4061					scsi_ulto4b(lastcount,
4062					    d[ranges - 1].length);
4063				}
4064			}
4065			/* Align beginning of the new range. */
4066			c = (lba - softc->unmap_gran_align) % softc->unmap_gran;
4067			if (c != 0) {
4068				c = softc->unmap_gran - c;
4069				if (count <= c) {
4070					count = 0;
4071				} else {
4072					lba += c;
4073					count -= c;
4074				}
4075			}
4076		}
4077
4078		while (count > 0) {
4079			c = omin(count, UNMAP_RANGE_MAX);
4080			if (totalcount + c > softc->unmap_max_lba ||
4081			    ranges >= softc->unmap_max_ranges) {
4082				xpt_print(periph->path,
4083				    "%s issuing short delete %ld > %ld"
4084				    "|| %d >= %d",
4085				    da_delete_method_desc[softc->delete_method],
4086				    totalcount + c, softc->unmap_max_lba,
4087				    ranges, softc->unmap_max_ranges);
4088				break;
4089			}
4090			scsi_u64to8b(lba, d[ranges].lba);
4091			scsi_ulto4b(c, d[ranges].length);
4092			lba += c;
4093			totalcount += c;
4094			ranges++;
4095			count -= c;
4096			lastlba = lba;
4097			lastcount = c;
4098		}
4099		bp1 = cam_iosched_next_trim(softc->cam_iosched);
4100		if (bp1 == NULL)
4101			break;
4102		if (ranges >= softc->unmap_max_ranges ||
4103		    totalcount + bp1->bio_bcount /
4104		    softc->params.secsize > softc->unmap_max_lba) {
4105			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4106			break;
4107		}
4108	} while (1);
4109
4110	/* Align length of the last range. */
4111	if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 &&
4112	    (c = lastcount % softc->unmap_gran) != 0) {
4113		if (lastcount <= c)
4114			ranges--;
4115		else
4116			scsi_ulto4b(lastcount - c, d[ranges - 1].length);
4117	}
4118
4119	scsi_ulto2b(ranges * 16 + 6, &buf[0]);
4120	scsi_ulto2b(ranges * 16, &buf[2]);
4121
4122	scsi_unmap(&ccb->csio,
4123		   /*retries*/da_retry_count,
4124		   /*cbfcnp*/dadone,
4125		   /*tag_action*/MSG_SIMPLE_Q_TAG,
4126		   /*byte2*/0,
4127		   /*data_ptr*/ buf,
4128		   /*dxfer_len*/ ranges * 16 + 8,
4129		   /*sense_len*/SSD_FULL_SIZE,
4130		   da_default_timeout * 1000);
4131	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4132	ccb->ccb_h.flags |= CAM_UNLOCKED;
4133	softc->trim_count++;
4134	softc->trim_ranges += ranges;
4135	softc->trim_lbas += totalcount;
4136	cam_iosched_submit_trim(softc->cam_iosched);
4137}
4138
4139static void
4140da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
4141{
4142	struct da_softc *softc = (struct da_softc *)periph->softc;
4143	struct bio *bp1;
4144	uint8_t *buf = softc->unmap_buf;
4145	uint64_t lastlba = (uint64_t)-1;
4146	uint64_t count;
4147	uint64_t lba;
4148	uint32_t lastcount = 0, c, requestcount;
4149	int ranges = 0, off, block_count;
4150
4151	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
4152	bp1 = bp;
4153	do {
4154		if (bp1 != bp)//XXX imp XXX
4155			bioq_insert_tail(&softc->delete_run_queue, bp1);
4156		lba = bp1->bio_pblkno;
4157		count = bp1->bio_bcount / softc->params.secsize;
4158		requestcount = count;
4159
4160		/* Try to extend the previous range. */
4161		if (lba == lastlba) {
4162			c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
4163			lastcount += c;
4164			off = (ranges - 1) * 8;
4165			buf[off + 6] = lastcount & 0xff;
4166			buf[off + 7] = (lastcount >> 8) & 0xff;
4167			count -= c;
4168			lba += c;
4169		}
4170
4171		while (count > 0) {
4172			c = omin(count, ATA_DSM_RANGE_MAX);
4173			off = ranges * 8;
4174
4175			buf[off + 0] = lba & 0xff;
4176			buf[off + 1] = (lba >> 8) & 0xff;
4177			buf[off + 2] = (lba >> 16) & 0xff;
4178			buf[off + 3] = (lba >> 24) & 0xff;
4179			buf[off + 4] = (lba >> 32) & 0xff;
4180			buf[off + 5] = (lba >> 40) & 0xff;
4181			buf[off + 6] = c & 0xff;
4182			buf[off + 7] = (c >> 8) & 0xff;
4183			lba += c;
4184			ranges++;
4185			count -= c;
4186			lastcount = c;
4187			if (count != 0 && ranges == softc->trim_max_ranges) {
4188				xpt_print(periph->path,
4189				    "%s issuing short delete %ld > %ld\n",
4190				    da_delete_method_desc[softc->delete_method],
4191				    requestcount,
4192				    (softc->trim_max_ranges - ranges) *
4193				    ATA_DSM_RANGE_MAX);
4194				break;
4195			}
4196		}
4197		lastlba = lba;
4198		bp1 = cam_iosched_next_trim(softc->cam_iosched);
4199		if (bp1 == NULL)
4200			break;
4201		if (bp1->bio_bcount / softc->params.secsize >
4202		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
4203			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4204			break;
4205		}
4206	} while (1);
4207
4208	block_count = howmany(ranges, ATA_DSM_BLK_RANGES);
4209	scsi_ata_trim(&ccb->csio,
4210		      /*retries*/da_retry_count,
4211		      /*cbfcnp*/dadone,
4212		      /*tag_action*/MSG_SIMPLE_Q_TAG,
4213		      block_count,
4214		      /*data_ptr*/buf,
4215		      /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
4216		      /*sense_len*/SSD_FULL_SIZE,
4217		      da_default_timeout * 1000);
4218	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4219	ccb->ccb_h.flags |= CAM_UNLOCKED;
4220	softc->trim_count++;
4221	softc->trim_ranges += ranges;
4222	softc->trim_lbas += block_count;
4223	cam_iosched_submit_trim(softc->cam_iosched);
4224}
4225
4226/*
4227 * We calculate ws_max_blks here based off d_delmaxsize instead
4228 * of using softc->ws_max_blks as it is absolute max for the
4229 * device not the protocol max which may well be lower.
4230 */
4231static void
4232da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
4233{
4234	struct da_softc *softc;
4235	struct bio *bp1;
4236	uint64_t ws_max_blks;
4237	uint64_t lba;
4238	uint64_t count; /* forward compat with WS32 */
4239
4240	softc = (struct da_softc *)periph->softc;
4241	ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
4242	lba = bp->bio_pblkno;
4243	count = 0;
4244	bp1 = bp;
4245	do {
4246		if (bp1 != bp)//XXX imp XXX
4247			bioq_insert_tail(&softc->delete_run_queue, bp1);
4248		count += bp1->bio_bcount / softc->params.secsize;
4249		if (count > ws_max_blks) {
4250			xpt_print(periph->path,
4251			    "%s issuing short delete %ld > %ld\n",
4252			    da_delete_method_desc[softc->delete_method],
4253			    count, ws_max_blks);
4254			count = omin(count, ws_max_blks);
4255			break;
4256		}
4257		bp1 = cam_iosched_next_trim(softc->cam_iosched);
4258		if (bp1 == NULL)
4259			break;
4260		if (lba + count != bp1->bio_pblkno ||
4261		    count + bp1->bio_bcount /
4262		    softc->params.secsize > ws_max_blks) {
4263			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4264			break;
4265		}
4266	} while (1);
4267
4268	scsi_write_same(&ccb->csio,
4269			/*retries*/da_retry_count,
4270			/*cbfcnp*/dadone,
4271			/*tag_action*/MSG_SIMPLE_Q_TAG,
4272			/*byte2*/softc->delete_method ==
4273			    DA_DELETE_ZERO ? 0 : SWS_UNMAP,
4274			softc->delete_method == DA_DELETE_WS16 ? 16 : 10,
4275			/*lba*/lba,
4276			/*block_count*/count,
4277			/*data_ptr*/ __DECONST(void *, zero_region),
4278			/*dxfer_len*/ softc->params.secsize,
4279			/*sense_len*/SSD_FULL_SIZE,
4280			da_default_timeout * 1000);
4281	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4282	ccb->ccb_h.flags |= CAM_UNLOCKED;
4283	softc->trim_count++;
4284	softc->trim_ranges++;
4285	softc->trim_lbas += count;
4286	cam_iosched_submit_trim(softc->cam_iosched);
4287}
4288
4289static int
4290cmd6workaround(union ccb *ccb)
4291{
4292	struct scsi_rw_6 cmd6;
4293	struct scsi_rw_10 *cmd10;
4294	struct da_softc *softc;
4295	uint8_t *cdb;
4296	struct bio *bp;
4297	int frozen;
4298
4299	cdb = ccb->csio.cdb_io.cdb_bytes;
4300	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
4301
4302	if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) {
4303		da_delete_methods old_method = softc->delete_method;
4304
4305		/*
4306		 * Typically there are two reasons for failure here
4307		 * 1. Delete method was detected as supported but isn't
4308		 * 2. Delete failed due to invalid params e.g. too big
4309		 *
4310		 * While we will attempt to choose an alternative delete method
4311		 * this may result in short deletes if the existing delete
4312		 * requests from geom are big for the new method chosen.
4313		 *
4314		 * This method assumes that the error which triggered this
4315		 * will not retry the io otherwise a panic will occur
4316		 */
4317		dadeleteflag(softc, old_method, 0);
4318		dadeletemethodchoose(softc, DA_DELETE_DISABLE);
4319		if (softc->delete_method == DA_DELETE_DISABLE)
4320			xpt_print(ccb->ccb_h.path,
4321				  "%s failed, disabling BIO_DELETE\n",
4322				  da_delete_method_desc[old_method]);
4323		else
4324			xpt_print(ccb->ccb_h.path,
4325				  "%s failed, switching to %s BIO_DELETE\n",
4326				  da_delete_method_desc[old_method],
4327				  da_delete_method_desc[softc->delete_method]);
4328
4329		while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
4330			cam_iosched_queue_work(softc->cam_iosched, bp);
4331		cam_iosched_queue_work(softc->cam_iosched,
4332		    (struct bio *)ccb->ccb_h.ccb_bp);
4333		ccb->ccb_h.ccb_bp = NULL;
4334		return (0);
4335	}
4336
4337	/* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
4338	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
4339	    (*cdb == PREVENT_ALLOW) &&
4340	    (softc->quirks & DA_Q_NO_PREVENT) == 0) {
4341		if (bootverbose)
4342			xpt_print(ccb->ccb_h.path,
4343			    "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
4344		softc->quirks |= DA_Q_NO_PREVENT;
4345		return (0);
4346	}
4347
4348	/* Detect unsupported SYNCHRONIZE CACHE(10). */
4349	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
4350	    (*cdb == SYNCHRONIZE_CACHE) &&
4351	    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
4352		if (bootverbose)
4353			xpt_print(ccb->ccb_h.path,
4354			    "SYNCHRONIZE CACHE(10) not supported.\n");
4355		softc->quirks |= DA_Q_NO_SYNC_CACHE;
4356		softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
4357		return (0);
4358	}
4359
4360	/* Translation only possible if CDB is an array and cmd is R/W6 */
4361	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
4362	    (*cdb != READ_6 && *cdb != WRITE_6))
4363		return 0;
4364
4365	xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
4366	    "increasing minimum_cmd_size to 10.\n");
4367	softc->minimum_cmd_size = 10;
4368
4369	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
4370	cmd10 = (struct scsi_rw_10 *)cdb;
4371	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
4372	cmd10->byte2 = 0;
4373	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
4374	cmd10->reserved = 0;
4375	scsi_ulto2b(cmd6.length, cmd10->length);
4376	cmd10->control = cmd6.control;
4377	ccb->csio.cdb_len = sizeof(*cmd10);
4378
4379	/* Requeue request, unfreezing queue if necessary */
4380	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
4381	ccb->ccb_h.status = CAM_REQUEUE_REQ;
4382	xpt_action(ccb);
4383	if (frozen) {
4384		cam_release_devq(ccb->ccb_h.path,
4385				 /*relsim_flags*/0,
4386				 /*reduction*/0,
4387				 /*timeout*/0,
4388				 /*getcount_only*/0);
4389	}
4390	return (ERESTART);
4391}
4392
/*
 * Post-process a completed DISK_ZONE bio.  Open/close/finish/RWP need no
 * translation.  For REPORT ZONES, convert the SCSI (or, behind an ATA
 * passthrough interface, little-endian ATA) zone descriptors returned by
 * the drive into the bio's disk_zone report entries, then free the data
 * buffer that was allocated for the command.
 */
static void
dazonedone(struct cam_periph *periph, union ccb *ccb)
{
	struct da_softc *softc;
	struct bio *bp;

	softc = periph->softc;
	bp = (struct bio *)ccb->ccb_h.ccb_bp;

	switch (bp->bio_zone.zone_cmd) {
	case DISK_ZONE_OPEN:
	case DISK_ZONE_CLOSE:
	case DISK_ZONE_FINISH:
	case DISK_ZONE_RWP:
		break;
	case DISK_ZONE_REPORT_ZONES: {
		uint32_t avail_len;
		struct disk_zone_report *rep;
		struct scsi_report_zones_hdr *hdr;
		struct scsi_report_zones_desc *desc;
		struct disk_zone_rep_entry *entry;
		uint32_t hdr_len, num_avail;
		uint32_t num_to_fill, i;
		int ata;

		rep = &bp->bio_zone.zone_params.report;
		/* Bytes actually returned by the device. */
		avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
		/*
		 * Note that bio_resid isn't normally used for zone
		 * commands, but it is used by devstat_end_transaction_bio()
		 * to determine how much data was transferred.  Because
		 * the size of the SCSI/ATA data structures is different
		 * than the size of the BIO interface structures, the
		 * amount of data actually transferred from the drive will
		 * be different than the amount of data transferred to
		 * the user.
		 */
		bp->bio_resid = ccb->csio.resid;
		hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
		if (avail_len < sizeof(*hdr)) {
			/*
			 * Is there a better error than EIO here?  We asked
			 * for at least the header, and we got less than
			 * that.
			 */
			bp->bio_error = EIO;
			bp->bio_flags |= BIO_ERROR;
			bp->bio_resid = bp->bio_bcount;
			break;
		}

		/* ATA passthrough returns multi-byte fields little-endian. */
		if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
			ata = 1;
		else
			ata = 0;

		hdr_len = ata ? le32dec(hdr->length) :
				scsi_4btoul(hdr->length);
		if (hdr_len > 0)
			rep->entries_available = hdr_len / sizeof(*desc);
		else
			rep->entries_available = 0;
		/*
		 * NOTE: using the same values for the BIO version of the
		 * same field as the SCSI/ATA values.  This means we could
		 * get some additional values that aren't defined in bio.h
		 * if more values of the same field are defined later.
		 */
		rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
		rep->header.maximum_lba = ata ?  le64dec(hdr->maximum_lba) :
					  scsi_8btou64(hdr->maximum_lba);
		/*
		 * If the drive reports no entries that match the query,
		 * we're done.
		 */
		if (hdr_len == 0) {
			rep->entries_filled = 0;
			break;
		}

		/* Descriptors both claimed by the header and actually read. */
		num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
				hdr_len / sizeof(*desc));
		/*
		 * If the drive didn't return any data, then we're done.
		 */
		if (num_avail == 0) {
			rep->entries_filled = 0;
			break;
		}

		num_to_fill = min(num_avail, rep->entries_allocated);
		/*
		 * If the user didn't allocate any entries for us to fill,
		 * we're done.
		 */
		if (num_to_fill == 0) {
			rep->entries_filled = 0;
			break;
		}

		for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
		     i < num_to_fill; i++, desc++, entry++) {
			/*
			 * NOTE: we're mapping the values here directly
			 * from the SCSI/ATA bit definitions to the bio.h
			 * definitions. There is also a warning in
			 * disk_zone.h, but the impact is that if
			 * additional values are added in the SCSI/ATA
			 * specs these will be visible to consumers of
			 * this interface.
			 */
			entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
			entry->zone_condition =
			    (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
			    SRZ_ZONE_COND_SHIFT;
			entry->zone_flags |= desc->zone_flags &
			    (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
			entry->zone_length =
			    ata ? le64dec(desc->zone_length) :
				  scsi_8btou64(desc->zone_length);
			entry->zone_start_lba =
			    ata ? le64dec(desc->zone_start_lba) :
				  scsi_8btou64(desc->zone_start_lba);
			entry->write_pointer_lba =
			    ata ? le64dec(desc->write_pointer_lba) :
				  scsi_8btou64(desc->write_pointer_lba);
		}
		rep->entries_filled = num_to_fill;
		break;
	}
	case DISK_ZONE_GET_PARAMS:
	default:
		/*
		 * In theory we should not get a GET_PARAMS bio, since it
		 * should be handled without queueing the command to the
		 * drive.
		 */
		panic("%s: Invalid zone command %d", __func__,
		    bp->bio_zone.zone_cmd);
		break;
	}

	/* The REPORT ZONES data buffer is owned by us; release it here. */
	if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
		free(ccb->csio.data_ptr, M_SCSIDA);
}
4538
/*
 * Completion handler for normal I/O CCBs (read/write/delete/zone).  Runs
 * error recovery through daerror(), updates the bio with error/resid
 * state, tells the I/O scheduler the request finished, and completes the
 * originating bio plus any bios that were coalesced into a delete.
 * The ordering here (iosched accounting before biodone, trim_done before
 * completing coalesced bios) is deliberate — see the inline comments.
 */
static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct bio *bp, *bp1;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	da_ccb_state state;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (csio->bio != NULL)
		biotrack(csio->bio, __func__);
#endif
	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;

	cam_periph_lock(periph);
	bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		int error;
		int sf;

		if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
			sf = SF_RETRY_UA;
		else
			sf = 0;

		error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
		if (error == ERESTART) {
			/* A retry was scheduled, so just return. */
			cam_periph_unlock(periph);
			return;
		}
		/*
		 * Re-read ccb_bp: the error path may have consumed it
		 * (cmd6workaround() requeues delete bios and NULLs it).
		 */
		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if (error != 0) {
			int queued_error;

			/*
			 * return all queued I/O with EIO, so that
			 * the client can retry these I/Os in the
			 * proper order should it attempt to recover.
			 */
			queued_error = EIO;

			if (error == ENXIO
			 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) {
				/*
				 * Catastrophic error.  Mark our pack as
				 * invalid.
				 *
				 * XXX See if this is really a media
				 * XXX change first?
				 */
				xpt_print(periph->path, "Invalidating pack\n");
				softc->flags |= DA_FLAG_PACK_INVALID;
#ifdef CAM_IO_STATS
				softc->invalidations++;
#endif
				queued_error = ENXIO;
			}
			cam_iosched_flush(softc->cam_iosched, NULL,
			   queued_error);
			if (bp != NULL) {
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			}
		} else if (bp != NULL) {
			/* Recovered: deletes complete fully by definition. */
			if (state == DA_CCB_DELETE)
				bp->bio_resid = 0;
			else
				bp->bio_resid = csio->resid;
			bp->bio_error = 0;
			if (bp->bio_resid != 0)
				bp->bio_flags |= BIO_ERROR;
		}
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(done_ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
	} else if (bp != NULL) {
		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			panic("REQ_CMP with QFRZN");
		if (bp->bio_cmd == BIO_ZONE)
			dazonedone(periph, done_ccb);
		else if (state == DA_CCB_DELETE)
			bp->bio_resid = 0;
		else
			bp->bio_resid = csio->resid;
		if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE))
			bp->bio_flags |= BIO_ERROR;
		if (softc->error_inject != 0) {
			/* Fault injection for testing: fail this bio once. */
			bp->bio_error = softc->error_inject;
			bp->bio_resid = bp->bio_bcount;
			bp->bio_flags |= BIO_ERROR;
			softc->error_inject = 0;
		}
	}

	if (bp != NULL)
		biotrack(bp, __func__);
	LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
	if (LIST_EMPTY(&softc->pending_ccbs))
		softc->flags |= DA_FLAG_WAS_OTAG;

	/*
	 * We need to call cam_iosched before we call biodone so that we don't
	 * measure any activity that happens in the completion routine, which in
	 * the case of sendfile can be quite extensive. Release the periph
	 * refcount taken in dastart() for each CCB.
	 */
	cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
	xpt_release_ccb(done_ccb);
	KASSERT(softc->refcount >= 1, ("dadone softc %p refcount %d", softc, softc->refcount));
	softc->refcount--;
	if (state == DA_CCB_DELETE) {
		TAILQ_HEAD(, bio) queue;

		/* Detach the coalesced bios so they can be finished unlocked. */
		TAILQ_INIT(&queue);
		TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
		softc->delete_run_queue.insert_point = NULL;
		/*
		 * Normally, the xpt_release_ccb() above would make sure
		 * that when we have more work to do, that work would
		 * get kicked off. However, we specifically keep
		 * delete_running set to 0 before the call above to
		 * allow other I/O to progress when many BIO_DELETE
		 * requests are pushed down. We set delete_running to 0
		 * and call daschedule again so that we don't stall if
		 * there are no other I/Os pending apart from BIO_DELETEs.
		 */
		cam_iosched_trim_done(softc->cam_iosched);
		daschedule(periph);
		cam_periph_unlock(periph);
		/* Propagate the lead bio's result to every coalesced bio. */
		while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
			TAILQ_REMOVE(&queue, bp1, bio_queue);
			bp1->bio_error = bp->bio_error;
			if (bp->bio_flags & BIO_ERROR) {
				bp1->bio_flags |= BIO_ERROR;
				bp1->bio_resid = bp1->bio_bcount;
			} else
				bp1->bio_resid = 0;
			biodone(bp1);
		}
	} else {
		daschedule(periph);
		cam_periph_unlock(periph);
	}
	if (bp != NULL)
		biodone(bp);
	return;
}
4696
4697static void
4698dadone_probewp(struct cam_periph *periph, union ccb *done_ccb)
4699{
4700	struct da_softc *softc;
4701	struct ccb_scsiio *csio;
4702	uint32_t  priority;
4703
4704	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probewp\n"));
4705
4706	softc = (struct da_softc *)periph->softc;
4707	priority = done_ccb->ccb_h.pinfo.priority;
4708	csio = &done_ccb->csio;
4709
4710	cam_periph_assert(periph, MA_OWNED);
4711
4712	KASSERT(softc->state == DA_STATE_PROBE_WP,
4713	    ("State (%d) not PROBE_WP in dadone_probewp, periph %p ccb %p",
4714		softc->state, periph, done_ccb));
4715        KASSERT((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) == DA_CCB_PROBE_WP,
4716	    ("CCB State (%lu) not PROBE_WP in dadone_probewp, periph %p ccb %p",
4717		(unsigned long)csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK, periph,
4718		done_ccb));
4719
4720	if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
4721		int len, off;
4722		uint8_t dev_spec;
4723
4724		if (csio->cdb_len > 6) {
4725			struct scsi_mode_header_10 *mh =
4726			    (struct scsi_mode_header_10 *)csio->data_ptr;
4727			len = 2 + scsi_2btoul(mh->data_length);
4728			off = sizeof(*mh) + scsi_2btoul(mh->blk_desc_len);
4729			dev_spec = mh->dev_spec;
4730		} else {
4731			struct scsi_mode_header_6 *mh =
4732			    (struct scsi_mode_header_6 *)csio->data_ptr;
4733			len = 1 + mh->data_length;
4734			off = sizeof(*mh) + mh->blk_desc_len;
4735			dev_spec = mh->dev_spec;
4736		}
4737		if ((dev_spec & 0x80) != 0)
4738			softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT;
4739		else
4740			softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT;
4741
4742		/* Next time request only the first of returned mode pages. */
4743		if (off < len && off < csio->dxfer_len - csio->resid)
4744			softc->mode_page = csio->data_ptr[off] & SMPH_PC_MASK;
4745	} else {
4746		int error;
4747
4748		error = daerror(done_ccb, CAM_RETRY_SELTO,
4749				SF_RETRY_UA|SF_NO_PRINT);
4750		if (error == ERESTART)
4751			return;
4752		else if (error != 0) {
4753			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4754				/* Don't wedge this device's queue */
4755				cam_release_devq(done_ccb->ccb_h.path,
4756						 /*relsim_flags*/0,
4757						 /*reduction*/0,
4758						 /*timeout*/0,
4759						 /*getcount_only*/0);
4760			}
4761
4762			/* We don't depend on it, so don't try again. */
4763			softc->mode_page = -1;
4764		}
4765	}
4766
4767	free(csio->data_ptr, M_SCSIDA);
4768	if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
4769		softc->state = DA_STATE_PROBE_RC16;
4770	else
4771		softc->state = DA_STATE_PROBE_RC;
4772	xpt_release_ccb(done_ccb);
4773	xpt_schedule(periph, priority);
4774	return;
4775}
4776
4777static void
4778dadone_proberc(struct cam_periph *periph, union ccb *done_ccb)
4779{
4780	struct scsi_read_capacity_data *rdcap;
4781	struct scsi_read_capacity_data_long *rcaplong;
4782	struct da_softc *softc;
4783	struct ccb_scsiio *csio;
4784	da_ccb_state state;
4785	char *announce_buf;
4786	uint32_t  priority;
4787	int lbp, n;
4788
4789	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_proberc\n"));
4790
4791	softc = (struct da_softc *)periph->softc;
4792	priority = done_ccb->ccb_h.pinfo.priority;
4793	csio = &done_ccb->csio;
4794	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
4795
4796	KASSERT(softc->state == DA_STATE_PROBE_RC || softc->state == DA_STATE_PROBE_RC16,
4797	    ("State (%d) not PROBE_RC* in dadone_proberc, periph %p ccb %p",
4798		softc->state, periph, done_ccb));
4799	KASSERT(state == DA_CCB_PROBE_RC || state == DA_CCB_PROBE_RC16,
4800	    ("CCB State (%lu) not PROBE_RC* in dadone_probewp, periph %p ccb %p",
4801		(unsigned long)state, periph, done_ccb));
4802
4803	lbp = 0;
4804	rdcap = NULL;
4805	rcaplong = NULL;
4806	/* XXX TODO: can this be a malloc? */
4807	announce_buf = softc->announce_temp;
4808	bzero(announce_buf, DA_ANNOUNCETMP_SZ);
4809
4810	if (state == DA_CCB_PROBE_RC)
4811		rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
4812	else
4813		rcaplong = (struct scsi_read_capacity_data_long *)
4814			csio->data_ptr;
4815
4816	cam_periph_assert(periph, MA_OWNED);
4817
4818	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4819		struct disk_params *dp;
4820		uint32_t block_size;
4821		uint64_t maxsector;
4822		u_int lalba;	/* Lowest aligned LBA. */
4823
4824		if (state == DA_CCB_PROBE_RC) {
4825			block_size = scsi_4btoul(rdcap->length);
4826			maxsector = scsi_4btoul(rdcap->addr);
4827			lalba = 0;
4828
4829			/*
4830			 * According to SBC-2, if the standard 10
4831			 * byte READ CAPACITY command returns 2^32,
4832			 * we should issue the 16 byte version of
4833			 * the command, since the device in question
4834			 * has more sectors than can be represented
4835			 * with the short version of the command.
4836			 */
4837			if (maxsector == 0xffffffff) {
4838				free(rdcap, M_SCSIDA);
4839				softc->state = DA_STATE_PROBE_RC16;
4840				xpt_release_ccb(done_ccb);
4841				xpt_schedule(periph, priority);
4842				return;
4843			}
4844		} else {
4845			block_size = scsi_4btoul(rcaplong->length);
4846			maxsector = scsi_8btou64(rcaplong->addr);
4847			lalba = scsi_2btoul(rcaplong->lalba_lbp);
4848		}
4849
4850		/*
4851		 * Because GEOM code just will panic us if we
4852		 * give them an 'illegal' value we'll avoid that
4853		 * here.
4854		 */
4855		if (block_size == 0) {
4856			block_size = 512;
4857			if (maxsector == 0)
4858				maxsector = -1;
4859		}
4860		if (block_size >= maxphys) {
4861			xpt_print(periph->path,
4862			    "unsupportable block size %ju\n",
4863			    (uintmax_t) block_size);
4864			announce_buf = NULL;
4865			cam_periph_invalidate(periph);
4866		} else {
4867			/*
4868			 * We pass rcaplong into dasetgeom(),
4869			 * because it will only use it if it is
4870			 * non-NULL.
4871			 */
4872			dasetgeom(periph, block_size, maxsector,
4873				  rcaplong, sizeof(*rcaplong));
4874			lbp = (lalba & SRC16_LBPME_A);
4875			dp = &softc->params;
4876			n = snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
4877			    "%juMB (%ju %u byte sectors",
4878			    ((uintmax_t)dp->secsize * dp->sectors) /
4879			     (1024 * 1024),
4880			    (uintmax_t)dp->sectors, dp->secsize);
4881			if (softc->p_type != 0) {
4882				n += snprintf(announce_buf + n,
4883				    DA_ANNOUNCETMP_SZ - n,
4884				    ", DIF type %d", softc->p_type);
4885			}
4886			snprintf(announce_buf + n, DA_ANNOUNCETMP_SZ - n, ")");
4887		}
4888	} else {
4889		int error;
4890
4891		/*
4892		 * Retry any UNIT ATTENTION type errors.  They
4893		 * are expected at boot.
4894		 */
4895		error = daerror(done_ccb, CAM_RETRY_SELTO,
4896				SF_RETRY_UA|SF_NO_PRINT);
4897		if (error == ERESTART) {
4898			/*
4899			 * A retry was scheuled, so
4900			 * just return.
4901			 */
4902			return;
4903		} else if (error != 0) {
4904			int asc, ascq;
4905			int sense_key, error_code;
4906			int have_sense;
4907			cam_status status;
4908			struct ccb_getdev cgd;
4909
4910			/* Don't wedge this device's queue */
4911			status = done_ccb->ccb_h.status;
4912			if ((status & CAM_DEV_QFRZN) != 0)
4913				cam_release_devq(done_ccb->ccb_h.path,
4914						 /*relsim_flags*/0,
4915						 /*reduction*/0,
4916						 /*timeout*/0,
4917						 /*getcount_only*/0);
4918
4919			memset(&cgd, 0, sizeof(cgd));
4920			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
4921				      CAM_PRIORITY_NORMAL);
4922			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
4923			xpt_action((union ccb *)&cgd);
4924
4925			if (scsi_extract_sense_ccb(done_ccb,
4926			    &error_code, &sense_key, &asc, &ascq))
4927				have_sense = TRUE;
4928			else
4929				have_sense = FALSE;
4930
4931			/*
4932			 * If we tried READ CAPACITY(16) and failed,
4933			 * fallback to READ CAPACITY(10).
4934			 */
4935			if ((state == DA_CCB_PROBE_RC16) &&
4936			    (softc->flags & DA_FLAG_CAN_RC16) &&
4937			    (((csio->ccb_h.status & CAM_STATUS_MASK) ==
4938				CAM_REQ_INVALID) ||
4939			     ((have_sense) &&
4940			      (error_code == SSD_CURRENT_ERROR ||
4941			       error_code == SSD_DESC_CURRENT_ERROR) &&
4942			      (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
4943				cam_periph_assert(periph, MA_OWNED);
4944				softc->flags &= ~DA_FLAG_CAN_RC16;
4945				free(rdcap, M_SCSIDA);
4946				softc->state = DA_STATE_PROBE_RC;
4947				xpt_release_ccb(done_ccb);
4948				xpt_schedule(periph, priority);
4949				return;
4950			}
4951
4952			/*
4953			 * Attach to anything that claims to be a
4954			 * direct access or optical disk device,
4955			 * as long as it doesn't return a "Logical
4956			 * unit not supported" (0x25) error.
4957			 * "Internal Target Failure" (0x44) is also
4958			 * special and typically means that the
4959			 * device is a SATA drive behind a SATL
4960			 * translation that's fallen into a
4961			 * terminally fatal state.
4962			 */
4963			if ((have_sense)
4964			 && (asc != 0x25) && (asc != 0x44)
4965			 && (error_code == SSD_CURRENT_ERROR
4966			  || error_code == SSD_DESC_CURRENT_ERROR)) {
4967				const char *sense_key_desc;
4968				const char *asc_desc;
4969
4970				dasetgeom(periph, 512, -1, NULL, 0);
4971				scsi_sense_desc(sense_key, asc, ascq,
4972						&cgd.inq_data, &sense_key_desc,
4973						&asc_desc);
4974				snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
4975				    "Attempt to query device "
4976				    "size failed: %s, %s",
4977				    sense_key_desc, asc_desc);
4978			} else {
4979				if (have_sense)
4980					scsi_sense_print(&done_ccb->csio);
4981				else {
4982					xpt_print(periph->path,
4983					    "got CAM status %#x\n",
4984					    done_ccb->ccb_h.status);
4985				}
4986
4987				xpt_print(periph->path, "fatal error, "
4988				    "failed to attach to device\n");
4989
4990				announce_buf = NULL;
4991
4992				/*
4993				 * Free up resources.
4994				 */
4995				cam_periph_invalidate(periph);
4996			}
4997		}
4998	}
4999	free(csio->data_ptr, M_SCSIDA);
5000	if (announce_buf != NULL &&
5001	    ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
5002		struct sbuf sb;
5003
5004		sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ,
5005		    SBUF_FIXEDLEN);
5006		xpt_announce_periph_sbuf(periph, &sb, announce_buf);
5007		xpt_announce_quirks_sbuf(periph, &sb, softc->quirks,
5008		    DA_Q_BIT_STRING);
5009		sbuf_finish(&sb);
5010		sbuf_putbuf(&sb);
5011
5012		/*
5013		 * Create our sysctl variables, now that we know
5014		 * we have successfully attached.
5015		 */
5016		/* increase the refcount */
5017		if (da_periph_acquire(periph, DA_REF_SYSCTL) == 0) {
5018			taskqueue_enqueue(taskqueue_thread,
5019					  &softc->sysctl_task);
5020		} else {
5021			/* XXX This message is useless! */
5022			xpt_print(periph->path, "fatal error, "
5023			    "could not acquire reference count\n");
5024		}
5025	}
5026
5027	/* We already probed the device. */
5028	if (softc->flags & DA_FLAG_PROBED) {
5029		daprobedone(periph, done_ccb);
5030		return;
5031	}
5032
5033	/* Ensure re-probe doesn't see old delete. */
5034	softc->delete_available = 0;
5035	dadeleteflag(softc, DA_DELETE_ZERO, 1);
5036	if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
5037		/*
5038		 * Based on older SBC-3 spec revisions
5039		 * any of the UNMAP methods "may" be
5040		 * available via LBP given this flag so
5041		 * we flag all of them as available and
5042		 * then remove those which further
5043		 * probes confirm aren't available
5044		 * later.
5045		 *
5046		 * We could also check readcap(16) p_type
5047		 * flag to exclude one or more invalid
5048		 * write same (X) types here
5049		 */
5050		dadeleteflag(softc, DA_DELETE_WS16, 1);
5051		dadeleteflag(softc, DA_DELETE_WS10, 1);
5052		dadeleteflag(softc, DA_DELETE_UNMAP, 1);
5053
5054		softc->state = DA_STATE_PROBE_LBP;
5055		xpt_release_ccb(done_ccb);
5056		xpt_schedule(periph, priority);
5057		return;
5058	}
5059
5060	softc->state = DA_STATE_PROBE_BDC;
5061	xpt_release_ccb(done_ccb);
5062	xpt_schedule(periph, priority);
5063	return;
5064}
5065
5066static void
5067dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb)
5068{
5069	struct scsi_vpd_logical_block_prov *lbp;
5070	struct da_softc *softc;
5071	struct ccb_scsiio *csio;
5072	uint32_t  priority;
5073
5074	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probelbp\n"));
5075
5076	softc = (struct da_softc *)periph->softc;
5077	priority = done_ccb->ccb_h.pinfo.priority;
5078	csio = &done_ccb->csio;
5079	lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr;
5080
5081	cam_periph_assert(periph, MA_OWNED);
5082
5083	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5084		/*
5085		 * T10/1799-D Revision 31 states at least one of these
5086		 * must be supported but we don't currently enforce this.
5087		 */
5088		dadeleteflag(softc, DA_DELETE_WS16,
5089		     (lbp->flags & SVPD_LBP_WS16));
5090		dadeleteflag(softc, DA_DELETE_WS10,
5091			     (lbp->flags & SVPD_LBP_WS10));
5092		dadeleteflag(softc, DA_DELETE_UNMAP,
5093			     (lbp->flags & SVPD_LBP_UNMAP));
5094	} else {
5095		int error;
5096		error = daerror(done_ccb, CAM_RETRY_SELTO,
5097				SF_RETRY_UA|SF_NO_PRINT);
5098		if (error == ERESTART)
5099			return;
5100		else if (error != 0) {
5101			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5102				/* Don't wedge this device's queue */
5103				cam_release_devq(done_ccb->ccb_h.path,
5104						 /*relsim_flags*/0,
5105						 /*reduction*/0,
5106						 /*timeout*/0,
5107						 /*getcount_only*/0);
5108			}
5109
5110			/*
5111			 * Failure indicates we don't support any SBC-3
5112			 * delete methods with UNMAP
5113			 */
5114		}
5115	}
5116
5117	free(lbp, M_SCSIDA);
5118	softc->state = DA_STATE_PROBE_BLK_LIMITS;
5119	xpt_release_ccb(done_ccb);
5120	xpt_schedule(periph, priority);
5121	return;
5122}
5123
/*
 * Completion handler for the Block Limits VPD page (0xB0) probe.
 *
 * On success, clamps the disk's maximum I/O size and records the UNMAP
 * and WRITE SAME limits advertised by the device; bogus UNMAP limits
 * cause the UNMAP delete method to be disabled.  On a permanent error,
 * falls back to conservative UNMAP limits since this page is optional.
 * Either way, the probe advances to the Block Device Characteristics
 * page.
 */
static void
dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb)
{
	struct scsi_vpd_block_limits *block_limits;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	uint32_t  priority;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeblklimits\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;
	block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint32_t max_txfer_len = scsi_4btoul(
			block_limits->max_txfer_len);
		uint32_t max_unmap_lba_cnt = scsi_4btoul(
			block_limits->max_unmap_lba_cnt);
		uint32_t max_unmap_blk_cnt = scsi_4btoul(
			block_limits->max_unmap_blk_cnt);
		uint32_t unmap_gran = scsi_4btoul(
			block_limits->opt_unmap_grain);
		uint32_t unmap_gran_align = scsi_4btoul(
			block_limits->unmap_grain_align);
		uint64_t ws_max_blks = scsi_8btou64(
			block_limits->max_write_same_length);

		/* Clamp the disk's maximum I/O size to the device limit. */
		if (max_txfer_len != 0) {
			softc->disk->d_maxsize = MIN(softc->maxio,
			    (off_t)max_txfer_len * softc->params.secsize);
		}

		/*
		 * We should already support UNMAP but we check lba
		 * and block count to be sure
		 */
		if (max_unmap_lba_cnt != 0x00L &&
		    max_unmap_blk_cnt != 0x00L) {
			softc->unmap_max_lba = max_unmap_lba_cnt;
			softc->unmap_max_ranges = min(max_unmap_blk_cnt,
				UNMAP_MAX_RANGES);
			if (unmap_gran > 1) {
				softc->unmap_gran = unmap_gran;
				/*
				 * The top bit of the alignment field flags
				 * the remaining 31 bits as valid.
				 */
				if (unmap_gran_align & 0x80000000) {
					softc->unmap_gran_align =
					    unmap_gran_align & 0x7fffffff;
				}
			}
		} else {
			/*
			 * Unexpected UNMAP limits which means the
			 * device doesn't actually support UNMAP
			 */
			dadeleteflag(softc, DA_DELETE_UNMAP, 0);
		}

		if (ws_max_blks != 0x00L)
			softc->ws_max_blks = ws_max_blks;
	} else {
		int error;
		error = daerror(done_ccb, CAM_RETRY_SELTO,
				SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}

			/*
			 * Failure here doesn't mean UNMAP is not
			 * supported as this is an optional page.
			 */
			softc->unmap_max_lba = 1;
			softc->unmap_max_ranges = 1;
		}
	}

	free(block_limits, M_SCSIDA);
	softc->state = DA_STATE_PROBE_BDC;
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, priority);
	return;
}
5217
/*
 * Completion handler for the Block Device Characteristics VPD page
 * (0xB1) probe.
 *
 * Records the medium rotation rate (disabling I/O queue sorting for
 * non-rotating media) and, when the Zoned field is present, classifies
 * Host Aware / Drive Managed zoned devices.  The probe then advances
 * to the ATA IDENTIFY step.
 */
static void
dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb)
{
	struct scsi_vpd_block_device_characteristics *bdc;
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	uint32_t  priority;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probebdc\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;
	bdc = (struct scsi_vpd_block_device_characteristics *)csio->data_ptr;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint32_t valid_len;

		/*
		 * Disable queue sorting for non-rotational media
		 * by default.
		 */
		uint16_t old_rate = softc->disk->d_rotation_rate;

		/* Only trust fields the device actually transferred. */
		valid_len = csio->dxfer_len - csio->resid;
		if (SBDC_IS_PRESENT(bdc, valid_len,
		    medium_rotation_rate)) {
			softc->disk->d_rotation_rate =
				scsi_2btoul(bdc->medium_rotation_rate);
			if (softc->disk->d_rotation_rate == SVPD_NON_ROTATING) {
				cam_iosched_set_sort_queue(
				    softc->cam_iosched, 0);
				softc->flags &= ~DA_FLAG_ROTATING;
			}
			/* Notify GEOM consumers of a changed rate. */
			if (softc->disk->d_rotation_rate != old_rate) {
				disk_attr_changed(softc->disk,
				    "GEOM::rotation_rate", M_NOWAIT);
			}
		}
		if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
		 && (softc->zone_mode == DA_ZONE_NONE)) {
			int ata_proto;

			/*
			 * An ATA Information VPD page implies the device
			 * sits behind a SCSI-to-ATA translation layer.
			 */
			if (scsi_vpd_supported_page(periph,
			    SVPD_ATA_INFORMATION))
				ata_proto = 1;
			else
				ata_proto = 0;

			/*
			 * The Zoned field will only be set for
			 * Drive Managed and Host Aware drives.  If
			 * they are Host Managed, the device type
			 * in the standard INQUIRY data should be
			 * set to T_ZBC_HM (0x14).
			 */
			if ((bdc->flags & SVPD_ZBC_MASK) ==
			     SVPD_HAW_ZBC) {
				softc->zone_mode = DA_ZONE_HOST_AWARE;
				softc->zone_interface = (ata_proto) ?
				   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
			} else if ((bdc->flags & SVPD_ZBC_MASK) ==
			     SVPD_DM_ZBC) {
				softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
				softc->zone_interface = (ata_proto) ?
				   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
			} else if ((bdc->flags & SVPD_ZBC_MASK) !=
				  SVPD_ZBC_NR) {
				xpt_print(periph->path, "Unknown zoned "
				    "type %#x",
				    bdc->flags & SVPD_ZBC_MASK);
			}
		}
	} else {
		int error;
		error = daerror(done_ccb, CAM_RETRY_SELTO,
				SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(bdc, M_SCSIDA);
	softc->state = DA_STATE_PROBE_ATA;
	xpt_release_ccb(done_ccb);
	xpt_schedule(periph, priority);
	return;
}
5317
/*
 * Completion handler for the ATA IDENTIFY issued through SCSI/ATA
 * passthrough.
 *
 * Harvests DSM TRIM support, the media rotation rate, DMA and
 * general-purpose-log capabilities, and (if not already known) the
 * zoned-device class from the identify data.  For Host Aware / Host
 * Managed drives the probe continues with either the ATA log directory
 * (when the ATA passthrough zone interface will be used) or the SCSI
 * Zoned Block Device Characteristics page; otherwise probing is done.
 */
static void
dadone_probeata(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ata_params *ata_params;
	struct ccb_scsiio *csio;
	struct da_softc *softc;
	uint32_t  priority;
	int continue_probe;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeata\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;
	ata_params = (struct ata_params *)csio->data_ptr;
	continue_probe = 0;
	error = 0;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint16_t old_rate;

		ata_param_fixup(ata_params);
		/* Enable ATA TRIM unless a quirk forbids it. */
		if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
		    (softc->quirks & DA_Q_NO_UNMAP) == 0) {
			dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
			if (ata_params->max_dsm_blocks != 0)
				softc->trim_max_ranges = min(
				  softc->trim_max_ranges,
				  ata_params->max_dsm_blocks *
				  ATA_DSM_BLK_RANGES);
		}
		/*
		 * Disable queue sorting for non-rotational media
		 * by default.
		 */
		old_rate = softc->disk->d_rotation_rate;
		softc->disk->d_rotation_rate = ata_params->media_rotation_rate;
		if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) {
			cam_iosched_set_sort_queue(softc->cam_iosched, 0);
			softc->flags &= ~DA_FLAG_ROTATING;
		}
		/* Notify GEOM consumers of a changed rate. */
		if (softc->disk->d_rotation_rate != old_rate) {
			disk_attr_changed(softc->disk,
			    "GEOM::rotation_rate", M_NOWAIT);
		}

		cam_periph_assert(periph, MA_OWNED);
		if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
			softc->flags |= DA_FLAG_CAN_ATA_DMA;

		if (ata_params->support.extension & ATA_SUPPORT_GENLOG)
			softc->flags |= DA_FLAG_CAN_ATA_LOG;

		/*
		 * At this point, if we have a SATA host aware drive,
		 * we communicate via ATA passthrough unless the
		 * SAT layer supports ZBC -> ZAC translation.  In
		 * that case,
		 *
		 * XXX KDM figure out how to detect a host managed
		 * SATA drive.
		 */
		if (softc->zone_mode == DA_ZONE_NONE) {
			/*
			 * Note that we don't override the zone
			 * mode or interface if it has already been
			 * set.  This is because it has either been
			 * set as a quirk, or when we probed the
			 * SCSI Block Device Characteristics page,
			 * the zoned field was set.  The latter
			 * means that the SAT layer supports ZBC to
			 * ZAC translation, and we would prefer to
			 * use that if it is available.
			 */
			if ((ata_params->support3 &
			    ATA_SUPPORT_ZONE_MASK) ==
			    ATA_SUPPORT_ZONE_HOST_AWARE) {
				softc->zone_mode = DA_ZONE_HOST_AWARE;
				softc->zone_interface =
				    DA_ZONE_IF_ATA_PASS;
			} else if ((ata_params->support3 &
				    ATA_SUPPORT_ZONE_MASK) ==
				    ATA_SUPPORT_ZONE_DEV_MANAGED) {
				softc->zone_mode =DA_ZONE_DRIVE_MANAGED;
				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
			}
		}

	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
				SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
	 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
		/*
		 * If the ATA IDENTIFY failed, we could be talking
		 * to a SCSI drive, although that seems unlikely,
		 * since the drive did report that it supported the
		 * ATA Information VPD page.  If the ATA IDENTIFY
		 * succeeded, and the SAT layer doesn't support
		 * ZBC -> ZAC translation, continue on to get the
		 * directory of ATA logs, and complete the rest of
		 * the ZAC probe.  If the SAT layer does support
		 * ZBC -> ZAC translation, we want to use that,
		 * and we'll probe the SCSI Zoned Block Device
		 * Characteristics VPD page next.
		 */
		if ((error == 0)
		 && (softc->flags & DA_FLAG_CAN_ATA_LOG)
		 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
			softc->state = DA_STATE_PROBE_ATA_LOGDIR;
		else
			softc->state = DA_STATE_PROBE_ZONE;
		continue_probe = 1;
	}
	if (continue_probe != 0) {
		xpt_schedule(periph, priority);
		xpt_release_ccb(done_ccb);
		return;
	} else
		daprobedone(periph, done_ccb);
	return;
}
5457
/*
 * Completion handler for the ATA General Purpose Log directory read.
 *
 * Caches the log directory in the softc and determines whether the
 * Identify Device Data log is supported.  If it is, the ZAC probe
 * continues with the Identify Device Data log directory; otherwise
 * probing completes.
 */
static void
dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	uint32_t  priority;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatalogdir\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);
	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		error = 0;
		softc->valid_logdir_len = 0;
		bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
		/* Cache however much of the directory was transferred. */
		softc->valid_logdir_len = csio->dxfer_len - csio->resid;
		if (softc->valid_logdir_len > 0)
			bcopy(csio->data_ptr, &softc->ata_logdir,
			    min(softc->valid_logdir_len,
				sizeof(softc->ata_logdir)));
		/*
		 * Figure out whether the Identify Device log is
		 * supported.  The General Purpose log directory
		 * has a header, and lists the number of pages
		 * available for each GP log identified by the
		 * offset into the list.
		 */
		if ((softc->valid_logdir_len >=
		    ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
		 && (le16dec(softc->ata_logdir.header) ==
		     ATA_GP_LOG_DIR_VERSION)
		 && (le16dec(&softc->ata_logdir.num_pages[
		     (ATA_IDENTIFY_DATA_LOG *
		     sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)){
			softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
		} else {
			softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
		}
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
				SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			/*
			 * If we can't get the ATA log directory,
			 * then ATA logs are effectively not
			 * supported even if the bit is set in the
			 * identify data.
			 */
			softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
					  DA_FLAG_CAN_ATA_IDLOG);
			if ((done_ccb->ccb_h.status &
			     CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	if ((error == 0)
	 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
		softc->state = DA_STATE_PROBE_ATA_IDDIR;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	daprobedone(periph, done_ccb);
	return;
}
5538
/*
 * Completion handler for the ATA Identify Device Data log directory
 * read.
 *
 * Caches the directory in the softc and scans its entries for the
 * Supported Capabilities page and the Zoned Device Information page.
 * If the Supported Capabilities page is present, the probe continues
 * with it; otherwise probing completes.
 */
static void
dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	uint32_t  priority;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeataiddir\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		off_t entries_offset, max_entries;
		error = 0;

		softc->valid_iddir_len = 0;
		bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
		softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
				  DA_FLAG_CAN_ATA_ZONE);
		/* Cache however much of the directory was transferred. */
		softc->valid_iddir_len = csio->dxfer_len - csio->resid;
		if (softc->valid_iddir_len > 0)
			bcopy(csio->data_ptr, &softc->ata_iddir,
			    min(softc->valid_iddir_len,
				sizeof(softc->ata_iddir)));

		entries_offset =
		    __offsetof(struct ata_identify_log_pages,entries);
		max_entries = softc->valid_iddir_len - entries_offset;
		if ((softc->valid_iddir_len > (entries_offset + 1))
		 && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION)
		 && (softc->ata_iddir.entry_count > 0)) {
			int num_entries, i;

			num_entries = softc->ata_iddir.entry_count;
			/* Never scan past the data we actually received. */
			num_entries = min(num_entries,
			   softc->valid_iddir_len - entries_offset);
			for (i = 0; i < num_entries && i < max_entries; i++) {
				if (softc->ata_iddir.entries[i] ==
				    ATA_IDL_SUP_CAP)
					softc->flags |= DA_FLAG_CAN_ATA_SUPCAP;
				else if (softc->ata_iddir.entries[i] ==
					 ATA_IDL_ZDI)
					softc->flags |= DA_FLAG_CAN_ATA_ZONE;

				/* Stop early once both pages were found. */
				if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP)
				 && (softc->flags & DA_FLAG_CAN_ATA_ZONE))
					break;
			}
		}
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
				SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			/*
			 * If we can't get the ATA Identify Data log
			 * directory, then it effectively isn't
			 * supported even if the ATA Log directory
			 * a non-zero number of pages present for
			 * this log.
			 */
			softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
		softc->state = DA_STATE_PROBE_ATA_SUP;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	daprobedone(periph, done_ccb);
	return;
}
5629
/*
 * Completion handler for the ATA Identify Device Data Supported
 * Capabilities page read.
 *
 * Records the zoned-device class (if valid) and the set of supported
 * ZAC commands.  If the drive also has a Zoned Device Information
 * page, the probe continues with it; otherwise probing completes.
 */
static void
dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	uint32_t  priority;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatasup\n"));

	softc = (struct da_softc *)periph->softc;
	priority = done_ccb->ccb_h.pinfo.priority;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint32_t valid_len;
		size_t needed_size;
		struct ata_identify_log_sup_cap *sup_cap;
		error = 0;

		sup_cap = (struct ata_identify_log_sup_cap *)csio->data_ptr;
		/* Only trust fields the device actually transferred. */
		valid_len = csio->dxfer_len - csio->resid;
		needed_size = __offsetof(struct ata_identify_log_sup_cap,
		    sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
		if (valid_len >= needed_size) {
			uint64_t zoned, zac_cap;

			zoned = le64dec(sup_cap->zoned_cap);
			if (zoned & ATA_ZONED_VALID) {
				/*
				 * This should have already been
				 * set, because this is also in the
				 * ATA identify data.
				 */
				if ((zoned & ATA_ZONED_MASK) ==
				    ATA_SUPPORT_ZONE_HOST_AWARE)
					softc->zone_mode = DA_ZONE_HOST_AWARE;
				else if ((zoned & ATA_ZONED_MASK) ==
				    ATA_SUPPORT_ZONE_DEV_MANAGED)
					softc->zone_mode =
					    DA_ZONE_DRIVE_MANAGED;
			}

			zac_cap = le64dec(sup_cap->sup_zac_cap);
			if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
				if (zac_cap & ATA_REPORT_ZONES_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_RZ_SUP;
				if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_OPEN_SUP;
				if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_CLOSE_SUP;
				if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_FINISH_SUP;
				if (zac_cap & ATA_ND_RWP_SUP)
					softc->zone_flags |=
					    DA_ZONE_FLAG_RWP_SUP;
			} else {
				/*
				 * This field was introduced in
				 * ACS-4, r08 on April 28th, 2015.
				 * If the drive firmware was written
				 * to an earlier spec, it won't have
				 * the field.  So, assume all
				 * commands are supported.
				 */
				softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
			}
		}
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
				SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			/*
			 * If we can't get the ATA Identify Data
			 * Supported Capabilities page, clear the
			 * flag...
			 */
			softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
			/*
			 * And clear zone capabilities.
			 */
			softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
		softc->state = DA_STATE_PROBE_ATA_ZONE;
		xpt_release_ccb(done_ccb);
		xpt_schedule(periph, priority);
		return;
	}
	daprobedone(periph, done_ccb);
	return;
}
5742
5743static void
5744dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb)
5745{
5746	struct da_softc *softc;
5747	struct ccb_scsiio *csio;
5748	int error;
5749
5750	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatazone\n"));
5751
5752	softc = (struct da_softc *)periph->softc;
5753	csio = &done_ccb->csio;
5754
5755	cam_periph_assert(periph, MA_OWNED);
5756
5757	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5758		struct ata_zoned_info_log *zi_log;
5759		uint32_t valid_len;
5760		size_t needed_size;
5761
5762		zi_log = (struct ata_zoned_info_log *)csio->data_ptr;
5763
5764		valid_len = csio->dxfer_len - csio->resid;
5765		needed_size = __offsetof(struct ata_zoned_info_log,
5766		    version_info) + 1 + sizeof(zi_log->version_info);
5767		if (valid_len >= needed_size) {
5768			uint64_t tmpvar;
5769
5770			tmpvar = le64dec(zi_log->zoned_cap);
5771			if (tmpvar & ATA_ZDI_CAP_VALID) {
5772				if (tmpvar & ATA_ZDI_CAP_URSWRZ)
5773					softc->zone_flags |=
5774					    DA_ZONE_FLAG_URSWRZ;
5775				else
5776					softc->zone_flags &=
5777					    ~DA_ZONE_FLAG_URSWRZ;
5778			}
5779			tmpvar = le64dec(zi_log->optimal_seq_zones);
5780			if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
5781				softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
5782				softc->optimal_seq_zones = (tmpvar &
5783				    ATA_ZDI_OPT_SEQ_MASK);
5784			} else {
5785				softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET;
5786				softc->optimal_seq_zones = 0;
5787			}
5788
5789			tmpvar =le64dec(zi_log->optimal_nonseq_zones);
5790			if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
5791				softc->zone_flags |=
5792				    DA_ZONE_FLAG_OPT_NONSEQ_SET;
5793				softc->optimal_nonseq_zones =
5794				    (tmpvar & ATA_ZDI_OPT_NS_MASK);
5795			} else {
5796				softc->zone_flags &=
5797				    ~DA_ZONE_FLAG_OPT_NONSEQ_SET;
5798				softc->optimal_nonseq_zones = 0;
5799			}
5800
5801			tmpvar = le64dec(zi_log->max_seq_req_zones);
5802			if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
5803				softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
5804				softc->max_seq_zones =
5805				    (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
5806			} else {
5807				softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET;
5808				softc->max_seq_zones = 0;
5809			}
5810		}
5811	} else {
5812		error = daerror(done_ccb, CAM_RETRY_SELTO,
5813				SF_RETRY_UA|SF_NO_PRINT);
5814		if (error == ERESTART)
5815			return;
5816		else if (error != 0) {
5817			softc->flags &= ~DA_FLAG_CAN_ATA_ZONE;
5818			softc->flags &= ~DA_ZONE_FLAG_SET_MASK;
5819
5820			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5821				/* Don't wedge this device's queue */
5822				cam_release_devq(done_ccb->ccb_h.path,
5823						 /*relsim_flags*/0,
5824						 /*reduction*/0,
5825						 /*timeout*/0,
5826						 /*getcount_only*/0);
5827			}
5828		}
5829	}
5830
5831	free(csio->data_ptr, M_SCSIDA);
5832
5833	daprobedone(periph, done_ccb);
5834	return;
5835}
5836
/*
 * Completion handler for the SCSI Zoned Block Device Characteristics
 * VPD page (0xB6) probe.
 *
 * Records unrestricted-read support and the optimal/maximum zone
 * counts, and marks all zone commands as supported (they are mandatory
 * for SCSI zoned devices).  Probing completes in either case.
 */
static void
dadone_probezone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;
	int error;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probezone\n"));

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;

	cam_periph_assert(periph, MA_OWNED);

	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		uint32_t valid_len;
		size_t needed_len;
		struct scsi_vpd_zoned_bdc *zoned_bdc;

		error = 0;
		zoned_bdc = (struct scsi_vpd_zoned_bdc *)csio->data_ptr;
		/* Only trust fields the device actually transferred. */
		valid_len = csio->dxfer_len - csio->resid;
		needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
		    max_seq_req_zones) + 1 +
		    sizeof(zoned_bdc->max_seq_req_zones);
		if ((valid_len >= needed_len)
		 && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) {
			if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ)
				softc->zone_flags |= DA_ZONE_FLAG_URSWRZ;
			else
				softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ;
			softc->optimal_seq_zones =
			    scsi_4btoul(zoned_bdc->optimal_seq_zones);
			softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
			softc->optimal_nonseq_zones = scsi_4btoul(
			    zoned_bdc->optimal_nonseq_zones);
			softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET;
			softc->max_seq_zones =
			    scsi_4btoul(zoned_bdc->max_seq_req_zones);
			softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
		}
		/*
		 * All of the zone commands are mandatory for SCSI
		 * devices.
		 *
		 * XXX KDM this is valid as of September 2015.
		 * Re-check this assumption once the SAT spec is
		 * updated to support SCSI ZBC to ATA ZAC mapping.
		 * Since ATA allows zone commands to be reported
		 * as supported or not, this may not necessarily
		 * be true for an ATA device behind a SAT (SCSI to
		 * ATA Translation) layer.
		 */
		softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
	} else {
		error = daerror(done_ccb, CAM_RETRY_SELTO,
				SF_RETRY_UA|SF_NO_PRINT);
		if (error == ERESTART)
			return;
		else if (error != 0) {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
			}
		}
	}

	free(csio->data_ptr, M_SCSIDA);

	daprobedone(periph, done_ccb);
	return;
}
5913
5914static void
5915dadone_tur(struct cam_periph *periph, union ccb *done_ccb)
5916{
5917	struct da_softc *softc;
5918
5919	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_tur\n"));
5920
5921	softc = (struct da_softc *)periph->softc;
5922
5923	cam_periph_assert(periph, MA_OWNED);
5924
5925	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5926		if (daerror(done_ccb, CAM_RETRY_SELTO,
5927		    SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART)
5928			return;	/* Will complete again, keep reference */
5929		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5930			cam_release_devq(done_ccb->ccb_h.path,
5931					 /*relsim_flags*/0,
5932					 /*reduction*/0,
5933					 /*timeout*/0,
5934					 /*getcount_only*/0);
5935	}
5936	softc->flags &= ~DA_FLAG_TUR_PENDING;
5937	xpt_release_ccb(done_ccb);
5938	da_periph_release_locked(periph, DA_REF_TUR);
5939	return;
5940}
5941
5942static void
5943dareprobe(struct cam_periph *periph)
5944{
5945	struct da_softc	  *softc;
5946	int status __diagused;
5947
5948	softc = (struct da_softc *)periph->softc;
5949
5950	cam_periph_assert(periph, MA_OWNED);
5951
5952	/* Probe in progress; don't interfere. */
5953	if (softc->state != DA_STATE_NORMAL)
5954		return;
5955
5956	status = da_periph_acquire(periph, DA_REF_REPROBE);
5957	KASSERT(status == 0, ("dareprobe: cam_periph_acquire failed"));
5958
5959	softc->state = DA_STATE_PROBE_WP;
5960	xpt_schedule(periph, CAM_PRIORITY_DEV);
5961}
5962
/*
 * Common error recovery for da(4) commands.
 *
 * Upgrades devices that reject READ(6)/WRITE(6) to 10-byte CDBs,
 * schedules a reprobe on capacity/INQUIRY-changed unit attentions,
 * reports media arrival/departure to the disk layer, accounts errors,
 * and then defers to the generic CAM error recovery code.
 *
 * Returns ERESTART when the command will be retried, otherwise the
 * value from cam_periph_error().
 */
static int
daerror(union ccb *ccb, uint32_t cam_flags, uint32_t sense_flags)
{
	struct da_softc	  *softc;
	struct cam_periph *periph;
	int error, error_code, sense_key, asc, ascq;

#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
	if (ccb->csio.bio != NULL)
		biotrack(ccb->csio.bio, __func__);
#endif

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	cam_periph_assert(periph, MA_OWNED);

	/*
	 * Automatically detect devices that do not support
	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
	 */
	error = 0;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
		error = cmd6workaround(ccb);
	} else if (scsi_extract_sense_ccb(ccb,
	    &error_code, &sense_key, &asc, &ascq)) {
		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
			error = cmd6workaround(ccb);
		/*
		 * If the target replied with CAPACITY DATA HAS CHANGED UA,
		 * query the capacity and notify upper layers.
		 */
		else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x2A && ascq == 0x09) {
			xpt_print(periph->path, "Capacity data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x28 && ascq == 0x00) {
			/* Medium may have changed; tell the disk layer. */
			softc->flags &= ~DA_FLAG_PROBED;
			disk_media_changed(softc->disk, M_NOWAIT);
		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
		    asc == 0x3F && ascq == 0x03) {
			xpt_print(periph->path, "INQUIRY data has changed\n");
			softc->flags &= ~DA_FLAG_PROBED;
			dareprobe(periph);
			sense_flags |= SF_NO_PRINT;
		} else if (sense_key == SSD_KEY_NOT_READY &&
		    asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
			/* Medium not present: mark the pack invalid. */
			softc->flags |= DA_FLAG_PACK_INVALID;
			disk_media_gone(softc->disk, M_NOWAIT);
		}
	}
	if (error == ERESTART)
		return (ERESTART);

#ifdef CAM_IO_STATS
	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
	case CAM_CMD_TIMEOUT:
		softc->timeouts++;
		break;
	case CAM_REQ_ABORTED:
	case CAM_REQ_CMP_ERR:
	case CAM_REQ_TERMIO:
	case CAM_UNREC_HBA_ERROR:
	case CAM_DATA_RUN_ERR:
	case CAM_SCSI_STATUS_ERROR:
	case CAM_ATA_STATUS_ERROR:
		softc->errors++;
		break;
	default:
		break;
	}
#endif

	/*
	 * XXX
	 * Until we have a better way of doing pack validation,
	 * don't treat UAs as errors.
	 */
	sense_flags |= SF_RETRY_UA;

	if (softc->quirks & DA_Q_RETRY_BUSY)
		sense_flags |= SF_RETRY_BUSY;
	return(cam_periph_error(ccb, cam_flags, sense_flags));
}
6050
6051static void
6052damediapoll(void *arg)
6053{
6054	struct cam_periph *periph = arg;
6055	struct da_softc *softc = periph->softc;
6056
6057	if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
6058	    (softc->flags & DA_FLAG_TUR_PENDING) == 0 &&
6059	    softc->state == DA_STATE_NORMAL &&
6060	    LIST_EMPTY(&softc->pending_ccbs)) {
6061		if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
6062			cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
6063			daschedule(periph);
6064		}
6065	}
6066
6067	/* Queue us up again */
6068	if (da_poll_period != 0) {
6069		callout_schedule_sbt(&softc->mediapoll_c,
6070		    da_poll_period * SBT_1S, 0, C_PREL(1));
6071	}
6072}
6073
6074static void
6075daprevent(struct cam_periph *periph, int action)
6076{
6077	struct	da_softc *softc;
6078	union	ccb *ccb;
6079	int	error;
6080
6081	cam_periph_assert(periph, MA_OWNED);
6082	softc = (struct da_softc *)periph->softc;
6083
6084	if (((action == PR_ALLOW)
6085	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
6086	 || ((action == PR_PREVENT)
6087	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
6088		return;
6089	}
6090
6091	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
6092
6093	scsi_prevent(&ccb->csio,
6094		     /*retries*/1,
6095		     /*cbcfp*/NULL,
6096		     MSG_SIMPLE_Q_TAG,
6097		     action,
6098		     SSD_FULL_SIZE,
6099		     5000);
6100
6101	error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
6102	    SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);
6103
6104	if (error == 0) {
6105		if (action == PR_ALLOW)
6106			softc->flags &= ~DA_FLAG_PACK_LOCKED;
6107		else
6108			softc->flags |= DA_FLAG_PACK_LOCKED;
6109	}
6110
6111	xpt_release_ccb(ccb);
6112}
6113
/*
 * Recompute disk parameters and geometry after a (re)read of the device
 * capacity, and propagate the results to the disk(9) layer.
 *
 * block_len/maxsector come from READ CAPACITY; rcaplong, when non-NULL,
 * points to the long-form READ CAPACITY(16) payload (rcap_len valid
 * bytes) carrying physical-block and protection information.
 */
static void
dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
	  struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
{
	struct ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;
	u_int lbppbe, lalba;
	int error;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = block_len;
	dp->sectors = maxsector + 1;
	if (rcaplong != NULL) {
		/*
		 * Long-form data: extract the logical-blocks-per-physical-
		 * block exponent, the lowest aligned LBA, and the
		 * protection type (0 = protection disabled, otherwise
		 * P_TYPE + 1).
		 */
		lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
		lalba = scsi_2btoul(rcaplong->lalba_lbp);
		lalba &= SRC16_LALBA_A;
		if (rcaplong->prot & SRC16_PROT_EN)
			softc->p_type = ((rcaplong->prot & SRC16_P_TYPE) >>
			    SRC16_P_TYPE_SHIFT) + 1;
		else
			softc->p_type = 0;
	} else {
		lbppbe = 0;
		lalba = 0;
		softc->p_type = 0;
	}

	/*
	 * Derive the stripe size/offset: prefer the device-reported
	 * physical block exponent, then the 4K quirk, then the UNMAP
	 * granularity; otherwise no striping is advertised.
	 */
	if (lbppbe > 0) {
		dp->stripesize = block_len << lbppbe;
		dp->stripeoffset = (dp->stripesize - block_len * lalba) %
		    dp->stripesize;
	} else if (softc->quirks & DA_Q_4K) {
		dp->stripesize = 4096;
		dp->stripeoffset = 0;
	} else if (softc->unmap_gran != 0) {
		dp->stripesize = block_len * softc->unmap_gran;
		dp->stripeoffset = (dp->stripesize - block_len *
		    softc->unmap_gran_align) % dp->stripesize;
	} else {
		dp->stripesize = 0;
		dp->stripeoffset = 0;
	}
	/*
	 * Have the controller provide us with a geometry
	 * for this disk.  The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	memset(&ccg, 0, sizeof(ccg));
	xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb*)&ccg);
	if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/*
		 * We don't know what went wrong here- but just pick
		 * a geometry so we don't have nasty things like divide
		 * by zero.
		 */
		dp->heads = 255;
		dp->secs_per_track = 255;
		dp->cylinders = dp->sectors / (255 * 255);
		if (dp->cylinders == 0) {
			dp->cylinders = 1;
		}
	} else {
		dp->heads = ccg.heads;
		dp->secs_per_track = ccg.secs_per_track;
		dp->cylinders = ccg.cylinders;
	}

	/*
	 * If the user supplied a read capacity buffer, and if it is
	 * different than the previous buffer, update the data in the EDT.
	 * If it's the same, we don't bother.  This avoids sending an
	 * update every time someone opens this device.
	 */
	if ((rcaplong != NULL)
	 && (bcmp(rcaplong, &softc->rcaplong,
		  min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
		struct ccb_dev_advinfo cdai;

		memset(&cdai, 0, sizeof(cdai));
		xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
		cdai.buftype = CDAI_TYPE_RCAPLONG;
		cdai.flags = CDAI_FLAG_STORE;
		cdai.bufsiz = rcap_len;
		cdai.buf = (uint8_t *)rcaplong;
		xpt_action((union ccb *)&cdai);
		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
		if (cdai.ccb_h.status != CAM_REQ_CMP) {
			xpt_print(periph->path, "%s: failed to set read "
				  "capacity advinfo\n", __func__);
			/* Use cam_error_print() to decode the status */
			cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
					CAM_EPF_ALL);
		} else {
			/* Cache the new data so later calls can compare. */
			bcopy(rcaplong, &softc->rcaplong,
			      min(sizeof(softc->rcaplong), rcap_len));
		}
	}

	/* Propagate the computed parameters to the disk(9) consumer. */
	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
	softc->disk->d_stripesize = softc->params.stripesize;
	softc->disk->d_stripeoffset = softc->params.stripeoffset;
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;
	softc->disk->d_devstat->block_size = softc->params.secsize;
	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;

	error = disk_resize(softc->disk, M_NOWAIT);
	if (error != 0)
		xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
}
6241
6242static void
6243dasendorderedtag(void *arg)
6244{
6245	struct cam_periph *periph = arg;
6246	struct da_softc *softc = periph->softc;
6247
6248	cam_periph_assert(periph, MA_OWNED);
6249	if (da_send_ordered) {
6250		if (!LIST_EMPTY(&softc->pending_ccbs)) {
6251			if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
6252				softc->flags |= DA_FLAG_NEED_OTAG;
6253			softc->flags &= ~DA_FLAG_WAS_OTAG;
6254		}
6255	}
6256
6257	/* Queue us up again */
6258	callout_schedule_sbt(&softc->sendordered_c,
6259	    SBT_1S / DA_ORDEREDTAG_INTERVAL * da_default_timeout, 0,
6260	    C_PREL(1));
6261}
6262
6263/*
6264 * Step through all DA peripheral drivers, and if the device is still open,
6265 * sync the disk cache to physical media.
6266 */
6267static void
6268dashutdown(void * arg, int howto)
6269{
6270	struct cam_periph *periph;
6271	struct da_softc *softc;
6272	union ccb *ccb;
6273	int error;
6274
6275	if ((howto & RB_NOSYNC) != 0)
6276		return;
6277
6278	CAM_PERIPH_FOREACH(periph, &dadriver) {
6279		softc = (struct da_softc *)periph->softc;
6280		if (SCHEDULER_STOPPED()) {
6281			/* If we paniced with the lock held, do not recurse. */
6282			if (!cam_periph_owned(periph) &&
6283			    (softc->flags & DA_FLAG_OPEN)) {
6284				dadump(softc->disk, NULL, 0, 0);
6285			}
6286			continue;
6287		}
6288		cam_periph_lock(periph);
6289
6290		/*
6291		 * We only sync the cache if the drive is still open, and
6292		 * if the drive is capable of it..
6293		 */
6294		if (((softc->flags & DA_FLAG_OPEN) == 0)
6295		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
6296			cam_periph_unlock(periph);
6297			continue;
6298		}
6299
6300		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
6301		scsi_synchronize_cache(&ccb->csio,
6302				       /*retries*/0,
6303				       /*cbfcnp*/NULL,
6304				       MSG_SIMPLE_Q_TAG,
6305				       /*begin_lba*/0, /* whole disk */
6306				       /*lb_count*/0,
6307				       SSD_FULL_SIZE,
6308				       60 * 60 * 1000);
6309
6310		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
6311		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
6312		    softc->disk->d_devstat);
6313		if (error != 0)
6314			xpt_print(periph->path, "Synchronize cache failed\n");
6315		xpt_release_ccb(ccb);
6316		cam_periph_unlock(periph);
6317	}
6318}
6319
6320#else /* !_KERNEL */
6321
6322/*
6323 * XXX These are only left out of the kernel build to silence warnings.  If,
6324 * for some reason these functions are used in the kernel, the ifdefs should
6325 * be moved so they are included both in the kernel and userland.
6326 */
6327void
6328scsi_format_unit(struct ccb_scsiio *csio, uint32_t retries,
6329		 void (*cbfcnp)(struct cam_periph *, union ccb *),
6330		 uint8_t tag_action, uint8_t byte2, uint16_t ileave,
6331		 uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len,
6332		 uint32_t timeout)
6333{
6334	struct scsi_format_unit *scsi_cmd;
6335
6336	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
6337	scsi_cmd->opcode = FORMAT_UNIT;
6338	scsi_cmd->byte2 = byte2;
6339	scsi_ulto2b(ileave, scsi_cmd->interleave);
6340
6341	cam_fill_csio(csio,
6342		      retries,
6343		      cbfcnp,
6344		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6345		      tag_action,
6346		      data_ptr,
6347		      dxfer_len,
6348		      sense_len,
6349		      sizeof(*scsi_cmd),
6350		      timeout);
6351}
6352
6353void
6354scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
6355		  void (*cbfcnp)(struct cam_periph *, union ccb *),
6356		  uint8_t tag_action, uint8_t list_format,
6357		  uint32_t addr_desc_index, uint8_t *data_ptr,
6358		  uint32_t dxfer_len, int minimum_cmd_size,
6359		  uint8_t sense_len, uint32_t timeout)
6360{
6361	uint8_t cdb_len;
6362
6363	/*
6364	 * These conditions allow using the 10 byte command.  Otherwise we
6365	 * need to use the 12 byte command.
6366	 */
6367	if ((minimum_cmd_size <= 10)
6368	 && (addr_desc_index == 0)
6369	 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
6370		struct scsi_read_defect_data_10 *cdb10;
6371
6372		cdb10 = (struct scsi_read_defect_data_10 *)
6373			&csio->cdb_io.cdb_bytes;
6374
6375		cdb_len = sizeof(*cdb10);
6376		bzero(cdb10, cdb_len);
6377                cdb10->opcode = READ_DEFECT_DATA_10;
6378                cdb10->format = list_format;
6379                scsi_ulto2b(dxfer_len, cdb10->alloc_length);
6380	} else {
6381		struct scsi_read_defect_data_12 *cdb12;
6382
6383		cdb12 = (struct scsi_read_defect_data_12 *)
6384			&csio->cdb_io.cdb_bytes;
6385
6386		cdb_len = sizeof(*cdb12);
6387		bzero(cdb12, cdb_len);
6388                cdb12->opcode = READ_DEFECT_DATA_12;
6389                cdb12->format = list_format;
6390                scsi_ulto4b(dxfer_len, cdb12->alloc_length);
6391		scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
6392	}
6393
6394	cam_fill_csio(csio,
6395		      retries,
6396		      cbfcnp,
6397		      /*flags*/ CAM_DIR_IN,
6398		      tag_action,
6399		      data_ptr,
6400		      dxfer_len,
6401		      sense_len,
6402		      cdb_len,
6403		      timeout);
6404}
6405
6406void
6407scsi_sanitize(struct ccb_scsiio *csio, uint32_t retries,
6408	      void (*cbfcnp)(struct cam_periph *, union ccb *),
6409	      uint8_t tag_action, uint8_t byte2, uint16_t control,
6410	      uint8_t *data_ptr, uint32_t dxfer_len, uint8_t sense_len,
6411	      uint32_t timeout)
6412{
6413	struct scsi_sanitize *scsi_cmd;
6414
6415	scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
6416	scsi_cmd->opcode = SANITIZE;
6417	scsi_cmd->byte2 = byte2;
6418	scsi_cmd->control = control;
6419	scsi_ulto2b(dxfer_len, scsi_cmd->length);
6420
6421	cam_fill_csio(csio,
6422		      retries,
6423		      cbfcnp,
6424		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6425		      tag_action,
6426		      data_ptr,
6427		      dxfer_len,
6428		      sense_len,
6429		      sizeof(*scsi_cmd),
6430		      timeout);
6431}
6432
6433#endif /* _KERNEL */
6434
6435void
6436scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
6437	     void (*cbfcnp)(struct cam_periph *, union ccb *),
6438	     uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
6439	     uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
6440	     uint8_t sense_len, uint32_t timeout)
6441{
6442	struct scsi_zbc_out *scsi_cmd;
6443
6444	scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
6445	scsi_cmd->opcode = ZBC_OUT;
6446	scsi_cmd->service_action = service_action;
6447	scsi_u64to8b(zone_id, scsi_cmd->zone_id);
6448	scsi_cmd->zone_flags = zone_flags;
6449
6450	cam_fill_csio(csio,
6451		      retries,
6452		      cbfcnp,
6453		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6454		      tag_action,
6455		      data_ptr,
6456		      dxfer_len,
6457		      sense_len,
6458		      sizeof(*scsi_cmd),
6459		      timeout);
6460}
6461
6462void
6463scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
6464	    void (*cbfcnp)(struct cam_periph *, union ccb *),
6465	    uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
6466	    uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
6467	    uint8_t sense_len, uint32_t timeout)
6468{
6469	struct scsi_zbc_in *scsi_cmd;
6470
6471	scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
6472	scsi_cmd->opcode = ZBC_IN;
6473	scsi_cmd->service_action = service_action;
6474	scsi_ulto4b(dxfer_len, scsi_cmd->length);
6475	scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
6476	scsi_cmd->zone_options = zone_options;
6477
6478	cam_fill_csio(csio,
6479		      retries,
6480		      cbfcnp,
6481		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
6482		      tag_action,
6483		      data_ptr,
6484		      dxfer_len,
6485		      sense_len,
6486		      sizeof(*scsi_cmd),
6487		      timeout);
6488
6489}
6490
/*
 * Build a SCSI ATA PASS-THROUGH CCB wrapping a ZAC MANAGEMENT OUT
 * command, either as plain DMA (ZAC MANAGEMENT OUT) or, when use_ncq is
 * set, as NCQ (ATA NCQ NON DATA / SEND FPDMA QUEUED depending on whether
 * data is transferred).
 *
 * Returns 0 on success, or 1 if the requested transfer length cannot be
 * encoded in the NCQ FEATURE field.
 *
 * NOTE(review): the sense_len argument is not used; SSD_FULL_SIZE is
 * always passed to scsi_ata_pass() — confirm whether that is intended.
 */
int
scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
		      void (*cbfcnp)(struct cam_periph *, union ccb *),
		      uint8_t tag_action, int use_ncq,
		      uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
		      uint8_t *data_ptr, uint32_t dxfer_len,
		      uint8_t *cdb_storage, size_t cdb_storage_len,
		      uint8_t sense_len, uint32_t timeout)
{
	uint8_t command_out, protocol, ata_flags;
	uint16_t features_out;
	uint32_t sectors_out, auxiliary;
	int retval;

	retval = 0;

	if (use_ncq == 0) {
		command_out = ATA_ZAC_MANAGEMENT_OUT;
		/* Zone action in the low nibble, flags in the high byte. */
		features_out = (zm_action & 0xf) | (zone_flags << 8);
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			protocol = AP_PROTO_NON_DATA;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
			sectors_out = 0;
		} else {
			protocol = AP_PROTO_DMA;
			ata_flags |= AP_FLAG_TLEN_SECT_CNT |
				     AP_FLAG_TDIR_TO_DEV;
			sectors_out = ((dxfer_len >> 9) & 0xffff);
		}
		auxiliary = 0;
	} else {
		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
		if (dxfer_len == 0) {
			command_out = ATA_NCQ_NON_DATA;
			features_out = ATA_NCQ_ZAC_MGMT_OUT;
			/*
			 * We're assuming the SCSI to ATA translation layer
			 * will set the NCQ tag number in the tag field.
			 * That isn't clear from the SAT-4 spec (as of rev 05).
			 */
			sectors_out = 0;
			ata_flags |= AP_FLAG_TLEN_NO_DATA;
		} else {
			command_out = ATA_SEND_FPDMA_QUEUED;
			/*
			 * Note that we're defaulting to normal priority,
			 * and assuming that the SCSI to ATA translation
			 * layer will insert the NCQ tag number in the tag
			 * field.  That isn't clear in the SAT-4 spec (as
			 * of rev 05).
			 */
			sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;

			ata_flags |= AP_FLAG_TLEN_FEAT |
				     AP_FLAG_TDIR_TO_DEV;

			/*
			 * For SEND FPDMA QUEUED, the transfer length is
			 * encoded in the FEATURE register, and 0 means
			 * that 65536 512 byte blocks are to be transferred.
			 * In practice, it seems unlikely that we'll see
			 * a transfer that large, and it may confuse the
			 * SAT layer, because generally that means that
			 * 0 bytes should be transferred.
			 */
			if (dxfer_len == (65536 * 512)) {
				features_out = 0;
			} else if (dxfer_len <= (65535 * 512)) {
				features_out = ((dxfer_len >> 9) & 0xffff);
			} else {
				/* The transfer is too big. */
				retval = 1;
				goto bailout;
			}
		}

		/* For NCQ the action/flags travel in the AUXILIARY field. */
		auxiliary = (zm_action & 0xf) | (zone_flags << 8);
		protocol = AP_PROTO_FPDMA;
	}

	/* 48-bit (extended) command in all cases. */
	protocol |= AP_EXTEND;

	retval = scsi_ata_pass(csio,
	    retries,
	    cbfcnp,
	    /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
	    tag_action,
	    /*protocol*/ protocol,
	    /*ata_flags*/ ata_flags,
	    /*features*/ features_out,
	    /*sector_count*/ sectors_out,
	    /*lba*/ zone_id,
	    /*command*/ command_out,
	    /*device*/ 0,
	    /*icc*/ 0,
	    /*auxiliary*/ auxiliary,
	    /*control*/ 0,
	    /*data_ptr*/ data_ptr,
	    /*dxfer_len*/ dxfer_len,
	    /*cdb_storage*/ cdb_storage,
	    /*cdb_storage_len*/ cdb_storage_len,
	    /*minimum_cmd_size*/ 0,
	    /*sense_len*/ SSD_FULL_SIZE,
	    /*timeout*/ timeout);

bailout:

	return (retval);
}
6601
6602int
6603scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
6604		     void (*cbfcnp)(struct cam_periph *, union ccb *),
6605		     uint8_t tag_action, int use_ncq,
6606		     uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
6607		     uint8_t *data_ptr, uint32_t dxfer_len,
6608		     uint8_t *cdb_storage, size_t cdb_storage_len,
6609		     uint8_t sense_len, uint32_t timeout)
6610{
6611	uint8_t command_out, protocol;
6612	uint16_t features_out, sectors_out;
6613	uint32_t auxiliary;
6614	int ata_flags;
6615	int retval;
6616
6617	retval = 0;
6618	ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS;
6619
6620	if (use_ncq == 0) {
6621		command_out = ATA_ZAC_MANAGEMENT_IN;
6622		/* XXX KDM put a macro here */
6623		features_out = (zm_action & 0xf) | (zone_flags << 8);
6624		sectors_out = dxfer_len >> 9; /* XXX KDM macro */
6625		protocol = AP_PROTO_DMA;
6626		ata_flags |= AP_FLAG_TLEN_SECT_CNT;
6627		auxiliary = 0;
6628	} else {
6629		ata_flags |= AP_FLAG_TLEN_FEAT;
6630
6631		command_out = ATA_RECV_FPDMA_QUEUED;
6632		sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
6633
6634		/*
6635		 * For RECEIVE FPDMA QUEUED, the transfer length is
6636		 * encoded in the FEATURE register, and 0 means
6637		 * that 65536 512 byte blocks are to be tranferred.
6638		 * In practice, it seems unlikely that we'll see
6639		 * a transfer that large, and it may confuse the
6640		 * the SAT layer, because generally that means that
6641		 * 0 bytes should be transferred.
6642		 */
6643		if (dxfer_len == (65536 * 512)) {
6644			features_out = 0;
6645		} else if (dxfer_len <= (65535 * 512)) {
6646			features_out = ((dxfer_len >> 9) & 0xffff);
6647		} else {
6648			/* The transfer is too big. */
6649			retval = 1;
6650			goto bailout;
6651		}
6652		auxiliary = (zm_action & 0xf) | (zone_flags << 8),
6653		protocol = AP_PROTO_FPDMA;
6654	}
6655
6656	protocol |= AP_EXTEND;
6657
6658	retval = scsi_ata_pass(csio,
6659	    retries,
6660	    cbfcnp,
6661	    /*flags*/ CAM_DIR_IN,
6662	    tag_action,
6663	    /*protocol*/ protocol,
6664	    /*ata_flags*/ ata_flags,
6665	    /*features*/ features_out,
6666	    /*sector_count*/ sectors_out,
6667	    /*lba*/ zone_id,
6668	    /*command*/ command_out,
6669	    /*device*/ 0,
6670	    /*icc*/ 0,
6671	    /*auxiliary*/ auxiliary,
6672	    /*control*/ 0,
6673	    /*data_ptr*/ data_ptr,
6674	    /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */
6675	    /*cdb_storage*/ cdb_storage,
6676	    /*cdb_storage_len*/ cdb_storage_len,
6677	    /*minimum_cmd_size*/ 0,
6678	    /*sense_len*/ SSD_FULL_SIZE,
6679	    /*timeout*/ timeout);
6680
6681bailout:
6682	return (retval);
6683}
6684