1/*-
2 * Implementation of SCSI Direct Access Peripheral driver for CAM.
3 *
4 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
5 *
6 * Copyright (c) 1997 Justin T. Gibbs.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions, and the following disclaimer,
14 *    without modification, immediately at the beginning of the file.
15 * 2. The name of the author may not be used to endorse or promote products
16 *    derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34#include <sys/param.h>
35
36#ifdef _KERNEL
37#include "opt_da.h"
38#include <sys/systm.h>
39#include <sys/kernel.h>
40#include <sys/bio.h>
41#include <sys/sysctl.h>
42#include <sys/taskqueue.h>
43#include <sys/lock.h>
44#include <sys/mutex.h>
45#include <sys/conf.h>
46#include <sys/devicestat.h>
47#include <sys/eventhandler.h>
48#include <sys/malloc.h>
49#include <sys/cons.h>
50#include <sys/endian.h>
51#include <sys/proc.h>
52#include <sys/sbuf.h>
53#include <geom/geom.h>
54#include <geom/geom_disk.h>
55#include <machine/atomic.h>
56#endif /* _KERNEL */
57
58#ifndef _KERNEL
59#include <stdio.h>
60#include <string.h>
61#endif /* _KERNEL */
62
63#include <cam/cam.h>
64#include <cam/cam_ccb.h>
65#include <cam/cam_periph.h>
66#include <cam/cam_xpt_periph.h>
67#ifdef _KERNEL
68#include <cam/cam_xpt_internal.h>
69#endif /* _KERNEL */
70#include <cam/cam_sim.h>
71#include <cam/cam_iosched.h>
72
73#include <cam/scsi/scsi_message.h>
74#include <cam/scsi/scsi_da.h>
75
76#ifdef _KERNEL
77/*
78 * Note that there are probe ordering dependencies here.  The order isn't
79 * controlled by this enumeration, but by explicit state transitions in
80 * dastart() and dadone().  Here are some of the dependencies:
81 *
82 * 1. RC should come first, before RC16, unless there is evidence that RC16
83 *    is supported.
84 * 2. BDC needs to come before any of the ATA probes, or the ZONE probe.
85 * 3. The ATA probes should go in this order:
86 *    ATA -> LOGDIR -> IDDIR -> SUP -> ATA_ZONE
87 */
88typedef enum {
89	DA_STATE_PROBE_WP,
90	DA_STATE_PROBE_RC,
91	DA_STATE_PROBE_RC16,
92	DA_STATE_PROBE_LBP,
93	DA_STATE_PROBE_BLK_LIMITS,
94	DA_STATE_PROBE_BDC,
95	DA_STATE_PROBE_ATA,
96	DA_STATE_PROBE_ATA_LOGDIR,
97	DA_STATE_PROBE_ATA_IDDIR,
98	DA_STATE_PROBE_ATA_SUP,
99	DA_STATE_PROBE_ATA_ZONE,
100	DA_STATE_PROBE_ZONE,
101	DA_STATE_NORMAL
102} da_state;
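
/*
 * For illustration only (the real transitions live in dastart() and
 * dadone()): a newly attached SCSI disk typically walks roughly
 *
 *	PROBE_WP -> PROBE_RC (or PROBE_RC16 once RC16 support is known) ->
 *	PROBE_LBP -> PROBE_BLK_LIMITS -> PROBE_BDC -> the ATA/ZONE probes ->
 *	NORMAL
 *
 * skipping any states that do not apply to the device at hand.
 */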
103
104typedef enum {
105	DA_FLAG_PACK_INVALID	= 0x000001,
106	DA_FLAG_NEW_PACK	= 0x000002,
107	DA_FLAG_PACK_LOCKED	= 0x000004,
108	DA_FLAG_PACK_REMOVABLE	= 0x000008,
109	DA_FLAG_ROTATING	= 0x000010,
110	DA_FLAG_NEED_OTAG	= 0x000020,
111	DA_FLAG_WAS_OTAG	= 0x000040,
112	DA_FLAG_RETRY_UA	= 0x000080,
113	DA_FLAG_OPEN		= 0x000100,
114	DA_FLAG_SCTX_INIT	= 0x000200,
115	DA_FLAG_CAN_RC16	= 0x000400,
116	DA_FLAG_PROBED		= 0x000800,
117	DA_FLAG_DIRTY		= 0x001000,
118	DA_FLAG_ANNOUNCED	= 0x002000,
119	DA_FLAG_CAN_ATA_DMA	= 0x004000,
120	DA_FLAG_CAN_ATA_LOG	= 0x008000,
121	DA_FLAG_CAN_ATA_IDLOG	= 0x010000,
122	DA_FLAG_CAN_ATA_SUPCAP	= 0x020000,
123	DA_FLAG_CAN_ATA_ZONE	= 0x040000,
124	DA_FLAG_TUR_PENDING	= 0x080000,
125	DA_FLAG_UNMAPPEDIO	= 0x100000
126} da_flags;
#define DA_FLAG_STRING		\
	"\020"			\
	"\001PACK_INVALID"	\
	"\002NEW_PACK"		\
	"\003PACK_LOCKED"	\
	"\004PACK_REMOVABLE"	\
	"\005ROTATING"		\
	"\006NEED_OTAG"		\
	"\007WAS_OTAG"		\
	"\010RETRY_UA"		\
	"\011OPEN"		\
	"\012SCTX_INIT"		\
	"\013CAN_RC16"		\
	"\014PROBED"		\
	"\015DIRTY"		\
	"\016ANNOUNCED"		\
	"\017CAN_ATA_DMA"	\
	"\020CAN_ATA_LOG"	\
	"\021CAN_ATA_IDLOG"	\
	"\022CAN_ATA_SUPCAP"	\
	"\023CAN_ATA_ZONE"	\
	"\024TUR_PENDING"	\
	"\025UNMAPPEDIO"
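
/*
 * A usage sketch, not a requirement of the format: DA_FLAG_STRING is a
 * printf(9)-style "%b" decoding string.  The leading "\020" selects base 16
 * for the numeric part and each "\<bit>NAME" pair names one bit, counting
 * from 1 at the LSB.  A consumer such as the flags sysctl handler can print
 * the whole word plus the set bits in one go, e.g.
 *
 *	sbuf_printf(&sb, "%b", (u_int)softc->flags, DA_FLAG_STRING);
 *
 * which renders something like "101<PACK_INVALID,OPEN>".
 */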
150
151typedef enum {
152	DA_Q_NONE		= 0x00,
153	DA_Q_NO_SYNC_CACHE	= 0x01,
154	DA_Q_NO_6_BYTE		= 0x02,
155	DA_Q_NO_PREVENT		= 0x04,
156	DA_Q_4K			= 0x08,
157	DA_Q_NO_RC16		= 0x10,
158	DA_Q_NO_UNMAP		= 0x20,
159	DA_Q_RETRY_BUSY		= 0x40,
160	DA_Q_SMR_DM		= 0x80,
161	DA_Q_STRICT_UNMAP	= 0x100,
162	DA_Q_128KB		= 0x200
163} da_quirks;
164
165#define DA_Q_BIT_STRING		\
166	"\020"			\
167	"\001NO_SYNC_CACHE"	\
168	"\002NO_6_BYTE"		\
169	"\003NO_PREVENT"	\
170	"\0044K"		\
171	"\005NO_RC16"		\
172	"\006NO_UNMAP"		\
173	"\007RETRY_BUSY"	\
174	"\010SMR_DM"		\
175	"\011STRICT_UNMAP"	\
176	"\012128KB"
177
178typedef enum {
179	DA_CCB_PROBE_RC		= 0x01,
180	DA_CCB_PROBE_RC16	= 0x02,
181	DA_CCB_PROBE_LBP	= 0x03,
182	DA_CCB_PROBE_BLK_LIMITS	= 0x04,
183	DA_CCB_PROBE_BDC	= 0x05,
184	DA_CCB_PROBE_ATA	= 0x06,
185	DA_CCB_BUFFER_IO	= 0x07,
186	DA_CCB_DUMP		= 0x0A,
187	DA_CCB_DELETE		= 0x0B,
188	DA_CCB_TUR		= 0x0C,
189	DA_CCB_PROBE_ZONE	= 0x0D,
190	DA_CCB_PROBE_ATA_LOGDIR	= 0x0E,
191	DA_CCB_PROBE_ATA_IDDIR	= 0x0F,
192	DA_CCB_PROBE_ATA_SUP	= 0x10,
193	DA_CCB_PROBE_ATA_ZONE	= 0x11,
194	DA_CCB_PROBE_WP		= 0x12,
195	DA_CCB_TYPE_MASK	= 0x1F,
196	DA_CCB_RETRY_UA		= 0x20
197} da_ccb_state;
198
/*
 * Order here is important for method choice.
 *
 * We prefer ATA_TRIM because tests run against a SandForce 2281 SSD attached
 * to an LSI 2008 (mps) controller (FW: v12, Drv: v14) showed deletes roughly
 * 20% quicker with ATA_TRIM than with the corresponding UNMAP for a real
 * world mysql import taking 5 minutes.
 */
208typedef enum {
209	DA_DELETE_NONE,
210	DA_DELETE_DISABLE,
211	DA_DELETE_ATA_TRIM,
212	DA_DELETE_UNMAP,
213	DA_DELETE_WS16,
214	DA_DELETE_WS10,
215	DA_DELETE_ZERO,
216	DA_DELETE_MIN = DA_DELETE_ATA_TRIM,
217	DA_DELETE_MAX = DA_DELETE_ZERO
218} da_delete_methods;
219
220/*
221 * For SCSI, host managed drives show up as a separate device type.  For
222 * ATA, host managed drives also have a different device signature.
223 * XXX KDM figure out the ATA host managed signature.
224 */
225typedef enum {
226	DA_ZONE_NONE		= 0x00,
227	DA_ZONE_DRIVE_MANAGED	= 0x01,
228	DA_ZONE_HOST_AWARE	= 0x02,
229	DA_ZONE_HOST_MANAGED	= 0x03
230} da_zone_mode;
231
232/*
233 * We distinguish between these interface cases in addition to the drive type:
234 * o ATA drive behind a SCSI translation layer that knows about ZBC/ZAC
235 * o ATA drive behind a SCSI translation layer that does not know about
236 *   ZBC/ZAC, and so needs to be managed via ATA passthrough.  In this
237 *   case, we would need to share the ATA code with the ada(4) driver.
238 * o SCSI drive.
239 */
240typedef enum {
241	DA_ZONE_IF_SCSI,
242	DA_ZONE_IF_ATA_PASS,
243	DA_ZONE_IF_ATA_SAT,
244} da_zone_interface;
245
246typedef enum {
247	DA_ZONE_FLAG_RZ_SUP		= 0x0001,
248	DA_ZONE_FLAG_OPEN_SUP		= 0x0002,
249	DA_ZONE_FLAG_CLOSE_SUP		= 0x0004,
250	DA_ZONE_FLAG_FINISH_SUP		= 0x0008,
251	DA_ZONE_FLAG_RWP_SUP		= 0x0010,
252	DA_ZONE_FLAG_SUP_MASK		= (DA_ZONE_FLAG_RZ_SUP |
253					   DA_ZONE_FLAG_OPEN_SUP |
254					   DA_ZONE_FLAG_CLOSE_SUP |
255					   DA_ZONE_FLAG_FINISH_SUP |
256					   DA_ZONE_FLAG_RWP_SUP),
257	DA_ZONE_FLAG_URSWRZ		= 0x0020,
258	DA_ZONE_FLAG_OPT_SEQ_SET	= 0x0040,
259	DA_ZONE_FLAG_OPT_NONSEQ_SET	= 0x0080,
260	DA_ZONE_FLAG_MAX_SEQ_SET	= 0x0100,
261	DA_ZONE_FLAG_SET_MASK		= (DA_ZONE_FLAG_OPT_SEQ_SET |
262					   DA_ZONE_FLAG_OPT_NONSEQ_SET |
263					   DA_ZONE_FLAG_MAX_SEQ_SET)
264} da_zone_flags;
265
266static struct da_zone_desc {
267	da_zone_flags value;
268	const char *desc;
269} da_zone_desc_table[] = {
270	{DA_ZONE_FLAG_RZ_SUP, "Report Zones" },
271	{DA_ZONE_FLAG_OPEN_SUP, "Open" },
272	{DA_ZONE_FLAG_CLOSE_SUP, "Close" },
273	{DA_ZONE_FLAG_FINISH_SUP, "Finish" },
274	{DA_ZONE_FLAG_RWP_SUP, "Reset Write Pointer" },
275};
276
277typedef void da_delete_func_t (struct cam_periph *periph, union ccb *ccb,
278			      struct bio *bp);
279static da_delete_func_t da_delete_trim;
280static da_delete_func_t da_delete_unmap;
281static da_delete_func_t da_delete_ws;
282
283static const void * da_delete_functions[] = {
284	NULL,
285	NULL,
286	da_delete_trim,
287	da_delete_unmap,
288	da_delete_ws,
289	da_delete_ws,
290	da_delete_ws
291};
292
293static const char *da_delete_method_names[] =
294    { "NONE", "DISABLE", "ATA_TRIM", "UNMAP", "WS16", "WS10", "ZERO" };
295static const char *da_delete_method_desc[] =
296    { "NONE", "DISABLED", "ATA TRIM", "UNMAP", "WRITE SAME(16) with UNMAP",
297      "WRITE SAME(10) with UNMAP", "ZERO" };
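
/*
 * The three arrays above are indexed by da_delete_methods, so for example
 * da_delete_functions[DA_DELETE_UNMAP] is da_delete_unmap and
 * da_delete_method_names[DA_DELETE_WS16] is "WS16".  Keep all three (and the
 * enum) in sync when adding a method.
 */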
298
299/* Offsets into our private area for storing information */
300#define ccb_state	ppriv_field0
301#define ccb_bp		ppriv_ptr1
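
/*
 * Illustrative use of these private fields: the CCB state and the
 * originating bio are stashed when a request is built and recovered on
 * completion, roughly
 *
 *	start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
 *	start_ccb->ccb_h.ccb_bp = bp;
 *	...
 *	bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
 */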
302
303struct disk_params {
304	u_int8_t  heads;
305	u_int32_t cylinders;
306	u_int8_t  secs_per_track;
307	u_int32_t secsize;	/* Number of bytes/sector */
308	u_int64_t sectors;	/* total number sectors */
309	u_int     stripesize;
310	u_int     stripeoffset;
311};
312
313#define UNMAP_RANGE_MAX		0xffffffff
314#define UNMAP_HEAD_SIZE		8
315#define UNMAP_RANGE_SIZE	16
316#define UNMAP_MAX_RANGES	2048 /* Protocol Max is 4095 */
317#define UNMAP_BUF_SIZE		((UNMAP_MAX_RANGES * UNMAP_RANGE_SIZE) + \
318				UNMAP_HEAD_SIZE)
319
320#define WS10_MAX_BLKS		0xffff
321#define WS16_MAX_BLKS		0xffffffff
322#define ATA_TRIM_MAX_RANGES	((UNMAP_BUF_SIZE / \
323	(ATA_DSM_RANGE_SIZE * ATA_DSM_BLK_SIZE)) * ATA_DSM_BLK_SIZE)
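
/*
 * Worked numbers for the defaults above: UNMAP_BUF_SIZE is
 * 2048 * 16 + 8 = 32776 bytes, i.e. room for 2048 UNMAP block descriptors
 * plus the 8 byte parameter list header.  With the usual 8 byte ATA DSM
 * ranges packed into 512 byte blocks, the same buffer yields
 * ATA_TRIM_MAX_RANGES = (32776 / 4096) * 512 = 4096 TRIM ranges.
 */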
324
325#define DA_WORK_TUR		(1 << 16)
326
327typedef enum {
328	DA_REF_OPEN = 1,
329	DA_REF_OPEN_HOLD,
330	DA_REF_CLOSE_HOLD,
331	DA_REF_PROBE_HOLD,
332	DA_REF_TUR,
333	DA_REF_GEOM,
334	DA_REF_SYSCTL,
335	DA_REF_REPROBE,
336	DA_REF_MAX		/* KEEP LAST */
337} da_ref_token;
338
339struct da_softc {
340	struct   cam_iosched_softc *cam_iosched;
341	struct	 bio_queue_head delete_run_queue;
342	LIST_HEAD(, ccb_hdr) pending_ccbs;
343	int	 refcount;		/* Active xpt_action() calls */
344	da_state state;
345	da_flags flags;
346	da_quirks quirks;
347	int	 minimum_cmd_size;
348	int	 error_inject;
349	int	 trim_max_ranges;
350	int	 delete_available;	/* Delete methods possibly available */
351	da_zone_mode			zone_mode;
352	da_zone_interface		zone_interface;
353	da_zone_flags			zone_flags;
354	struct ata_gp_log_dir		ata_logdir;
355	int				valid_logdir_len;
356	struct ata_identify_log_pages	ata_iddir;
357	int				valid_iddir_len;
358	uint64_t			optimal_seq_zones;
359	uint64_t			optimal_nonseq_zones;
360	uint64_t			max_seq_zones;
361	u_int			maxio;
362	uint32_t		unmap_max_ranges;
363	uint32_t		unmap_max_lba; /* Max LBAs in UNMAP req */
364	uint32_t		unmap_gran;
365	uint32_t		unmap_gran_align;
366	uint64_t		ws_max_blks;
367	uint64_t		trim_count;
368	uint64_t		trim_ranges;
369	uint64_t		trim_lbas;
370	da_delete_methods	delete_method_pref;
371	da_delete_methods	delete_method;
372	da_delete_func_t	*delete_func;
373	int			p_type;
374	struct	 disk_params params;
375	struct	 disk *disk;
376	union	 ccb saved_ccb;
377	struct task		sysctl_task;
378	struct sysctl_ctx_list	sysctl_ctx;
379	struct sysctl_oid	*sysctl_tree;
380	struct callout		sendordered_c;
381	uint64_t wwpn;
382	uint8_t	 unmap_buf[UNMAP_BUF_SIZE];
383	struct scsi_read_capacity_data_long rcaplong;
384	struct callout		mediapoll_c;
385	int			ref_flags[DA_REF_MAX];
386#ifdef CAM_IO_STATS
387	struct sysctl_ctx_list	sysctl_stats_ctx;
388	struct sysctl_oid	*sysctl_stats_tree;
389	u_int	errors;
390	u_int	timeouts;
391	u_int	invalidations;
392#endif
393#define DA_ANNOUNCETMP_SZ 160
394	char			announce_temp[DA_ANNOUNCETMP_SZ];
395#define DA_ANNOUNCE_SZ 400
396	char			announcebuf[DA_ANNOUNCE_SZ];
397};
398
399#define dadeleteflag(softc, delete_method, enable)			\
400	if (enable) {							\
401		softc->delete_available |= (1 << delete_method);	\
402	} else {							\
403		softc->delete_available &= ~(1 << delete_method);	\
404	}
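
/*
 * Hypothetical call site, just to show the intent: the macro sets or clears
 * the method's bit in softc->delete_available, e.g.
 *
 *	dadeleteflag(softc, DA_DELETE_UNMAP, lbp->flags & SVPD_LBP_UNMAP);
 *
 * Note that the expansion is a bare if/else, so the usual dangling-else
 * caveat applies if it is ever used without braces.
 */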
405
406struct da_quirk_entry {
407	struct scsi_inquiry_pattern inq_pat;
408	da_quirks quirks;
409};
410
411static const char quantum[] = "QUANTUM";
412static const char microp[] = "MICROP";
413
414static struct da_quirk_entry da_quirk_table[] =
415{
416	/* SPI, FC devices */
417	{
418		/*
419		 * Fujitsu M2513A MO drives.
420		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
421		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
422		 * Reported by: W.Scholten <whs@xs4all.nl>
423		 */
424		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
425		/*quirks*/ DA_Q_NO_SYNC_CACHE
426	},
427	{
428		/* See above. */
429		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
430		/*quirks*/ DA_Q_NO_SYNC_CACHE
431	},
432	{
433		/*
434		 * This particular Fujitsu drive doesn't like the
435		 * synchronize cache command.
436		 * Reported by: Tom Jackson <toj@gorilla.net>
437		 */
438		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
439		/*quirks*/ DA_Q_NO_SYNC_CACHE
440	},
441	{
442		/*
443		 * This drive doesn't like the synchronize cache command
444		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
445		 * in NetBSD PR kern/6027, August 24, 1998.
446		 */
447		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
448		/*quirks*/ DA_Q_NO_SYNC_CACHE
449	},
450	{
451		/*
452		 * This drive doesn't like the synchronize cache command
453		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
454		 * (PR 8882).
455		 */
456		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
457		/*quirks*/ DA_Q_NO_SYNC_CACHE
458	},
459	{
460		/*
461		 * Doesn't like the synchronize cache command.
462		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
463		 */
464		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
465		/*quirks*/ DA_Q_NO_SYNC_CACHE
466	},
467	{
468		/*
469		 * Doesn't like the synchronize cache command.
470		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
471		 */
472		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
473		/*quirks*/ DA_Q_NO_SYNC_CACHE
474	},
475	{
476		/*
477		 * Doesn't like the synchronize cache command.
478		 */
479		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
480		/*quirks*/ DA_Q_NO_SYNC_CACHE
481	},
482	{
483		/*
484		 * Doesn't like the synchronize cache command.
485		 * Reported by: walter@pelissero.de
486		 */
487		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
488		/*quirks*/ DA_Q_NO_SYNC_CACHE
489	},
490	{
491		/*
492		 * Doesn't work correctly with 6 byte reads/writes.
493		 * Returns illegal request, and points to byte 9 of the
494		 * 6-byte CDB.
495		 * Reported by:  Adam McDougall <bsdx@spawnet.com>
496		 */
497		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
498		/*quirks*/ DA_Q_NO_6_BYTE
499	},
500	{
501		/* See above. */
502		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
503		/*quirks*/ DA_Q_NO_6_BYTE
504	},
505	{
506		/*
507		 * Doesn't like the synchronize cache command.
508		 * Reported by: walter@pelissero.de
509		 */
510		{T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
511		/*quirks*/ DA_Q_NO_SYNC_CACHE
512	},
513	{
514		/*
515		 * The CISS RAID controllers do not support SYNC_CACHE
516		 */
517		{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
518		/*quirks*/ DA_Q_NO_SYNC_CACHE
519	},
520	{
521		/*
522		 * The STEC SSDs sometimes hang on UNMAP.
523		 */
524		{T_DIRECT, SIP_MEDIA_FIXED, "STEC", "*", "*"},
525		/*quirks*/ DA_Q_NO_UNMAP
526	},
527	{
528		/*
529		 * VMware returns BUSY status when storage has transient
530		 * connectivity problems, so better wait.
531		 * Also VMware returns odd errors on misaligned UNMAPs.
532		 */
533		{T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*"},
534		/*quirks*/ DA_Q_RETRY_BUSY | DA_Q_STRICT_UNMAP
535	},
536	/* USB mass storage devices supported by umass(4) */
537	{
538		/*
539		 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player
540		 * PR: kern/51675
541		 */
542		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"},
543		/*quirks*/ DA_Q_NO_SYNC_CACHE
544	},
545	{
546		/*
547		 * Power Quotient Int. (PQI) USB flash key
548		 * PR: kern/53067
549		 */
550		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*",
551		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
552	},
553	{
554		/*
555		 * Creative Nomad MUVO mp3 player (USB)
556		 * PR: kern/53094
557		 */
558		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
559		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
560	},
561	{
562		/*
563		 * Jungsoft NEXDISK USB flash key
564		 * PR: kern/54737
565		 */
566		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"},
567		/*quirks*/ DA_Q_NO_SYNC_CACHE
568	},
569	{
570		/*
571		 * FreeDik USB Mini Data Drive
572		 * PR: kern/54786
573		 */
574		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive",
575		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
576	},
577	{
578		/*
579		 * Sigmatel USB Flash MP3 Player
580		 * PR: kern/57046
581		 */
582		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
583		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
584	},
585	{
586		/*
587		 * Neuros USB Digital Audio Computer
588		 * PR: kern/63645
589		 */
590		{T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.",
591		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
592	},
593	{
594		/*
595		 * SEAGRAND NP-900 MP3 Player
596		 * PR: kern/64563
597		 */
598		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
599		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
600	},
601	{
602		/*
603		 * iRiver iFP MP3 player (with UMS Firmware)
604		 * PR: kern/54881, i386/63941, kern/66124
605		 */
606		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"},
607		/*quirks*/ DA_Q_NO_SYNC_CACHE
608	},
609	{
610		/*
611		 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01
612		 * PR: kern/70158
613		 */
614		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"},
615		/*quirks*/ DA_Q_NO_SYNC_CACHE
616	},
617	{
618		/*
619		 * ZICPlay USB MP3 Player with FM
620		 * PR: kern/75057
621		 */
622		{T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"},
623		/*quirks*/ DA_Q_NO_SYNC_CACHE
624	},
625	{
626		/*
627		 * TEAC USB floppy mechanisms
628		 */
629		{T_DIRECT, SIP_MEDIA_REMOVABLE, "TEAC" , "FD-05*", "*"},
630		/*quirks*/ DA_Q_NO_SYNC_CACHE
631	},
632	{
633		/*
634		 * Kingston DataTraveler II+ USB Pen-Drive.
635		 * Reported by: Pawel Jakub Dawidek <pjd@FreeBSD.org>
636		 */
637		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston" , "DataTraveler II+",
638		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
639	},
640	{
641		/*
642		 * USB DISK Pro PMAP
643		 * Reported by: jhs
644		 * PR: usb/96381
645		 */
646		{T_DIRECT, SIP_MEDIA_REMOVABLE, " ", "USB DISK Pro", "PMAP"},
647		/*quirks*/ DA_Q_NO_SYNC_CACHE
648	},
649	{
650		/*
651		 * Motorola E398 Mobile Phone (TransFlash memory card).
652		 * Reported by: Wojciech A. Koszek <dunstan@FreeBSD.czest.pl>
653		 * PR: usb/89889
654		 */
655		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Motorola" , "Motorola Phone",
656		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
657	},
658	{
659		/*
660		 * Qware BeatZkey! Pro
661		 * PR: usb/79164
662		 */
663		{T_DIRECT, SIP_MEDIA_REMOVABLE, "GENERIC", "USB DISK DEVICE",
664		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
665	},
666	{
667		/*
668		 * Time DPA20B 1GB MP3 Player
669		 * PR: usb/81846
670		 */
671		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB2.0*", "(FS) FLASH DISK*",
672		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
673	},
674	{
675		/*
676		 * Samsung USB key 128Mb
677		 * PR: usb/90081
678		 */
679		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB-DISK", "FreeDik-FlashUsb",
680		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
681	},
682	{
683		/*
684		 * Kingston DataTraveler 2.0 USB Flash memory.
685		 * PR: usb/89196
686		 */
687		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler 2.0",
688		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
689	},
	{
		/*
		 * Creative MUVO Slim mp3 player (USB)
		 * PR: usb/86131
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "MuVo Slim",
		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
	},
698	{
699		/*
700		 * United MP5512 Portable MP3 Player (2-in-1 USB DISK/MP3)
701		 * PR: usb/80487
702		 */
703		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "MUSIC DISK",
704		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
705	},
706	{
707		/*
708		 * SanDisk Micro Cruzer 128MB
709		 * PR: usb/75970
710		 */
711		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SanDisk" , "Micro Cruzer",
712		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
713	},
714	{
715		/*
716		 * TOSHIBA TransMemory USB sticks
717		 * PR: kern/94660
718		 */
719		{T_DIRECT, SIP_MEDIA_REMOVABLE, "TOSHIBA", "TransMemory",
720		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
721	},
	{
		/*
		 * PNY USB 3.0 Flash Drives
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PNY", "USB 3.0 FD*",
		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_RC16
	},
729	{
730		/*
731		 * PNY USB Flash keys
732		 * PR: usb/75578, usb/72344, usb/65436
733		 */
734		{T_DIRECT, SIP_MEDIA_REMOVABLE, "*" , "USB DISK*",
735		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
736	},
737	{
738		/*
739		 * Genesys GL3224
740		 */
741		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
742		"120?"}, /*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_4K | DA_Q_NO_RC16
743	},
744	{
745		/*
746		 * Genesys 6-in-1 Card Reader
747		 * PR: usb/94647
748		 */
749		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "STORAGE DEVICE*",
750		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
751	},
752	{
753		/*
754		 * Rekam Digital CAMERA
755		 * PR: usb/98713
756		 */
757		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CAMERA*", "4MP-9J6*",
758		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
759	},
760	{
761		/*
762		 * iRiver H10 MP3 player
763		 * PR: usb/102547
764		 */
765		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "H10*",
766		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
767	},
768	{
769		/*
770		 * iRiver U10 MP3 player
771		 * PR: usb/92306
772		 */
773		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iriver", "U10*",
774		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
775	},
776	{
777		/*
778		 * X-Micro Flash Disk
779		 * PR: usb/96901
780		 */
781		{T_DIRECT, SIP_MEDIA_REMOVABLE, "X-Micro", "Flash Disk",
782		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
783	},
784	{
785		/*
786		 * EasyMP3 EM732X USB 2.0 Flash MP3 Player
787		 * PR: usb/96546
788		 */
789		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EM732X", "MP3 Player*",
790		"1.00"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
791	},
792	{
793		/*
794		 * Denver MP3 player
795		 * PR: usb/107101
796		 */
797		{T_DIRECT, SIP_MEDIA_REMOVABLE, "DENVER", "MP3 PLAYER",
798		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
799	},
800	{
801		/*
802		 * Philips USB Key Audio KEY013
803		 * PR: usb/68412
804		 */
805		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PHILIPS", "Key*", "*"},
806		/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
807	},
808	{
809		/*
810		 * JNC MP3 Player
811		 * PR: usb/94439
812		 */
813		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JNC*" , "MP3 Player*",
814		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
815	},
816	{
817		/*
818		 * SAMSUNG MP0402H
819		 * PR: usb/108427
820		 */
821		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MP0402H", "*"},
822		/*quirks*/ DA_Q_NO_SYNC_CACHE
823	},
824	{
825		/*
826		 * I/O Magic USB flash - Giga Bank
827		 * PR: usb/108810
828		 */
829		{T_DIRECT, SIP_MEDIA_FIXED, "GS-Magic", "stor*", "*"},
830		/*quirks*/ DA_Q_NO_SYNC_CACHE
831	},
832	{
833		/*
834		 * JoyFly 128mb USB Flash Drive
835		 * PR: 96133
836		 */
837		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "Flash Disk*",
838		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
839	},
840	{
841		/*
842		 * ChipsBnk usb stick
843		 * PR: 103702
844		 */
845		{T_DIRECT, SIP_MEDIA_REMOVABLE, "ChipsBnk", "USB*",
846		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
847	},
848	{
849		/*
850		 * Storcase (Kingston) InfoStation IFS FC2/SATA-R 201A
851		 * PR: 129858
852		 */
853		{T_DIRECT, SIP_MEDIA_FIXED, "IFS", "FC2/SATA-R*",
854		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
855	},
856	{
857		/*
858		 * Samsung YP-U3 mp3-player
859		 * PR: 125398
860		 */
861		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Samsung", "YP-U3",
862		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
863	},
864	{
865		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Netac", "OnlyDisk*",
866		 "2000"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
867	},
868	{
869		/*
870		 * Sony Cyber-Shot DSC cameras
871		 * PR: usb/137035
872		 */
873		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
874		/*quirks*/ DA_Q_NO_SYNC_CACHE | DA_Q_NO_PREVENT
875	},
876	{
877		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Kingston", "DataTraveler G3",
878		 "1.00"}, /*quirks*/ DA_Q_NO_PREVENT
879	},
	{
		/* At least several Transcend USB sticks lie about RC16. */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*",
		 "*"}, /*quirks*/ DA_Q_NO_RC16
	},
885	{
886		/*
887		 * I-O Data USB Flash Disk
888		 * PR: usb/211716
889		 */
890		{T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*",
891		 "*"}, /*quirks*/ DA_Q_NO_RC16
892	},
	{
		/*
		 * SLC CHIPFANCIER USB drives
		 * PR: usb/234503 (RC10 right, RC16 wrong)
		 * 16GB, 32GB and 128GB confirmed to have same issue
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "*SLC", "CHIPFANCIER",
		 "*"}, /*quirks*/ DA_Q_NO_RC16
	},
902	/* ATA/SATA devices over SAS/USB/... */
903	{
904		/* Sandisk X400 */
905		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SanDisk SD8SB8U1*", "*" },
906		/*quirks*/DA_Q_128KB
907	},
908	{
909		/* Hitachi Advanced Format (4k) drives */
910		{ T_DIRECT, SIP_MEDIA_FIXED, "Hitachi", "H??????????E3*", "*" },
911		/*quirks*/DA_Q_4K
912	},
913	{
914		/* Micron Advanced Format (4k) drives */
915		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Micron 5100 MTFDDAK*", "*" },
916		/*quirks*/DA_Q_4K
917	},
918	{
919		/* Samsung Advanced Format (4k) drives */
920		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD155UI*", "*" },
921		/*quirks*/DA_Q_4K
922	},
923	{
924		/* Samsung Advanced Format (4k) drives */
925		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD155UI*", "*" },
926		/*quirks*/DA_Q_4K
927	},
928	{
929		/* Samsung Advanced Format (4k) drives */
930		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG HD204UI*", "*" },
931		/*quirks*/DA_Q_4K
932	},
933	{
934		/* Samsung Advanced Format (4k) drives */
935		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HD204UI*", "*" },
936		/*quirks*/DA_Q_4K
937	},
938	{
939		/* Seagate Barracuda Green Advanced Format (4k) drives */
940		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*" },
941		/*quirks*/DA_Q_4K
942	},
943	{
944		/* Seagate Barracuda Green Advanced Format (4k) drives */
945		{ T_DIRECT, SIP_MEDIA_FIXED, "ST????DL", "*", "*" },
946		/*quirks*/DA_Q_4K
947	},
948	{
949		/* Seagate Barracuda Green Advanced Format (4k) drives */
950		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???DM*", "*" },
951		/*quirks*/DA_Q_4K
952	},
953	{
954		/* Seagate Barracuda Green Advanced Format (4k) drives */
955		{ T_DIRECT, SIP_MEDIA_FIXED, "ST???DM*", "*", "*" },
956		/*quirks*/DA_Q_4K
957	},
958	{
959		/* Seagate Barracuda Green Advanced Format (4k) drives */
960		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DM*", "*" },
961		/*quirks*/DA_Q_4K
962	},
963	{
964		/* Seagate Barracuda Green Advanced Format (4k) drives */
965		{ T_DIRECT, SIP_MEDIA_FIXED, "ST????DM", "*", "*" },
966		/*quirks*/DA_Q_4K
967	},
968	{
969		/* Seagate Momentus Advanced Format (4k) drives */
970		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500423AS*", "*" },
971		/*quirks*/DA_Q_4K
972	},
973	{
974		/* Seagate Momentus Advanced Format (4k) drives */
975		{ T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "3AS*", "*" },
976		/*quirks*/DA_Q_4K
977	},
978	{
979		/* Seagate Momentus Advanced Format (4k) drives */
980		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9500424AS*", "*" },
981		/*quirks*/DA_Q_4K
982	},
983	{
984		/* Seagate Momentus Advanced Format (4k) drives */
985		{ T_DIRECT, SIP_MEDIA_FIXED, "ST950042", "4AS*", "*" },
986		/*quirks*/DA_Q_4K
987	},
988	{
989		/* Seagate Momentus Advanced Format (4k) drives */
990		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640423AS*", "*" },
991		/*quirks*/DA_Q_4K
992	},
993	{
994		/* Seagate Momentus Advanced Format (4k) drives */
995		{ T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "3AS*", "*" },
996		/*quirks*/DA_Q_4K
997	},
998	{
999		/* Seagate Momentus Advanced Format (4k) drives */
1000		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9640424AS*", "*" },
1001		/*quirks*/DA_Q_4K
1002	},
1003	{
1004		/* Seagate Momentus Advanced Format (4k) drives */
1005		{ T_DIRECT, SIP_MEDIA_FIXED, "ST964042", "4AS*", "*" },
1006		/*quirks*/DA_Q_4K
1007	},
1008	{
1009		/* Seagate Momentus Advanced Format (4k) drives */
1010		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750420AS*", "*" },
1011		/*quirks*/DA_Q_4K
1012	},
1013	{
1014		/* Seagate Momentus Advanced Format (4k) drives */
1015		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "0AS*", "*" },
1016		/*quirks*/DA_Q_4K
1017	},
1018	{
1019		/* Seagate Momentus Advanced Format (4k) drives */
1020		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750422AS*", "*" },
1021		/*quirks*/DA_Q_4K
1022	},
1023	{
1024		/* Seagate Momentus Advanced Format (4k) drives */
1025		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "2AS*", "*" },
1026		/*quirks*/DA_Q_4K
1027	},
1028	{
1029		/* Seagate Momentus Advanced Format (4k) drives */
1030		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST9750423AS*", "*" },
1031		/*quirks*/DA_Q_4K
1032	},
1033	{
1034		/* Seagate Momentus Advanced Format (4k) drives */
1035		{ T_DIRECT, SIP_MEDIA_FIXED, "ST975042", "3AS*", "*" },
1036		/*quirks*/DA_Q_4K
1037	},
1038	{
1039		/* Seagate Momentus Thin Advanced Format (4k) drives */
1040		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST???LT*", "*" },
1041		/*quirks*/DA_Q_4K
1042	},
1043	{
1044		/* Seagate Momentus Thin Advanced Format (4k) drives */
1045		{ T_DIRECT, SIP_MEDIA_FIXED, "ST???LT*", "*", "*" },
1046		/*quirks*/DA_Q_4K
1047	},
1048	{
1049		/* WDC Caviar Green Advanced Format (4k) drives */
1050		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RS*", "*" },
1051		/*quirks*/DA_Q_4K
1052	},
1053	{
1054		/* WDC Caviar Green Advanced Format (4k) drives */
1055		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RS*", "*" },
1056		/*quirks*/DA_Q_4K
1057	},
1058	{
1059		/* WDC Caviar Green Advanced Format (4k) drives */
1060		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD????RX*", "*" },
1061		/*quirks*/DA_Q_4K
1062	},
1063	{
1064		/* WDC Caviar Green Advanced Format (4k) drives */
1065		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "??RX*", "*" },
1066		/*quirks*/DA_Q_4K
1067	},
1068	{
1069		/* WDC Caviar Green Advanced Format (4k) drives */
1070		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RS*", "*" },
1071		/*quirks*/DA_Q_4K
1072	},
1073	{
1074		/* WDC Caviar Green Advanced Format (4k) drives */
1075		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RS*", "*" },
1076		/*quirks*/DA_Q_4K
1077	},
1078	{
1079		/* WDC Caviar Green Advanced Format (4k) drives */
1080		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD??????RX*", "*" },
1081		/*quirks*/DA_Q_4K
1082	},
1083	{
1084		/* WDC Caviar Green Advanced Format (4k) drives */
1085		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "????RX*", "*" },
1086		/*quirks*/DA_Q_4K
1087	},
1088	{
1089		/* WDC Scorpio Black Advanced Format (4k) drives */
1090		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PKT*", "*" },
1091		/*quirks*/DA_Q_4K
1092	},
1093	{
1094		/* WDC Scorpio Black Advanced Format (4k) drives */
1095		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PKT*", "*" },
1096		/*quirks*/DA_Q_4K
1097	},
1098	{
1099		/* WDC Scorpio Black Advanced Format (4k) drives */
1100		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PKT*", "*" },
1101		/*quirks*/DA_Q_4K
1102	},
1103	{
1104		/* WDC Scorpio Black Advanced Format (4k) drives */
1105		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PKT*", "*" },
1106		/*quirks*/DA_Q_4K
1107	},
1108	{
1109		/* WDC Scorpio Blue Advanced Format (4k) drives */
1110		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD???PVT*", "*" },
1111		/*quirks*/DA_Q_4K
1112	},
1113	{
1114		/* WDC Scorpio Blue Advanced Format (4k) drives */
1115		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "?PVT*", "*" },
1116		/*quirks*/DA_Q_4K
1117	},
1118	{
1119		/* WDC Scorpio Blue Advanced Format (4k) drives */
1120		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "WDC WD?????PVT*", "*" },
1121		/*quirks*/DA_Q_4K
1122	},
1123	{
1124		/* WDC Scorpio Blue Advanced Format (4k) drives */
1125		{ T_DIRECT, SIP_MEDIA_FIXED, "WDC WD??", "???PVT*", "*" },
1126		/*quirks*/DA_Q_4K
1127	},
1128	{
1129		/*
1130		 * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1)
1131		 * PR: usb/97472
1132		 */
1133		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C*", "*"},
1134		/*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE
1135	},
1136	{
1137		/*
1138		 * Olympus digital cameras (D-370)
1139		 * PR: usb/97472
1140		 */
1141		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "D*", "*"},
1142		/*quirks*/ DA_Q_NO_6_BYTE
1143	},
1144	{
1145		/*
1146		 * Olympus digital cameras (E-100RS, E-10).
1147		 * PR: usb/97472
1148		 */
1149		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "E*", "*"},
1150		/*quirks*/ DA_Q_NO_6_BYTE | DA_Q_NO_SYNC_CACHE
1151	},
1152	{
1153		/*
1154		 * Olympus FE-210 camera
1155		 */
1156		{T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "FE210*",
1157		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1158	},
	{
		/*
		 * Pentax Digital Camera
		 * PR: usb/93389
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "PENTAX", "DIGITAL CAMERA",
		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
1167	{
1168		/*
1169		 * LG UP3S MP3 player
1170		 */
1171		{T_DIRECT, SIP_MEDIA_REMOVABLE, "LG", "UP3S",
1172		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1173	},
1174	{
1175		/*
1176		 * Laser MP3-2GA13 MP3 player
1177		 */
1178		{T_DIRECT, SIP_MEDIA_REMOVABLE, "USB 2.0", "(HS) Flash Disk",
1179		"*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
1180	},
	{
		/*
		 * LaCie external 250GB hard drive designed by Porsche
		 * Submitted by: Ben Stuyts <ben@altesco.nl>
		 * PR: 121474
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "HM250JI", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
1190	/* SATA SSDs */
1191	{
1192		/*
1193		 * Corsair Force 2 SSDs
1194		 * 4k optimised & trim only works in 4k requests + 4k aligned
1195		 */
1196		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair CSSD-F*", "*" },
1197		/*quirks*/DA_Q_4K
1198	},
1199	{
1200		/*
1201		 * Corsair Force 3 SSDs
1202		 * 4k optimised & trim only works in 4k requests + 4k aligned
1203		 */
1204		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force 3*", "*" },
1205		/*quirks*/DA_Q_4K
1206	},
	{
		/*
		 * Corsair Neutron GTX SSDs
		 * 4k optimised & trim only works in 4k requests + 4k aligned
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Neutron GTX*", "*" },
		/*quirks*/DA_Q_4K
	},
1215	{
1216		/*
1217		 * Corsair Force GT & GS SSDs
1218		 * 4k optimised & trim only works in 4k requests + 4k aligned
1219		 */
1220		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Corsair Force G*", "*" },
1221		/*quirks*/DA_Q_4K
1222	},
1223	{
1224		/*
1225		 * Crucial M4 SSDs
1226		 * 4k optimised & trim only works in 4k requests + 4k aligned
1227		 */
1228		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "M4-CT???M4SSD2*", "*" },
1229		/*quirks*/DA_Q_4K
1230	},
1231	{
1232		/*
1233		 * Crucial RealSSD C300 SSDs
1234		 * 4k optimised
1235		 */
1236		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "C300-CTFDDAC???MAG*",
1237		"*" }, /*quirks*/DA_Q_4K
1238	},
1239	{
1240		/*
1241		 * Intel 320 Series SSDs
1242		 * 4k optimised & trim only works in 4k requests + 4k aligned
1243		 */
1244		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2CW*", "*" },
1245		/*quirks*/DA_Q_4K
1246	},
1247	{
1248		/*
1249		 * Intel 330 Series SSDs
1250		 * 4k optimised & trim only works in 4k requests + 4k aligned
1251		 */
1252		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2CT*", "*" },
1253		/*quirks*/DA_Q_4K
1254	},
1255	{
1256		/*
1257		 * Intel 510 Series SSDs
1258		 * 4k optimised & trim only works in 4k requests + 4k aligned
1259		 */
1260		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2MH*", "*" },
1261		/*quirks*/DA_Q_4K
1262	},
1263	{
1264		/*
1265		 * Intel 520 Series SSDs
1266		 * 4k optimised & trim only works in 4k requests + 4k aligned
1267		 */
1268		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BW*", "*" },
1269		/*quirks*/DA_Q_4K
1270	},
1271	{
1272		/*
1273		 * Intel S3610 Series SSDs
1274		 * 4k optimised & trim only works in 4k requests + 4k aligned
1275		 */
1276		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSC2BX*", "*" },
1277		/*quirks*/DA_Q_4K
1278	},
1279	{
1280		/*
1281		 * Intel X25-M Series SSDs
1282		 * 4k optimised & trim only works in 4k requests + 4k aligned
1283		 */
1284		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "INTEL SSDSA2M*", "*" },
1285		/*quirks*/DA_Q_4K
1286	},
1287	{
1288		/*
1289		 * Kingston E100 Series SSDs
1290		 * 4k optimised & trim only works in 4k requests + 4k aligned
1291		 */
1292		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SE100S3*", "*" },
1293		/*quirks*/DA_Q_4K
1294	},
1295	{
1296		/*
1297		 * Kingston HyperX 3k SSDs
1298		 * 4k optimised & trim only works in 4k requests + 4k aligned
1299		 */
1300		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "KINGSTON SH103S3*", "*" },
1301		/*quirks*/DA_Q_4K
1302	},
1303	{
1304		/*
1305		 * Marvell SSDs (entry taken from OpenSolaris)
1306		 * 4k optimised & trim only works in 4k requests + 4k aligned
1307		 */
1308		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MARVELL SD88SA02*", "*" },
1309		/*quirks*/DA_Q_4K
1310	},
1311	{
1312		/*
1313		 * OCZ Agility 2 SSDs
1314		 * 4k optimised & trim only works in 4k requests + 4k aligned
1315		 */
1316		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY2*", "*" },
1317		/*quirks*/DA_Q_4K
1318	},
1319	{
1320		/*
1321		 * OCZ Agility 3 SSDs
1322		 * 4k optimised & trim only works in 4k requests + 4k aligned
1323		 */
1324		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-AGILITY3*", "*" },
1325		/*quirks*/DA_Q_4K
1326	},
1327	{
1328		/*
1329		 * OCZ Deneva R Series SSDs
1330		 * 4k optimised & trim only works in 4k requests + 4k aligned
1331		 */
1332		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "DENRSTE251M45*", "*" },
1333		/*quirks*/DA_Q_4K
1334	},
1335	{
1336		/*
1337		 * OCZ Vertex 2 SSDs (inc pro series)
1338		 * 4k optimised & trim only works in 4k requests + 4k aligned
1339		 */
1340		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ?VERTEX2*", "*" },
1341		/*quirks*/DA_Q_4K
1342	},
1343	{
1344		/*
1345		 * OCZ Vertex 3 SSDs
1346		 * 4k optimised & trim only works in 4k requests + 4k aligned
1347		 */
1348		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX3*", "*" },
1349		/*quirks*/DA_Q_4K
1350	},
1351	{
1352		/*
1353		 * OCZ Vertex 4 SSDs
1354		 * 4k optimised & trim only works in 4k requests + 4k aligned
1355		 */
1356		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "OCZ-VERTEX4*", "*" },
1357		/*quirks*/DA_Q_4K
1358	},
1359	{
1360		/*
1361		 * Samsung 750 Series SSDs
1362		 * 4k optimised & trim only works in 4k requests + 4k aligned
1363		 */
1364		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 750*", "*" },
1365		/*quirks*/DA_Q_4K
1366	},
1367	{
1368		/*
1369		 * Samsung 830 Series SSDs
1370		 * 4k optimised & trim only works in 4k requests + 4k aligned
1371		 */
1372		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG SSD 830 Series*", "*" },
1373		/*quirks*/DA_Q_4K
1374	},
1375	{
1376		/*
1377		 * Samsung 840 SSDs
1378		 * 4k optimised & trim only works in 4k requests + 4k aligned
1379		 */
1380		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 840*", "*" },
1381		/*quirks*/DA_Q_4K
1382	},
1383	{
1384		/*
1385		 * Samsung 845 SSDs
1386		 * 4k optimised & trim only works in 4k requests + 4k aligned
1387		 */
1388		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 845*", "*" },
1389		/*quirks*/DA_Q_4K
1390	},
1391	{
1392		/*
1393		 * Samsung 850 SSDs
1394		 * 4k optimised & trim only works in 4k requests + 4k aligned
1395		 */
1396		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "Samsung SSD 850*", "*" },
1397		/*quirks*/DA_Q_4K
1398	},
1399	{
1400		/*
1401		 * Samsung 843T Series SSDs (MZ7WD*)
1402		 * Samsung PM851 Series SSDs (MZ7TE*)
1403		 * Samsung PM853T Series SSDs (MZ7GE*)
1404		 * Samsung SM863 Series SSDs (MZ7KM*)
1405		 * 4k optimised
1406		 */
1407		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SAMSUNG MZ7*", "*" },
1408		/*quirks*/DA_Q_4K
1409	},
1410	{
1411		/*
1412		 * Same as for SAMSUNG MZ7* but enable the quirks for SSD
1413		 * starting with MZ7* too
1414		 */
1415		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "MZ7*", "*" },
1416		/*quirks*/DA_Q_4K
1417	},
	{
		/*
		 * Same as above, but for SAMSUNG MZ7* SSDs connected via a
		 * SATA-to-SAS interposer, which therefore report without the
		 * leading "ATA" vendor string.
		 */
		{ T_DIRECT, SIP_MEDIA_FIXED, "SAMSUNG", "MZ7*", "*" },
		/*quirks*/DA_Q_4K
	},
1427	{
1428		/*
1429		 * SuperTalent TeraDrive CT SSDs
1430		 * 4k optimised & trim only works in 4k requests + 4k aligned
1431		 */
1432		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "FTM??CT25H*", "*" },
1433		/*quirks*/DA_Q_4K
1434	},
1435	{
1436		/*
1437		 * XceedIOPS SATA SSDs
1438		 * 4k optimised
1439		 */
1440		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "SG9XCS2D*", "*" },
1441		/*quirks*/DA_Q_4K
1442	},
1443	{
1444		/*
1445		 * Hama Innostor USB-Stick
1446		 */
1447		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "Innostor", "Innostor*", "*" },
1448		/*quirks*/DA_Q_NO_RC16
1449	},
1450	{
1451		/*
1452		 * Seagate Lamarr 8TB Shingled Magnetic Recording (SMR)
1453		 * Drive Managed SATA hard drive.  This drive doesn't report
1454		 * in firmware that it is a drive managed SMR drive.
1455		 */
1456		{ T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST8000AS000[23]*", "*" },
1457		/*quirks*/DA_Q_SMR_DM
1458	},
1459	{
1460		/*
1461		 * MX-ES USB Drive by Mach Xtreme
1462		 */
1463		{ T_DIRECT, SIP_MEDIA_REMOVABLE, "MX", "MXUB3*", "*"},
1464		/*quirks*/DA_Q_NO_RC16
1465	},
1466};
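
/*
 * A sketch of how the table above is consulted (the lookup itself happens in
 * daregister(), typically via cam_quirkmatch() with scsi_inquiry_match()):
 * the vendor/product/revision fields are shell-style glob patterns compared
 * against the device's INQUIRY data, so an entry like
 *
 *	{T_DIRECT, SIP_MEDIA_FIXED, "ATA", "ST????DL*", "*"}
 *
 * matches any fixed direct-access device whose vendor is "ATA" and whose
 * product string starts with "ST", has four more characters, and then "DL".
 */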
1467
1468static	disk_strategy_t	dastrategy;
1469static	dumper_t	dadump;
1470static	periph_init_t	dainit;
1471static	void		daasync(void *callback_arg, u_int32_t code,
1472				struct cam_path *path, void *arg);
1473static	void		dasysctlinit(void *context, int pending);
1474static	int		dasysctlsofttimeout(SYSCTL_HANDLER_ARGS);
1475static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
1476static	int		dadeletemethodsysctl(SYSCTL_HANDLER_ARGS);
1477static	int		dabitsysctl(SYSCTL_HANDLER_ARGS);
1478static	int		daflagssysctl(SYSCTL_HANDLER_ARGS);
1479static	int		dazonemodesysctl(SYSCTL_HANDLER_ARGS);
1480static	int		dazonesupsysctl(SYSCTL_HANDLER_ARGS);
1481static	int		dadeletemaxsysctl(SYSCTL_HANDLER_ARGS);
1482static	void		dadeletemethodset(struct da_softc *softc,
1483					  da_delete_methods delete_method);
1484static	off_t		dadeletemaxsize(struct da_softc *softc,
1485					da_delete_methods delete_method);
1486static	void		dadeletemethodchoose(struct da_softc *softc,
1487					     da_delete_methods default_method);
1488static	void		daprobedone(struct cam_periph *periph, union ccb *ccb);
1489
1490static	periph_ctor_t	daregister;
1491static	periph_dtor_t	dacleanup;
1492static	periph_start_t	dastart;
1493static	periph_oninv_t	daoninvalidate;
1494static	void		dazonedone(struct cam_periph *periph, union ccb *ccb);
1495static	void		dadone(struct cam_periph *periph,
1496			       union ccb *done_ccb);
1497static void		dadone_probewp(struct cam_periph *periph,
1498				       union ccb *done_ccb);
1499static void		dadone_proberc(struct cam_periph *periph,
1500				       union ccb *done_ccb);
1501static void		dadone_probelbp(struct cam_periph *periph,
1502					union ccb *done_ccb);
1503static void		dadone_probeblklimits(struct cam_periph *periph,
1504					      union ccb *done_ccb);
1505static void		dadone_probebdc(struct cam_periph *periph,
1506					union ccb *done_ccb);
1507static void		dadone_probeata(struct cam_periph *periph,
1508					union ccb *done_ccb);
1509static void		dadone_probeatalogdir(struct cam_periph *periph,
1510					      union ccb *done_ccb);
1511static void		dadone_probeataiddir(struct cam_periph *periph,
1512					     union ccb *done_ccb);
1513static void		dadone_probeatasup(struct cam_periph *periph,
1514					   union ccb *done_ccb);
1515static void		dadone_probeatazone(struct cam_periph *periph,
1516					    union ccb *done_ccb);
1517static void		dadone_probezone(struct cam_periph *periph,
1518					 union ccb *done_ccb);
1519static void		dadone_tur(struct cam_periph *periph,
1520				   union ccb *done_ccb);
1521static  int		daerror(union ccb *ccb, u_int32_t cam_flags,
1522				u_int32_t sense_flags);
1523static void		daprevent(struct cam_periph *periph, int action);
1524static void		dareprobe(struct cam_periph *periph);
1525static void		dasetgeom(struct cam_periph *periph, uint32_t block_len,
1526				  uint64_t maxsector,
1527				  struct scsi_read_capacity_data_long *rcaplong,
1528				  size_t rcap_size);
1529static callout_func_t	dasendorderedtag;
1530static void		dashutdown(void *arg, int howto);
1531static callout_func_t	damediapoll;
1532
1533#ifndef	DA_DEFAULT_POLL_PERIOD
1534#define	DA_DEFAULT_POLL_PERIOD	3
1535#endif
1536
1537#ifndef DA_DEFAULT_TIMEOUT
1538#define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
1539#endif
1540
1541#ifndef DA_DEFAULT_SOFTTIMEOUT
1542#define DA_DEFAULT_SOFTTIMEOUT	0
1543#endif
1544
1545#ifndef	DA_DEFAULT_RETRY
1546#define	DA_DEFAULT_RETRY	4
1547#endif
1548
1549#ifndef	DA_DEFAULT_SEND_ORDERED
1550#define	DA_DEFAULT_SEND_ORDERED	1
1551#endif
1552
1553static int da_poll_period = DA_DEFAULT_POLL_PERIOD;
1554static int da_retry_count = DA_DEFAULT_RETRY;
1555static int da_default_timeout = DA_DEFAULT_TIMEOUT;
1556static sbintime_t da_default_softtimeout = DA_DEFAULT_SOFTTIMEOUT;
1557static int da_send_ordered = DA_DEFAULT_SEND_ORDERED;
1558static int da_disable_wp_detection = 0;
1559static int da_enable_biospeedup = 1;
1560
1561static SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1562    "CAM Direct Access Disk driver");
1563SYSCTL_INT(_kern_cam_da, OID_AUTO, poll_period, CTLFLAG_RWTUN,
1564           &da_poll_period, 0, "Media polling period in seconds");
1565SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RWTUN,
1566           &da_retry_count, 0, "Normal I/O retry count");
1567SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RWTUN,
1568           &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
1569SYSCTL_INT(_kern_cam_da, OID_AUTO, send_ordered, CTLFLAG_RWTUN,
1570           &da_send_ordered, 0, "Send Ordered Tags");
1571SYSCTL_INT(_kern_cam_da, OID_AUTO, disable_wp_detection, CTLFLAG_RWTUN,
1572           &da_disable_wp_detection, 0,
1573	   "Disable detection of write-protected disks");
1574SYSCTL_INT(_kern_cam_da, OID_AUTO, enable_biospeedup, CTLFLAG_RDTUN,
1575	    &da_enable_biospeedup, 0, "Enable BIO_SPEEDUP processing");
1576
1577SYSCTL_PROC(_kern_cam_da, OID_AUTO, default_softtimeout,
1578    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
1579    dasysctlsofttimeout, "I",
1580    "Soft I/O timeout (ms)");
1581TUNABLE_INT64("kern.cam.da.default_softtimeout", &da_default_softtimeout);
1582
/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * passes the "don't send an ordered tag" test, so it takes
 * us two intervals to determine that a tag must be sent.
 */
1595#ifndef DA_ORDEREDTAG_INTERVAL
1596#define DA_ORDEREDTAG_INTERVAL 4
1597#endif
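
/*
 * With the 60 second default timeout above, this works out to the ordered
 * tag check running roughly every 60 / 4 = 15 seconds; the sendordered
 * callout is rescheduled every
 * (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL ticks.
 */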
1598
1599static struct periph_driver dadriver =
1600{
1601	dainit, "da",
1602	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
1603};
1604
1605PERIPHDRIVER_DECLARE(da, dadriver);
1606
1607static MALLOC_DEFINE(M_SCSIDA, "scsi_da", "scsi_da buffers");
1608
/*
 * This driver takes out references / holds in well-defined pairs, never
 * recursively. These macros / inline functions enforce those rules. They
 * are only enabled with DA_TRACK_REFS or INVARIANTS. If DA_TRACK_REFS is
 * defined to be 2 or larger, the tracking also includes debug printfs.
 */
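
/*
 * A minimal sketch of the pairing rule described above, using the open
 * reference as it is taken in daopen() and dropped in daclose():
 *
 *	if (da_periph_acquire(periph, DA_REF_OPEN) != 0)
 *		return (ENXIO);
 *	...
 *	da_periph_release(periph, DA_REF_OPEN);
 *
 * Each token may be held at most once, so re-acquiring with the same token
 * before the matching release will panic when the tracking is enabled.
 */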
1615#if defined(DA_TRACK_REFS) || defined(INVARIANTS)
1616
1617#ifndef DA_TRACK_REFS
1618#define DA_TRACK_REFS 1
1619#endif
1620
1621#if DA_TRACK_REFS > 1
1622static const char *da_ref_text[] = {
1623	"bogus",
1624	"open",
1625	"open hold",
1626	"close hold",
1627	"reprobe hold",
1628	"Test Unit Ready",
1629	"Geom",
1630	"sysctl",
1631	"reprobe",
1632	"max -- also bogus"
1633};
1634
1635#define DA_PERIPH_PRINT(periph, msg, args...)		\
1636	CAM_PERIPH_PRINT(periph, msg, ##args)
1637#else
1638#define DA_PERIPH_PRINT(periph, msg, args...)
1639#endif
1640
1641static inline void
1642token_sanity(da_ref_token token)
1643{
1644	if ((unsigned)token >= DA_REF_MAX)
1645		panic("Bad token value passed in %d\n", token);
1646}
1647
1648static inline int
1649da_periph_hold(struct cam_periph *periph, int priority, da_ref_token token)
1650{
1651	int err = cam_periph_hold(periph, priority);
1652
1653	token_sanity(token);
1654	DA_PERIPH_PRINT(periph, "Holding device %s (%d): %d\n",
1655	    da_ref_text[token], token, err);
1656	if (err == 0) {
1657		int cnt;
1658		struct da_softc *softc = periph->softc;
1659
1660		cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
1661		if (cnt != 0)
1662			panic("Re-holding for reason %d, cnt = %d", token, cnt);
1663	}
1664	return (err);
1665}
1666
1667static inline void
1668da_periph_unhold(struct cam_periph *periph, da_ref_token token)
1669{
1670	int cnt;
1671	struct da_softc *softc = periph->softc;
1672
1673	token_sanity(token);
1674	DA_PERIPH_PRINT(periph, "Unholding device %s (%d)\n",
1675	    da_ref_text[token], token);
1676	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
1677	if (cnt != 1)
1678		panic("Unholding %d with cnt = %d", token, cnt);
1679	cam_periph_unhold(periph);
1680}
1681
1682static inline int
1683da_periph_acquire(struct cam_periph *periph, da_ref_token token)
1684{
1685	int err = cam_periph_acquire(periph);
1686
1687	token_sanity(token);
1688	DA_PERIPH_PRINT(periph, "acquiring device %s (%d): %d\n",
1689	    da_ref_text[token], token, err);
1690	if (err == 0) {
1691		int cnt;
1692		struct da_softc *softc = periph->softc;
1693
1694		cnt = atomic_fetchadd_int(&softc->ref_flags[token], 1);
1695		if (cnt != 0)
1696			panic("Re-refing for reason %d, cnt = %d", token, cnt);
1697	}
1698	return (err);
1699}
1700
1701static inline void
1702da_periph_release(struct cam_periph *periph, da_ref_token token)
1703{
1704	int cnt;
1705	struct da_softc *softc = periph->softc;
1706
1707	token_sanity(token);
1708	DA_PERIPH_PRINT(periph, "releasing device %s (%d)\n",
1709	    da_ref_text[token], token);
1710	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
1711	if (cnt != 1)
1712		panic("Releasing %d with cnt = %d", token, cnt);
1713	cam_periph_release(periph);
1714}
1715
1716static inline void
1717da_periph_release_locked(struct cam_periph *periph, da_ref_token token)
1718{
1719	int cnt;
1720	struct da_softc *softc = periph->softc;
1721
1722	token_sanity(token);
1723	DA_PERIPH_PRINT(periph, "releasing device (locked) %s (%d)\n",
1724	    da_ref_text[token], token);
1725	cnt = atomic_fetchadd_int(&softc->ref_flags[token], -1);
1726	if (cnt != 1)
1727		panic("releasing (locked) %d with cnt = %d", token, cnt);
1728	cam_periph_release_locked(periph);
1729}
1730
1731#define cam_periph_hold POISON
1732#define cam_periph_unhold POISON
1733#define cam_periph_acquire POISON
1734#define cam_periph_release POISON
1735#define cam_periph_release_locked POISON
1736
1737#else
1738#define	da_periph_hold(periph, prio, token)	cam_periph_hold((periph), (prio))
1739#define da_periph_unhold(periph, token)		cam_periph_unhold((periph))
1740#define da_periph_acquire(periph, token)	cam_periph_acquire((periph))
1741#define da_periph_release(periph, token)	cam_periph_release((periph))
1742#define da_periph_release_locked(periph, token)	cam_periph_release_locked((periph))
1743#endif
1744
1745static int
1746daopen(struct disk *dp)
1747{
1748	struct cam_periph *periph;
1749	struct da_softc *softc;
1750	int error;
1751
1752	periph = (struct cam_periph *)dp->d_drv1;
1753	if (da_periph_acquire(periph, DA_REF_OPEN) != 0) {
1754		return (ENXIO);
1755	}
1756
1757	cam_periph_lock(periph);
1758	if ((error = da_periph_hold(periph, PRIBIO|PCATCH, DA_REF_OPEN_HOLD)) != 0) {
1759		cam_periph_unlock(periph);
1760		da_periph_release(periph, DA_REF_OPEN);
1761		return (error);
1762	}
1763
1764	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
1765	    ("daopen\n"));
1766
1767	softc = (struct da_softc *)periph->softc;
1768	dareprobe(periph);
1769
1770	/* Wait for the disk size update.  */
1771	error = cam_periph_sleep(periph, &softc->disk->d_mediasize, PRIBIO,
1772	    "dareprobe", 0);
1773	if (error != 0)
1774		xpt_print(periph->path, "unable to retrieve capacity data\n");
1775
1776	if (periph->flags & CAM_PERIPH_INVALID)
1777		error = ENXIO;
1778
1779	if (error == 0 && (softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
1780	    (softc->quirks & DA_Q_NO_PREVENT) == 0)
1781		daprevent(periph, PR_PREVENT);
1782
1783	if (error == 0) {
1784		softc->flags &= ~DA_FLAG_PACK_INVALID;
1785		softc->flags |= DA_FLAG_OPEN;
1786	}
1787
1788	da_periph_unhold(periph, DA_REF_OPEN_HOLD);
1789	cam_periph_unlock(periph);
1790
1791	if (error != 0)
1792		da_periph_release(periph, DA_REF_OPEN);
1793
1794	return (error);
1795}
1796
1797static int
1798daclose(struct disk *dp)
1799{
1800	struct	cam_periph *periph;
1801	struct	da_softc *softc;
1802	union	ccb *ccb;
1803
1804	periph = (struct cam_periph *)dp->d_drv1;
1805	softc = (struct da_softc *)periph->softc;
1806	cam_periph_lock(periph);
1807	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
1808	    ("daclose\n"));
1809
1810	if (da_periph_hold(periph, PRIBIO, DA_REF_CLOSE_HOLD) == 0) {
1811		/* Flush disk cache. */
1812		if ((softc->flags & DA_FLAG_DIRTY) != 0 &&
1813		    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0 &&
1814		    (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
1815			ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1816			scsi_synchronize_cache(&ccb->csio, /*retries*/1,
1817			    /*cbfcnp*/NULL, MSG_SIMPLE_Q_TAG,
1818			    /*begin_lba*/0, /*lb_count*/0, SSD_FULL_SIZE,
1819			    5 * 60 * 1000);
1820			cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
1821			    /*sense_flags*/SF_RETRY_UA | SF_QUIET_IR,
1822			    softc->disk->d_devstat);
1823			softc->flags &= ~DA_FLAG_DIRTY;
1824			xpt_release_ccb(ccb);
1825		}
1826
1827		/* Allow medium removal. */
1828		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
1829		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
1830			daprevent(periph, PR_ALLOW);
1831
1832		da_periph_unhold(periph, DA_REF_CLOSE_HOLD);
1833	}
1834
1835	/*
1836	 * If we've got removable media, mark the blocksize as
1837	 * unavailable, since it could change when new media is
1838	 * inserted.
1839	 */
1840	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
1841		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;
1842
1843	softc->flags &= ~DA_FLAG_OPEN;
1844	while (softc->refcount != 0)
1845		cam_periph_sleep(periph, &softc->refcount, PRIBIO, "daclose", 1);
1846	cam_periph_unlock(periph);
1847	da_periph_release(periph, DA_REF_OPEN);
1848	return (0);
1849}
1850
1851static void
1852daschedule(struct cam_periph *periph)
1853{
1854	struct da_softc *softc = (struct da_softc *)periph->softc;
1855
1856	if (softc->state != DA_STATE_NORMAL)
1857		return;
1858
1859	cam_iosched_schedule(softc->cam_iosched, periph);
1860}
1861
1862/*
1863 * Actually translate the requested transfer into one the physical driver
1864 * can understand.  The transfer is described by a buf and will include
1865 * only one physical transfer.
1866 */
1867static void
1868dastrategy(struct bio *bp)
1869{
1870	struct cam_periph *periph;
1871	struct da_softc *softc;
1872
1873	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1874	softc = (struct da_softc *)periph->softc;
1875
1876	cam_periph_lock(periph);
1877
1878	/*
1879	 * If the device has been made invalid, error out
1880	 */
1881	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
1882		cam_periph_unlock(periph);
1883		biofinish(bp, NULL, ENXIO);
1884		return;
1885	}
1886
1887	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastrategy(%p)\n", bp));
1888
1889	/*
1890	 * Zone commands must be ordered, because they can depend on the
1891	 * effects of previously issued commands, and they may affect
1892	 * commands after them.
1893	 */
1894	if (bp->bio_cmd == BIO_ZONE)
1895		bp->bio_flags |= BIO_ORDERED;
1896
1897	/*
1898	 * Place it in the queue of disk activities for this disk
1899	 */
1900	cam_iosched_queue_work(softc->cam_iosched, bp);
1901
1902	/*
1903	 * Schedule ourselves for performing the work.
1904	 */
1905	daschedule(periph);
1906	cam_periph_unlock(periph);
1907
1908	return;
1909}
1910
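/*
 * Kernel crash dump entry point.  With a non-zero length the supplied buffer
 * is written to the media with a WRITE built by scsi_read_write() and run to
 * completion via cam_periph_runccb(); with a zero length this is the final
 * call and the write cache is synchronized (unless the device has the
 * no-sync-cache quirk).
 */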
1911static int
1912dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
1913{
1914	struct	    cam_periph *periph;
1915	struct	    da_softc *softc;
1916	u_int	    secsize;
1917	struct	    ccb_scsiio csio;
1918	struct	    disk *dp;
1919	int	    error = 0;
1920
1921	dp = arg;
1922	periph = dp->d_drv1;
1923	softc = (struct da_softc *)periph->softc;
1924	secsize = softc->params.secsize;
1925
1926	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
1927		return (ENXIO);
1928
1929	memset(&csio, 0, sizeof(csio));
1930	if (length > 0) {
1931		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1932		csio.ccb_h.ccb_state = DA_CCB_DUMP;
1933		scsi_read_write(&csio,
1934				/*retries*/0,
1935				/*cbfcnp*/NULL,
1936				MSG_ORDERED_Q_TAG,
1937				/*read*/SCSI_RW_WRITE,
1938				/*byte2*/0,
1939				/*minimum_cmd_size*/ softc->minimum_cmd_size,
1940				offset / secsize,
1941				length / secsize,
1942				/*data_ptr*/(u_int8_t *) virtual,
1943				/*dxfer_len*/length,
1944				/*sense_len*/SSD_FULL_SIZE,
1945				da_default_timeout * 1000);
1946		error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
1947		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
1948		if (error != 0)
1949			printf("Aborting dump due to I/O error.\n");
1950		return (error);
1951	}
1952
1953	/*
1954	 * Sync the disk cache contents to the physical media.
1955	 */
1956	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
1957		xpt_setup_ccb(&csio.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
1958		csio.ccb_h.ccb_state = DA_CCB_DUMP;
1959		scsi_synchronize_cache(&csio,
1960				       /*retries*/0,
1961				       /*cbfcnp*/NULL,
1962				       MSG_SIMPLE_Q_TAG,
1963				       /*begin_lba*/0,/* Cover the whole disk */
1964				       /*lb_count*/0,
1965				       SSD_FULL_SIZE,
1966				       5 * 1000);
1967		error = cam_periph_runccb((union ccb *)&csio, cam_periph_error,
1968		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
1969		if (error != 0)
1970			xpt_print(periph->path, "Synchronize cache failed\n");
1971	}
1972	return (error);
1973}
1974
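/*
 * Handle GEOM attribute requests.  "GEOM::canspeedup" is answered locally;
 * everything else is passed to the transport via xpt_getattr().
 */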
1975static int
1976dagetattr(struct bio *bp)
1977{
1978	int ret;
1979	struct cam_periph *periph;
1980
1981	if (g_handleattr_int(bp, "GEOM::canspeedup", da_enable_biospeedup))
1982		return (EJUSTRETURN);
1983
1984	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1985	cam_periph_lock(periph);
1986	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1987	    periph->path);
1988	cam_periph_unlock(periph);
1989	if (ret == 0)
1990		bp->bio_completed = bp->bio_length;
1991	return ret;
1992}
1993
1994static void
1995dainit(void)
1996{
1997	cam_status status;
1998
1999	/*
2000	 * Install a global async callback.  This callback will
2001	 * receive async callbacks like "new device found".
2002	 */
2003	status = xpt_register_async(AC_FOUND_DEVICE, daasync, NULL, NULL);
2004
2005	if (status != CAM_REQ_CMP) {
2006		printf("da: Failed to attach master async callback "
2007		       "due to status 0x%x!\n", status);
2008	} else if (da_send_ordered) {
2009		/* Register our shutdown event handler */
2010		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
2011					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
2012		    printf("dainit: shutdown event registration failed!\n");
2013	}
2014}
2015
2016/*
2017 * Callback from GEOM, called when it has finished cleaning up its
2018 * resources.
2019 */
2020static void
2021dadiskgonecb(struct disk *dp)
2022{
2023	struct cam_periph *periph;
2024
2025	periph = (struct cam_periph *)dp->d_drv1;
2026	da_periph_release(periph, DA_REF_GEOM);
2027}
2028
2029static void
2030daoninvalidate(struct cam_periph *periph)
2031{
2032	struct da_softc *softc;
2033
2034	cam_periph_assert(periph, MA_OWNED);
2035	softc = (struct da_softc *)periph->softc;
2036
2037	/*
2038	 * De-register any async callbacks.
2039	 */
2040	xpt_register_async(0, daasync, periph, periph->path);
2041
2042	softc->flags |= DA_FLAG_PACK_INVALID;
2043#ifdef CAM_IO_STATS
2044	softc->invalidations++;
2045#endif
2046
2047	/*
2048	 * Return all queued I/O with ENXIO.
2049	 * XXX Handle any transactions queued to the card
2050	 *     with XPT_ABORT_CCB.
2051	 */
2052	cam_iosched_flush(softc->cam_iosched, NULL, ENXIO);
2053
2054	/*
2055	 * Tell GEOM that we've gone away, we'll get a callback when it is
2056	 * done cleaning up its resources.
2057	 */
2058	disk_gone(softc->disk);
2059}
2060
2061static void
2062dacleanup(struct cam_periph *periph)
2063{
2064	struct da_softc *softc;
2065
2066	softc = (struct da_softc *)periph->softc;
2067
2068	cam_periph_unlock(periph);
2069
2070	cam_iosched_fini(softc->cam_iosched);
2071
2072	/*
2073	 * If we can't free the sysctl tree, oh well...
2074	 */
2075	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0) {
2076#ifdef CAM_IO_STATS
2077		if (sysctl_ctx_free(&softc->sysctl_stats_ctx) != 0)
2078			xpt_print(periph->path,
2079			    "can't remove sysctl stats context\n");
2080#endif
2081		if (sysctl_ctx_free(&softc->sysctl_ctx) != 0)
2082			xpt_print(periph->path,
2083			    "can't remove sysctl context\n");
2084	}
2085
2086	callout_drain(&softc->mediapoll_c);
2087	disk_destroy(softc->disk);
2088	callout_drain(&softc->sendordered_c);
2089	free(softc, M_DEVBUF);
2090	cam_periph_lock(periph);
2091}
2092
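/*
 * Asynchronous event callback.  AC_FOUND_DEVICE allocates a new da periph
 * for suitable SCSI direct-access devices, AC_ADVINFO_CHANGED pushes
 * physical path changes to GEOM, AC_UNIT_ATTENTION reacts to capacity,
 * INQUIRY, or media change unit attentions reported on another periph's
 * commands, AC_SCSI_AEN schedules a TEST UNIT READY, bus and target resets
 * flag the expected unit attention so it is retried, and AC_INQ_CHANGED
 * triggers a reprobe.
 */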
2093static void
2094daasync(void *callback_arg, u_int32_t code,
2095	struct cam_path *path, void *arg)
2096{
2097	struct cam_periph *periph;
2098	struct da_softc *softc;
2099
2100	periph = (struct cam_periph *)callback_arg;
2101	switch (code) {
2102	case AC_FOUND_DEVICE:	/* callback to create periph, no locking yet */
2103	{
2104		struct ccb_getdev *cgd;
2105		cam_status status;
2106
2107		cgd = (struct ccb_getdev *)arg;
2108		if (cgd == NULL)
2109			break;
2110
2111		if (cgd->protocol != PROTO_SCSI)
2112			break;
2113		if (SID_QUAL(&cgd->inq_data) != SID_QUAL_LU_CONNECTED)
2114			break;
2115		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
2116		    && SID_TYPE(&cgd->inq_data) != T_RBC
2117		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL
2118		    && SID_TYPE(&cgd->inq_data) != T_ZBC_HM)
2119			break;
2120
2121		/*
2122		 * Allocate a peripheral instance for
2123		 * this device and start the probe
2124		 * process.
2125		 */
2126		status = cam_periph_alloc(daregister, daoninvalidate,
2127					  dacleanup, dastart,
2128					  "da", CAM_PERIPH_BIO,
2129					  path, daasync,
2130					  AC_FOUND_DEVICE, cgd);
2131
2132		if (status != CAM_REQ_CMP
2133		 && status != CAM_REQ_INPROG)
2134			printf("daasync: Unable to attach to new device "
2135				"due to status 0x%x\n", status);
2136		return;
2137	}
2138	case AC_ADVINFO_CHANGED:	/* Doesn't touch periph */
2139	{
2140		uintptr_t buftype;
2141
2142		buftype = (uintptr_t)arg;
2143		if (buftype == CDAI_TYPE_PHYS_PATH) {
2144			struct da_softc *softc;
2145
2146			softc = periph->softc;
2147			disk_attr_changed(softc->disk, "GEOM::physpath",
2148					  M_NOWAIT);
2149		}
2150		break;
2151	}
2152	case AC_UNIT_ATTENTION:
2153	{
2154		union ccb *ccb;
2155		int error_code, sense_key, asc, ascq;
2156
2157		softc = (struct da_softc *)periph->softc;
2158		ccb = (union ccb *)arg;
2159
2160		/*
2161		 * Handle all UNIT ATTENTIONs except our own, as they will be
2162		 * handled by daerror(). Since this comes from a different periph,
2163		 * that periph's lock is held, not ours, so we have to take our own
2164		 * lock before touching the softc flags.
2165		 */
2166		if (xpt_path_periph(ccb->ccb_h.path) != periph &&
2167		    scsi_extract_sense_ccb(ccb,
2168		     &error_code, &sense_key, &asc, &ascq)) {
2169			if (asc == 0x2A && ascq == 0x09) {
2170				xpt_print(ccb->ccb_h.path,
2171				    "Capacity data has changed\n");
2172				cam_periph_lock(periph);
2173				softc->flags &= ~DA_FLAG_PROBED;
2174				dareprobe(periph);
2175				cam_periph_unlock(periph);
2176			} else if (asc == 0x28 && ascq == 0x00) {
2177				cam_periph_lock(periph);
2178				softc->flags &= ~DA_FLAG_PROBED;
2179				cam_periph_unlock(periph);
2180				disk_media_changed(softc->disk, M_NOWAIT);
2181			} else if (asc == 0x3F && ascq == 0x03) {
2182				xpt_print(ccb->ccb_h.path,
2183				    "INQUIRY data has changed\n");
2184				cam_periph_lock(periph);
2185				softc->flags &= ~DA_FLAG_PROBED;
2186				dareprobe(periph);
2187				cam_periph_unlock(periph);
2188			}
2189		}
2190		break;
2191	}
2192	case AC_SCSI_AEN:		/* Called for this path: periph locked */
2193		/*
2194		 * Appears to be currently unused for SCSI devices; only ATA SIMs
2195		 * generate this.
2196		 */
2197		cam_periph_assert(periph, MA_OWNED);
2198		softc = (struct da_softc *)periph->softc;
2199		if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
2200		    (softc->flags & DA_FLAG_TUR_PENDING) == 0) {
2201			if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
2202				cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
2203				daschedule(periph);
2204			}
2205		}
2206		/* FALLTHROUGH */
2207	case AC_SENT_BDR:		/* Called for this path: periph locked */
2208	case AC_BUS_RESET:		/* Called for this path: periph locked */
2209	{
2210		struct ccb_hdr *ccbh;
2211
2212		cam_periph_assert(periph, MA_OWNED);
2213		softc = (struct da_softc *)periph->softc;
2214		/*
2215		 * Don't fail on the expected unit attention
2216		 * that will occur.
2217		 */
2218		softc->flags |= DA_FLAG_RETRY_UA;
2219		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
2220			ccbh->ccb_state |= DA_CCB_RETRY_UA;
2221		break;
2222	}
2223	case AC_INQ_CHANGED:		/* Called for this path: periph locked */
2224		cam_periph_assert(periph, MA_OWNED);
2225		softc = (struct da_softc *)periph->softc;
2226		softc->flags &= ~DA_FLAG_PROBED;
2227		dareprobe(periph);
2228		break;
2229	default:
2230		break;
2231	}
2232	cam_periph_async(periph, code, path, arg);
2233}
2234
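/*
 * Deferred task that builds the per-unit sysctl tree under
 * kern.cam.da.<unit>: delete_method, delete_max, minimum_cmd_size, zone and
 * trim statistics, flags, optional I/O statistics, plus the I/O scheduler's
 * own nodes.  As an illustration (unit number and method name assumed), the
 * delete method can be inspected or changed at run time with:
 *
 *	sysctl kern.cam.da.0.delete_method
 *	sysctl kern.cam.da.0.delete_method=<method name>
 */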
2235static void
2236dasysctlinit(void *context, int pending)
2237{
2238	struct cam_periph *periph;
2239	struct da_softc *softc;
2240	char tmpstr[32], tmpstr2[16];
2241	struct ccb_trans_settings cts;
2242
2243	periph = (struct cam_periph *)context;
2244	/*
2245	 * periph was held for us when this task was enqueued
2246	 */
2247	if (periph->flags & CAM_PERIPH_INVALID) {
2248		da_periph_release(periph, DA_REF_SYSCTL);
2249		return;
2250	}
2251
2252	softc = (struct da_softc *)periph->softc;
2253	snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
2254	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
2255
2256	sysctl_ctx_init(&softc->sysctl_ctx);
2257	cam_periph_lock(periph);
2258	softc->flags |= DA_FLAG_SCTX_INIT;
2259	cam_periph_unlock(periph);
2260	softc->sysctl_tree = SYSCTL_ADD_NODE_WITH_LABEL(&softc->sysctl_ctx,
2261		SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
2262		CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr, "device_index");
2263	if (softc->sysctl_tree == NULL) {
2264		printf("dasysctlinit: unable to allocate sysctl tree\n");
2265		da_periph_release(periph, DA_REF_SYSCTL);
2266		return;
2267	}
2268
2269	/*
2270	 * Now register the sysctl handler, so the user can change the value on
2271	 * the fly.
2272	 */
2273	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2274		OID_AUTO, "delete_method",
2275		CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT,
2276		softc, 0, dadeletemethodsysctl, "A",
2277		"BIO_DELETE execution method");
2278	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2279		OID_AUTO, "delete_max",
2280		CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2281		softc, 0, dadeletemaxsysctl, "Q",
2282		"Maximum BIO_DELETE size");
2283	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2284		OID_AUTO, "minimum_cmd_size",
2285		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2286		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
2287		"Minimum CDB size");
2288	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2289		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2290		"trim_count", CTLFLAG_RD, &softc->trim_count,
2291		"Total number of unmap/dsm commands sent");
2292	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2293		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2294		"trim_ranges", CTLFLAG_RD, &softc->trim_ranges,
2295		"Total number of ranges in unmap/dsm commands");
2296	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2297		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2298		"trim_lbas", CTLFLAG_RD, &softc->trim_lbas,
2299		"Total lbas in the unmap/dsm commands sent");
2300
2301	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2302		OID_AUTO, "zone_mode",
2303		CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2304		softc, 0, dazonemodesysctl, "A",
2305		"Zone Mode");
2306	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2307		OID_AUTO, "zone_support",
2308		CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2309		softc, 0, dazonesupsysctl, "A",
2310		"Zone Support");
2311	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2312		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2313		"optimal_seq_zones", CTLFLAG_RD, &softc->optimal_seq_zones,
2314		"Optimal Number of Open Sequential Write Preferred Zones");
2315	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2316		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2317		"optimal_nonseq_zones", CTLFLAG_RD,
2318		&softc->optimal_nonseq_zones,
2319		"Optimal Number of Non-Sequentially Written Sequential Write "
2320		"Preferred Zones");
2321	SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2322		SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO,
2323		"max_seq_zones", CTLFLAG_RD, &softc->max_seq_zones,
2324		"Maximum Number of Open Sequential Write Required Zones");
2325
2326	SYSCTL_ADD_INT(&softc->sysctl_ctx,
2327		       SYSCTL_CHILDREN(softc->sysctl_tree),
2328		       OID_AUTO,
2329		       "error_inject",
2330		       CTLFLAG_RW,
2331		       &softc->error_inject,
2332		       0,
2333		       "error_inject leaf");
2334
2335	SYSCTL_ADD_INT(&softc->sysctl_ctx,
2336		       SYSCTL_CHILDREN(softc->sysctl_tree),
2337		       OID_AUTO,
2338		       "p_type",
2339		       CTLFLAG_RD,
2340		       &softc->p_type,
2341		       0,
2342		       "DIF protection type");
2343
2344	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2345	    OID_AUTO, "flags", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
2346	    softc, 0, daflagssysctl, "A",
2347	    "Flags for drive");
2348	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2349	    OID_AUTO, "rotating", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
2350	    &softc->flags, (u_int)DA_FLAG_ROTATING, dabitsysctl, "I",
2351	    "Rotating media *DEPRECATED* gone in FreeBSD 14");
2352	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2353	    OID_AUTO, "unmapped_io", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE,
2354	    &softc->flags, (u_int)DA_FLAG_UNMAPPEDIO, dabitsysctl, "I",
2355	    "Unmapped I/O support *DEPRECATED* gone in FreeBSD 14");
2356
2357#ifdef CAM_TEST_FAILURE
2358	SYSCTL_ADD_PROC(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
2359		OID_AUTO, "invalidate", CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE,
2360		periph, 0, cam_periph_invalidate_sysctl, "I",
2361		"Write 1 to invalidate the drive immediately");
2362#endif
2363
2364	/*
2365	 * Add some addressing info.
2366	 */
2367	memset(&cts, 0, sizeof (cts));
2368	xpt_setup_ccb(&cts.ccb_h, periph->path, CAM_PRIORITY_NONE);
2369	cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
2370	cts.type = CTS_TYPE_CURRENT_SETTINGS;
2371	cam_periph_lock(periph);
2372	xpt_action((union ccb *)&cts);
2373	cam_periph_unlock(periph);
2374	if (cts.ccb_h.status != CAM_REQ_CMP) {
2375		da_periph_release(periph, DA_REF_SYSCTL);
2376		return;
2377	}
2378	if (cts.protocol == PROTO_SCSI && cts.transport == XPORT_FC) {
2379		struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
2380		if (fc->valid & CTS_FC_VALID_WWPN) {
2381			softc->wwpn = fc->wwpn;
2382			SYSCTL_ADD_UQUAD(&softc->sysctl_ctx,
2383			    SYSCTL_CHILDREN(softc->sysctl_tree),
2384			    OID_AUTO, "wwpn", CTLFLAG_RD,
2385			    &softc->wwpn, "World Wide Port Name");
2386		}
2387	}
2388
2389#ifdef CAM_IO_STATS
2390	/*
2391	 * Now add some useful stats.
2392	 * XXX These should live in cam_periph and be common to all periphs
2393	 */
2394	softc->sysctl_stats_tree = SYSCTL_ADD_NODE(&softc->sysctl_stats_ctx,
2395	    SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, "stats",
2396	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Statistics");
2397	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2398		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2399		       OID_AUTO,
2400		       "errors",
2401		       CTLFLAG_RD,
2402		       &softc->errors,
2403		       0,
2404		       "Transport errors reported by the SIM");
2405	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2406		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2407		       OID_AUTO,
2408		       "timeouts",
2409		       CTLFLAG_RD,
2410		       &softc->timeouts,
2411		       0,
2412		       "Device timeouts reported by the SIM");
2413	SYSCTL_ADD_INT(&softc->sysctl_stats_ctx,
2414		       SYSCTL_CHILDREN(softc->sysctl_stats_tree),
2415		       OID_AUTO,
2416		       "pack_invalidations",
2417		       CTLFLAG_RD,
2418		       &softc->invalidations,
2419		       0,
2420		       "Device pack invalidations");
2421#endif
2422
2423	cam_iosched_sysctl_init(softc->cam_iosched, &softc->sysctl_ctx,
2424	    softc->sysctl_tree);
2425
2426	da_periph_release(periph, DA_REF_SYSCTL);
2427}
2428
2429static int
2430dadeletemaxsysctl(SYSCTL_HANDLER_ARGS)
2431{
2432	int error;
2433	uint64_t value;
2434	struct da_softc *softc;
2435
2436	softc = (struct da_softc *)arg1;
2437
2438	value = softc->disk->d_delmaxsize;
2439	error = sysctl_handle_64(oidp, &value, 0, req);
2440	if ((error != 0) || (req->newptr == NULL))
2441		return (error);
2442
2443	/* only accept values no larger than the calculated maximum */
2444	if (value > dadeletemaxsize(softc, softc->delete_method)) {
2445		return (EINVAL);
2446	}
2447	softc->disk->d_delmaxsize = value;
2448
2449	return (0);
2450}
2451
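/*
 * Sysctl handler for the minimum CDB size.  Values written by the user are
 * rounded to a supported size: 6 or less becomes 6, 7-10 becomes 10, 11-12
 * becomes 12, and anything larger becomes 16.
 */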
2452static int
2453dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
2454{
2455	int error, value;
2456
2457	value = *(int *)arg1;
2458
2459	error = sysctl_handle_int(oidp, &value, 0, req);
2460
2461	if ((error != 0)
2462	 || (req->newptr == NULL))
2463		return (error);
2464
2465	/*
2466	 * Acceptable values here are 6, 10, 12 or 16.
2467	 */
2468	if (value < 6)
2469		value = 6;
2470	else if ((value > 6)
2471	      && (value <= 10))
2472		value = 10;
2473	else if ((value > 10)
2474	      && (value <= 12))
2475		value = 12;
2476	else if (value > 12)
2477		value = 16;
2478
2479	*(int *)arg1 = value;
2480
2481	return (0);
2482}
2483
2484static int
2485dasysctlsofttimeout(SYSCTL_HANDLER_ARGS)
2486{
2487	sbintime_t value;
2488	int error;
2489
2490	value = da_default_softtimeout / SBT_1MS;
2491
2492	error = sysctl_handle_int(oidp, (int *)&value, 0, req);
2493	if ((error != 0) || (req->newptr == NULL))
2494		return (error);
2495
2496	/* XXX Should clip this to a reasonable level */
2497	if (value > da_default_timeout * 1000)
2498		return (EINVAL);
2499
2500	da_default_softtimeout = value * SBT_1MS;
2501	return (0);
2502}
2503
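/*
 * Install a BIO_DELETE method: record it, recompute the maximum delete size
 * advertised to GEOM, point delete_func at the matching implementation, and
 * set or clear DISKFLAG_CANDELETE accordingly.
 */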
2504static void
2505dadeletemethodset(struct da_softc *softc, da_delete_methods delete_method)
2506{
2507
2508	softc->delete_method = delete_method;
2509	softc->disk->d_delmaxsize = dadeletemaxsize(softc, delete_method);
2510	softc->delete_func = da_delete_functions[delete_method];
2511
2512	if (softc->delete_method > DA_DELETE_DISABLE)
2513		softc->disk->d_flags |= DISKFLAG_CANDELETE;
2514	else
2515		softc->disk->d_flags &= ~DISKFLAG_CANDELETE;
2516}
2517
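/*
 * Return the largest BIO_DELETE, in bytes, that a single command can cover
 * for the given delete method, clamped to the size of the device.
 */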
2518static off_t
2519dadeletemaxsize(struct da_softc *softc, da_delete_methods delete_method)
2520{
2521	off_t sectors;
2522
2523	switch(delete_method) {
2524	case DA_DELETE_UNMAP:
2525		sectors = (off_t)softc->unmap_max_lba;
2526		break;
2527	case DA_DELETE_ATA_TRIM:
2528		sectors = (off_t)ATA_DSM_RANGE_MAX * softc->trim_max_ranges;
2529		break;
2530	case DA_DELETE_WS16:
2531		sectors = omin(softc->ws_max_blks, WS16_MAX_BLKS);
2532		break;
2533	case DA_DELETE_ZERO:
2534	case DA_DELETE_WS10:
2535		sectors = omin(softc->ws_max_blks, WS10_MAX_BLKS);
2536		break;
2537	default:
2538		return 0;
2539	}
2540
2541	return (off_t)softc->params.secsize *
2542	    omin(sectors, softc->params.sectors);
2543}
2544
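/*
 * Finish the probe sequence: choose a delete method, announce the available
 * delete methods and write-protect status once, release the probe CCB,
 * switch to DA_STATE_NORMAL, and wake anyone waiting on the media size.
 */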
2545static void
2546daprobedone(struct cam_periph *periph, union ccb *ccb)
2547{
2548	struct da_softc *softc;
2549
2550	softc = (struct da_softc *)periph->softc;
2551
2552	cam_periph_assert(periph, MA_OWNED);
2553
2554	dadeletemethodchoose(softc, DA_DELETE_NONE);
2555
2556	if (bootverbose && (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2557		char buf[80];
2558		int i, sep;
2559
2560		snprintf(buf, sizeof(buf), "Delete methods: <");
2561		sep = 0;
2562		for (i = 0; i <= DA_DELETE_MAX; i++) {
2563			if ((softc->delete_available & (1 << i)) == 0 &&
2564			    i != softc->delete_method)
2565				continue;
2566			if (sep)
2567				strlcat(buf, ",", sizeof(buf));
2568			strlcat(buf, da_delete_method_names[i],
2569			    sizeof(buf));
2570			if (i == softc->delete_method)
2571				strlcat(buf, "(*)", sizeof(buf));
2572			sep = 1;
2573		}
2574		strlcat(buf, ">", sizeof(buf));
2575		printf("%s%d: %s\n", periph->periph_name,
2576		    periph->unit_number, buf);
2577	}
2578	if ((softc->disk->d_flags & DISKFLAG_WRITE_PROTECT) != 0 &&
2579	    (softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2580		printf("%s%d: Write Protected\n", periph->periph_name,
2581		    periph->unit_number);
2582	}
2583
2584	/*
2585	 * Since our peripheral may be invalidated by an error
2586	 * above or an external event, we must release our CCB
2587	 * before releasing the probe lock on the peripheral.
2588	 * The peripheral will only go away once the last lock
2589	 * is removed, and we need it around for the CCB release
2590	 * operation.
2591	 */
2592	xpt_release_ccb(ccb);
2593	softc->state = DA_STATE_NORMAL;
2594	softc->flags |= DA_FLAG_PROBED;
2595	daschedule(periph);
2596	wakeup(&softc->disk->d_mediasize);
2597	if ((softc->flags & DA_FLAG_ANNOUNCED) == 0) {
2598		softc->flags |= DA_FLAG_ANNOUNCED;
2599		da_periph_unhold(periph, DA_REF_PROBE_HOLD);
2600	} else
2601		da_periph_release_locked(periph, DA_REF_REPROBE);
2602}
2603
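/*
 * Pick the delete method to use: honor the user's preference when the device
 * offers it, otherwise take the first available method in the predefined
 * preference order (skipping ZERO), and fall back to the supplied default if
 * nothing else is available.
 */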
2604static void
2605dadeletemethodchoose(struct da_softc *softc, da_delete_methods default_method)
2606{
2607	int i, methods;
2608
2609	/* If available, prefer the method requested by user. */
2610	i = softc->delete_method_pref;
2611	methods = softc->delete_available | (1 << DA_DELETE_DISABLE);
2612	if (methods & (1 << i)) {
2613		dadeletemethodset(softc, i);
2614		return;
2615	}
2616
2617	/* Use the pre-defined order to choose the best performing delete. */
2618	for (i = DA_DELETE_MIN; i <= DA_DELETE_MAX; i++) {
2619		if (i == DA_DELETE_ZERO)
2620			continue;
2621		if (softc->delete_available & (1 << i)) {
2622			dadeletemethodset(softc, i);
2623			return;
2624		}
2625	}
2626
2627	/* Fall back to the default. */
2628	dadeletemethodset(softc, default_method);
2629}
2630
2631static int
2632dabitsysctl(SYSCTL_HANDLER_ARGS)
2633{
2634	u_int *flags = arg1;
2635	u_int test = arg2;
2636	int tmpout, error;
2637
2638	tmpout = !!(*flags & test);
2639	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));
2640	if (error || !req->newptr)
2641		return (error);
2642
2643	return (EPERM);
2644}
2645
2646static int
2647daflagssysctl(SYSCTL_HANDLER_ARGS)
2648{
2649	struct sbuf sbuf;
2650	struct da_softc *softc = arg1;
2651	int error;
2652
2653	sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
2654	if (softc->flags != 0)
2655		sbuf_printf(&sbuf, "0x%b", (unsigned)softc->flags, DA_FLAG_STRING);
2656	else
2657		sbuf_printf(&sbuf, "0");
2658	error = sbuf_finish(&sbuf);
2659	sbuf_delete(&sbuf);
2660
2661	return (error);
2662}
2663
2664static int
2665dadeletemethodsysctl(SYSCTL_HANDLER_ARGS)
2666{
2667	char buf[16];
2668	const char *p;
2669	struct da_softc *softc;
2670	int i, error, value;
2671
2672	softc = (struct da_softc *)arg1;
2673
2674	value = softc->delete_method;
2675	if (value < 0 || value > DA_DELETE_MAX)
2676		p = "UNKNOWN";
2677	else
2678		p = da_delete_method_names[value];
2679	strncpy(buf, p, sizeof(buf));
2680	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
2681	if (error != 0 || req->newptr == NULL)
2682		return (error);
2683	for (i = 0; i <= DA_DELETE_MAX; i++) {
2684		if (strcmp(buf, da_delete_method_names[i]) == 0)
2685			break;
2686	}
2687	if (i > DA_DELETE_MAX)
2688		return (EINVAL);
2689	softc->delete_method_pref = i;
2690	dadeletemethodchoose(softc, DA_DELETE_NONE);
2691	return (0);
2692}
2693
2694static int
2695dazonemodesysctl(SYSCTL_HANDLER_ARGS)
2696{
2697	char tmpbuf[40];
2698	struct da_softc *softc;
2699	int error;
2700
2701	softc = (struct da_softc *)arg1;
2702
2703	switch (softc->zone_mode) {
2704	case DA_ZONE_DRIVE_MANAGED:
2705		snprintf(tmpbuf, sizeof(tmpbuf), "Drive Managed");
2706		break;
2707	case DA_ZONE_HOST_AWARE:
2708		snprintf(tmpbuf, sizeof(tmpbuf), "Host Aware");
2709		break;
2710	case DA_ZONE_HOST_MANAGED:
2711		snprintf(tmpbuf, sizeof(tmpbuf), "Host Managed");
2712		break;
2713	case DA_ZONE_NONE:
2714	default:
2715		snprintf(tmpbuf, sizeof(tmpbuf), "Not Zoned");
2716		break;
2717	}
2718
2719	error = sysctl_handle_string(oidp, tmpbuf, sizeof(tmpbuf), req);
2720
2721	return (error);
2722}
2723
2724static int
2725dazonesupsysctl(SYSCTL_HANDLER_ARGS)
2726{
2727	char tmpbuf[180];
2728	struct da_softc *softc;
2729	struct sbuf sb;
2730	int error, first;
2731	unsigned int i;
2732
2733	softc = (struct da_softc *)arg1;
2734
2735	error = 0;
2736	first = 1;
2737	sbuf_new(&sb, tmpbuf, sizeof(tmpbuf), 0);
2738
2739	for (i = 0; i < sizeof(da_zone_desc_table) /
2740	     sizeof(da_zone_desc_table[0]); i++) {
2741		if (softc->zone_flags & da_zone_desc_table[i].value) {
2742			if (first == 0)
2743				sbuf_printf(&sb, ", ");
2744			else
2745				first = 0;
2746			sbuf_cat(&sb, da_zone_desc_table[i].desc);
2747		}
2748	}
2749
2750	if (first == 1)
2751		sbuf_printf(&sb, "None");
2752
2753	sbuf_finish(&sb);
2754
2755	error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
2756
2757	return (error);
2758}
2759
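/*
 * Register a new da instance.  This allocates the softc and I/O scheduler,
 * applies quirks (which may be overridden with the kern.cam.da.<unit>.quirks
 * tunable), selects the minimum CDB size, creates and publishes the GEOM
 * disk, registers the async callbacks of interest, and schedules the first
 * probe state.
 */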
2760static cam_status
2761daregister(struct cam_periph *periph, void *arg)
2762{
2763	struct da_softc *softc;
2764	struct ccb_pathinq cpi;
2765	struct ccb_getdev *cgd;
2766	char tmpstr[80];
2767	caddr_t match;
2768	int quirks;
2769
2770	cgd = (struct ccb_getdev *)arg;
2771	if (cgd == NULL) {
2772		printf("daregister: no getdev CCB, can't register device\n");
2773		return(CAM_REQ_CMP_ERR);
2774	}
2775
2776	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
2777	    M_NOWAIT|M_ZERO);
2778
2779	if (softc == NULL) {
2780		printf("daregister: Unable to probe new device. "
2781		       "Unable to allocate softc\n");
2782		return(CAM_REQ_CMP_ERR);
2783	}
2784
2785	if (cam_iosched_init(&softc->cam_iosched, periph) != 0) {
2786		printf("daregister: Unable to probe new device. "
2787		       "Unable to allocate iosched memory\n");
2788		free(softc, M_DEVBUF);
2789		return(CAM_REQ_CMP_ERR);
2790	}
2791
2792	LIST_INIT(&softc->pending_ccbs);
2793	softc->state = DA_STATE_PROBE_WP;
2794	bioq_init(&softc->delete_run_queue);
2795	if (SID_IS_REMOVABLE(&cgd->inq_data))
2796		softc->flags |= DA_FLAG_PACK_REMOVABLE;
2797	softc->unmap_max_ranges = UNMAP_MAX_RANGES;
2798	softc->unmap_max_lba = UNMAP_RANGE_MAX;
2799	softc->unmap_gran = 0;
2800	softc->unmap_gran_align = 0;
2801	softc->ws_max_blks = WS16_MAX_BLKS;
2802	softc->trim_max_ranges = ATA_TRIM_MAX_RANGES;
2803	softc->flags |= DA_FLAG_ROTATING;
2804
2805	periph->softc = softc;
2806
2807	/*
2808	 * See if this device has any quirks.
2809	 */
2810	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
2811			       (caddr_t)da_quirk_table,
2812			       nitems(da_quirk_table),
2813			       sizeof(*da_quirk_table), scsi_inquiry_match);
2814
2815	if (match != NULL)
2816		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
2817	else
2818		softc->quirks = DA_Q_NONE;
2819
2820	/* Check if the SIM does not want 6 byte commands */
2821	xpt_path_inq(&cpi, periph->path);
2822	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
2823		softc->quirks |= DA_Q_NO_6_BYTE;
2824
2825	/* Override quirks if tunable is set */
2826	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.quirks",
2827		 periph->unit_number);
2828	quirks = softc->quirks;
2829	TUNABLE_INT_FETCH(tmpstr, &quirks);
2830	softc->quirks = quirks;
2831
2832	if (SID_TYPE(&cgd->inq_data) == T_ZBC_HM)
2833		softc->zone_mode = DA_ZONE_HOST_MANAGED;
2834	else if (softc->quirks & DA_Q_SMR_DM)
2835		softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
2836	else
2837		softc->zone_mode = DA_ZONE_NONE;
2838
2839	if (softc->zone_mode != DA_ZONE_NONE) {
2840		if (scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
2841			if (scsi_vpd_supported_page(periph, SVPD_ZONED_BDC))
2842				softc->zone_interface = DA_ZONE_IF_ATA_SAT;
2843			else
2844				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
2845		} else
2846			softc->zone_interface = DA_ZONE_IF_SCSI;
2847	}
2848
2849	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);
2850
2851	/*
2852	 * Take an exclusive section lock on the periph while dastart is called
2853	 * to finish the probe.  The lock will be dropped in dadone at the end
2854	 * of probe. This locks out daopen and daclose from racing with the
2855	 * probe.
2856	 *
2857	 * XXX if cam_periph_hold returns an error, we don't hold a refcount.
2858	 */
2859	(void)da_periph_hold(periph, PRIBIO, DA_REF_PROBE_HOLD);
2860
2861	/*
2862	 * Schedule a periodic event to occasionally send an
2863	 * ordered tag to a device.
2864	 */
2865	callout_init_mtx(&softc->sendordered_c, cam_periph_mtx(periph), 0);
2866	callout_reset(&softc->sendordered_c,
2867	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
2868	    dasendorderedtag, periph);
2869
2870	cam_periph_unlock(periph);
2871	/*
2872	 * RBC devices don't have to support READ(6), only READ(10).
2873	 */
2874	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
2875		softc->minimum_cmd_size = 10;
2876	else
2877		softc->minimum_cmd_size = 6;
2878
2879	/*
2880	 * Load the user's default, if any.
2881	 */
2882	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
2883		 periph->unit_number);
2884	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);
2885
2886	/*
2887	 * 6, 10, 12 and 16 are the currently permissible values.
2888	 */
2889	if (softc->minimum_cmd_size > 12)
2890		softc->minimum_cmd_size = 16;
2891	else if (softc->minimum_cmd_size > 10)
2892		softc->minimum_cmd_size = 12;
2893	else if (softc->minimum_cmd_size > 6)
2894		softc->minimum_cmd_size = 10;
2895	else
2896		softc->minimum_cmd_size = 6;
2897
2898	/* Predict whether device may support READ CAPACITY(16). */
2899	if (SID_ANSI_REV(&cgd->inq_data) >= SCSI_REV_SPC3 &&
2900	    (softc->quirks & DA_Q_NO_RC16) == 0) {
2901		softc->flags |= DA_FLAG_CAN_RC16;
2902	}
2903
2904	/*
2905	 * Register this media as a disk.
2906	 */
2907	softc->disk = disk_alloc();
2908	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
2909			  periph->unit_number, 0,
2910			  DEVSTAT_BS_UNAVAILABLE,
2911			  SID_TYPE(&cgd->inq_data) |
2912			  XPORT_DEVSTAT_TYPE(cpi.transport),
2913			  DEVSTAT_PRIORITY_DISK);
2914	softc->disk->d_open = daopen;
2915	softc->disk->d_close = daclose;
2916	softc->disk->d_strategy = dastrategy;
2917	if (cam_sim_pollable(periph->sim))
2918		softc->disk->d_dump = dadump;
2919	softc->disk->d_getattr = dagetattr;
2920	softc->disk->d_gone = dadiskgonecb;
2921	softc->disk->d_name = "da";
2922	softc->disk->d_drv1 = periph;
2923	if (cpi.maxio == 0)
2924		softc->maxio = DFLTPHYS;	/* traditional default */
2925	else if (cpi.maxio > maxphys)
2926		softc->maxio = maxphys;		/* for safety */
2927	else
2928		softc->maxio = cpi.maxio;
2929	if (softc->quirks & DA_Q_128KB)
2930		softc->maxio = min(softc->maxio, 128 * 1024);
2931	softc->disk->d_maxsize = softc->maxio;
2932	softc->disk->d_unit = periph->unit_number;
2933	softc->disk->d_flags = DISKFLAG_DIRECT_COMPLETION | DISKFLAG_CANZONE;
2934	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0)
2935		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
2936	if ((cpi.hba_misc & PIM_UNMAPPED) != 0) {
2937		softc->flags |= DA_FLAG_UNMAPPEDIO;
2938		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
2939	}
2940	cam_strvis(softc->disk->d_descr, cgd->inq_data.vendor,
2941	    sizeof(cgd->inq_data.vendor), sizeof(softc->disk->d_descr));
2942	strlcat(softc->disk->d_descr, " ", sizeof(softc->disk->d_descr));
2943	cam_strvis(&softc->disk->d_descr[strlen(softc->disk->d_descr)],
2944	    cgd->inq_data.product, sizeof(cgd->inq_data.product),
2945	    sizeof(softc->disk->d_descr) - strlen(softc->disk->d_descr));
2946	softc->disk->d_hba_vendor = cpi.hba_vendor;
2947	softc->disk->d_hba_device = cpi.hba_device;
2948	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
2949	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
2950	snprintf(softc->disk->d_attachment, sizeof(softc->disk->d_attachment),
2951	    "%s%d", cpi.dev_name, cpi.unit_number);
2952
2953	/*
2954	 * Acquire a reference to the periph before we register with GEOM.
2955	 * We'll release this reference once GEOM calls us back (via
2956	 * dadiskgonecb()) telling us that our provider has been freed.
2957	 */
2958	if (da_periph_acquire(periph, DA_REF_GEOM) != 0) {
2959		xpt_print(periph->path, "%s: lost periph during "
2960			  "registration!\n", __func__);
2961		cam_periph_lock(periph);
2962		return (CAM_REQ_CMP_ERR);
2963	}
2964
2965	disk_create(softc->disk, DISK_VERSION);
2966	cam_periph_lock(periph);
2967
2968	/*
2969	 * Add async callbacks for events of interest.
2970	 * I don't bother checking if this fails as,
2971	 * in most cases, the system will function just
2972	 * fine without them and the only alternative
2973	 * would be to not attach the device on failure.
2974	 */
2975	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
2976	    AC_ADVINFO_CHANGED | AC_SCSI_AEN | AC_UNIT_ATTENTION |
2977	    AC_INQ_CHANGED, daasync, periph, periph->path);
2978
2979	/*
2980	 * Emit an attribute changed notification just in case
2981	 * physical path information arrived before our async
2982	 * event handler was registered, but after anyone attaching
2983	 * to our disk device polled it.
2984	 */
2985	disk_attr_changed(softc->disk, "GEOM::physpath", M_NOWAIT);
2986
2987	/*
2988	 * Schedule periodic media polling events.
2989	 */
2990	callout_init_mtx(&softc->mediapoll_c, cam_periph_mtx(periph), 0);
2991	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) &&
2992	    (cgd->inq_flags & SID_AEN) == 0 &&
2993	    da_poll_period != 0)
2994		callout_reset(&softc->mediapoll_c, da_poll_period * hz,
2995		    damediapoll, periph);
2996
2997	xpt_schedule(periph, CAM_PRIORITY_DEV);
2998
2999	return(CAM_REQ_CMP);
3000}
3001
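/*
 * Map a BIO_ZONE command to the corresponding ZBC OUT service action, or -1
 * if there is no translation.
 */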
3002static int
3003da_zone_bio_to_scsi(int disk_zone_cmd)
3004{
3005	switch (disk_zone_cmd) {
3006	case DISK_ZONE_OPEN:
3007		return ZBC_OUT_SA_OPEN;
3008	case DISK_ZONE_CLOSE:
3009		return ZBC_OUT_SA_CLOSE;
3010	case DISK_ZONE_FINISH:
3011		return ZBC_OUT_SA_FINISH;
3012	case DISK_ZONE_RWP:
3013		return ZBC_OUT_SA_RWP;
3014	}
3015
3016	return -1;
3017}
3018
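/*
 * Build the CCB for a BIO_ZONE request.  Zone open/close/finish/reset are
 * issued as ZBC OUT (or as ATA ZAC MANAGEMENT OUT via SAT passthrough when
 * the zone interface is ATA), REPORT ZONES as ZBC IN / ZAC MANAGEMENT IN,
 * and GET_PARAMS is answered from cached softc state.  *queue_ccb is set
 * when a CCB was filled in and must be dispatched by the caller.
 */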
3019static int
3020da_zone_cmd(struct cam_periph *periph, union ccb *ccb, struct bio *bp,
3021	    int *queue_ccb)
3022{
3023	struct da_softc *softc;
3024	int error;
3025
3026	error = 0;
3027
3028	if (bp->bio_cmd != BIO_ZONE) {
3029		error = EINVAL;
3030		goto bailout;
3031	}
3032
3033	softc = periph->softc;
3034
3035	switch (bp->bio_zone.zone_cmd) {
3036	case DISK_ZONE_OPEN:
3037	case DISK_ZONE_CLOSE:
3038	case DISK_ZONE_FINISH:
3039	case DISK_ZONE_RWP: {
3040		int zone_flags;
3041		int zone_sa;
3042		uint64_t lba;
3043
3044		zone_sa = da_zone_bio_to_scsi(bp->bio_zone.zone_cmd);
3045		if (zone_sa == -1) {
3046			xpt_print(periph->path, "Cannot translate zone "
3047			    "cmd %#x to SCSI\n", bp->bio_zone.zone_cmd);
3048			error = EINVAL;
3049			goto bailout;
3050		}
3051
3052		zone_flags = 0;
3053		lba = bp->bio_zone.zone_params.rwp.id;
3054
3055		if (bp->bio_zone.zone_params.rwp.flags &
3056		    DISK_ZONE_RWP_FLAG_ALL)
3057			zone_flags |= ZBC_OUT_ALL;
3058
3059		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
3060			scsi_zbc_out(&ccb->csio,
3061				     /*retries*/ da_retry_count,
3062				     /*cbfcnp*/ dadone,
3063				     /*tag_action*/ MSG_SIMPLE_Q_TAG,
3064				     /*service_action*/ zone_sa,
3065				     /*zone_id*/ lba,
3066				     /*zone_flags*/ zone_flags,
3067				     /*data_ptr*/ NULL,
3068				     /*dxfer_len*/ 0,
3069				     /*sense_len*/ SSD_FULL_SIZE,
3070				     /*timeout*/ da_default_timeout * 1000);
3071		} else {
3072			/*
3073			 * Note that in this case, even though we can
3074			 * technically use NCQ, we don't bother for several
3075			 * reasons:
3076			 * 1. It hasn't been tested on a SAT layer that
3077			 *    supports it.  This is new as of SAT-4.
3078			 * 2. Even when there is a SAT layer that supports
3079			 *    it, that SAT layer will also probably support
3080			 *    ZBC -> ZAC translation, since they are both
3081			 *    in the SAT-4 spec.
3082			 * 3. Translation will likely be preferable to ATA
3083			 *    passthrough.  LSI / Avago at least single
3084			 *    steps ATA passthrough commands in the HBA,
3085			 *    regardless of protocol, so unless that
3086			 *    changes, there is a performance penalty for
3087			 *    doing ATA passthrough no matter whether
3088			 *    you're using NCQ/FPDMA, DMA or PIO.
3089			 * 4. It requires a 32-byte CDB, which at least at
3090			 *    this point in CAM requires a CDB pointer, which
3091			 *    would require us to allocate an additional bit
3092			 *    of storage separate from the CCB.
3093			 */
3094			error = scsi_ata_zac_mgmt_out(&ccb->csio,
3095			    /*retries*/ da_retry_count,
3096			    /*cbfcnp*/ dadone,
3097			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3098			    /*use_ncq*/ 0,
3099			    /*zm_action*/ zone_sa,
3100			    /*zone_id*/ lba,
3101			    /*zone_flags*/ zone_flags,
3102			    /*data_ptr*/ NULL,
3103			    /*dxfer_len*/ 0,
3104			    /*cdb_storage*/ NULL,
3105			    /*cdb_storage_len*/ 0,
3106			    /*sense_len*/ SSD_FULL_SIZE,
3107			    /*timeout*/ da_default_timeout * 1000);
3108			if (error != 0) {
3109				error = EINVAL;
3110				xpt_print(periph->path,
3111				    "scsi_ata_zac_mgmt_out() returned an "
3112				    "error!\n");
3113				goto bailout;
3114			}
3115		}
3116		*queue_ccb = 1;
3117
3118		break;
3119	}
3120	case DISK_ZONE_REPORT_ZONES: {
3121		uint8_t *rz_ptr;
3122		uint32_t num_entries, alloc_size;
3123		struct disk_zone_report *rep;
3124
3125		rep = &bp->bio_zone.zone_params.report;
3126
3127		num_entries = rep->entries_allocated;
3128		if (num_entries == 0) {
3129			xpt_print(periph->path, "No entries allocated for "
3130			    "Report Zones request\n");
3131			error = EINVAL;
3132			goto bailout;
3133		}
3134		alloc_size = sizeof(struct scsi_report_zones_hdr) +
3135		    (sizeof(struct scsi_report_zones_desc) * num_entries);
3136		alloc_size = min(alloc_size, softc->disk->d_maxsize);
3137		rz_ptr = malloc(alloc_size, M_SCSIDA, M_NOWAIT | M_ZERO);
3138		if (rz_ptr == NULL) {
3139			xpt_print(periph->path, "Unable to allocate memory "
3140			   "for Report Zones request\n");
3141			error = ENOMEM;
3142			goto bailout;
3143		}
3144
3145		if (softc->zone_interface != DA_ZONE_IF_ATA_PASS) {
3146			scsi_zbc_in(&ccb->csio,
3147				    /*retries*/ da_retry_count,
3148				    /*cbcfnp*/ dadone,
3149				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3150				    /*service_action*/ ZBC_IN_SA_REPORT_ZONES,
3151				    /*zone_start_lba*/ rep->starting_id,
3152				    /*zone_options*/ rep->rep_options,
3153				    /*data_ptr*/ rz_ptr,
3154				    /*dxfer_len*/ alloc_size,
3155				    /*sense_len*/ SSD_FULL_SIZE,
3156				    /*timeout*/ da_default_timeout * 1000);
3157		} else {
3158			/*
3159			 * Note that in this case, even though we can
3160			 * technically use NCQ, we don't bother for several
3161			 * reasons:
3162			 * 1. It hasn't been tested on a SAT layer that
3163			 *    supports it.  This is new as of SAT-4.
3164			 * 2. Even when there is a SAT layer that supports
3165			 *    it, that SAT layer will also probably support
3166			 *    ZBC -> ZAC translation, since they are both
3167			 *    in the SAT-4 spec.
3168			 * 3. Translation will likely be preferable to ATA
3169			 *    passthrough.  LSI / Avago at least single
3170			 *    steps ATA passthrough commands in the HBA,
3171			 *    regardless of protocol, so unless that
3172			 *    changes, there is a performance penalty for
3173			 *    doing ATA passthrough no matter whether
3174			 *    you're using NCQ/FPDMA, DMA or PIO.
3175			 * 4. It requires a 32-byte CDB, which at least at
3176			 *    this point in CAM requires a CDB pointer, which
3177			 *    would require us to allocate an additional bit
3178			 *    of storage separate from the CCB.
3179			 */
3180			error = scsi_ata_zac_mgmt_in(&ccb->csio,
3181			    /*retries*/ da_retry_count,
3182			    /*cbcfnp*/ dadone,
3183			    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3184			    /*use_ncq*/ 0,
3185			    /*zm_action*/ ATA_ZM_REPORT_ZONES,
3186			    /*zone_id*/ rep->starting_id,
3187			    /*zone_flags*/ rep->rep_options,
3188			    /*data_ptr*/ rz_ptr,
3189			    /*dxfer_len*/ alloc_size,
3190			    /*cdb_storage*/ NULL,
3191			    /*cdb_storage_len*/ 0,
3192			    /*sense_len*/ SSD_FULL_SIZE,
3193			    /*timeout*/ da_default_timeout * 1000);
3194			if (error != 0) {
3195				error = EINVAL;
3196				xpt_print(periph->path,
3197				    "scsi_ata_zac_mgmt_in() returned an "
3198				    "error!\n");
3199				goto bailout;
3200			}
3201		}
3202
3203		/*
3204		 * For BIO_ZONE, this isn't normally needed.  However, it
3205		 * is used by devstat_end_transaction_bio() to determine
3206		 * how much data was transferred.
3207		 */
3208		/*
3209		 * XXX KDM we have a problem.  But I'm not sure how to fix
3210		 * it.  devstat uses bio_bcount - bio_resid to calculate
3211		 * the amount of data transferred.   The GEOM disk code
3212		 * uses bio_length - bio_resid to calculate the amount of
3213		 * data in bio_completed.  We have different structure
3214		 * sizes above and below the da(4) driver.  So, if we
3215		 * use the sizes above, the amount transferred won't be
3216		 * quite accurate for devstat.  If we use different sizes
3217		 * for bio_bcount and bio_length (above and below
3218		 * respectively), then the residual needs to match one or
3219		 * the other.  Everything is calculated after the bio
3220		 * leaves the driver, so changing the values around isn't
3221		 * really an option.  For now, just set the count to the
3222		 * passed in length.  This means that the calculations
3223		 * above (e.g. bio_completed) will be correct, but the
3224		 * amount of data reported to devstat will be slightly
3225		 * under or overstated.
3226		 */
3227		bp->bio_bcount = bp->bio_length;
3228
3229		*queue_ccb = 1;
3230
3231		break;
3232	}
3233	case DISK_ZONE_GET_PARAMS: {
3234		struct disk_zone_disk_params *params;
3235
3236		params = &bp->bio_zone.zone_params.disk_params;
3237		bzero(params, sizeof(*params));
3238
3239		switch (softc->zone_mode) {
3240		case DA_ZONE_DRIVE_MANAGED:
3241			params->zone_mode = DISK_ZONE_MODE_DRIVE_MANAGED;
3242			break;
3243		case DA_ZONE_HOST_AWARE:
3244			params->zone_mode = DISK_ZONE_MODE_HOST_AWARE;
3245			break;
3246		case DA_ZONE_HOST_MANAGED:
3247			params->zone_mode = DISK_ZONE_MODE_HOST_MANAGED;
3248			break;
3249		default:
3250		case DA_ZONE_NONE:
3251			params->zone_mode = DISK_ZONE_MODE_NONE;
3252			break;
3253		}
3254
3255		if (softc->zone_flags & DA_ZONE_FLAG_URSWRZ)
3256			params->flags |= DISK_ZONE_DISK_URSWRZ;
3257
3258		if (softc->zone_flags & DA_ZONE_FLAG_OPT_SEQ_SET) {
3259			params->optimal_seq_zones = softc->optimal_seq_zones;
3260			params->flags |= DISK_ZONE_OPT_SEQ_SET;
3261		}
3262
3263		if (softc->zone_flags & DA_ZONE_FLAG_OPT_NONSEQ_SET) {
3264			params->optimal_nonseq_zones =
3265			    softc->optimal_nonseq_zones;
3266			params->flags |= DISK_ZONE_OPT_NONSEQ_SET;
3267		}
3268
3269		if (softc->zone_flags & DA_ZONE_FLAG_MAX_SEQ_SET) {
3270			params->max_seq_zones = softc->max_seq_zones;
3271			params->flags |= DISK_ZONE_MAX_SEQ_SET;
3272		}
3273		if (softc->zone_flags & DA_ZONE_FLAG_RZ_SUP)
3274			params->flags |= DISK_ZONE_RZ_SUP;
3275
3276		if (softc->zone_flags & DA_ZONE_FLAG_OPEN_SUP)
3277			params->flags |= DISK_ZONE_OPEN_SUP;
3278
3279		if (softc->zone_flags & DA_ZONE_FLAG_CLOSE_SUP)
3280			params->flags |= DISK_ZONE_CLOSE_SUP;
3281
3282		if (softc->zone_flags & DA_ZONE_FLAG_FINISH_SUP)
3283			params->flags |= DISK_ZONE_FINISH_SUP;
3284
3285		if (softc->zone_flags & DA_ZONE_FLAG_RWP_SUP)
3286			params->flags |= DISK_ZONE_RWP_SUP;
3287		break;
3288	}
3289	default:
3290		break;
3291	}
3292bailout:
3293	return (error);
3294}
3295
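/*
 * Main start routine, called with the periph lock held whenever a CCB is
 * available for this periph.  In DA_STATE_NORMAL the next bio is pulled from
 * the I/O scheduler and turned into a READ/WRITE, SYNCHRONIZE CACHE, delete,
 * or zone CCB (or a TEST UNIT READY when media polling work is pending).
 * The DA_STATE_PROBE_* states issue the probe commands in sequence:
 * write-protect mode sense, READ CAPACITY(10/16), the LBP, Block Limits and
 * BDC VPD pages, and the ATA identify/log/zone probes.
 */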
3296static void
3297dastart(struct cam_periph *periph, union ccb *start_ccb)
3298{
3299	struct da_softc *softc;
3300
3301	cam_periph_assert(periph, MA_OWNED);
3302	softc = (struct da_softc *)periph->softc;
3303
3304	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dastart\n"));
3305
3306skipstate:
3307	switch (softc->state) {
3308	case DA_STATE_NORMAL:
3309	{
3310		struct bio *bp;
3311		uint8_t tag_code;
3312
3313more:
3314		bp = cam_iosched_next_bio(softc->cam_iosched);
3315		if (bp == NULL) {
3316			if (cam_iosched_has_work_flags(softc->cam_iosched,
3317			    DA_WORK_TUR)) {
3318				softc->flags |= DA_FLAG_TUR_PENDING;
3319				cam_iosched_clr_work_flags(softc->cam_iosched,
3320				    DA_WORK_TUR);
3321				scsi_test_unit_ready(&start_ccb->csio,
3322				     /*retries*/ da_retry_count,
3323				     dadone_tur,
3324				     MSG_SIMPLE_Q_TAG,
3325				     SSD_FULL_SIZE,
3326				     da_default_timeout * 1000);
3327				start_ccb->ccb_h.ccb_bp = NULL;
3328				start_ccb->ccb_h.ccb_state = DA_CCB_TUR;
3329				xpt_action(start_ccb);
3330			} else
3331				xpt_release_ccb(start_ccb);
3332			break;
3333		}
3334
3335		if (bp->bio_cmd == BIO_DELETE) {
3336			if (softc->delete_func != NULL) {
3337				softc->delete_func(periph, start_ccb, bp);
3338				goto out;
3339			} else {
3340				/*
3341				 * Not sure this is possible, but failsafe by
3342				 * lying and saying "sure, done."
3343				 */
3344				biofinish(bp, NULL, 0);
3345				goto more;
3346			}
3347		}
3348
3349		if (cam_iosched_has_work_flags(softc->cam_iosched,
3350		    DA_WORK_TUR)) {
3351			cam_iosched_clr_work_flags(softc->cam_iosched,
3352			    DA_WORK_TUR);
3353			da_periph_release_locked(periph, DA_REF_TUR);
3354		}
3355
3356		if ((bp->bio_flags & BIO_ORDERED) != 0 ||
3357		    (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
3358			softc->flags &= ~DA_FLAG_NEED_OTAG;
3359			softc->flags |= DA_FLAG_WAS_OTAG;
3360			tag_code = MSG_ORDERED_Q_TAG;
3361		} else {
3362			tag_code = MSG_SIMPLE_Q_TAG;
3363		}
3364
3365		switch (bp->bio_cmd) {
3366		case BIO_WRITE:
3367		case BIO_READ:
3368		{
3369			void *data_ptr;
3370			int rw_op;
3371
3372			biotrack(bp, __func__);
3373
3374			if (bp->bio_cmd == BIO_WRITE) {
3375				softc->flags |= DA_FLAG_DIRTY;
3376				rw_op = SCSI_RW_WRITE;
3377			} else {
3378				rw_op = SCSI_RW_READ;
3379			}
3380
3381			data_ptr = bp->bio_data;
3382			if ((bp->bio_flags & (BIO_UNMAPPED|BIO_VLIST)) != 0) {
3383				rw_op |= SCSI_RW_BIO;
3384				data_ptr = bp;
3385			}
3386
3387			scsi_read_write(&start_ccb->csio,
3388					/*retries*/da_retry_count,
3389					/*cbfcnp*/dadone,
3390					/*tag_action*/tag_code,
3391					rw_op,
3392					/*byte2*/0,
3393					softc->minimum_cmd_size,
3394					/*lba*/bp->bio_pblkno,
3395					/*block_count*/bp->bio_bcount /
3396					softc->params.secsize,
3397					data_ptr,
3398					/*dxfer_len*/ bp->bio_bcount,
3399					/*sense_len*/SSD_FULL_SIZE,
3400					da_default_timeout * 1000);
3401#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
3402			start_ccb->csio.bio = bp;
3403#endif
3404			break;
3405		}
3406		case BIO_FLUSH:
3407			/*
3408			 * If we don't support sync cache, or the disk
3409			 * isn't dirty, FLUSH is a no-op.  Use the
3410			 * allocated CCB for the next bio if one is
3411			 * available.
3412			 */
3413			if ((softc->quirks & DA_Q_NO_SYNC_CACHE) != 0 ||
3414			    (softc->flags & DA_FLAG_DIRTY) == 0) {
3415				biodone(bp);
3416				goto skipstate;
3417			}
3418
3419			/*
3420			 * BIO_FLUSH doesn't currently communicate
3421			 * range data, so we synchronize the cache
3422			 * over the whole disk.
3423			 */
3424			scsi_synchronize_cache(&start_ccb->csio,
3425					       /*retries*/1,
3426					       /*cbfcnp*/dadone,
3427					       /*tag_action*/tag_code,
3428					       /*begin_lba*/0,
3429					       /*lb_count*/0,
3430					       SSD_FULL_SIZE,
3431					       da_default_timeout*1000);
3432			/*
3433			 * Clear the dirty flag before sending the command.
3434			 * Either this sync cache will be successful, or it
3435			 * will fail after a retry.  If it fails, it is
3436			 * unlikely to be successful if retried later, so
3437			 * we'll save ourselves time by just marking the
3438			 * device clean.
3439			 */
3440			softc->flags &= ~DA_FLAG_DIRTY;
3441			break;
3442		case BIO_ZONE: {
3443			int error, queue_ccb;
3444
3445			queue_ccb = 0;
3446
3447			error = da_zone_cmd(periph, start_ccb, bp, &queue_ccb);
3448			if ((error != 0)
3449			 || (queue_ccb == 0)) {
3450				biofinish(bp, NULL, error);
3451				xpt_release_ccb(start_ccb);
3452				return;
3453			}
3454			break;
3455		}
3456		default:
3457			biofinish(bp, NULL, EOPNOTSUPP);
3458			xpt_release_ccb(start_ccb);
3459			return;
3460		}
3461		start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;
3462		start_ccb->ccb_h.flags |= CAM_UNLOCKED;
3463		start_ccb->ccb_h.softtimeout = sbttotv(da_default_softtimeout);
3464
3465out:
3466		LIST_INSERT_HEAD(&softc->pending_ccbs,
3467				 &start_ccb->ccb_h, periph_links.le);
3468
3469		/* We expect a unit attention from this device */
3470		if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
3471			start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
3472			softc->flags &= ~DA_FLAG_RETRY_UA;
3473		}
3474
3475		start_ccb->ccb_h.ccb_bp = bp;
3476		softc->refcount++;
3477		cam_periph_unlock(periph);
3478		xpt_action(start_ccb);
3479		cam_periph_lock(periph);
3480
3481		/* May have more work to do, so ensure we stay scheduled */
3482		daschedule(periph);
3483		break;
3484	}
3485	case DA_STATE_PROBE_WP:
3486	{
3487		void  *mode_buf;
3488		int    mode_buf_len;
3489
3490		if (da_disable_wp_detection) {
3491			if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
3492				softc->state = DA_STATE_PROBE_RC16;
3493			else
3494				softc->state = DA_STATE_PROBE_RC;
3495			goto skipstate;
3496		}
3497		mode_buf_len = 192;
3498		mode_buf = malloc(mode_buf_len, M_SCSIDA, M_NOWAIT);
3499		if (mode_buf == NULL) {
3500			xpt_print(periph->path, "Unable to send mode sense - "
3501			    "malloc failure\n");
3502			if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
3503				softc->state = DA_STATE_PROBE_RC16;
3504			else
3505				softc->state = DA_STATE_PROBE_RC;
3506			goto skipstate;
3507		}
3508		scsi_mode_sense_len(&start_ccb->csio,
3509				    /*retries*/ da_retry_count,
3510				    /*cbfcnp*/ dadone_probewp,
3511				    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3512				    /*dbd*/ FALSE,
3513				    /*pc*/ SMS_PAGE_CTRL_CURRENT,
3514				    /*page*/ SMS_ALL_PAGES_PAGE,
3515				    /*param_buf*/ mode_buf,
3516				    /*param_len*/ mode_buf_len,
3517				    /*minimum_cmd_size*/ softc->minimum_cmd_size,
3518				    /*sense_len*/ SSD_FULL_SIZE,
3519				    /*timeout*/ da_default_timeout * 1000);
3520		start_ccb->ccb_h.ccb_bp = NULL;
3521		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_WP;
3522		xpt_action(start_ccb);
3523		break;
3524	}
3525	case DA_STATE_PROBE_RC:
3526	{
3527		struct scsi_read_capacity_data *rcap;
3528
3529		rcap = (struct scsi_read_capacity_data *)
3530		    malloc(sizeof(*rcap), M_SCSIDA, M_NOWAIT|M_ZERO);
3531		if (rcap == NULL) {
3532			printf("dastart: Couldn't malloc read_capacity data\n");
3533			/* da_free_periph??? */
3534			break;
3535		}
3536		scsi_read_capacity(&start_ccb->csio,
3537				   /*retries*/da_retry_count,
3538				   dadone_proberc,
3539				   MSG_SIMPLE_Q_TAG,
3540				   rcap,
3541				   SSD_FULL_SIZE,
3542				   /*timeout*/5000);
3543		start_ccb->ccb_h.ccb_bp = NULL;
3544		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC;
3545		xpt_action(start_ccb);
3546		break;
3547	}
3548	case DA_STATE_PROBE_RC16:
3549	{
3550		struct scsi_read_capacity_data_long *rcaplong;
3551
3552		rcaplong = (struct scsi_read_capacity_data_long *)
3553			malloc(sizeof(*rcaplong), M_SCSIDA, M_NOWAIT|M_ZERO);
3554		if (rcaplong == NULL) {
3555			printf("dastart: Couldn't malloc read_capacity data\n");
3556			/* da_free_periph??? */
3557			break;
3558		}
3559		scsi_read_capacity_16(&start_ccb->csio,
3560				      /*retries*/ da_retry_count,
3561				      /*cbfcnp*/ dadone_proberc,
3562				      /*tag_action*/ MSG_SIMPLE_Q_TAG,
3563				      /*lba*/ 0,
3564				      /*reladr*/ 0,
3565				      /*pmi*/ 0,
3566				      /*rcap_buf*/ (uint8_t *)rcaplong,
3567				      /*rcap_buf_len*/ sizeof(*rcaplong),
3568				      /*sense_len*/ SSD_FULL_SIZE,
3569				      /*timeout*/ da_default_timeout * 1000);
3570		start_ccb->ccb_h.ccb_bp = NULL;
3571		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_RC16;
3572		xpt_action(start_ccb);
3573		break;
3574	}
3575	case DA_STATE_PROBE_LBP:
3576	{
3577		struct scsi_vpd_logical_block_prov *lbp;
3578
3579		if (!scsi_vpd_supported_page(periph, SVPD_LBP)) {
3580			/*
3581			 * If we get here, we don't support any SBC-3 delete
3582			 * methods with UNMAP, since support for the Logical
3583			 * Block Provisioning VPD page is required for devices
3584			 * that support UNMAP according to T10/1799-D Revision
3585			 * 31.  However, older revisions of the spec don't
3586			 * mandate this, so we currently don't remove these
3587			 * methods from the available set.
3588			 */
3589			softc->state = DA_STATE_PROBE_BLK_LIMITS;
3590			goto skipstate;
3591		}
3592
3593		lbp = (struct scsi_vpd_logical_block_prov *)
3594			malloc(sizeof(*lbp), M_SCSIDA, M_NOWAIT|M_ZERO);
3595
3596		if (lbp == NULL) {
3597			printf("dastart: Couldn't malloc lbp data\n");
3598			/* da_free_periph??? */
3599			break;
3600		}
3601
3602		scsi_inquiry(&start_ccb->csio,
3603			     /*retries*/da_retry_count,
3604			     /*cbfcnp*/dadone_probelbp,
3605			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3606			     /*inq_buf*/(u_int8_t *)lbp,
3607			     /*inq_len*/sizeof(*lbp),
3608			     /*evpd*/TRUE,
3609			     /*page_code*/SVPD_LBP,
3610			     /*sense_len*/SSD_MIN_SIZE,
3611			     /*timeout*/da_default_timeout * 1000);
3612		start_ccb->ccb_h.ccb_bp = NULL;
3613		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_LBP;
3614		xpt_action(start_ccb);
3615		break;
3616	}
3617	case DA_STATE_PROBE_BLK_LIMITS:
3618	{
3619		struct scsi_vpd_block_limits *block_limits;
3620
3621		if (!scsi_vpd_supported_page(periph, SVPD_BLOCK_LIMITS)) {
3622			/* Not supported, skip to the next probe. */
3623			softc->state = DA_STATE_PROBE_BDC;
3624			goto skipstate;
3625		}
3626
3627		block_limits = (struct scsi_vpd_block_limits *)
3628			malloc(sizeof(*block_limits), M_SCSIDA, M_NOWAIT|M_ZERO);
3629
3630		if (block_limits == NULL) {
3631			printf("dastart: Couldn't malloc block_limits data\n");
3632			/* da_free_periph??? */
3633			break;
3634		}
3635
3636		scsi_inquiry(&start_ccb->csio,
3637			     /*retries*/da_retry_count,
3638			     /*cbfcnp*/dadone_probeblklimits,
3639			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3640			     /*inq_buf*/(u_int8_t *)block_limits,
3641			     /*inq_len*/sizeof(*block_limits),
3642			     /*evpd*/TRUE,
3643			     /*page_code*/SVPD_BLOCK_LIMITS,
3644			     /*sense_len*/SSD_MIN_SIZE,
3645			     /*timeout*/da_default_timeout * 1000);
3646		start_ccb->ccb_h.ccb_bp = NULL;
3647		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BLK_LIMITS;
3648		xpt_action(start_ccb);
3649		break;
3650	}
3651	case DA_STATE_PROBE_BDC:
3652	{
3653		struct scsi_vpd_block_device_characteristics *bdc;
3654
3655		if (!scsi_vpd_supported_page(periph, SVPD_BDC)) {
3656			softc->state = DA_STATE_PROBE_ATA;
3657			goto skipstate;
3658		}
3659
3660		bdc = (struct scsi_vpd_block_device_characteristics *)
3661			malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3662
3663		if (bdc == NULL) {
3664			printf("dastart: Couldn't malloc bdc data\n");
3665			/* da_free_periph??? */
3666			break;
3667		}
3668
3669		scsi_inquiry(&start_ccb->csio,
3670			     /*retries*/da_retry_count,
3671			     /*cbfcnp*/dadone_probebdc,
3672			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3673			     /*inq_buf*/(u_int8_t *)bdc,
3674			     /*inq_len*/sizeof(*bdc),
3675			     /*evpd*/TRUE,
3676			     /*page_code*/SVPD_BDC,
3677			     /*sense_len*/SSD_MIN_SIZE,
3678			     /*timeout*/da_default_timeout * 1000);
3679		start_ccb->ccb_h.ccb_bp = NULL;
3680		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_BDC;
3681		xpt_action(start_ccb);
3682		break;
3683	}
3684	case DA_STATE_PROBE_ATA:
3685	{
3686		struct ata_params *ata_params;
3687
3688		if (!scsi_vpd_supported_page(periph, SVPD_ATA_INFORMATION)) {
3689			if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
3690			 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
3691				/*
3692				 * Note that if the ATA VPD page isn't
3693				 * supported, we aren't talking to an ATA
3694				 * device anyway.  Support for that VPD
3695				 * page is mandatory for SCSI to ATA (SAT)
3696				 * translation layers.
3697				 */
3698				softc->state = DA_STATE_PROBE_ZONE;
3699				goto skipstate;
3700			}
3701			daprobedone(periph, start_ccb);
3702			break;
3703		}
3704
3705		ata_params = &periph->path->device->ident_data;
3706
3707		scsi_ata_identify(&start_ccb->csio,
3708				  /*retries*/da_retry_count,
3709				  /*cbfcnp*/dadone_probeata,
3710				  /*tag_action*/MSG_SIMPLE_Q_TAG,
3711				  /*data_ptr*/(u_int8_t *)ata_params,
3712				  /*dxfer_len*/sizeof(*ata_params),
3713				  /*sense_len*/SSD_FULL_SIZE,
3714				  /*timeout*/da_default_timeout * 1000);
3715		start_ccb->ccb_h.ccb_bp = NULL;
3716		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA;
3717		xpt_action(start_ccb);
3718		break;
3719	}
3720	case DA_STATE_PROBE_ATA_LOGDIR:
3721	{
3722		struct ata_gp_log_dir *log_dir;
3723		int retval;
3724
3725		retval = 0;
3726
3727		if ((softc->flags & DA_FLAG_CAN_ATA_LOG) == 0) {
3728			/*
3729			 * If we don't have log support, not much point in
3730			 * trying to probe zone support.
3731			 */
3732			daprobedone(periph, start_ccb);
3733			break;
3734		}
3735
3736		/*
3737		 * If we have an ATA device (the SCSI ATA Information VPD
3738		 * page should be present and the ATA identify should have
3739		 * succeeded) and it supports logs, ask for the log directory.
3740		 */
3741
3742		log_dir = malloc(sizeof(*log_dir), M_SCSIDA, M_NOWAIT|M_ZERO);
3743		if (log_dir == NULL) {
3744			xpt_print(periph->path, "Couldn't malloc log_dir "
3745			    "data\n");
3746			daprobedone(periph, start_ccb);
3747			break;
3748		}
3749
3750		retval = scsi_ata_read_log(&start_ccb->csio,
3751		    /*retries*/ da_retry_count,
3752		    /*cbfcnp*/ dadone_probeatalogdir,
3753		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3754		    /*log_address*/ ATA_LOG_DIRECTORY,
3755		    /*page_number*/ 0,
3756		    /*block_count*/ 1,
3757		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3758				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3759		    /*data_ptr*/ (uint8_t *)log_dir,
3760		    /*dxfer_len*/ sizeof(*log_dir),
3761		    /*sense_len*/ SSD_FULL_SIZE,
3762		    /*timeout*/ da_default_timeout * 1000);
3763
3764		if (retval != 0) {
3765		xpt_print(periph->path, "scsi_ata_read_log() failed!\n");
3766			free(log_dir, M_SCSIDA);
3767			daprobedone(periph, start_ccb);
3768			break;
3769		}
3770		start_ccb->ccb_h.ccb_bp = NULL;
3771		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_LOGDIR;
3772		xpt_action(start_ccb);
3773		break;
3774	}
3775	case DA_STATE_PROBE_ATA_IDDIR:
3776	{
3777		struct ata_identify_log_pages *id_dir;
3778		int retval;
3779
3780		retval = 0;
3781
3782		/*
3783		 * Check here to see whether the Identify Device log is
3784		 * supported in the directory of logs.  If so, continue
3785		 * with requesting the log of identify device pages.
3786		 */
3787		if ((softc->flags & DA_FLAG_CAN_ATA_IDLOG) == 0) {
3788			daprobedone(periph, start_ccb);
3789			break;
3790		}
3791
3792		id_dir = malloc(sizeof(*id_dir), M_SCSIDA, M_NOWAIT | M_ZERO);
3793		if (id_dir == NULL) {
3794			xpt_print(periph->path, "Couldn't malloc id_dir "
3795			    "data\n");
3796			daprobedone(periph, start_ccb);
3797			break;
3798		}
3799
3800		retval = scsi_ata_read_log(&start_ccb->csio,
3801		    /*retries*/ da_retry_count,
3802		    /*cbfcnp*/ dadone_probeataiddir,
3803		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3804		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3805		    /*page_number*/ ATA_IDL_PAGE_LIST,
3806		    /*block_count*/ 1,
3807		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3808				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3809		    /*data_ptr*/ (uint8_t *)id_dir,
3810		    /*dxfer_len*/ sizeof(*id_dir),
3811		    /*sense_len*/ SSD_FULL_SIZE,
3812		    /*timeout*/ da_default_timeout * 1000);
3813
3814		if (retval != 0) {
3815		xpt_print(periph->path, "scsi_ata_read_log() failed!\n");
3816			free(id_dir, M_SCSIDA);
3817			daprobedone(periph, start_ccb);
3818			break;
3819		}
3820		start_ccb->ccb_h.ccb_bp = NULL;
3821		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_IDDIR;
3822		xpt_action(start_ccb);
3823		break;
3824	}
3825	case DA_STATE_PROBE_ATA_SUP:
3826	{
3827		struct ata_identify_log_sup_cap *sup_cap;
3828		int retval;
3829
3830		retval = 0;
3831
3832		/*
3833		 * Check here to see whether the Supported Capabilities log
3834		 * is in the list of Identify Device logs.
3835		 */
3836		if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP) == 0) {
3837			daprobedone(periph, start_ccb);
3838			break;
3839		}
3840
3841		sup_cap = malloc(sizeof(*sup_cap), M_SCSIDA, M_NOWAIT|M_ZERO);
3842		if (sup_cap == NULL) {
3843			xpt_print(periph->path, "Couldn't malloc sup_cap "
3844			    "data\n");
3845			daprobedone(periph, start_ccb);
3846			break;
3847		}
3848
3849		retval = scsi_ata_read_log(&start_ccb->csio,
3850		    /*retries*/ da_retry_count,
3851		    /*cbfcnp*/ dadone_probeatasup,
3852		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3853		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3854		    /*page_number*/ ATA_IDL_SUP_CAP,
3855		    /*block_count*/ 1,
3856		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3857				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3858		    /*data_ptr*/ (uint8_t *)sup_cap,
3859		    /*dxfer_len*/ sizeof(*sup_cap),
3860		    /*sense_len*/ SSD_FULL_SIZE,
3861		    /*timeout*/ da_default_timeout * 1000);
3862
3863		if (retval != 0) {
3864		xpt_print(periph->path, "scsi_ata_read_log() failed!\n");
3865			free(sup_cap, M_SCSIDA);
3866			daprobedone(periph, start_ccb);
3867			break;
3868		}
3869
3870		start_ccb->ccb_h.ccb_bp = NULL;
3871		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_SUP;
3872		xpt_action(start_ccb);
3873		break;
3874	}
3875	case DA_STATE_PROBE_ATA_ZONE:
3876	{
3877		struct ata_zoned_info_log *ata_zone;
3878		int retval;
3879
3880		retval = 0;
3881
3882		/*
3883		 * Check here to see whether the zoned device information
3884		 * page is supported.  If so, continue on to request it.
3885		 * If not, we're done probing.
3886		 */
3887		if ((softc->flags & DA_FLAG_CAN_ATA_ZONE) == 0) {
3888			daprobedone(periph, start_ccb);
3889			break;
3890		}
3891		ata_zone = malloc(sizeof(*ata_zone), M_SCSIDA,
3892				  M_NOWAIT|M_ZERO);
3893		if (ata_zone == NULL) {
3894			xpt_print(periph->path, "Couldn't malloc ata_zone "
3895			    "data\n");
3896			daprobedone(periph, start_ccb);
3897			break;
3898		}
3899
3900		retval = scsi_ata_read_log(&start_ccb->csio,
3901		    /*retries*/ da_retry_count,
3902		    /*cbfcnp*/ dadone_probeatazone,
3903		    /*tag_action*/ MSG_SIMPLE_Q_TAG,
3904		    /*log_address*/ ATA_IDENTIFY_DATA_LOG,
3905		    /*page_number*/ ATA_IDL_ZDI,
3906		    /*block_count*/ 1,
3907		    /*protocol*/ softc->flags & DA_FLAG_CAN_ATA_DMA ?
3908				 AP_PROTO_DMA : AP_PROTO_PIO_IN,
3909		    /*data_ptr*/ (uint8_t *)ata_zone,
3910		    /*dxfer_len*/ sizeof(*ata_zone),
3911		    /*sense_len*/ SSD_FULL_SIZE,
3912		    /*timeout*/ da_default_timeout * 1000);
3913
3914		if (retval != 0) {
3915		xpt_print(periph->path, "scsi_ata_read_log() failed!\n");
3916			free(ata_zone, M_SCSIDA);
3917			daprobedone(periph, start_ccb);
3918			break;
3919		}
3920		start_ccb->ccb_h.ccb_bp = NULL;
3921		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ATA_ZONE;
3922		xpt_action(start_ccb);
3923
3924		break;
3925	}
3926	case DA_STATE_PROBE_ZONE:
3927	{
3928		struct scsi_vpd_zoned_bdc *bdc;
3929
3930		/*
3931		 * Note that this page will be supported for SCSI protocol
3932		 * devices that support ZBC (SMR devices), as well as ATA
3933		 * protocol devices that are behind a SAT (SCSI to ATA
3934		 * Translation) layer that supports converting ZBC commands
3935		 * to their ZAC equivalents.
3936		 */
3937		if (!scsi_vpd_supported_page(periph, SVPD_ZONED_BDC)) {
3938			daprobedone(periph, start_ccb);
3939			break;
3940		}
3941		bdc = (struct scsi_vpd_zoned_bdc *)
3942			malloc(sizeof(*bdc), M_SCSIDA, M_NOWAIT|M_ZERO);
3943
3944		if (bdc == NULL) {
3945			xpt_release_ccb(start_ccb);
3946			xpt_print(periph->path, "Couldn't malloc zone VPD "
3947			    "data\n");
3948			break;
3949		}
3950		scsi_inquiry(&start_ccb->csio,
3951			     /*retries*/da_retry_count,
3952			     /*cbfcnp*/dadone_probezone,
3953			     /*tag_action*/MSG_SIMPLE_Q_TAG,
3954			     /*inq_buf*/(u_int8_t *)bdc,
3955			     /*inq_len*/sizeof(*bdc),
3956			     /*evpd*/TRUE,
3957			     /*page_code*/SVPD_ZONED_BDC,
3958			     /*sense_len*/SSD_FULL_SIZE,
3959			     /*timeout*/da_default_timeout * 1000);
3960		start_ccb->ccb_h.ccb_bp = NULL;
3961		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE_ZONE;
3962		xpt_action(start_ccb);
3963		break;
3964	}
3965	}
3966}
3967
3968/*
3969 * In each of the methods below, while it's the caller's
3970 * responsibility to ensure the request will fit into a
3971 * single device request, we might have changed the delete
3972 * method due to the device incorrectly advertising either
3973 * its supported methods or limits.
3974 *
3975 * To prevent this causing further issues, we validate
3976 * against the method's limits and warn, which would
3977 * otherwise be unnecessary.
3978 */
3979static void
3980da_delete_unmap(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
3981{
3982	struct da_softc *softc = (struct da_softc *)periph->softc;
3983	struct bio *bp1;
3984	uint8_t *buf = softc->unmap_buf;
3985	struct scsi_unmap_desc *d = (void *)&buf[UNMAP_HEAD_SIZE];
3986	uint64_t lba, lastlba = (uint64_t)-1;
3987	uint64_t totalcount = 0;
3988	uint64_t count;
3989	uint32_t c, lastcount = 0, ranges = 0;
3990
3991	/*
3992	 * Currently this doesn't take the UNMAP
3993	 * Granularity and Granularity Alignment
3994	 * fields into account.
3995	 *
3996	 * This could result in both suboptimal unmap
3997	 * requests as well as UNMAP calls unmapping
3998	 * fewer LBAs than requested.
3999	 */
4000
4001	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
4002	bp1 = bp;
4003	do {
4004		/*
4005		 * Note: ada and da are different in how they store the
4006		 * pending bp's in a trim. ada stores all of them in the
4007		 * trim_req.bps. da stores all but the first one in the
4008		 * delete_run_queue. ada then completes all the bps in
4009		 * its adadone() loop. da completes all the bps in the
4010		 * delete_run_queue in dadone, and relies on the biodone
4011		 * after to complete. This should be reconciled since there's
4012		 * no real reason to do it differently. XXX
4013		 */
4014		if (bp1 != bp)
4015			bioq_insert_tail(&softc->delete_run_queue, bp1);
4016		lba = bp1->bio_pblkno;
4017		count = bp1->bio_bcount / softc->params.secsize;
4018
4019		/* Try to extend the previous range. */
4020		if (lba == lastlba) {
4021			c = omin(count, UNMAP_RANGE_MAX - lastcount);
4022			lastlba += c;
4023			lastcount += c;
4024			scsi_ulto4b(lastcount, d[ranges - 1].length);
4025			count -= c;
4026			lba += c;
4027			totalcount += c;
4028		} else if ((softc->quirks & DA_Q_STRICT_UNMAP) &&
4029		    softc->unmap_gran != 0) {
4030			/* Align length of the previous range. */
4031			if ((c = lastcount % softc->unmap_gran) != 0) {
4032				if (lastcount <= c) {
4033					totalcount -= lastcount;
4034					lastlba = (uint64_t)-1;
4035					lastcount = 0;
4036					ranges--;
4037				} else {
4038					totalcount -= c;
4039					lastlba -= c;
4040					lastcount -= c;
4041					scsi_ulto4b(lastcount,
4042					    d[ranges - 1].length);
4043				}
4044			}
4045			/* Align beginning of the new range. */
4046			c = (lba - softc->unmap_gran_align) % softc->unmap_gran;
4047			if (c != 0) {
4048				c = softc->unmap_gran - c;
4049				if (count <= c) {
4050					count = 0;
4051				} else {
4052					lba += c;
4053					count -= c;
4054				}
4055			}
4056		}
4057
4058		while (count > 0) {
4059			c = omin(count, UNMAP_RANGE_MAX);
4060			if (totalcount + c > softc->unmap_max_lba ||
4061			    ranges >= softc->unmap_max_ranges) {
4062				xpt_print(periph->path,
4063				    "%s issuing short delete %ld > %ld "
4064				    "|| %d >= %d\n",
4065				    da_delete_method_desc[softc->delete_method],
4066				    totalcount + c, softc->unmap_max_lba,
4067				    ranges, softc->unmap_max_ranges);
4068				break;
4069			}
4070			scsi_u64to8b(lba, d[ranges].lba);
4071			scsi_ulto4b(c, d[ranges].length);
4072			lba += c;
4073			totalcount += c;
4074			ranges++;
4075			count -= c;
4076			lastlba = lba;
4077			lastcount = c;
4078		}
4079		bp1 = cam_iosched_next_trim(softc->cam_iosched);
4080		if (bp1 == NULL)
4081			break;
4082		if (ranges >= softc->unmap_max_ranges ||
4083		    totalcount + bp1->bio_bcount /
4084		    softc->params.secsize > softc->unmap_max_lba) {
4085			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4086			break;
4087		}
4088	} while (1);
4089
4090	/* Align length of the last range. */
4091	if ((softc->quirks & DA_Q_STRICT_UNMAP) && softc->unmap_gran != 0 &&
4092	    (c = lastcount % softc->unmap_gran) != 0) {
4093		if (lastcount <= c)
4094			ranges--;
4095		else
4096			scsi_ulto4b(lastcount - c, d[ranges - 1].length);
4097	}
4098
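	/*
	 * Fill in the UNMAP parameter list header: bytes 0-1 hold the
	 * UNMAP data length (everything after these two bytes), bytes
	 * 2-3 hold the block descriptor data length, and the 16-byte
	 * descriptors themselves start at byte 8.
	 */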
4099	scsi_ulto2b(ranges * 16 + 6, &buf[0]);
4100	scsi_ulto2b(ranges * 16, &buf[2]);
4101
4102	scsi_unmap(&ccb->csio,
4103		   /*retries*/da_retry_count,
4104		   /*cbfcnp*/dadone,
4105		   /*tag_action*/MSG_SIMPLE_Q_TAG,
4106		   /*byte2*/0,
4107		   /*data_ptr*/ buf,
4108		   /*dxfer_len*/ ranges * 16 + 8,
4109		   /*sense_len*/SSD_FULL_SIZE,
4110		   da_default_timeout * 1000);
4111	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4112	ccb->ccb_h.flags |= CAM_UNLOCKED;
4113	softc->trim_count++;
4114	softc->trim_ranges += ranges;
4115	softc->trim_lbas += totalcount;
4116	cam_iosched_submit_trim(softc->cam_iosched);
4117}
4118
4119static void
4120da_delete_trim(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
4121{
4122	struct da_softc *softc = (struct da_softc *)periph->softc;
4123	struct bio *bp1;
4124	uint8_t *buf = softc->unmap_buf;
4125	uint64_t lastlba = (uint64_t)-1;
4126	uint64_t count;
4127	uint64_t lba;
4128	uint32_t lastcount = 0, c, requestcount;
4129	int ranges = 0, off, block_count;
4130
4131	bzero(softc->unmap_buf, sizeof(softc->unmap_buf));
4132	bp1 = bp;
4133	do {
4134		if (bp1 != bp)//XXX imp XXX
4135			bioq_insert_tail(&softc->delete_run_queue, bp1);
4136		lba = bp1->bio_pblkno;
4137		count = bp1->bio_bcount / softc->params.secsize;
4138		requestcount = count;
4139
4140		/* Try to extend the previous range. */
4141		if (lba == lastlba) {
4142			c = omin(count, ATA_DSM_RANGE_MAX - lastcount);
4143			lastcount += c;
4144			off = (ranges - 1) * 8;
4145			buf[off + 6] = lastcount & 0xff;
4146			buf[off + 7] = (lastcount >> 8) & 0xff;
4147			count -= c;
4148			lba += c;
4149		}
4150
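		/*
		 * Emit 8-byte DSM TRIM range entries: a 48-bit starting
		 * LBA in bytes 0-5 (little endian) followed by a 16-bit
		 * block count in bytes 6-7.
		 */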
4151		while (count > 0) {
4152			c = omin(count, ATA_DSM_RANGE_MAX);
4153			off = ranges * 8;
4154
4155			buf[off + 0] = lba & 0xff;
4156			buf[off + 1] = (lba >> 8) & 0xff;
4157			buf[off + 2] = (lba >> 16) & 0xff;
4158			buf[off + 3] = (lba >> 24) & 0xff;
4159			buf[off + 4] = (lba >> 32) & 0xff;
4160			buf[off + 5] = (lba >> 40) & 0xff;
4161			buf[off + 6] = c & 0xff;
4162			buf[off + 7] = (c >> 8) & 0xff;
4163			lba += c;
4164			ranges++;
4165			count -= c;
4166			lastcount = c;
4167			if (count != 0 && ranges == softc->trim_max_ranges) {
4168				xpt_print(periph->path,
4169				    "%s issuing short delete %ju > %ju\n",
4170				    da_delete_method_desc[softc->delete_method],
4171				    (uintmax_t)requestcount,
4172				    (uintmax_t)(softc->trim_max_ranges - ranges) *
4173				    ATA_DSM_RANGE_MAX);
4174				break;
4175			}
4176		}
4177		lastlba = lba;
4178		bp1 = cam_iosched_next_trim(softc->cam_iosched);
4179		if (bp1 == NULL)
4180			break;
4181		if (bp1->bio_bcount / softc->params.secsize >
4182		    (softc->trim_max_ranges - ranges) * ATA_DSM_RANGE_MAX) {
4183			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4184			break;
4185		}
4186	} while (1);
4187
4188	block_count = howmany(ranges, ATA_DSM_BLK_RANGES);
4189	scsi_ata_trim(&ccb->csio,
4190		      /*retries*/da_retry_count,
4191		      /*cbfcnp*/dadone,
4192		      /*tag_action*/MSG_SIMPLE_Q_TAG,
4193		      block_count,
4194		      /*data_ptr*/buf,
4195		      /*dxfer_len*/block_count * ATA_DSM_BLK_SIZE,
4196		      /*sense_len*/SSD_FULL_SIZE,
4197		      da_default_timeout * 1000);
4198	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4199	ccb->ccb_h.flags |= CAM_UNLOCKED;
4200	cam_iosched_submit_trim(softc->cam_iosched);
4201}
4202
4203/*
4204 * We calculate ws_max_blks here based on d_delmaxsize instead
4205 * of using softc->ws_max_blks, as the latter is the absolute max
4206 * for the device, not the protocol max, which may well be lower.
4207 */
4208static void
4209da_delete_ws(struct cam_periph *periph, union ccb *ccb, struct bio *bp)
4210{
4211	struct da_softc *softc;
4212	struct bio *bp1;
4213	uint64_t ws_max_blks;
4214	uint64_t lba;
4215	uint64_t count; /* forward compat with WS32 */
4216
4217	softc = (struct da_softc *)periph->softc;
4218	ws_max_blks = softc->disk->d_delmaxsize / softc->params.secsize;
4219	lba = bp->bio_pblkno;
4220	count = 0;
4221	bp1 = bp;
4222	do {
4223		if (bp1 != bp)//XXX imp XXX
4224			bioq_insert_tail(&softc->delete_run_queue, bp1);
4225		count += bp1->bio_bcount / softc->params.secsize;
4226		if (count > ws_max_blks) {
4227			xpt_print(periph->path,
4228			    "%s issuing short delete %ld > %ld\n",
4229			    da_delete_method_desc[softc->delete_method],
4230			    count, ws_max_blks);
4231			count = omin(count, ws_max_blks);
4232			break;
4233		}
4234		bp1 = cam_iosched_next_trim(softc->cam_iosched);
4235		if (bp1 == NULL)
4236			break;
4237		if (lba + count != bp1->bio_pblkno ||
4238		    count + bp1->bio_bcount /
4239		    softc->params.secsize > ws_max_blks) {
4240			cam_iosched_put_back_trim(softc->cam_iosched, bp1);
4241			break;
4242		}
4243	} while (1);
4244
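	/*
	 * WRITE SAME transfers a single logical block of data (here the
	 * all-zeroes zero_region) that the device then writes to, or with
	 * SWS_UNMAP set may unmap, every block in the LBA range.
	 */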
4245	scsi_write_same(&ccb->csio,
4246			/*retries*/da_retry_count,
4247			/*cbfcnp*/dadone,
4248			/*tag_action*/MSG_SIMPLE_Q_TAG,
4249			/*byte2*/softc->delete_method ==
4250			    DA_DELETE_ZERO ? 0 : SWS_UNMAP,
4251			softc->delete_method == DA_DELETE_WS16 ? 16 : 10,
4252			/*lba*/lba,
4253			/*block_count*/count,
4254			/*data_ptr*/ __DECONST(void *, zero_region),
4255			/*dxfer_len*/ softc->params.secsize,
4256			/*sense_len*/SSD_FULL_SIZE,
4257			da_default_timeout * 1000);
4258	ccb->ccb_h.ccb_state = DA_CCB_DELETE;
4259	ccb->ccb_h.flags |= CAM_UNLOCKED;
4260	cam_iosched_submit_trim(softc->cam_iosched);
4261}
4262
4263static int
4264cmd6workaround(union ccb *ccb)
4265{
4266	struct scsi_rw_6 cmd6;
4267	struct scsi_rw_10 *cmd10;
4268	struct da_softc *softc;
4269	u_int8_t *cdb;
4270	struct bio *bp;
4271	int frozen;
4272
4273	cdb = ccb->csio.cdb_io.cdb_bytes;
4274	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
4275
4276	if (ccb->ccb_h.ccb_state == DA_CCB_DELETE) {
4277		da_delete_methods old_method = softc->delete_method;
4278
4279		/*
4280		 * Typically there are two reasons for failure here:
4281		 * 1. Delete method was detected as supported but isn't.
4282		 * 2. Delete failed due to invalid params, e.g. too big.
4283		 *
4284		 * While we will attempt to choose an alternative delete method,
4285		 * this may result in short deletes if the existing delete
4286		 * requests from GEOM are too big for the new method chosen.
4287		 *
4288		 * This method assumes that the error which triggered this
4289		 * will not retry the I/O; otherwise a panic will occur.
4290		 */
4291		dadeleteflag(softc, old_method, 0);
4292		dadeletemethodchoose(softc, DA_DELETE_DISABLE);
4293		if (softc->delete_method == DA_DELETE_DISABLE)
4294			xpt_print(ccb->ccb_h.path,
4295				  "%s failed, disabling BIO_DELETE\n",
4296				  da_delete_method_desc[old_method]);
4297		else
4298			xpt_print(ccb->ccb_h.path,
4299				  "%s failed, switching to %s BIO_DELETE\n",
4300				  da_delete_method_desc[old_method],
4301				  da_delete_method_desc[softc->delete_method]);
4302
4303		while ((bp = bioq_takefirst(&softc->delete_run_queue)) != NULL)
4304			cam_iosched_queue_work(softc->cam_iosched, bp);
4305		cam_iosched_queue_work(softc->cam_iosched,
4306		    (struct bio *)ccb->ccb_h.ccb_bp);
4307		ccb->ccb_h.ccb_bp = NULL;
4308		return (0);
4309	}
4310
4311	/* Detect unsupported PREVENT ALLOW MEDIUM REMOVAL. */
4312	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
4313	    (*cdb == PREVENT_ALLOW) &&
4314	    (softc->quirks & DA_Q_NO_PREVENT) == 0) {
4315		if (bootverbose)
4316			xpt_print(ccb->ccb_h.path,
4317			    "PREVENT ALLOW MEDIUM REMOVAL not supported.\n");
4318		softc->quirks |= DA_Q_NO_PREVENT;
4319		return (0);
4320	}
4321
4322	/* Detect unsupported SYNCHRONIZE CACHE(10). */
4323	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) == 0 &&
4324	    (*cdb == SYNCHRONIZE_CACHE) &&
4325	    (softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
4326		if (bootverbose)
4327			xpt_print(ccb->ccb_h.path,
4328			    "SYNCHRONIZE CACHE(10) not supported.\n");
4329		softc->quirks |= DA_Q_NO_SYNC_CACHE;
4330		softc->disk->d_flags &= ~DISKFLAG_CANFLUSHCACHE;
4331		return (0);
4332	}
4333
4334	/* Translation only possible if CDB is an array and cmd is R/W6 */
4335	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
4336	    (*cdb != READ_6 && *cdb != WRITE_6))
4337		return (0);
4338
4339	xpt_print(ccb->ccb_h.path, "READ(6)/WRITE(6) not supported, "
4340	    "increasing minimum_cmd_size to 10.\n");
4341	softc->minimum_cmd_size = 10;
4342
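	/*
	 * Save the 6-byte CDB aside and rebuild a 10-byte CDB in place:
	 * the 6-byte command's LBA widens into the 32-bit addr field and
	 * its 8-bit transfer length into the 16-bit length field.
	 */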
4343	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
4344	cmd10 = (struct scsi_rw_10 *)cdb;
4345	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
4346	cmd10->byte2 = 0;
4347	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
4348	cmd10->reserved = 0;
4349	scsi_ulto2b(cmd6.length, cmd10->length);
4350	cmd10->control = cmd6.control;
4351	ccb->csio.cdb_len = sizeof(*cmd10);
4352
4353	/* Requeue request, unfreezing queue if necessary */
4354	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
4355	ccb->ccb_h.status = CAM_REQUEUE_REQ;
4356	xpt_action(ccb);
4357	if (frozen) {
4358		cam_release_devq(ccb->ccb_h.path,
4359				 /*relsim_flags*/0,
4360				 /*reduction*/0,
4361				 /*timeout*/0,
4362				 /*getcount_only*/0);
4363	}
4364	return (ERESTART);
4365}
4366
4367static void
4368dazonedone(struct cam_periph *periph, union ccb *ccb)
4369{
4370	struct da_softc *softc;
4371	struct bio *bp;
4372
4373	softc = periph->softc;
4374	bp = (struct bio *)ccb->ccb_h.ccb_bp;
4375
4376	switch (bp->bio_zone.zone_cmd) {
4377	case DISK_ZONE_OPEN:
4378	case DISK_ZONE_CLOSE:
4379	case DISK_ZONE_FINISH:
4380	case DISK_ZONE_RWP:
4381		break;
4382	case DISK_ZONE_REPORT_ZONES: {
4383		uint32_t avail_len;
4384		struct disk_zone_report *rep;
4385		struct scsi_report_zones_hdr *hdr;
4386		struct scsi_report_zones_desc *desc;
4387		struct disk_zone_rep_entry *entry;
4388		uint32_t hdr_len, num_avail;
4389		uint32_t num_to_fill, i;
4390		int ata;
4391
4392		rep = &bp->bio_zone.zone_params.report;
4393		avail_len = ccb->csio.dxfer_len - ccb->csio.resid;
4394		/*
4395		 * Note that bio_resid isn't normally used for zone
4396		 * commands, but it is used by devstat_end_transaction_bio()
4397		 * to determine how much data was transferred.  Because
4398		 * the size of the SCSI/ATA data structures is different
4399		 * than the size of the BIO interface structures, the
4400		 * amount of data actually transferred from the drive will
4401		 * be different than the amount of data transferred to
4402		 * the user.
4403		 */
4404		bp->bio_resid = ccb->csio.resid;
4405		hdr = (struct scsi_report_zones_hdr *)ccb->csio.data_ptr;
4406		if (avail_len < sizeof(*hdr)) {
4407			/*
4408			 * Is there a better error than EIO here?  We asked
4409			 * for at least the header, and we got less than
4410			 * that.
4411			 */
4412			bp->bio_error = EIO;
4413			bp->bio_flags |= BIO_ERROR;
4414			bp->bio_resid = bp->bio_bcount;
4415			break;
4416		}
4417
4418		if (softc->zone_interface == DA_ZONE_IF_ATA_PASS)
4419			ata = 1;
4420		else
4421			ata = 0;
4422
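		/*
		 * The header's length field is the number of zone descriptor
		 * bytes that follow the header; the ATA (ZAC) version is
		 * little endian while the SCSI (ZBC) version is big endian.
		 */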
4423		hdr_len = ata ? le32dec(hdr->length) :
4424				scsi_4btoul(hdr->length);
4425		if (hdr_len > 0)
4426			rep->entries_available = hdr_len / sizeof(*desc);
4427		else
4428			rep->entries_available = 0;
4429		/*
4430		 * NOTE: using the same values for the BIO version of the
4431		 * same field as the SCSI/ATA values.  This means we could
4432		 * get some additional values that aren't defined in bio.h
4433		 * if more values of the same field are defined later.
4434		 */
4435		rep->header.same = hdr->byte4 & SRZ_SAME_MASK;
4436		rep->header.maximum_lba = ata ?  le64dec(hdr->maximum_lba) :
4437					  scsi_8btou64(hdr->maximum_lba);
4438		/*
4439		 * If the drive reports no entries that match the query,
4440		 * we're done.
4441		 */
4442		if (hdr_len == 0) {
4443			rep->entries_filled = 0;
4444			break;
4445		}
4446
4447		num_avail = min((avail_len - sizeof(*hdr)) / sizeof(*desc),
4448				hdr_len / sizeof(*desc));
4449		/*
4450		 * If the drive didn't return any data, then we're done.
4451		 */
4452		if (num_avail == 0) {
4453			rep->entries_filled = 0;
4454			break;
4455		}
4456
4457		num_to_fill = min(num_avail, rep->entries_allocated);
4458		/*
4459		 * If the user didn't allocate any entries for us to fill,
4460		 * we're done.
4461		 */
4462		if (num_to_fill == 0) {
4463			rep->entries_filled = 0;
4464			break;
4465		}
4466
4467		for (i = 0, desc = &hdr->desc_list[0], entry=&rep->entries[0];
4468		     i < num_to_fill; i++, desc++, entry++) {
4469			/*
4470			 * NOTE: we're mapping the values here directly
4471			 * from the SCSI/ATA bit definitions to the bio.h
4472			 * definitions.  There is also a warning in
4473			 * disk_zone.h, but the impact is that if
4474			 * additional values are added in the SCSI/ATA
4475			 * specs these will be visible to consumers of
4476			 * this interface.
4477			 */
4478			entry->zone_type = desc->zone_type & SRZ_TYPE_MASK;
4479			entry->zone_condition =
4480			    (desc->zone_flags & SRZ_ZONE_COND_MASK) >>
4481			    SRZ_ZONE_COND_SHIFT;
4482			entry->zone_flags |= desc->zone_flags &
4483			    (SRZ_ZONE_NON_SEQ|SRZ_ZONE_RESET);
4484			entry->zone_length =
4485			    ata ? le64dec(desc->zone_length) :
4486				  scsi_8btou64(desc->zone_length);
4487			entry->zone_start_lba =
4488			    ata ? le64dec(desc->zone_start_lba) :
4489				  scsi_8btou64(desc->zone_start_lba);
4490			entry->write_pointer_lba =
4491			    ata ? le64dec(desc->write_pointer_lba) :
4492				  scsi_8btou64(desc->write_pointer_lba);
4493		}
4494		rep->entries_filled = num_to_fill;
4495		break;
4496	}
4497	case DISK_ZONE_GET_PARAMS:
4498	default:
4499		/*
4500		 * In theory we should not get a GET_PARAMS bio, since it
4501		 * should be handled without queueing the command to the
4502		 * drive.
4503		 */
4504		panic("%s: Invalid zone command %d", __func__,
4505		    bp->bio_zone.zone_cmd);
4506		break;
4507	}
4508
4509	if (bp->bio_zone.zone_cmd == DISK_ZONE_REPORT_ZONES)
4510		free(ccb->csio.data_ptr, M_SCSIDA);
4511}
4512
4513static void
4514dadone(struct cam_periph *periph, union ccb *done_ccb)
4515{
4516	struct bio *bp, *bp1;
4517	struct da_softc *softc;
4518	struct ccb_scsiio *csio;
4519	u_int32_t  priority;
4520	da_ccb_state state;
4521
4522	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));
4523
4524	softc = (struct da_softc *)periph->softc;
4525	priority = done_ccb->ccb_h.pinfo.priority;
4526	csio = &done_ccb->csio;
4527
4528#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4529	if (csio->bio != NULL)
4530		biotrack(csio->bio, __func__);
4531#endif
4532	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
4533
4534	cam_periph_lock(periph);
4535	bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4536	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
4537		int error;
4538		int sf;
4539
4540		if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
4541			sf = SF_RETRY_UA;
4542		else
4543			sf = 0;
4544
4545		error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
4546		if (error == ERESTART) {
4547			/* A retry was scheduled, so just return. */
4548			cam_periph_unlock(periph);
4549			return;
4550		}
4551		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
4552		if (error != 0) {
4553			int queued_error;
4554
4555			/*
4556			 * return all queued I/O with EIO, so that
4557			 * the client can retry these I/Os in the
4558			 * proper order should it attempt to recover.
4559			 */
4560			queued_error = EIO;
4561
4562			if (error == ENXIO
4563			 && (softc->flags & DA_FLAG_PACK_INVALID)== 0) {
4564				/*
4565				 * Catastrophic error.  Mark our pack as
4566				 * invalid.
4567				 *
4568				 * XXX See if this is really a media
4569				 * XXX change first?
4570				 */
4571				xpt_print(periph->path, "Invalidating pack\n");
4572				softc->flags |= DA_FLAG_PACK_INVALID;
4573#ifdef CAM_IO_STATS
4574				softc->invalidations++;
4575#endif
4576				queued_error = ENXIO;
4577			}
4578			cam_iosched_flush(softc->cam_iosched, NULL,
4579			   queued_error);
4580			if (bp != NULL) {
4581				bp->bio_error = error;
4582				bp->bio_resid = bp->bio_bcount;
4583				bp->bio_flags |= BIO_ERROR;
4584			}
4585		} else if (bp != NULL) {
4586			if (state == DA_CCB_DELETE)
4587				bp->bio_resid = 0;
4588			else
4589				bp->bio_resid = csio->resid;
4590			bp->bio_error = 0;
4591			if (bp->bio_resid != 0)
4592				bp->bio_flags |= BIO_ERROR;
4593		}
4594		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4595			cam_release_devq(done_ccb->ccb_h.path,
4596					 /*relsim_flags*/0,
4597					 /*reduction*/0,
4598					 /*timeout*/0,
4599					 /*getcount_only*/0);
4600	} else if (bp != NULL) {
4601		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
4602			panic("REQ_CMP with QFRZN");
4603		if (bp->bio_cmd == BIO_ZONE)
4604			dazonedone(periph, done_ccb);
4605		else if (state == DA_CCB_DELETE)
4606			bp->bio_resid = 0;
4607		else
4608			bp->bio_resid = csio->resid;
4609		if ((csio->resid > 0) && (bp->bio_cmd != BIO_ZONE))
4610			bp->bio_flags |= BIO_ERROR;
4611		if (softc->error_inject != 0) {
4612			bp->bio_error = softc->error_inject;
4613			bp->bio_resid = bp->bio_bcount;
4614			bp->bio_flags |= BIO_ERROR;
4615			softc->error_inject = 0;
4616		}
4617	}
4618
4619	if (bp != NULL)
4620		biotrack(bp, __func__);
4621	LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
4622	if (LIST_EMPTY(&softc->pending_ccbs))
4623		softc->flags |= DA_FLAG_WAS_OTAG;
4624
4625	/*
4626	 * We need to call cam_iosched before we call biodone so that we don't
4627	 * measure any activity that happens in the completion routine, which in
4628	 * the case of sendfile can be quite extensive. Release the periph
4629	 * refcount taken in dastart() for each CCB.
4630	 */
4631	cam_iosched_bio_complete(softc->cam_iosched, bp, done_ccb);
4632	xpt_release_ccb(done_ccb);
4633	KASSERT(softc->refcount >= 1, ("dadone softc %p refcount %d", softc, softc->refcount));
4634	softc->refcount--;
4635	if (state == DA_CCB_DELETE) {
4636		TAILQ_HEAD(, bio) queue;
4637
4638		TAILQ_INIT(&queue);
4639		TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
4640		softc->delete_run_queue.insert_point = NULL;
4641		/*
4642		 * Normally, the xpt_release_ccb() above would make sure
4643		 * that when we have more work to do, that work would
4644		 * get kicked off. However, we specifically keep
4645		 * delete_running set to 0 before the call above to
4646		 * allow other I/O to progress when many BIO_DELETE
4647		 * requests are pushed down. We set delete_running to 0
4648		 * and call daschedule again so that we don't stall if
4649		 * there are no other I/Os pending apart from BIO_DELETEs.
4650		 */
4651		cam_iosched_trim_done(softc->cam_iosched);
4652		daschedule(periph);
4653		cam_periph_unlock(periph);
4654		while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
4655			TAILQ_REMOVE(&queue, bp1, bio_queue);
4656			bp1->bio_error = bp->bio_error;
4657			if (bp->bio_flags & BIO_ERROR) {
4658				bp1->bio_flags |= BIO_ERROR;
4659				bp1->bio_resid = bp1->bio_bcount;
4660			} else
4661				bp1->bio_resid = 0;
4662			biodone(bp1);
4663		}
4664	} else {
4665		daschedule(periph);
4666		cam_periph_unlock(periph);
4667	}
4668	if (bp != NULL)
4669		biodone(bp);
4670	return;
4671}
4672
4673static void
4674dadone_probewp(struct cam_periph *periph, union ccb *done_ccb)
4675{
4676	struct scsi_mode_header_6 *mode_hdr6;
4677	struct scsi_mode_header_10 *mode_hdr10;
4678	struct da_softc *softc;
4679	struct ccb_scsiio *csio;
4680	u_int32_t  priority;
4681	uint8_t dev_spec;
4682
4683	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probewp\n"));
4684
4685	softc = (struct da_softc *)periph->softc;
4686	priority = done_ccb->ccb_h.pinfo.priority;
4687	csio = &done_ccb->csio;
4688
4689	cam_periph_assert(periph, MA_OWNED);
4690
4691	KASSERT(softc->state == DA_STATE_PROBE_WP,
4692	    ("State (%d) not PROBE_WP in dadone_probewp, periph %p ccb %p",
4693		softc->state, periph, done_ccb));
4694	KASSERT((csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) == DA_CCB_PROBE_WP,
4695	    ("CCB State (%lu) not PROBE_WP in dadone_probewp, periph %p ccb %p",
4696		(unsigned long)csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK, periph,
4697		done_ccb));
4698
4699	if (softc->minimum_cmd_size > 6) {
4700		mode_hdr10 = (struct scsi_mode_header_10 *)csio->data_ptr;
4701		dev_spec = mode_hdr10->dev_spec;
4702	} else {
4703		mode_hdr6 = (struct scsi_mode_header_6 *)csio->data_ptr;
4704		dev_spec = mode_hdr6->dev_spec;
4705	}
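	/*
	 * Bit 7 of the mode parameter header's device-specific parameter
	 * is the write protect (WP) bit for direct access devices.
	 */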
4706	if (cam_ccb_status(done_ccb) == CAM_REQ_CMP) {
4707		if ((dev_spec & 0x80) != 0)
4708			softc->disk->d_flags |= DISKFLAG_WRITE_PROTECT;
4709		else
4710			softc->disk->d_flags &= ~DISKFLAG_WRITE_PROTECT;
4711	} else {
4712		int error;
4713
4714		error = daerror(done_ccb, CAM_RETRY_SELTO,
4715				SF_RETRY_UA|SF_NO_PRINT);
4716		if (error == ERESTART)
4717			return;
4718		else if (error != 0) {
4719			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
4720				/* Don't wedge this device's queue */
4721				cam_release_devq(done_ccb->ccb_h.path,
4722						 /*relsim_flags*/0,
4723						 /*reduction*/0,
4724						 /*timeout*/0,
4725						 /*getcount_only*/0);
4726			}
4727		}
4728	}
4729
4730	free(csio->data_ptr, M_SCSIDA);
4731	if ((softc->flags & DA_FLAG_CAN_RC16) != 0)
4732		softc->state = DA_STATE_PROBE_RC16;
4733	else
4734		softc->state = DA_STATE_PROBE_RC;
4735	xpt_release_ccb(done_ccb);
4736	xpt_schedule(periph, priority);
4737	return;
4738}
4739
4740static void
4741dadone_proberc(struct cam_periph *periph, union ccb *done_ccb)
4742{
4743	struct scsi_read_capacity_data *rdcap;
4744	struct scsi_read_capacity_data_long *rcaplong;
4745	struct da_softc *softc;
4746	struct ccb_scsiio *csio;
4747	da_ccb_state state;
4748	char *announce_buf;
4749	u_int32_t  priority;
4750	int lbp, n;
4751
4752	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_proberc\n"));
4753
4754	softc = (struct da_softc *)periph->softc;
4755	priority = done_ccb->ccb_h.pinfo.priority;
4756	csio = &done_ccb->csio;
4757	state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
4758
4759	KASSERT(softc->state == DA_STATE_PROBE_RC || softc->state == DA_STATE_PROBE_RC16,
4760	    ("State (%d) not PROBE_RC* in dadone_proberc, periph %p ccb %p",
4761		softc->state, periph, done_ccb));
4762	KASSERT(state == DA_CCB_PROBE_RC || state == DA_CCB_PROBE_RC16,
4763	    ("CCB State (%lu) not PROBE_RC* in dadone_proberc, periph %p ccb %p",
4764		(unsigned long)state, periph, done_ccb));
4765
4766	lbp = 0;
4767	rdcap = NULL;
4768	rcaplong = NULL;
4769	/* XXX TODO: can this be a malloc? */
4770	announce_buf = softc->announce_temp;
4771	bzero(announce_buf, DA_ANNOUNCETMP_SZ);
4772
4773	if (state == DA_CCB_PROBE_RC)
4774		rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
4775	else
4776		rcaplong = (struct scsi_read_capacity_data_long *)
4777			csio->data_ptr;
4778
4779	cam_periph_assert(periph, MA_OWNED);
4780
4781	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
4782		struct disk_params *dp;
4783		uint32_t block_size;
4784		uint64_t maxsector;
4785		u_int lalba;	/* Lowest aligned LBA. */
4786
4787		if (state == DA_CCB_PROBE_RC) {
4788			block_size = scsi_4btoul(rdcap->length);
4789			maxsector = scsi_4btoul(rdcap->addr);
4790			lalba = 0;
4791
4792			/*
4793			 * According to SBC-2, if the standard 10
4794			 * byte READ CAPACITY command returns 2^32,
4795			 * we should issue the 16 byte version of
4796			 * the command, since the device in question
4797			 * has more sectors than can be represented
4798			 * with the short version of the command.
4799			 */
4800			if (maxsector == 0xffffffff) {
4801				free(rdcap, M_SCSIDA);
4802				softc->state = DA_STATE_PROBE_RC16;
4803				xpt_release_ccb(done_ccb);
4804				xpt_schedule(periph, priority);
4805				return;
4806			}
4807		} else {
4808			block_size = scsi_4btoul(rcaplong->length);
4809			maxsector = scsi_8btou64(rcaplong->addr);
4810			lalba = scsi_2btoul(rcaplong->lalba_lbp);
4811		}
4812
4813		/*
4814		 * Because the GEOM code will just panic if we
4815		 * give it an 'illegal' value, we'll avoid that
4816		 * here.
4817		 */
4818		if (block_size == 0) {
4819			block_size = 512;
4820			if (maxsector == 0)
4821				maxsector = -1;
4822		}
4823		if (block_size >= maxphys) {
4824			xpt_print(periph->path,
4825			    "unsupportable block size %ju\n",
4826			    (uintmax_t) block_size);
4827			announce_buf = NULL;
4828			cam_periph_invalidate(periph);
4829		} else {
4830			/*
4831			 * We pass rcaplong into dasetgeom(),
4832			 * because it will only use it if it is
4833			 * non-NULL.
4834			 */
4835			dasetgeom(periph, block_size, maxsector,
4836				  rcaplong, sizeof(*rcaplong));
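			/*
			 * The LBPME bit from READ CAPACITY(16) indicates
			 * that logical block provisioning management is
			 * enabled on this device.
			 */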
4837			lbp = (lalba & SRC16_LBPME_A);
4838			dp = &softc->params;
4839			n = snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
4840			    "%juMB (%ju %u byte sectors",
4841			    ((uintmax_t)dp->secsize * dp->sectors) /
4842			     (1024 * 1024),
4843			    (uintmax_t)dp->sectors, dp->secsize);
4844			if (softc->p_type != 0) {
4845				n += snprintf(announce_buf + n,
4846				    DA_ANNOUNCETMP_SZ - n,
4847				    ", DIF type %d", softc->p_type);
4848			}
4849			snprintf(announce_buf + n, DA_ANNOUNCETMP_SZ - n, ")");
4850		}
4851	} else {
4852		int error;
4853
4854		/*
4855		 * Retry any UNIT ATTENTION type errors.  They
4856		 * are expected at boot.
4857		 */
4858		error = daerror(done_ccb, CAM_RETRY_SELTO,
4859				SF_RETRY_UA|SF_NO_PRINT);
4860		if (error == ERESTART) {
4861			/*
4862			 * A retry was scheduled, so
4863			 * just return.
4864			 */
4865			return;
4866		} else if (error != 0) {
4867			int asc, ascq;
4868			int sense_key, error_code;
4869			int have_sense;
4870			cam_status status;
4871			struct ccb_getdev cgd;
4872
4873			/* Don't wedge this device's queue */
4874			status = done_ccb->ccb_h.status;
4875			if ((status & CAM_DEV_QFRZN) != 0)
4876				cam_release_devq(done_ccb->ccb_h.path,
4877						 /*relsim_flags*/0,
4878						 /*reduction*/0,
4879						 /*timeout*/0,
4880						 /*getcount_only*/0);
4881
4882			xpt_setup_ccb(&cgd.ccb_h, done_ccb->ccb_h.path,
4883				      CAM_PRIORITY_NORMAL);
4884			cgd.ccb_h.func_code = XPT_GDEV_TYPE;
4885			xpt_action((union ccb *)&cgd);
4886
4887			if (scsi_extract_sense_ccb(done_ccb,
4888			    &error_code, &sense_key, &asc, &ascq))
4889				have_sense = TRUE;
4890			else
4891				have_sense = FALSE;
4892
4893			/*
4894			 * If we tried READ CAPACITY(16) and failed,
4895			 * fallback to READ CAPACITY(10).
4896			 */
4897			if ((state == DA_CCB_PROBE_RC16) &&
4898			    (softc->flags & DA_FLAG_CAN_RC16) &&
4899			    (((csio->ccb_h.status & CAM_STATUS_MASK) ==
4900				CAM_REQ_INVALID) ||
4901			     ((have_sense) &&
4902			      (error_code == SSD_CURRENT_ERROR ||
4903			       error_code == SSD_DESC_CURRENT_ERROR) &&
4904			      (sense_key == SSD_KEY_ILLEGAL_REQUEST)))) {
4905				cam_periph_assert(periph, MA_OWNED);
4906				softc->flags &= ~DA_FLAG_CAN_RC16;
4907				free(rdcap, M_SCSIDA);
4908				softc->state = DA_STATE_PROBE_RC;
4909				xpt_release_ccb(done_ccb);
4910				xpt_schedule(periph, priority);
4911				return;
4912			}
4913
4914			/*
4915			 * Attach to anything that claims to be a
4916			 * direct access or optical disk device,
4917			 * as long as it doesn't return a "Logical
4918			 * unit not supported" (0x25) error.
4919			 * "Internal Target Failure" (0x44) is also
4920			 * special and typically means that the
4921			 * device is a SATA drive behind a SATL
4922			 * translation that's fallen into a
4923			 * terminally fatal state.
4924			 */
4925			if ((have_sense)
4926			 && (asc != 0x25) && (asc != 0x44)
4927			 && (error_code == SSD_CURRENT_ERROR
4928			  || error_code == SSD_DESC_CURRENT_ERROR)) {
4929				const char *sense_key_desc;
4930				const char *asc_desc;
4931
4932				dasetgeom(periph, 512, -1, NULL, 0);
4933				scsi_sense_desc(sense_key, asc, ascq,
4934						&cgd.inq_data, &sense_key_desc,
4935						&asc_desc);
4936				snprintf(announce_buf, DA_ANNOUNCETMP_SZ,
4937				    "Attempt to query device "
4938				    "size failed: %s, %s",
4939				    sense_key_desc, asc_desc);
4940			} else {
4941				if (have_sense)
4942					scsi_sense_print(&done_ccb->csio);
4943				else {
4944					xpt_print(periph->path,
4945					    "got CAM status %#x\n",
4946					    done_ccb->ccb_h.status);
4947				}
4948
4949				xpt_print(periph->path, "fatal error, "
4950				    "failed to attach to device\n");
4951
4952				announce_buf = NULL;
4953
4954				/*
4955				 * Free up resources.
4956				 */
4957				cam_periph_invalidate(periph);
4958			}
4959		}
4960	}
4961	free(csio->data_ptr, M_SCSIDA);
4962	if (announce_buf != NULL &&
4963	    ((softc->flags & DA_FLAG_ANNOUNCED) == 0)) {
4964		struct sbuf sb;
4965
4966		sbuf_new(&sb, softc->announcebuf, DA_ANNOUNCE_SZ,
4967		    SBUF_FIXEDLEN);
4968		xpt_announce_periph_sbuf(periph, &sb, announce_buf);
4969		xpt_announce_quirks_sbuf(periph, &sb, softc->quirks,
4970		    DA_Q_BIT_STRING);
4971		sbuf_finish(&sb);
4972		sbuf_putbuf(&sb);
4973
4974		/*
4975		 * Create our sysctl variables, now that we know
4976		 * we have successfully attached.
4977		 */
4978		/* increase the refcount */
4979		if (da_periph_acquire(periph, DA_REF_SYSCTL) == 0) {
4980			taskqueue_enqueue(taskqueue_thread,
4981					  &softc->sysctl_task);
4982		} else {
4983			/* XXX This message is useless! */
4984			xpt_print(periph->path, "fatal error, "
4985			    "could not acquire reference count\n");
4986		}
4987	}
4988
4989	/* We already probed the device. */
4990	if (softc->flags & DA_FLAG_PROBED) {
4991		daprobedone(periph, done_ccb);
4992		return;
4993	}
4994
4995	/* Ensure re-probe doesn't see old delete. */
4996	softc->delete_available = 0;
4997	dadeleteflag(softc, DA_DELETE_ZERO, 1);
4998	if (lbp && (softc->quirks & DA_Q_NO_UNMAP) == 0) {
4999		/*
5000		 * Based on older SBC-3 spec revisions,
5001		 * any of the UNMAP methods "may" be
5002		 * available via LBP given this flag, so
5003		 * we flag all of them as available and
5004		 * then remove those which further
5005		 * probes confirm aren't available
5006		 * later.
5007		 *
5008		 * We could also check the readcap(16)
5009		 * p_type flag to exclude one or more
5010		 * invalid write same (X) types here.
5011		 */
5012		dadeleteflag(softc, DA_DELETE_WS16, 1);
5013		dadeleteflag(softc, DA_DELETE_WS10, 1);
5014		dadeleteflag(softc, DA_DELETE_UNMAP, 1);
5015
5016		softc->state = DA_STATE_PROBE_LBP;
5017		xpt_release_ccb(done_ccb);
5018		xpt_schedule(periph, priority);
5019		return;
5020	}
5021
5022	softc->state = DA_STATE_PROBE_BDC;
5023	xpt_release_ccb(done_ccb);
5024	xpt_schedule(periph, priority);
5025	return;
5026}
5027
5028static void
5029dadone_probelbp(struct cam_periph *periph, union ccb *done_ccb)
5030{
5031	struct scsi_vpd_logical_block_prov *lbp;
5032	struct da_softc *softc;
5033	struct ccb_scsiio *csio;
5034	u_int32_t  priority;
5035
5036	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probelbp\n"));
5037
5038	softc = (struct da_softc *)periph->softc;
5039	priority = done_ccb->ccb_h.pinfo.priority;
5040	csio = &done_ccb->csio;
5041	lbp = (struct scsi_vpd_logical_block_prov *)csio->data_ptr;
5042
5043	cam_periph_assert(periph, MA_OWNED);
5044
5045	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5046		/*
5047		 * T10/1799-D Revision 31 states at least one of these
5048		 * must be supported but we don't currently enforce this.
5049		 */
5050		dadeleteflag(softc, DA_DELETE_WS16,
5051		     (lbp->flags & SVPD_LBP_WS16));
5052		dadeleteflag(softc, DA_DELETE_WS10,
5053			     (lbp->flags & SVPD_LBP_WS10));
5054		dadeleteflag(softc, DA_DELETE_UNMAP,
5055			     (lbp->flags & SVPD_LBP_UNMAP));
5056	} else {
5057		int error;
5058		error = daerror(done_ccb, CAM_RETRY_SELTO,
5059				SF_RETRY_UA|SF_NO_PRINT);
5060		if (error == ERESTART)
5061			return;
5062		else if (error != 0) {
5063			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5064				/* Don't wedge this device's queue */
5065				cam_release_devq(done_ccb->ccb_h.path,
5066						 /*relsim_flags*/0,
5067						 /*reduction*/0,
5068						 /*timeout*/0,
5069						 /*getcount_only*/0);
5070			}
5071
5072			/*
5073			 * Failure indicates we don't support any SBC-3
5074			 * delete methods with UNMAP
5075			 */
5076		}
5077	}
5078
5079	free(lbp, M_SCSIDA);
5080	softc->state = DA_STATE_PROBE_BLK_LIMITS;
5081	xpt_release_ccb(done_ccb);
5082	xpt_schedule(periph, priority);
5083	return;
5084}
5085
5086static void
5087dadone_probeblklimits(struct cam_periph *periph, union ccb *done_ccb)
5088{
5089	struct scsi_vpd_block_limits *block_limits;
5090	struct da_softc *softc;
5091	struct ccb_scsiio *csio;
5092	u_int32_t  priority;
5093
5094	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeblklimits\n"));
5095
5096	softc = (struct da_softc *)periph->softc;
5097	priority = done_ccb->ccb_h.pinfo.priority;
5098	csio = &done_ccb->csio;
5099	block_limits = (struct scsi_vpd_block_limits *)csio->data_ptr;
5100
5101	cam_periph_assert(periph, MA_OWNED);
5102
5103	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5104		uint32_t max_txfer_len = scsi_4btoul(
5105			block_limits->max_txfer_len);
5106		uint32_t max_unmap_lba_cnt = scsi_4btoul(
5107			block_limits->max_unmap_lba_cnt);
5108		uint32_t max_unmap_blk_cnt = scsi_4btoul(
5109			block_limits->max_unmap_blk_cnt);
5110		uint32_t unmap_gran = scsi_4btoul(
5111			block_limits->opt_unmap_grain);
5112		uint32_t unmap_gran_align = scsi_4btoul(
5113			block_limits->unmap_grain_align);
5114		uint64_t ws_max_blks = scsi_8btou64(
5115			block_limits->max_write_same_length);
5116
5117		if (max_txfer_len != 0) {
5118			softc->disk->d_maxsize = MIN(softc->maxio,
5119			    (off_t)max_txfer_len * softc->params.secsize);
5120		}
5121
5122		/*
5123		 * We should already support UNMAP, but we check the LBA
5124		 * and block count to be sure.
5125		 */
5126		if (max_unmap_lba_cnt != 0x00L &&
5127		    max_unmap_blk_cnt != 0x00L) {
5128			softc->unmap_max_lba = max_unmap_lba_cnt;
5129			softc->unmap_max_ranges = min(max_unmap_blk_cnt,
5130				UNMAP_MAX_RANGES);
5131			if (unmap_gran > 1) {
5132				softc->unmap_gran = unmap_gran;
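				/*
				 * The top bit (UGAVALID) indicates that
				 * the unmap granularity alignment value
				 * in the low bits is valid.
				 */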
5133				if (unmap_gran_align & 0x80000000) {
5134					softc->unmap_gran_align =
5135					    unmap_gran_align & 0x7fffffff;
5136				}
5137			}
5138		} else {
5139			/*
5140			 * Unexpected UNMAP limits, which means the
5141			 * device doesn't actually support UNMAP.
5142			 */
5143			dadeleteflag(softc, DA_DELETE_UNMAP, 0);
5144		}
5145
5146		if (ws_max_blks != 0x00L)
5147			softc->ws_max_blks = ws_max_blks;
5148	} else {
5149		int error;
5150		error = daerror(done_ccb, CAM_RETRY_SELTO,
5151				SF_RETRY_UA|SF_NO_PRINT);
5152		if (error == ERESTART)
5153			return;
5154		else if (error != 0) {
5155			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5156				/* Don't wedge this device's queue */
5157				cam_release_devq(done_ccb->ccb_h.path,
5158						 /*relsim_flags*/0,
5159						 /*reduction*/0,
5160						 /*timeout*/0,
5161						 /*getcount_only*/0);
5162			}
5163
5164			/*
5165			 * Failure here doesn't mean UNMAP is not
5166			 * supported as this is an optional page.
5167			 */
5168			softc->unmap_max_lba = 1;
5169			softc->unmap_max_ranges = 1;
5170		}
5171	}
5172
5173	free(block_limits, M_SCSIDA);
5174	softc->state = DA_STATE_PROBE_BDC;
5175	xpt_release_ccb(done_ccb);
5176	xpt_schedule(periph, priority);
5177	return;
5178}
5179
5180static void
5181dadone_probebdc(struct cam_periph *periph, union ccb *done_ccb)
5182{
5183	struct scsi_vpd_block_device_characteristics *bdc;
5184	struct da_softc *softc;
5185	struct ccb_scsiio *csio;
5186	u_int32_t  priority;
5187
5188	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probebdc\n"));
5189
5190	softc = (struct da_softc *)periph->softc;
5191	priority = done_ccb->ccb_h.pinfo.priority;
5192	csio = &done_ccb->csio;
5193	bdc = (struct scsi_vpd_block_device_characteristics *)csio->data_ptr;
5194
5195	cam_periph_assert(periph, MA_OWNED);
5196
5197	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5198		uint32_t valid_len;
5199
5200		/*
5201		 * Disable queue sorting for non-rotational media
5202		 * by default.
5203		 */
5204		u_int16_t old_rate = softc->disk->d_rotation_rate;
5205
5206		valid_len = csio->dxfer_len - csio->resid;
5207		if (SBDC_IS_PRESENT(bdc, valid_len,
5208		    medium_rotation_rate)) {
5209			softc->disk->d_rotation_rate =
5210				scsi_2btoul(bdc->medium_rotation_rate);
5211			if (softc->disk->d_rotation_rate == SVPD_NON_ROTATING) {
5212				cam_iosched_set_sort_queue(
5213				    softc->cam_iosched, 0);
5214				softc->flags &= ~DA_FLAG_ROTATING;
5215			}
5216			if (softc->disk->d_rotation_rate != old_rate) {
5217				disk_attr_changed(softc->disk,
5218				    "GEOM::rotation_rate", M_NOWAIT);
5219			}
5220		}
5221		if ((SBDC_IS_PRESENT(bdc, valid_len, flags))
5222		 && (softc->zone_mode == DA_ZONE_NONE)) {
5223			int ata_proto;
5224
5225			if (scsi_vpd_supported_page(periph,
5226			    SVPD_ATA_INFORMATION))
5227				ata_proto = 1;
5228			else
5229				ata_proto = 0;
5230
5231			/*
5232			 * The Zoned field will only be set for
5233			 * Drive Managed and Host Aware drives.  If
5234			 * they are Host Managed, the device type
5235			 * in the standard INQUIRY data should be
5236			 * set to T_ZBC_HM (0x14).
5237			 */
5238			if ((bdc->flags & SVPD_ZBC_MASK) ==
5239			     SVPD_HAW_ZBC) {
5240				softc->zone_mode = DA_ZONE_HOST_AWARE;
5241				softc->zone_interface = (ata_proto) ?
5242				   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
5243			} else if ((bdc->flags & SVPD_ZBC_MASK) ==
5244			     SVPD_DM_ZBC) {
5245				softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
5246				softc->zone_interface = (ata_proto) ?
5247				   DA_ZONE_IF_ATA_SAT : DA_ZONE_IF_SCSI;
5248			} else if ((bdc->flags & SVPD_ZBC_MASK) !=
5249				  SVPD_ZBC_NR) {
5250				xpt_print(periph->path, "Unknown zoned "
5251				    "type %#x\n",
5252				    bdc->flags & SVPD_ZBC_MASK);
5253			}
5254		}
5255	} else {
5256		int error;
5257		error = daerror(done_ccb, CAM_RETRY_SELTO,
5258				SF_RETRY_UA|SF_NO_PRINT);
5259		if (error == ERESTART)
5260			return;
5261		else if (error != 0) {
5262			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5263				/* Don't wedge this device's queue */
5264				cam_release_devq(done_ccb->ccb_h.path,
5265						 /*relsim_flags*/0,
5266						 /*reduction*/0,
5267						 /*timeout*/0,
5268						 /*getcount_only*/0);
5269			}
5270		}
5271	}
5272
5273	free(bdc, M_SCSIDA);
5274	softc->state = DA_STATE_PROBE_ATA;
5275	xpt_release_ccb(done_ccb);
5276	xpt_schedule(periph, priority);
5277	return;
5278}
5279
5280static void
5281dadone_probeata(struct cam_periph *periph, union ccb *done_ccb)
5282{
5283	struct ata_params *ata_params;
5284	struct ccb_scsiio *csio;
5285	struct da_softc *softc;
5286	u_int32_t  priority;
5287	int continue_probe;
5288	int error;
5289	int16_t *ptr;
5290
5291	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeata\n"));
5292
5293	softc = (struct da_softc *)periph->softc;
5294	priority = done_ccb->ccb_h.pinfo.priority;
5295	csio = &done_ccb->csio;
5296	ata_params = (struct ata_params *)csio->data_ptr;
5297	ptr = (int16_t *)ata_params;
5298	continue_probe = 0;
5299	error = 0;
5300
5301	cam_periph_assert(periph, MA_OWNED);
5302
5303	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5304		uint16_t old_rate;
5305
5306		ata_param_fixup(ata_params);
5307		if (ata_params->support_dsm & ATA_SUPPORT_DSM_TRIM &&
5308		    (softc->quirks & DA_Q_NO_UNMAP) == 0) {
5309			dadeleteflag(softc, DA_DELETE_ATA_TRIM, 1);
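			/*
			 * Each 512-byte DSM TRIM block holds
			 * ATA_DSM_BLK_RANGES range entries, so the
			 * drive's advertised block count bounds how
			 * many ranges we may send per command.
			 */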
5310			if (ata_params->max_dsm_blocks != 0)
5311				softc->trim_max_ranges = min(
5312				  softc->trim_max_ranges,
5313				  ata_params->max_dsm_blocks *
5314				  ATA_DSM_BLK_RANGES);
5315		}
5316		/*
5317		 * Disable queue sorting for non-rotational media
5318		 * by default.
5319		 */
5320		old_rate = softc->disk->d_rotation_rate;
5321		softc->disk->d_rotation_rate = ata_params->media_rotation_rate;
5322		if (softc->disk->d_rotation_rate == ATA_RATE_NON_ROTATING) {
5323			cam_iosched_set_sort_queue(softc->cam_iosched, 0);
5324			softc->flags &= ~DA_FLAG_ROTATING;
5325		}
5326		if (softc->disk->d_rotation_rate != old_rate) {
5327			disk_attr_changed(softc->disk,
5328			    "GEOM::rotation_rate", M_NOWAIT);
5329		}
5330
5331		cam_periph_assert(periph, MA_OWNED);
5332		if (ata_params->capabilities1 & ATA_SUPPORT_DMA)
5333			softc->flags |= DA_FLAG_CAN_ATA_DMA;
5334
5335		if (ata_params->support.extension & ATA_SUPPORT_GENLOG)
5336			softc->flags |= DA_FLAG_CAN_ATA_LOG;
5337
5338		/*
5339		 * At this point, if we have a SATA host aware drive,
5340		 * we communicate via ATA passthrough unless the
5341		 * SAT layer supports ZBC -> ZAC translation.  In
5342		 * that case, we use the SCSI ZBC interface instead.
5343		 *
5344		 * XXX KDM figure out how to detect a host managed
5345		 * SATA drive.
5346		 */
5347		if (softc->zone_mode == DA_ZONE_NONE) {
5348			/*
5349			 * Note that we don't override the zone
5350			 * mode or interface if it has already been
5351			 * set.  This is because it has either been
5352			 * set as a quirk, or when we probed the
5353			 * SCSI Block Device Characteristics page,
5354			 * the zoned field was set.  The latter
5355			 * means that the SAT layer supports ZBC to
5356			 * ZAC translation, and we would prefer to
5357			 * use that if it is available.
5358			 */
5359			if ((ata_params->support3 &
5360			    ATA_SUPPORT_ZONE_MASK) ==
5361			    ATA_SUPPORT_ZONE_HOST_AWARE) {
5362				softc->zone_mode = DA_ZONE_HOST_AWARE;
5363				softc->zone_interface =
5364				    DA_ZONE_IF_ATA_PASS;
5365			} else if ((ata_params->support3 &
5366				    ATA_SUPPORT_ZONE_MASK) ==
5367				    ATA_SUPPORT_ZONE_DEV_MANAGED) {
5368				softc->zone_mode = DA_ZONE_DRIVE_MANAGED;
5369				softc->zone_interface = DA_ZONE_IF_ATA_PASS;
5370			}
5371		}
5372
5373	} else {
5374		error = daerror(done_ccb, CAM_RETRY_SELTO,
5375				SF_RETRY_UA|SF_NO_PRINT);
5376		if (error == ERESTART)
5377			return;
5378		else if (error != 0) {
5379			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5380				/* Don't wedge this device's queue */
5381				cam_release_devq(done_ccb->ccb_h.path,
5382						 /*relsim_flags*/0,
5383						 /*reduction*/0,
5384						 /*timeout*/0,
5385						 /*getcount_only*/0);
5386			}
5387		}
5388	}
5389
5390	if ((softc->zone_mode == DA_ZONE_HOST_AWARE)
5391	 || (softc->zone_mode == DA_ZONE_HOST_MANAGED)) {
5392		/*
5393		 * If the ATA IDENTIFY failed, we could be talking
5394		 * to a SCSI drive, although that seems unlikely,
5395		 * since the drive did report that it supported the
5396		 * ATA Information VPD page.  If the ATA IDENTIFY
5397		 * succeeded, and the SAT layer doesn't support
5398		 * ZBC -> ZAC translation, continue on to get the
5399		 * directory of ATA logs, and complete the rest of
5400		 * the ZAC probe.  If the SAT layer does support
5401		 * ZBC -> ZAC translation, we want to use that,
5402		 * and we'll probe the SCSI Zoned Block Device
5403		 * Characteristics VPD page next.
5404		 */
5405		if ((error == 0)
5406		 && (softc->flags & DA_FLAG_CAN_ATA_LOG)
5407		 && (softc->zone_interface == DA_ZONE_IF_ATA_PASS))
5408			softc->state = DA_STATE_PROBE_ATA_LOGDIR;
5409		else
5410			softc->state = DA_STATE_PROBE_ZONE;
5411		continue_probe = 1;
5412	}
5413	if (continue_probe != 0) {
5414		xpt_schedule(periph, priority);
5415		xpt_release_ccb(done_ccb);
5416		return;
5417	} else
5418		daprobedone(periph, done_ccb);
5419	return;
5420}
5421
5422static void
5423dadone_probeatalogdir(struct cam_periph *periph, union ccb *done_ccb)
5424{
5425	struct da_softc *softc;
5426	struct ccb_scsiio *csio;
5427	u_int32_t  priority;
5428	int error;
5429
5430	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatalogdir\n"));
5431
5432	softc = (struct da_softc *)periph->softc;
5433	priority = done_ccb->ccb_h.pinfo.priority;
5434	csio = &done_ccb->csio;
5435
5436	cam_periph_assert(periph, MA_OWNED);
5437	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5438		error = 0;
5439		softc->valid_logdir_len = 0;
5440		bzero(&softc->ata_logdir, sizeof(softc->ata_logdir));
5441		softc->valid_logdir_len = csio->dxfer_len - csio->resid;
5442		if (softc->valid_logdir_len > 0)
5443			bcopy(csio->data_ptr, &softc->ata_logdir,
5444			    min(softc->valid_logdir_len,
5445				sizeof(softc->ata_logdir)));
5446		/*
5447		 * Figure out whether the Identify Device log is
5448		 * supported.  The General Purpose log directory
5449		 * has a header, and lists the number of pages
5450		 * available for each GP log identified by the
5451		 * offset into the list.
5452		 */
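		/*
		 * The directory stores one 16 bit page count per log
		 * address, starting at word 1 (word 0 holds the
		 * version).  Since num_pages[] begins right after the
		 * two byte header, the count for log address N is at
		 * byte offset N * sizeof(uint16_t) - sizeof(uint16_t)
		 * within it, which is what the lookup below computes
		 * for ATA_IDENTIFY_DATA_LOG.
		 */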
5453		if ((softc->valid_logdir_len >=
5454		    ((ATA_IDENTIFY_DATA_LOG + 1) * sizeof(uint16_t)))
5455		 && (le16dec(softc->ata_logdir.header) ==
5456		     ATA_GP_LOG_DIR_VERSION)
5457		 && (le16dec(&softc->ata_logdir.num_pages[
5458		     (ATA_IDENTIFY_DATA_LOG *
		     sizeof(uint16_t)) - sizeof(uint16_t)]) > 0)) {
5460			softc->flags |= DA_FLAG_CAN_ATA_IDLOG;
5461		} else {
5462			softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
5463		}
5464	} else {
5465		error = daerror(done_ccb, CAM_RETRY_SELTO,
5466				SF_RETRY_UA|SF_NO_PRINT);
5467		if (error == ERESTART)
5468			return;
5469		else if (error != 0) {
5470			/*
5471			 * If we can't get the ATA log directory,
5472			 * then ATA logs are effectively not
5473			 * supported even if the bit is set in the
5474			 * identify data.
5475			 */
5476			softc->flags &= ~(DA_FLAG_CAN_ATA_LOG |
5477					  DA_FLAG_CAN_ATA_IDLOG);
5478			if ((done_ccb->ccb_h.status &
5479			     CAM_DEV_QFRZN) != 0) {
5480				/* Don't wedge this device's queue */
5481				cam_release_devq(done_ccb->ccb_h.path,
5482						 /*relsim_flags*/0,
5483						 /*reduction*/0,
5484						 /*timeout*/0,
5485						 /*getcount_only*/0);
5486			}
5487		}
5488	}
5489
5490	free(csio->data_ptr, M_SCSIDA);
5491
5492	if ((error == 0)
5493	 && (softc->flags & DA_FLAG_CAN_ATA_IDLOG)) {
5494		softc->state = DA_STATE_PROBE_ATA_IDDIR;
5495		xpt_release_ccb(done_ccb);
5496		xpt_schedule(periph, priority);
5497		return;
5498	}
5499	daprobedone(periph, done_ccb);
5500	return;
5501}
5502
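/*
 * Completion handler for the ATA Identify Device Data log directory
 * probe.  Cache the page list and record whether the Supported
 * Capabilities and Zoned Device Information pages are present.
 */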
5503static void
5504dadone_probeataiddir(struct cam_periph *periph, union ccb *done_ccb)
5505{
5506	struct da_softc *softc;
5507	struct ccb_scsiio *csio;
5508	u_int32_t  priority;
5509	int error;
5510
5511	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeataiddir\n"));
5512
5513	softc = (struct da_softc *)periph->softc;
5514	priority = done_ccb->ccb_h.pinfo.priority;
5515	csio = &done_ccb->csio;
5516
5517	cam_periph_assert(periph, MA_OWNED);
5518
5519	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5520		off_t entries_offset, max_entries;
5521		error = 0;
5522
5523		softc->valid_iddir_len = 0;
5524		bzero(&softc->ata_iddir, sizeof(softc->ata_iddir));
5525		softc->flags &= ~(DA_FLAG_CAN_ATA_SUPCAP |
5526				  DA_FLAG_CAN_ATA_ZONE);
5527		softc->valid_iddir_len = csio->dxfer_len - csio->resid;
5528		if (softc->valid_iddir_len > 0)
5529			bcopy(csio->data_ptr, &softc->ata_iddir,
5530			    min(softc->valid_iddir_len,
5531				sizeof(softc->ata_iddir)));
5532
5533		entries_offset =
		    __offsetof(struct ata_identify_log_pages, entries);
5535		max_entries = softc->valid_iddir_len - entries_offset;
5536		if ((softc->valid_iddir_len > (entries_offset + 1))
5537		 && (le64dec(softc->ata_iddir.header) == ATA_IDLOG_REVISION)
5538		 && (softc->ata_iddir.entry_count > 0)) {
5539			int num_entries, i;
5540
5541			num_entries = softc->ata_iddir.entry_count;
5542			num_entries = min(num_entries,
5543			   softc->valid_iddir_len - entries_offset);
5544			for (i = 0; i < num_entries && i < max_entries; i++) {
5545				if (softc->ata_iddir.entries[i] ==
5546				    ATA_IDL_SUP_CAP)
5547					softc->flags |= DA_FLAG_CAN_ATA_SUPCAP;
5548				else if (softc->ata_iddir.entries[i] ==
5549					 ATA_IDL_ZDI)
5550					softc->flags |= DA_FLAG_CAN_ATA_ZONE;
5551
5552				if ((softc->flags & DA_FLAG_CAN_ATA_SUPCAP)
5553				 && (softc->flags & DA_FLAG_CAN_ATA_ZONE))
5554					break;
5555			}
5556		}
5557	} else {
5558		error = daerror(done_ccb, CAM_RETRY_SELTO,
5559				SF_RETRY_UA|SF_NO_PRINT);
5560		if (error == ERESTART)
5561			return;
5562		else if (error != 0) {
			/*
			 * If we can't get the ATA Identify Data log
			 * directory, then it effectively isn't
			 * supported even if the ATA Log directory
			 * lists a non-zero number of pages for
			 * this log.
			 */
5570			softc->flags &= ~DA_FLAG_CAN_ATA_IDLOG;
5571			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5572				/* Don't wedge this device's queue */
5573				cam_release_devq(done_ccb->ccb_h.path,
5574						 /*relsim_flags*/0,
5575						 /*reduction*/0,
5576						 /*timeout*/0,
5577						 /*getcount_only*/0);
5578			}
5579		}
5580	}
5581
5582	free(csio->data_ptr, M_SCSIDA);
5583
5584	if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_SUPCAP)) {
5585		softc->state = DA_STATE_PROBE_ATA_SUP;
5586		xpt_release_ccb(done_ccb);
5587		xpt_schedule(periph, priority);
5588		return;
5589	}
5590	daprobedone(periph, done_ccb);
5591	return;
5592}
5593
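/*
 * Completion handler for the ATA Supported Capabilities page probe.
 * Parse the zoned capability field and the supported ZAC command bits
 * into the zone mode and zone flags.
 */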
5594static void
5595dadone_probeatasup(struct cam_periph *periph, union ccb *done_ccb)
5596{
5597	struct da_softc *softc;
5598	struct ccb_scsiio *csio;
5599	u_int32_t  priority;
5600	int error;
5601
5602	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatasup\n"));
5603
5604	softc = (struct da_softc *)periph->softc;
5605	priority = done_ccb->ccb_h.pinfo.priority;
5606	csio = &done_ccb->csio;
5607
5608	cam_periph_assert(periph, MA_OWNED);
5609
5610	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5611		uint32_t valid_len;
5612		size_t needed_size;
5613		struct ata_identify_log_sup_cap *sup_cap;
5614		error = 0;
5615
5616		sup_cap = (struct ata_identify_log_sup_cap *)csio->data_ptr;
5617		valid_len = csio->dxfer_len - csio->resid;
5618		needed_size = __offsetof(struct ata_identify_log_sup_cap,
5619		    sup_zac_cap) + 1 + sizeof(sup_cap->sup_zac_cap);
5620		if (valid_len >= needed_size) {
5621			uint64_t zoned, zac_cap;
5622
5623			zoned = le64dec(sup_cap->zoned_cap);
5624			if (zoned & ATA_ZONED_VALID) {
5625				/*
5626				 * This should have already been
5627				 * set, because this is also in the
5628				 * ATA identify data.
5629				 */
5630				if ((zoned & ATA_ZONED_MASK) ==
5631				    ATA_SUPPORT_ZONE_HOST_AWARE)
5632					softc->zone_mode = DA_ZONE_HOST_AWARE;
5633				else if ((zoned & ATA_ZONED_MASK) ==
5634				    ATA_SUPPORT_ZONE_DEV_MANAGED)
5635					softc->zone_mode =
5636					    DA_ZONE_DRIVE_MANAGED;
5637			}
5638
5639			zac_cap = le64dec(sup_cap->sup_zac_cap);
5640			if (zac_cap & ATA_SUP_ZAC_CAP_VALID) {
5641				if (zac_cap & ATA_REPORT_ZONES_SUP)
5642					softc->zone_flags |=
5643					    DA_ZONE_FLAG_RZ_SUP;
5644				if (zac_cap & ATA_ND_OPEN_ZONE_SUP)
5645					softc->zone_flags |=
5646					    DA_ZONE_FLAG_OPEN_SUP;
5647				if (zac_cap & ATA_ND_CLOSE_ZONE_SUP)
5648					softc->zone_flags |=
5649					    DA_ZONE_FLAG_CLOSE_SUP;
5650				if (zac_cap & ATA_ND_FINISH_ZONE_SUP)
5651					softc->zone_flags |=
5652					    DA_ZONE_FLAG_FINISH_SUP;
5653				if (zac_cap & ATA_ND_RWP_SUP)
5654					softc->zone_flags |=
5655					    DA_ZONE_FLAG_RWP_SUP;
5656			} else {
5657				/*
5658				 * This field was introduced in
5659				 * ACS-4, r08 on April 28th, 2015.
5660				 * If the drive firmware was written
5661				 * to an earlier spec, it won't have
5662				 * the field.  So, assume all
5663				 * commands are supported.
5664				 */
5665				softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
5666			}
5667		}
5668	} else {
5669		error = daerror(done_ccb, CAM_RETRY_SELTO,
5670				SF_RETRY_UA|SF_NO_PRINT);
5671		if (error == ERESTART)
5672			return;
5673		else if (error != 0) {
5674			/*
5675			 * If we can't get the ATA Identify Data
5676			 * Supported Capabilities page, clear the
5677			 * flag...
5678			 */
5679			softc->flags &= ~DA_FLAG_CAN_ATA_SUPCAP;
5680			/*
5681			 * And clear zone capabilities.
5682			 */
5683			softc->zone_flags &= ~DA_ZONE_FLAG_SUP_MASK;
5684			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5685				/* Don't wedge this device's queue */
5686				cam_release_devq(done_ccb->ccb_h.path,
5687						 /*relsim_flags*/0,
5688						 /*reduction*/0,
5689						 /*timeout*/0,
5690						 /*getcount_only*/0);
5691			}
5692		}
5693	}
5694
5695	free(csio->data_ptr, M_SCSIDA);
5696
5697	if ((error == 0) && (softc->flags & DA_FLAG_CAN_ATA_ZONE)) {
5698		softc->state = DA_STATE_PROBE_ATA_ZONE;
5699		xpt_release_ccb(done_ccb);
5700		xpt_schedule(periph, priority);
5701		return;
5702	}
5703	daprobedone(periph, done_ccb);
5704	return;
5705}
5706
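/*
 * Completion handler for the ATA Zoned Device Information log probe.
 * Record whether reads are unrestricted (URSWRZ) and the optimal and
 * maximum zone counts reported by the drive.
 */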
5707static void
5708dadone_probeatazone(struct cam_periph *periph, union ccb *done_ccb)
5709{
5710	struct da_softc *softc;
5711	struct ccb_scsiio *csio;
5712	int error;
5713
5714	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probeatazone\n"));
5715
5716	softc = (struct da_softc *)periph->softc;
5717	csio = &done_ccb->csio;
5718
5719	cam_periph_assert(periph, MA_OWNED);
5720
5721	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5722		struct ata_zoned_info_log *zi_log;
5723		uint32_t valid_len;
5724		size_t needed_size;
5725
5726		zi_log = (struct ata_zoned_info_log *)csio->data_ptr;
5727
5728		valid_len = csio->dxfer_len - csio->resid;
5729		needed_size = __offsetof(struct ata_zoned_info_log,
5730		    version_info) + 1 + sizeof(zi_log->version_info);
5731		if (valid_len >= needed_size) {
5732			uint64_t tmpvar;
5733
5734			tmpvar = le64dec(zi_log->zoned_cap);
5735			if (tmpvar & ATA_ZDI_CAP_VALID) {
5736				if (tmpvar & ATA_ZDI_CAP_URSWRZ)
5737					softc->zone_flags |=
5738					    DA_ZONE_FLAG_URSWRZ;
5739				else
5740					softc->zone_flags &=
5741					    ~DA_ZONE_FLAG_URSWRZ;
5742			}
5743			tmpvar = le64dec(zi_log->optimal_seq_zones);
5744			if (tmpvar & ATA_ZDI_OPT_SEQ_VALID) {
5745				softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
5746				softc->optimal_seq_zones = (tmpvar &
5747				    ATA_ZDI_OPT_SEQ_MASK);
5748			} else {
5749				softc->zone_flags &= ~DA_ZONE_FLAG_OPT_SEQ_SET;
5750				softc->optimal_seq_zones = 0;
5751			}
5752
			tmpvar = le64dec(zi_log->optimal_nonseq_zones);
5754			if (tmpvar & ATA_ZDI_OPT_NS_VALID) {
5755				softc->zone_flags |=
5756				    DA_ZONE_FLAG_OPT_NONSEQ_SET;
5757				softc->optimal_nonseq_zones =
5758				    (tmpvar & ATA_ZDI_OPT_NS_MASK);
5759			} else {
5760				softc->zone_flags &=
5761				    ~DA_ZONE_FLAG_OPT_NONSEQ_SET;
5762				softc->optimal_nonseq_zones = 0;
5763			}
5764
5765			tmpvar = le64dec(zi_log->max_seq_req_zones);
5766			if (tmpvar & ATA_ZDI_MAX_SEQ_VALID) {
5767				softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
5768				softc->max_seq_zones =
5769				    (tmpvar & ATA_ZDI_MAX_SEQ_MASK);
5770			} else {
5771				softc->zone_flags &= ~DA_ZONE_FLAG_MAX_SEQ_SET;
5772				softc->max_seq_zones = 0;
5773			}
5774		}
5775	} else {
5776		error = daerror(done_ccb, CAM_RETRY_SELTO,
5777				SF_RETRY_UA|SF_NO_PRINT);
5778		if (error == ERESTART)
5779			return;
5780		else if (error != 0) {
5781			softc->flags &= ~DA_FLAG_CAN_ATA_ZONE;
			softc->zone_flags &= ~DA_ZONE_FLAG_SET_MASK;
5783
5784			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5785				/* Don't wedge this device's queue */
5786				cam_release_devq(done_ccb->ccb_h.path,
5787						 /*relsim_flags*/0,
5788						 /*reduction*/0,
5789						 /*timeout*/0,
5790						 /*getcount_only*/0);
5791			}
5792		}
5793	}
5794
5795	free(csio->data_ptr, M_SCSIDA);
5796
5797	daprobedone(periph, done_ccb);
5798	return;
5799}
5800
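/*
 * Completion handler for the SCSI Zoned Block Device Characteristics
 * VPD page probe, the last step of the zone probe when the SCSI (ZBC)
 * interface is used.
 */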
5801static void
5802dadone_probezone(struct cam_periph *periph, union ccb *done_ccb)
5803{
5804	struct da_softc *softc;
5805	struct ccb_scsiio *csio;
5806	int error;
5807
5808	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_probezone\n"));
5809
5810	softc = (struct da_softc *)periph->softc;
5811	csio = &done_ccb->csio;
5812
5813	cam_periph_assert(periph, MA_OWNED);
5814
5815	if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5816		uint32_t valid_len;
5817		size_t needed_len;
5818		struct scsi_vpd_zoned_bdc *zoned_bdc;
5819
5820		error = 0;
5821		zoned_bdc = (struct scsi_vpd_zoned_bdc *)csio->data_ptr;
5822		valid_len = csio->dxfer_len - csio->resid;
5823		needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
5824		    max_seq_req_zones) + 1 +
5825		    sizeof(zoned_bdc->max_seq_req_zones);
5826		if ((valid_len >= needed_len)
5827		 && (scsi_2btoul(zoned_bdc->page_length) >= SVPD_ZBDC_PL)) {
5828			if (zoned_bdc->flags & SVPD_ZBDC_URSWRZ)
5829				softc->zone_flags |= DA_ZONE_FLAG_URSWRZ;
5830			else
5831				softc->zone_flags &= ~DA_ZONE_FLAG_URSWRZ;
5832			softc->optimal_seq_zones =
5833			    scsi_4btoul(zoned_bdc->optimal_seq_zones);
5834			softc->zone_flags |= DA_ZONE_FLAG_OPT_SEQ_SET;
5835			softc->optimal_nonseq_zones = scsi_4btoul(
5836			    zoned_bdc->optimal_nonseq_zones);
5837			softc->zone_flags |= DA_ZONE_FLAG_OPT_NONSEQ_SET;
5838			softc->max_seq_zones =
5839			    scsi_4btoul(zoned_bdc->max_seq_req_zones);
5840			softc->zone_flags |= DA_ZONE_FLAG_MAX_SEQ_SET;
5841		}
5842		/*
5843		 * All of the zone commands are mandatory for SCSI
5844		 * devices.
5845		 *
5846		 * XXX KDM this is valid as of September 2015.
5847		 * Re-check this assumption once the SAT spec is
5848		 * updated to support SCSI ZBC to ATA ZAC mapping.
5849		 * Since ATA allows zone commands to be reported
5850		 * as supported or not, this may not necessarily
5851		 * be true for an ATA device behind a SAT (SCSI to
5852		 * ATA Translation) layer.
5853		 */
5854		softc->zone_flags |= DA_ZONE_FLAG_SUP_MASK;
5855	} else {
5856		error = daerror(done_ccb, CAM_RETRY_SELTO,
5857				SF_RETRY_UA|SF_NO_PRINT);
5858		if (error == ERESTART)
5859			return;
5860		else if (error != 0) {
5861			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
5862				/* Don't wedge this device's queue */
5863				cam_release_devq(done_ccb->ccb_h.path,
5864						 /*relsim_flags*/0,
5865						 /*reduction*/0,
5866						 /*timeout*/0,
5867						 /*getcount_only*/0);
5868			}
5869		}
5870	}
5871
5872	free(csio->data_ptr, M_SCSIDA);
5873
5874	daprobedone(periph, done_ccb);
5875	return;
5876}
5877
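/*
 * Completion handler for the TEST UNIT READY issued by media change
 * polling.  Failures are handled quietly; the TUR reference and the
 * pending flag are dropped unless the command is being retried.
 */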
5878static void
5879dadone_tur(struct cam_periph *periph, union ccb *done_ccb)
5880{
5881	struct da_softc *softc;
5882	struct ccb_scsiio *csio;
5883
5884	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone_tur\n"));
5885
5886	softc = (struct da_softc *)periph->softc;
5887	csio = &done_ccb->csio;
5888
5889	cam_periph_assert(periph, MA_OWNED);
5890
5891	if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5892		if (daerror(done_ccb, CAM_RETRY_SELTO,
5893		    SF_RETRY_UA | SF_NO_RECOVERY | SF_NO_PRINT) == ERESTART)
5894			return;	/* Will complete again, keep reference */
5895		if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5896			cam_release_devq(done_ccb->ccb_h.path,
5897					 /*relsim_flags*/0,
5898					 /*reduction*/0,
5899					 /*timeout*/0,
5900					 /*getcount_only*/0);
5901	}
5902	softc->flags &= ~DA_FLAG_TUR_PENDING;
5903	xpt_release_ccb(done_ccb);
5904	da_periph_release_locked(periph, DA_REF_TUR);
5905	return;
5906}
5907
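/*
 * Restart the probe sequence for this device, beginning with the write
 * protect check.  A reference is taken for the duration of the reprobe.
 */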
5908static void
5909dareprobe(struct cam_periph *periph)
5910{
5911	struct da_softc	  *softc;
5912	int status;
5913
5914	softc = (struct da_softc *)periph->softc;
5915
5916	cam_periph_assert(periph, MA_OWNED);
5917
5918	/* Probe in progress; don't interfere. */
5919	if (softc->state != DA_STATE_NORMAL)
5920		return;
5921
5922	status = da_periph_acquire(periph, DA_REF_REPROBE);
5923	KASSERT(status == 0, ("dareprobe: cam_periph_acquire failed"));
5924
5925	softc->state = DA_STATE_PROBE_WP;
5926	xpt_schedule(periph, CAM_PRIORITY_DEV);
5927}
5928
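/*
 * Periph-specific error handler.  Falls back from READ(6)/WRITE(6) to
 * 10 byte CDBs on ILLEGAL REQUEST, reprobes on capacity or INQUIRY
 * change unit attentions, handles media change and removal, and then
 * passes the CCB to cam_periph_error() for generic recovery.
 */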
5929static int
5930daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
5931{
5932	struct da_softc	  *softc;
5933	struct cam_periph *periph;
5934	int error, error_code, sense_key, asc, ascq;
5935
5936#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
5937	if (ccb->csio.bio != NULL)
5938		biotrack(ccb->csio.bio, __func__);
5939#endif
5940
5941	periph = xpt_path_periph(ccb->ccb_h.path);
5942	softc = (struct da_softc *)periph->softc;
5943
5944	cam_periph_assert(periph, MA_OWNED);
5945
5946	/*
5947	 * Automatically detect devices that do not support
5948	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
5949	 */
5950	error = 0;
5951	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
5952		error = cmd6workaround(ccb);
5953	} else if (scsi_extract_sense_ccb(ccb,
5954	    &error_code, &sense_key, &asc, &ascq)) {
5955		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
5956			error = cmd6workaround(ccb);
5957		/*
5958		 * If the target replied with CAPACITY DATA HAS CHANGED UA,
5959		 * query the capacity and notify upper layers.
5960		 */
5961		else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5962		    asc == 0x2A && ascq == 0x09) {
5963			xpt_print(periph->path, "Capacity data has changed\n");
5964			softc->flags &= ~DA_FLAG_PROBED;
5965			dareprobe(periph);
5966			sense_flags |= SF_NO_PRINT;
5967		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5968		    asc == 0x28 && ascq == 0x00) {
5969			softc->flags &= ~DA_FLAG_PROBED;
5970			disk_media_changed(softc->disk, M_NOWAIT);
5971		} else if (sense_key == SSD_KEY_UNIT_ATTENTION &&
5972		    asc == 0x3F && ascq == 0x03) {
5973			xpt_print(periph->path, "INQUIRY data has changed\n");
5974			softc->flags &= ~DA_FLAG_PROBED;
5975			dareprobe(periph);
5976			sense_flags |= SF_NO_PRINT;
5977		} else if (sense_key == SSD_KEY_NOT_READY &&
5978		    asc == 0x3a && (softc->flags & DA_FLAG_PACK_INVALID) == 0) {
5979			softc->flags |= DA_FLAG_PACK_INVALID;
5980			disk_media_gone(softc->disk, M_NOWAIT);
5981		}
5982	}
5983	if (error == ERESTART)
5984		return (ERESTART);
5985
5986#ifdef CAM_IO_STATS
5987	switch (ccb->ccb_h.status & CAM_STATUS_MASK) {
5988	case CAM_CMD_TIMEOUT:
5989		softc->timeouts++;
5990		break;
5991	case CAM_REQ_ABORTED:
5992	case CAM_REQ_CMP_ERR:
5993	case CAM_REQ_TERMIO:
5994	case CAM_UNREC_HBA_ERROR:
5995	case CAM_DATA_RUN_ERR:
5996		softc->errors++;
5997		break;
5998	default:
5999		break;
6000	}
6001#endif
6002
6003	/*
6004	 * XXX
6005	 * Until we have a better way of doing pack validation,
6006	 * don't treat UAs as errors.
6007	 */
6008	sense_flags |= SF_RETRY_UA;
6009
6010	if (softc->quirks & DA_Q_RETRY_BUSY)
6011		sense_flags |= SF_RETRY_BUSY;
	return (cam_periph_error(ccb, cam_flags, sense_flags));
6013}
6014
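/*
 * Callout handler for media change polling.  Schedule a TEST UNIT
 * READY if the device is idle, then queue ourselves up again.
 */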
6015static void
6016damediapoll(void *arg)
6017{
6018	struct cam_periph *periph = arg;
6019	struct da_softc *softc = periph->softc;
6020
6021	if (!cam_iosched_has_work_flags(softc->cam_iosched, DA_WORK_TUR) &&
6022	    (softc->flags & DA_FLAG_TUR_PENDING) == 0 &&
6023	    softc->state == DA_STATE_NORMAL &&
6024	    LIST_EMPTY(&softc->pending_ccbs)) {
6025		if (da_periph_acquire(periph, DA_REF_TUR) == 0) {
6026			cam_iosched_set_work_flags(softc->cam_iosched, DA_WORK_TUR);
6027			daschedule(periph);
6028		}
6029	}
6030	/* Queue us up again */
6031	if (da_poll_period != 0)
6032		callout_schedule(&softc->mediapoll_c, da_poll_period * hz);
6033}
6034
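/*
 * Issue PREVENT ALLOW MEDIUM REMOVAL and track the resulting lock
 * state in the softc flags.  Does nothing if the pack is already in
 * the requested state.
 */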
6035static void
6036daprevent(struct cam_periph *periph, int action)
6037{
6038	struct	da_softc *softc;
6039	union	ccb *ccb;
6040	int	error;
6041
6042	cam_periph_assert(periph, MA_OWNED);
6043	softc = (struct da_softc *)periph->softc;
6044
6045	if (((action == PR_ALLOW)
6046	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
6047	 || ((action == PR_PREVENT)
6048	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
6049		return;
6050	}
6051
6052	ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
6053
6054	scsi_prevent(&ccb->csio,
6055		     /*retries*/1,
6056		     /*cbcfp*/NULL,
6057		     MSG_SIMPLE_Q_TAG,
6058		     action,
6059		     SSD_FULL_SIZE,
6060		     5000);
6061
6062	error = cam_periph_runccb(ccb, daerror, CAM_RETRY_SELTO,
6063	    SF_RETRY_UA | SF_NO_PRINT, softc->disk->d_devstat);
6064
6065	if (error == 0) {
6066		if (action == PR_ALLOW)
6067			softc->flags &= ~DA_FLAG_PACK_LOCKED;
6068		else
6069			softc->flags |= DA_FLAG_PACK_LOCKED;
6070	}
6071
6072	xpt_release_ccb(ccb);
6073}
6074
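/*
 * Update the disk parameters from READ CAPACITY data: block size and
 * count, protection type, stripe size and offset, and a
 * controller-supplied geometry.  If the long read capacity data
 * changed, store it in the EDT, then refresh the GEOM disk fields and
 * resize the disk.
 */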
6075static void
6076dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector,
6077	  struct scsi_read_capacity_data_long *rcaplong, size_t rcap_len)
6078{
6079	struct ccb_calc_geometry ccg;
6080	struct da_softc *softc;
6081	struct disk_params *dp;
6082	u_int lbppbe, lalba;
6083	int error;
6084
6085	softc = (struct da_softc *)periph->softc;
6086
6087	dp = &softc->params;
6088	dp->secsize = block_len;
6089	dp->sectors = maxsector + 1;
6090	if (rcaplong != NULL) {
6091		lbppbe = rcaplong->prot_lbppbe & SRC16_LBPPBE;
6092		lalba = scsi_2btoul(rcaplong->lalba_lbp);
6093		lalba &= SRC16_LALBA_A;
6094		if (rcaplong->prot & SRC16_PROT_EN)
6095			softc->p_type = ((rcaplong->prot & SRC16_P_TYPE) >>
6096			    SRC16_P_TYPE_SHIFT) + 1;
6097		else
6098			softc->p_type = 0;
6099	} else {
6100		lbppbe = 0;
6101		lalba = 0;
6102		softc->p_type = 0;
6103	}
6104
6105	if (lbppbe > 0) {
6106		dp->stripesize = block_len << lbppbe;
6107		dp->stripeoffset = (dp->stripesize - block_len * lalba) %
6108		    dp->stripesize;
6109	} else if (softc->quirks & DA_Q_4K) {
6110		dp->stripesize = 4096;
6111		dp->stripeoffset = 0;
6112	} else if (softc->unmap_gran != 0) {
6113		dp->stripesize = block_len * softc->unmap_gran;
6114		dp->stripeoffset = (dp->stripesize - block_len *
6115		    softc->unmap_gran_align) % dp->stripesize;
6116	} else {
6117		dp->stripesize = 0;
6118		dp->stripeoffset = 0;
6119	}
6120	/*
6121	 * Have the controller provide us with a geometry
6122	 * for this disk.  The only time the geometry
6123	 * matters is when we boot and the controller
6124	 * is the only one knowledgeable enough to come
6125	 * up with something that will make this a bootable
6126	 * device.
6127	 */
6128	xpt_setup_ccb(&ccg.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
6129	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
6130	ccg.block_size = dp->secsize;
6131	ccg.volume_size = dp->sectors;
6132	ccg.heads = 0;
6133	ccg.secs_per_track = 0;
6134	ccg.cylinders = 0;
6135	xpt_action((union ccb*)&ccg);
6136	if ((ccg.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
		/*
		 * We don't know what went wrong here, but just pick
		 * a geometry so we don't have nasty things like divide
		 * by zero.
		 */
6142		dp->heads = 255;
6143		dp->secs_per_track = 255;
6144		dp->cylinders = dp->sectors / (255 * 255);
6145		if (dp->cylinders == 0) {
6146			dp->cylinders = 1;
6147		}
6148	} else {
6149		dp->heads = ccg.heads;
6150		dp->secs_per_track = ccg.secs_per_track;
6151		dp->cylinders = ccg.cylinders;
6152	}
6153
6154	/*
6155	 * If the user supplied a read capacity buffer, and if it is
6156	 * different than the previous buffer, update the data in the EDT.
6157	 * If it's the same, we don't bother.  This avoids sending an
6158	 * update every time someone opens this device.
6159	 */
6160	if ((rcaplong != NULL)
6161	 && (bcmp(rcaplong, &softc->rcaplong,
6162		  min(sizeof(softc->rcaplong), rcap_len)) != 0)) {
6163		struct ccb_dev_advinfo cdai;
6164
6165		xpt_setup_ccb(&cdai.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
6166		cdai.ccb_h.func_code = XPT_DEV_ADVINFO;
6167		cdai.buftype = CDAI_TYPE_RCAPLONG;
6168		cdai.flags = CDAI_FLAG_STORE;
6169		cdai.bufsiz = rcap_len;
6170		cdai.buf = (uint8_t *)rcaplong;
6171		xpt_action((union ccb *)&cdai);
6172		if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0)
6173			cam_release_devq(cdai.ccb_h.path, 0, 0, 0, FALSE);
6174		if (cdai.ccb_h.status != CAM_REQ_CMP) {
6175			xpt_print(periph->path, "%s: failed to set read "
6176				  "capacity advinfo\n", __func__);
6177			/* Use cam_error_print() to decode the status */
6178			cam_error_print((union ccb *)&cdai, CAM_ESF_CAM_STATUS,
6179					CAM_EPF_ALL);
6180		} else {
6181			bcopy(rcaplong, &softc->rcaplong,
6182			      min(sizeof(softc->rcaplong), rcap_len));
6183		}
6184	}
6185
6186	softc->disk->d_sectorsize = softc->params.secsize;
6187	softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
6188	softc->disk->d_stripesize = softc->params.stripesize;
6189	softc->disk->d_stripeoffset = softc->params.stripeoffset;
6190	/* XXX: these are not actually "firmware" values, so they may be wrong */
6191	softc->disk->d_fwsectors = softc->params.secs_per_track;
6192	softc->disk->d_fwheads = softc->params.heads;
6193	softc->disk->d_devstat->block_size = softc->params.secsize;
6194	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;
6195
6196	error = disk_resize(softc->disk, M_NOWAIT);
6197	if (error != 0)
6198		xpt_print(periph->path, "disk_resize(9) failed, error = %d\n", error);
6199}
6200
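/*
 * Callout handler that periodically requests an ordered tag when I/O
 * is outstanding and none has been sent recently.
 */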
6201static void
6202dasendorderedtag(void *arg)
6203{
6204	struct cam_periph *periph = arg;
6205	struct da_softc *softc = periph->softc;
6206
6207	cam_periph_assert(periph, MA_OWNED);
6208	if (da_send_ordered) {
6209		if (!LIST_EMPTY(&softc->pending_ccbs)) {
6210			if ((softc->flags & DA_FLAG_WAS_OTAG) == 0)
6211				softc->flags |= DA_FLAG_NEED_OTAG;
6212			softc->flags &= ~DA_FLAG_WAS_OTAG;
6213		}
6214	}
6215
6216	/* Queue us up again */
6217	callout_reset(&softc->sendordered_c,
6218	    (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL,
6219	    dasendorderedtag, periph);
6220}
6221
6222/*
6223 * Step through all DA peripheral drivers, and if the device is still open,
6224 * sync the disk cache to physical media.
6225 */
6226static void
6227dashutdown(void * arg, int howto)
6228{
6229	struct cam_periph *periph;
6230	struct da_softc *softc;
6231	union ccb *ccb;
6232	int error;
6233
6234	CAM_PERIPH_FOREACH(periph, &dadriver) {
6235		softc = (struct da_softc *)periph->softc;
6236		if (SCHEDULER_STOPPED()) {
			/* If we panicked with the lock held, do not recurse. */
6238			if (!cam_periph_owned(periph) &&
6239			    (softc->flags & DA_FLAG_OPEN)) {
6240				dadump(softc->disk, NULL, 0, 0, 0);
6241			}
6242			continue;
6243		}
6244		cam_periph_lock(periph);
6245
		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it.
		 */
6250		if (((softc->flags & DA_FLAG_OPEN) == 0)
6251		 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) {
6252			cam_periph_unlock(periph);
6253			continue;
6254		}
6255
6256		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
6257		scsi_synchronize_cache(&ccb->csio,
6258				       /*retries*/0,
6259				       /*cbfcnp*/NULL,
6260				       MSG_SIMPLE_Q_TAG,
6261				       /*begin_lba*/0, /* whole disk */
6262				       /*lb_count*/0,
6263				       SSD_FULL_SIZE,
6264				       60 * 60 * 1000);
6265
6266		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
6267		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY | SF_QUIET_IR,
6268		    softc->disk->d_devstat);
6269		if (error != 0)
6270			xpt_print(periph->path, "Synchronize cache failed\n");
6271		xpt_release_ccb(ccb);
6272		cam_periph_unlock(periph);
6273	}
6274}
6275
6276#else /* !_KERNEL */
6277
/*
 * XXX These are only left out of the kernel build to silence warnings.  If,
 * for some reason, these functions are used in the kernel, the ifdefs should
 * be moved so they are included both in the kernel and userland.
 */
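/*
 * Fill out a FORMAT UNIT CDB.
 */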
6283void
6284scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
6285		 void (*cbfcnp)(struct cam_periph *, union ccb *),
6286		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
6287		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
6288		 u_int32_t timeout)
6289{
6290	struct scsi_format_unit *scsi_cmd;
6291
6292	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
6293	scsi_cmd->opcode = FORMAT_UNIT;
6294	scsi_cmd->byte2 = byte2;
6295	scsi_ulto2b(ileave, scsi_cmd->interleave);
6296
6297	cam_fill_csio(csio,
6298		      retries,
6299		      cbfcnp,
6300		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6301		      tag_action,
6302		      data_ptr,
6303		      dxfer_len,
6304		      sense_len,
6305		      sizeof(*scsi_cmd),
6306		      timeout);
6307}
6308
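/*
 * Fill out a READ DEFECT DATA CDB, using the 10 byte form when the
 * caller allows it and the request fits, and the 12 byte form
 * otherwise.
 */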
6309void
6310scsi_read_defects(struct ccb_scsiio *csio, uint32_t retries,
6311		  void (*cbfcnp)(struct cam_periph *, union ccb *),
6312		  uint8_t tag_action, uint8_t list_format,
6313		  uint32_t addr_desc_index, uint8_t *data_ptr,
6314		  uint32_t dxfer_len, int minimum_cmd_size,
6315		  uint8_t sense_len, uint32_t timeout)
6316{
6317	uint8_t cdb_len;
6318
6319	/*
6320	 * These conditions allow using the 10 byte command.  Otherwise we
6321	 * need to use the 12 byte command.
6322	 */
6323	if ((minimum_cmd_size <= 10)
6324	 && (addr_desc_index == 0)
6325	 && (dxfer_len <= SRDD10_MAX_LENGTH)) {
6326		struct scsi_read_defect_data_10 *cdb10;
6327
6328		cdb10 = (struct scsi_read_defect_data_10 *)
6329			&csio->cdb_io.cdb_bytes;
6330
6331		cdb_len = sizeof(*cdb10);
6332		bzero(cdb10, cdb_len);
		cdb10->opcode = READ_DEFECT_DATA_10;
		cdb10->format = list_format;
		scsi_ulto2b(dxfer_len, cdb10->alloc_length);
6336	} else {
6337		struct scsi_read_defect_data_12 *cdb12;
6338
6339		cdb12 = (struct scsi_read_defect_data_12 *)
6340			&csio->cdb_io.cdb_bytes;
6341
6342		cdb_len = sizeof(*cdb12);
6343		bzero(cdb12, cdb_len);
		cdb12->opcode = READ_DEFECT_DATA_12;
		cdb12->format = list_format;
		scsi_ulto4b(dxfer_len, cdb12->alloc_length);
6347		scsi_ulto4b(addr_desc_index, cdb12->address_descriptor_index);
6348	}
6349
6350	cam_fill_csio(csio,
6351		      retries,
6352		      cbfcnp,
6353		      /*flags*/ CAM_DIR_IN,
6354		      tag_action,
6355		      data_ptr,
6356		      dxfer_len,
6357		      sense_len,
6358		      cdb_len,
6359		      timeout);
6360}
6361
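/*
 * Fill out a SANITIZE CDB.
 */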
6362void
6363scsi_sanitize(struct ccb_scsiio *csio, u_int32_t retries,
6364	      void (*cbfcnp)(struct cam_periph *, union ccb *),
6365	      u_int8_t tag_action, u_int8_t byte2, u_int16_t control,
6366	      u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
6367	      u_int32_t timeout)
6368{
6369	struct scsi_sanitize *scsi_cmd;
6370
6371	scsi_cmd = (struct scsi_sanitize *)&csio->cdb_io.cdb_bytes;
6372	scsi_cmd->opcode = SANITIZE;
6373	scsi_cmd->byte2 = byte2;
6374	scsi_cmd->control = control;
6375	scsi_ulto2b(dxfer_len, scsi_cmd->length);
6376
6377	cam_fill_csio(csio,
6378		      retries,
6379		      cbfcnp,
6380		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6381		      tag_action,
6382		      data_ptr,
6383		      dxfer_len,
6384		      sense_len,
6385		      sizeof(*scsi_cmd),
6386		      timeout);
6387}
6388
6389#endif /* _KERNEL */
6390
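/*
 * Fill out a ZBC OUT CDB with the given service action, zone id and
 * zone flags.
 */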
6391void
6392scsi_zbc_out(struct ccb_scsiio *csio, uint32_t retries,
6393	     void (*cbfcnp)(struct cam_periph *, union ccb *),
6394	     uint8_t tag_action, uint8_t service_action, uint64_t zone_id,
6395	     uint8_t zone_flags, uint8_t *data_ptr, uint32_t dxfer_len,
6396	     uint8_t sense_len, uint32_t timeout)
6397{
6398	struct scsi_zbc_out *scsi_cmd;
6399
6400	scsi_cmd = (struct scsi_zbc_out *)&csio->cdb_io.cdb_bytes;
6401	scsi_cmd->opcode = ZBC_OUT;
6402	scsi_cmd->service_action = service_action;
6403	scsi_u64to8b(zone_id, scsi_cmd->zone_id);
6404	scsi_cmd->zone_flags = zone_flags;
6405
6406	cam_fill_csio(csio,
6407		      retries,
6408		      cbfcnp,
6409		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6410		      tag_action,
6411		      data_ptr,
6412		      dxfer_len,
6413		      sense_len,
6414		      sizeof(*scsi_cmd),
6415		      timeout);
6416}
6417
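/*
 * Fill out a ZBC IN CDB (e.g. REPORT ZONES) with the given service
 * action, starting LBA and zone options.
 */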
6418void
6419scsi_zbc_in(struct ccb_scsiio *csio, uint32_t retries,
6420	    void (*cbfcnp)(struct cam_periph *, union ccb *),
6421	    uint8_t tag_action, uint8_t service_action, uint64_t zone_start_lba,
6422	    uint8_t zone_options, uint8_t *data_ptr, uint32_t dxfer_len,
6423	    uint8_t sense_len, uint32_t timeout)
6424{
6425	struct scsi_zbc_in *scsi_cmd;
6426
6427	scsi_cmd = (struct scsi_zbc_in *)&csio->cdb_io.cdb_bytes;
6428	scsi_cmd->opcode = ZBC_IN;
6429	scsi_cmd->service_action = service_action;
6430	scsi_ulto4b(dxfer_len, scsi_cmd->length);
6431	scsi_u64to8b(zone_start_lba, scsi_cmd->zone_start_lba);
6432	scsi_cmd->zone_options = zone_options;
6433
6434	cam_fill_csio(csio,
6435		      retries,
6436		      cbfcnp,
6437		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_IN : CAM_DIR_NONE,
6438		      tag_action,
6439		      data_ptr,
6440		      dxfer_len,
6441		      sense_len,
6442		      sizeof(*scsi_cmd),
6443		      timeout);
6444
6445}
6446
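/*
 * Build a ZAC MANAGEMENT OUT command wrapped in ATA PASS-THROUGH.
 * With use_ncq clear this is a non-data or DMA command; with use_ncq
 * set it is issued as NCQ NON-DATA or SEND FPDMA QUEUED.  Returns
 * non-zero on failure, e.g. if the transfer length cannot be encoded.
 */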
6447int
6448scsi_ata_zac_mgmt_out(struct ccb_scsiio *csio, uint32_t retries,
6449		      void (*cbfcnp)(struct cam_periph *, union ccb *),
6450		      uint8_t tag_action, int use_ncq,
6451		      uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
6452		      uint8_t *data_ptr, uint32_t dxfer_len,
6453		      uint8_t *cdb_storage, size_t cdb_storage_len,
6454		      uint8_t sense_len, uint32_t timeout)
6455{
6456	uint8_t command_out, protocol, ata_flags;
6457	uint16_t features_out;
6458	uint32_t sectors_out, auxiliary;
6459	int retval;
6460
6461	retval = 0;
6462
6463	if (use_ncq == 0) {
6464		command_out = ATA_ZAC_MANAGEMENT_OUT;
6465		features_out = (zm_action & 0xf) | (zone_flags << 8);
6466		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
6467		if (dxfer_len == 0) {
6468			protocol = AP_PROTO_NON_DATA;
6469			ata_flags |= AP_FLAG_TLEN_NO_DATA;
6470			sectors_out = 0;
6471		} else {
6472			protocol = AP_PROTO_DMA;
6473			ata_flags |= AP_FLAG_TLEN_SECT_CNT |
6474				     AP_FLAG_TDIR_TO_DEV;
6475			sectors_out = ((dxfer_len >> 9) & 0xffff);
6476		}
6477		auxiliary = 0;
6478	} else {
6479		ata_flags = AP_FLAG_BYT_BLOK_BLOCKS;
6480		if (dxfer_len == 0) {
6481			command_out = ATA_NCQ_NON_DATA;
6482			features_out = ATA_NCQ_ZAC_MGMT_OUT;
6483			/*
6484			 * We're assuming the SCSI to ATA translation layer
6485			 * will set the NCQ tag number in the tag field.
6486			 * That isn't clear from the SAT-4 spec (as of rev 05).
6487			 */
6488			sectors_out = 0;
6489			ata_flags |= AP_FLAG_TLEN_NO_DATA;
6490		} else {
6491			command_out = ATA_SEND_FPDMA_QUEUED;
6492			/*
6493			 * Note that we're defaulting to normal priority,
6494			 * and assuming that the SCSI to ATA translation
6495			 * layer will insert the NCQ tag number in the tag
6496			 * field.  That isn't clear in the SAT-4 spec (as
6497			 * of rev 05).
6498			 */
6499			sectors_out = ATA_SFPDMA_ZAC_MGMT_OUT << 8;
6500
6501			ata_flags |= AP_FLAG_TLEN_FEAT |
6502				     AP_FLAG_TDIR_TO_DEV;
6503
			/*
			 * For SEND FPDMA QUEUED, the transfer length is
			 * encoded in the FEATURE register, and 0 means
			 * that 65536 512 byte blocks are to be transferred.
			 * In practice, it seems unlikely that we'll see
			 * a transfer that large, and it may confuse the
			 * SAT layer, because generally that means that
			 * 0 bytes should be transferred.
			 */
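			/*
			 * For example, a 1 MB transfer is 2048
			 * 512 byte blocks, so features_out would be
			 * set to 2048 below.
			 */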
6513			if (dxfer_len == (65536 * 512)) {
6514				features_out = 0;
6515			} else if (dxfer_len <= (65535 * 512)) {
6516				features_out = ((dxfer_len >> 9) & 0xffff);
6517			} else {
6518				/* The transfer is too big. */
6519				retval = 1;
6520				goto bailout;
6521			}
6522		}
6523
6524		auxiliary = (zm_action & 0xf) | (zone_flags << 8);
6525		protocol = AP_PROTO_FPDMA;
6526	}
6527
6528	protocol |= AP_EXTEND;
6529
6530	retval = scsi_ata_pass(csio,
6531	    retries,
6532	    cbfcnp,
6533	    /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
6534	    tag_action,
6535	    /*protocol*/ protocol,
6536	    /*ata_flags*/ ata_flags,
6537	    /*features*/ features_out,
6538	    /*sector_count*/ sectors_out,
6539	    /*lba*/ zone_id,
6540	    /*command*/ command_out,
6541	    /*device*/ 0,
6542	    /*icc*/ 0,
6543	    /*auxiliary*/ auxiliary,
6544	    /*control*/ 0,
6545	    /*data_ptr*/ data_ptr,
6546	    /*dxfer_len*/ dxfer_len,
6547	    /*cdb_storage*/ cdb_storage,
6548	    /*cdb_storage_len*/ cdb_storage_len,
6549	    /*minimum_cmd_size*/ 0,
6550	    /*sense_len*/ SSD_FULL_SIZE,
6551	    /*timeout*/ timeout);
6552
6553bailout:
6554
6555	return (retval);
6556}
6557
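/*
 * Build a ZAC MANAGEMENT IN command wrapped in ATA PASS-THROUGH.
 * With use_ncq clear this is a DMA command; with use_ncq set it is
 * issued as RECEIVE FPDMA QUEUED.  Returns non-zero on failure, e.g.
 * if the transfer length cannot be encoded.
 */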
6558int
6559scsi_ata_zac_mgmt_in(struct ccb_scsiio *csio, uint32_t retries,
6560		     void (*cbfcnp)(struct cam_periph *, union ccb *),
6561		     uint8_t tag_action, int use_ncq,
6562		     uint8_t zm_action, uint64_t zone_id, uint8_t zone_flags,
6563		     uint8_t *data_ptr, uint32_t dxfer_len,
6564		     uint8_t *cdb_storage, size_t cdb_storage_len,
6565		     uint8_t sense_len, uint32_t timeout)
6566{
6567	uint8_t command_out, protocol;
6568	uint16_t features_out, sectors_out;
6569	uint32_t auxiliary;
6570	int ata_flags;
6571	int retval;
6572
6573	retval = 0;
6574	ata_flags = AP_FLAG_TDIR_FROM_DEV | AP_FLAG_BYT_BLOK_BLOCKS;
6575
6576	if (use_ncq == 0) {
6577		command_out = ATA_ZAC_MANAGEMENT_IN;
6578		/* XXX KDM put a macro here */
6579		features_out = (zm_action & 0xf) | (zone_flags << 8);
6580		sectors_out = dxfer_len >> 9; /* XXX KDM macro */
6581		protocol = AP_PROTO_DMA;
6582		ata_flags |= AP_FLAG_TLEN_SECT_CNT;
6583		auxiliary = 0;
6584	} else {
6585		ata_flags |= AP_FLAG_TLEN_FEAT;
6586
6587		command_out = ATA_RECV_FPDMA_QUEUED;
6588		sectors_out = ATA_RFPDMA_ZAC_MGMT_IN << 8;
6589
		/*
		 * For RECEIVE FPDMA QUEUED, the transfer length is
		 * encoded in the FEATURE register, and 0 means
		 * that 65536 512 byte blocks are to be transferred.
		 * In practice, it seems unlikely that we'll see
		 * a transfer that large, and it may confuse the
		 * SAT layer, because generally that means that
		 * 0 bytes should be transferred.
		 */
6599		if (dxfer_len == (65536 * 512)) {
6600			features_out = 0;
6601		} else if (dxfer_len <= (65535 * 512)) {
6602			features_out = ((dxfer_len >> 9) & 0xffff);
6603		} else {
6604			/* The transfer is too big. */
6605			retval = 1;
6606			goto bailout;
6607		}
		auxiliary = (zm_action & 0xf) | (zone_flags << 8);
6609		protocol = AP_PROTO_FPDMA;
6610	}
6611
6612	protocol |= AP_EXTEND;
6613
6614	retval = scsi_ata_pass(csio,
6615	    retries,
6616	    cbfcnp,
6617	    /*flags*/ CAM_DIR_IN,
6618	    tag_action,
6619	    /*protocol*/ protocol,
6620	    /*ata_flags*/ ata_flags,
6621	    /*features*/ features_out,
6622	    /*sector_count*/ sectors_out,
6623	    /*lba*/ zone_id,
6624	    /*command*/ command_out,
6625	    /*device*/ 0,
6626	    /*icc*/ 0,
6627	    /*auxiliary*/ auxiliary,
6628	    /*control*/ 0,
6629	    /*data_ptr*/ data_ptr,
6630	    /*dxfer_len*/ (dxfer_len >> 9) * 512, /* XXX KDM */
6631	    /*cdb_storage*/ cdb_storage,
6632	    /*cdb_storage_len*/ cdb_storage_len,
6633	    /*minimum_cmd_size*/ 0,
6634	    /*sense_len*/ SSD_FULL_SIZE,
6635	    /*timeout*/ timeout);
6636
6637bailout:
6638	return (retval);
6639}
6640