/*-
 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/cam/ata/ata_da.c 222520 2011-05-31 09:22:52Z mav $");

#include "opt_ada.h"
#include "opt_ata.h"

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
#include <sys/reboot.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_sim.h>

#include <cam/ata/ata_all.h>

#include <machine/md_var.h>	/* geometry translation */

#ifdef _KERNEL

#define ATA_MAX_28BIT_LBA               268435455UL

typedef enum {
	ADA_STATE_WCACHE,
	ADA_STATE_NORMAL
} ada_state;

typedef enum {
	ADA_FLAG_PACK_INVALID	= 0x001,
	ADA_FLAG_CAN_48BIT	= 0x002,
	ADA_FLAG_CAN_FLUSHCACHE	= 0x004,
	ADA_FLAG_CAN_NCQ	= 0x008,
	ADA_FLAG_CAN_DMA	= 0x010,
	ADA_FLAG_NEED_OTAG	= 0x020,
	ADA_FLAG_WENT_IDLE	= 0x040,
	ADA_FLAG_CAN_TRIM	= 0x080,
	ADA_FLAG_OPEN		= 0x100,
	ADA_FLAG_SCTX_INIT	= 0x200,
	ADA_FLAG_CAN_CFA        = 0x400,
	ADA_FLAG_CAN_POWERMGT   = 0x800
} ada_flags;

typedef enum {
	ADA_Q_NONE		= 0x00,
	ADA_Q_4K		= 0x01,
} ada_quirks;

typedef enum {
	ADA_CCB_WCACHE		= 0x01,
	ADA_CCB_BUFFER_IO	= 0x03,
	ADA_CCB_WAITING		= 0x04,
	ADA_CCB_DUMP		= 0x05,
	ADA_CCB_TRIM		= 0x06,
	ADA_CCB_TYPE_MASK	= 0x0F,
} ada_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int8_t  secs_per_track;
	u_int32_t cylinders;
	u_int32_t secsize;	/* Number of bytes/logical sector */
	u_int64_t sectors;	/* Total number sectors */
};

#define TRIM_MAX_BLOCKS	4
#define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * 64)
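/*
 * Each DSM TRIM range entry is 8 bytes: a 48-bit starting LBA followed by a
 * 16-bit sector count.  64 such entries fill one 512-byte payload block, so
 * TRIM_MAX_BLOCKS payload blocks give TRIM_MAX_RANGES ranges per request.
 */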
struct trim_request {
	uint8_t		data[TRIM_MAX_RANGES * 8];
	struct bio	*bps[TRIM_MAX_RANGES];
};

struct ada_softc {
	struct	 bio_queue_head bio_queue;
	struct	 bio_queue_head trim_queue;
	ada_state state;
	ada_flags flags;
	ada_quirks quirks;
	int	 ordered_tag_count;
	int	 outstanding_cmds;
	int	 trim_max_ranges;
	int	 trim_running;
	int	 write_cache;
#ifdef ADA_TEST_FAILURE
	int      force_read_error;
	int      force_write_error;
	int      periodic_read_error;
	int      periodic_read_count;
#endif
	struct	 disk_params params;
	struct	 disk *disk;
	struct task		sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;
	struct trim_request	trim_req;
};

struct ada_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	ada_quirks quirks;
};

static struct ada_quirk_entry ada_quirk_table[] =
{
	{
		/* Hitachi Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Hitachi H??????????E3*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* Samsung Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD204UI*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* Seagate Barracuda Green Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DL*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* Seagate Momentus Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500423AS*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* Seagate Momentus Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500424AS*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* Seagate Momentus Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750420AS*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* Seagate Momentus Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750422AS*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* Seagate Momentus Thin Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???LT*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* WDC Caviar Green Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RS*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* WDC Caviar Green Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RX*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* WDC Caviar Green Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RS*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* WDC Caviar Green Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RX*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* WDC Scorpio Black Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PKT*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* WDC Scorpio Black Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PKT*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* WDC Scorpio Blue Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PVT*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* WDC Scorpio Blue Advanced Format (4k) drives */
		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PVT*", "*" },
		/*quirks*/ADA_Q_4K
	},
	{
		/* Default */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0
	},
};

static	disk_strategy_t	adastrategy;
static	dumper_t	adadump;
static	periph_init_t	adainit;
static	void		adaasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		adasysctlinit(void *context, int pending);
static	periph_ctor_t	adaregister;
static	periph_dtor_t	adacleanup;
static	periph_start_t	adastart;
static	periph_oninv_t	adaoninvalidate;
static	void		adadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static  int		adaerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static void		adagetparams(struct cam_periph *periph,
				struct ccb_getdev *cgd);
static timeout_t	adasendorderedtag;
static void		adashutdown(void *arg, int howto);
static void		adasuspend(void *arg);
static void		adaresume(void *arg);

#ifndef	ADA_DEFAULT_LEGACY_ALIASES
#ifdef ATA_CAM
#define	ADA_DEFAULT_LEGACY_ALIASES	1
#else
#define	ADA_DEFAULT_LEGACY_ALIASES	0
#endif
#endif

#ifndef ADA_DEFAULT_TIMEOUT
#define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
#endif

#ifndef	ADA_DEFAULT_RETRY
#define	ADA_DEFAULT_RETRY	4
#endif

#ifndef	ADA_DEFAULT_SEND_ORDERED
#define	ADA_DEFAULT_SEND_ORDERED	1
#endif

#ifndef	ADA_DEFAULT_SPINDOWN_SHUTDOWN
#define	ADA_DEFAULT_SPINDOWN_SHUTDOWN	1
#endif

#ifndef	ADA_DEFAULT_SPINDOWN_SUSPEND
#define	ADA_DEFAULT_SPINDOWN_SUSPEND	1
#endif

#ifndef	ADA_DEFAULT_WRITE_CACHE
#define	ADA_DEFAULT_WRITE_CACHE	1
#endif

/*
 * Most platforms map firmware geometry to actual, but some don't.  If
 * not overridden, default to nothing.
 */
#ifndef ata_disk_firmware_geom_adjust
#define	ata_disk_firmware_geom_adjust(disk)
#endif

static int ada_legacy_aliases = ADA_DEFAULT_LEGACY_ALIASES;
static int ada_retry_count = ADA_DEFAULT_RETRY;
static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND;
static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;

SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, legacy_aliases, CTLFLAG_RW,
           &ada_legacy_aliases, 0, "Create legacy-like device aliases");
TUNABLE_INT("kern.cam.ada.legacy_aliases", &ada_legacy_aliases);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
           &ada_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
           &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
           &ada_send_ordered, 0, "Send Ordered Tags");
TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RW,
           &ada_spindown_shutdown, 0, "Spin down upon shutdown");
TUNABLE_INT("kern.cam.ada.spindown_shutdown", &ada_spindown_shutdown);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RW,
           &ada_spindown_suspend, 0, "Spin down upon suspend");
TUNABLE_INT("kern.cam.ada.spindown_suspend", &ada_spindown_suspend);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RW,
           &ada_write_cache, 0, "Enable disk write cache");
TUNABLE_INT("kern.cam.ada.write_cache", &ada_write_cache);

/*
 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * satisfies the "don't send an ordered tag" test, so it takes
 * us two intervals to determine that a tag must be sent.
 */
#ifndef ADA_ORDEREDTAG_INTERVAL
#define ADA_ORDEREDTAG_INTERVAL 4
#endif
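/*
 * With the default 30 second timeout and an interval of 4, the
 * adasendorderedtag() callout below fires every (30 * hz) / 4 ticks,
 * i.e. roughly every 7.5 seconds.
 */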

static struct periph_driver adadriver =
{
	adainit, "ada",
	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(ada, adadriver);

MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");

static int
adaopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL) {
		return (ENXIO);
	}

	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return(ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	softc = (struct ada_softc *)periph->softc;
	softc->flags |= ADA_FLAG_OPEN;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
	     periph->unit_number));

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		/* Give the pack another chance: clear any stale invalid flag. */
		softc->flags &= ~ADA_FLAG_PACK_INVALID;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}

static int
adaclose(struct disk *dp)
{
	struct	cam_periph *periph;
	struct	ada_softc *softc;
	union ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	softc = (struct ada_softc *)periph->softc;
	/* We only sync the cache if the drive is capable of it. */
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {

		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		cam_fill_ataio(&ccb->ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
		    /*sense_flags*/0, softc->disk->d_devstat);

		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		xpt_release_ccb(ccb);
	}

	softc->flags &= ~ADA_FLAG_OPEN;
	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

static void
adaschedule(struct cam_periph *periph)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;

	if (bioq_first(&softc->bio_queue) ||
	    (!softc->trim_running && bioq_first(&softc->trim_queue))) {
		/* Have more work to do, so ensure we stay scheduled */
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
	}
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a bio and will include
 * only one physical transfer.
 */
static void
adastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct ada_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	if (bp->bio_cmd == BIO_DELETE &&
	    (softc->flags & ADA_FLAG_CAN_TRIM))
		bioq_disksort(&softc->trim_queue, bp);
	else
		bioq_disksort(&softc->bio_queue, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	adaschedule(periph);
	cam_periph_unlock(periph);

	return;
}

static int
adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct	    cam_periph *periph;
	struct	    ada_softc *softc;
	u_int	    secsize;
	union	    ccb ccb;
	struct	    disk *dp;
	uint64_t    lba;
	uint16_t    count;

	dp = arg;
	periph = dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);
	softc = (struct ada_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;
	lba = offset / secsize;
	count = length / secsize;

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

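	/*
	 * The dump path runs with normal scheduling suspended, so the CCBs
	 * below are executed synchronously via xpt_polled_action() and
	 * adadone() treats ADA_CCB_DUMP as a no-op.  A zero-length request
	 * marks the end of the dump and only flushes the write cache.
	 */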
	if (length > 0) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
		    0,
		    adadone,
		    CAM_DIR_OUT,
		    0,
		    (u_int8_t *) virtual,
		    length,
		    ada_default_timeout*1000);
		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
		    (lba + count >= ATA_MAX_28BIT_LBA ||
		    count >= 256)) {
			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
			    0, lba, count);
		} else {
			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
			    0, lba, count);
		}
		xpt_polled_action(&ccb);

		if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			cam_periph_unlock(periph);
			return(EIO);
		}
		cam_periph_unlock(periph);
		return(0);
	}

	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
	}
	cam_periph_unlock(periph);
	return (0);
}

static void
adainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("ada: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (ada_send_ordered) {

		/* Register our event handlers */
		if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend,
					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
		    printf("adainit: power event registration failed!\n");
		if ((EVENTHANDLER_REGISTER(power_resume, adaresume,
					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
		    printf("adainit: power event registration failed!\n");
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
		    printf("adainit: shutdown event registration failed!\n");
	}
}

static void
adaoninvalidate(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, adaasync, periph, periph->path);

	softc->flags |= ADA_FLAG_PACK_INVALID;

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	bioq_flush(&softc->bio_queue, NULL, ENXIO);
	bioq_flush(&softc->trim_queue, NULL, ENXIO);

	disk_gone(softc->disk);
	xpt_print(periph->path, "lost device\n");
}

static void
adacleanup(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	xpt_print(periph->path, "removing device entry\n");
	cam_periph_unlock(periph);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
		xpt_print(periph->path, "can't remove sysctl context\n");
	}

	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

static void
adaasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (cgd->protocol != PROTO_ATA)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(adaregister, adaoninvalidate,
					  adacleanup, adastart,
					  "ada", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, adaasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("adaasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct ccb_getdev cgd;

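		/*
		 * A bus or device reset may have cleared the drive's
		 * volatile write cache setting; if a write cache policy
		 * is configured, go through ADA_STATE_WCACHE again so it
		 * gets re-applied.
		 */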
		softc = (struct ada_softc *)periph->softc;
		cam_periph_async(periph, code, path, arg);
		if (ada_write_cache < 0 && softc->write_cache < 0)
			break;
		if (softc->state != ADA_STATE_NORMAL)
			break;
		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);
		if ((cgd.ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) == 0)
			break;
		softc->state = ADA_STATE_WCACHE;
		cam_periph_acquire(periph);
		cam_freeze_devq_arg(periph->path,
		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
		xpt_schedule(periph, CAM_PRIORITY_DEV);
	}
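	/* Note: the case above falls through into the default case. */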
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

static void
adasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	char tmpstr[80], tmpstr2[80];

	periph = (struct cam_periph *)context;

	/* periph was held for us when this task was enqueued */
	if (periph->flags & CAM_PERIPH_INVALID) {
		cam_periph_release(periph);
		return;
	}

	softc = (struct ada_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= ADA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("adasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE,
		&softc->write_cache, 0, "Enable disk write cache.");
#ifdef ADA_TEST_FAILURE
	/*
	 * Add a 'door bell' sysctl which allows one to set it from userland
	 * and cause something bad to happen.  For the moment, we only allow
	 * whacking the next read or write.
	 */
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
		&softc->force_read_error, 0,
		"Force a read error for the next N reads.");
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
		&softc->force_write_error, 0,
		"Force a write error for the next N writes.");
	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
		&softc->periodic_read_error, 0,
		"Force a read error every N reads (don't set too low).");
#endif
	cam_periph_release(periph);
}

static cam_status
adaregister(struct cam_periph *periph, void *arg)
{
	struct ada_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char   announce_buf[80], buf1[32];
	struct disk_params *dp;
	caddr_t match;
	u_int maxio;
	int legacy_id, quirks;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("adaregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("adaregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("adaregister: Unable to probe new device. "
		    "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	bioq_init(&softc->bio_queue);
	bioq_init(&softc->trim_queue);

	if (cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA &&
	    (cgd->inq_flags & SID_DMA))
		softc->flags |= ADA_FLAG_CAN_DMA;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
		softc->flags |= ADA_FLAG_CAN_48BIT;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
	if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
		softc->flags |= ADA_FLAG_CAN_POWERMGT;
	if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
	    (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
		softc->flags |= ADA_FLAG_CAN_NCQ;
	if (cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) {
		softc->flags |= ADA_FLAG_CAN_TRIM;
		softc->trim_max_ranges = TRIM_MAX_RANGES;
		if (cgd->ident_data.max_dsm_blocks != 0) {
			softc->trim_max_ranges =
			    min(cgd->ident_data.max_dsm_blocks * 64,
				softc->trim_max_ranges);
		}
	}
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
		softc->flags |= ADA_FLAG_CAN_CFA;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
			       (caddr_t)ada_quirk_table,
			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
			       sizeof(*ada_quirk_table), ata_identify_match);
	if (match != NULL)
		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
	else
		softc->quirks = ADA_Q_NONE;

	bzero(&cpi, sizeof(cpi));
	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);

	/*
	 * Register this media as a disk
	 */
	(void)cam_periph_hold(periph, PRIBIO);
	mtx_unlock(periph->sim->mtx);
	snprintf(announce_buf, sizeof(announce_buf),
	    "kern.cam.ada.%d.quirks", periph->unit_number);
	quirks = softc->quirks;
	TUNABLE_INT_FETCH(announce_buf, &quirks);
	softc->quirks = quirks;
	softc->write_cache = -1;
	snprintf(announce_buf, sizeof(announce_buf),
	    "kern.cam.ada.%d.write_cache", periph->unit_number);
	TUNABLE_INT_FETCH(announce_buf, &softc->write_cache);
	adagetparams(periph, cgd);
	softc->disk = disk_alloc();
	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
			  periph->unit_number, softc->params.secsize,
			  DEVSTAT_ALL_SUPPORTED,
			  DEVSTAT_TYPE_DIRECT |
			  XPORT_DEVSTAT_TYPE(cpi.transport),
			  DEVSTAT_PRIORITY_DISK);
	softc->disk->d_open = adaopen;
	softc->disk->d_close = adaclose;
	softc->disk->d_strategy = adastrategy;
	softc->disk->d_dump = adadump;
	softc->disk->d_name = "ada";
	softc->disk->d_drv1 = periph;
	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
	if (maxio == 0)
		maxio = DFLTPHYS;	/* traditional default */
	else if (maxio > MAXPHYS)
		maxio = MAXPHYS;	/* for safety */
	if (softc->flags & ADA_FLAG_CAN_48BIT)
		maxio = min(maxio, 65536 * softc->params.secsize);
	else					/* 28bit ATA command limit */
		maxio = min(maxio, 256 * softc->params.secsize);
	softc->disk->d_maxsize = maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = 0;
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((softc->flags & ADA_FLAG_CAN_TRIM) ||
	    ((softc->flags & ADA_FLAG_CAN_CFA) &&
	    !(softc->flags & ADA_FLAG_CAN_48BIT)))
		softc->disk->d_flags |= DISKFLAG_CANDELETE;
	strlcpy(softc->disk->d_ident, cgd->serial_num,
	    MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1));
	strlcpy(softc->disk->d_descr, cgd->ident_data.model,
	    MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
	softc->disk->d_hba_vendor = cpi.hba_vendor;
	softc->disk->d_hba_device = cpi.hba_device;
	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
	softc->disk->d_hba_subdevice = cpi.hba_subdevice;

	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = (off_t)softc->params.sectors *
	    softc->params.secsize;
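	/*
	 * Export the physical sector geometry so upper layers can align
	 * their I/O.  Drives in the ADA_Q_4K quirk table do not report
	 * their 4K physical sectors, so a 4KB stripe is advertised for
	 * them explicitly.
	 */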
	if (ata_physical_sector_size(&cgd->ident_data) !=
	    softc->params.secsize) {
		softc->disk->d_stripesize =
		    ata_physical_sector_size(&cgd->ident_data);
		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
		    ata_logical_sector_offset(&cgd->ident_data)) %
		    softc->disk->d_stripesize;
	} else if (softc->quirks & ADA_Q_4K) {
		softc->disk->d_stripesize = 4096;
		softc->disk->d_stripeoffset = 0;
	}
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;
	ata_disk_firmware_geom_adjust(softc->disk);

	if (ada_legacy_aliases) {
#ifdef ATA_STATIC_ID
		legacy_id = xpt_path_legacy_ata_id(periph->path);
#else
		legacy_id = softc->disk->d_unit;
#endif
		if (legacy_id >= 0) {
			snprintf(announce_buf, sizeof(announce_buf),
			    "kern.devalias.%s%d",
			    softc->disk->d_name, softc->disk->d_unit);
			snprintf(buf1, sizeof(buf1),
			    "ad%d", legacy_id);
			setenv(announce_buf, buf1);
		}
	} else
		legacy_id = -1;
	disk_create(softc->disk, DISK_VERSION);
	mtx_lock(periph->sim->mtx);
	cam_periph_unhold(periph);

	dp = &softc->params;
	snprintf(announce_buf, sizeof(announce_buf),
		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
		(uintmax_t)(((uintmax_t)dp->secsize *
		dp->sectors) / (1024*1024)),
		(uintmax_t)dp->sectors,
		dp->secsize, dp->heads,
		dp->secs_per_track, dp->cylinders);
	xpt_announce_periph(periph, announce_buf);
	if (legacy_id >= 0)
		printf("%s%d: Previously was known as ad%d\n",
		       periph->periph_name, periph->unit_number, legacy_id);

	/*
	 * Create our sysctl variables, now that we know
	 * we have successfully attached.
	 */
	cam_periph_acquire(periph);
	taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);

	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
			   adaasync, periph, periph->path);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);

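	/*
	 * If a write cache policy is configured, either globally via
	 * kern.cam.ada.write_cache or per unit via the
	 * kern.cam.ada.%d.write_cache tunable, and the drive supports the
	 * WRITE CACHE feature, start in ADA_STATE_WCACHE so adastart()
	 * applies it before normal I/O begins.
	 */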
	if ((ada_write_cache >= 0 || softc->write_cache >= 0) &&
	    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
		softc->state = ADA_STATE_WCACHE;
		cam_periph_acquire(periph);
		cam_freeze_devq_arg(periph->path,
		    RELSIM_RELEASE_RUNLEVEL, CAM_RL_DEV + 1);
		xpt_schedule(periph, CAM_PRIORITY_DEV);
	} else
		softc->state = ADA_STATE_NORMAL;

	return(CAM_REQ_CMP);
}

static void
adastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct ccb_ataio *ataio = &start_ccb->ataio;

	switch (softc->state) {
	case ADA_STATE_NORMAL:
	{
		struct bio *bp;
		u_int8_t tag_code;

		/* Execute immediate CCB if waiting. */
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			wakeup(&periph->ccb_list);
			/* Have more work to do, so ensure we stay scheduled */
			adaschedule(periph);
			break;
		}
		/* Run TRIM if not running yet. */
		if (!softc->trim_running &&
		    (bp = bioq_first(&softc->trim_queue)) != 0) {
			struct trim_request *req = &softc->trim_req;
			struct bio *bp1;
			int bps = 0, ranges = 0;

			softc->trim_running = 1;
			bzero(req, sizeof(*req));
			bp1 = bp;
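			/*
			 * Coalesce as many queued BIO_DELETE requests as fit
			 * into one DSM TRIM payload: each 8-byte range entry
			 * holds a 48-bit LBA (bytes 0-5) and a 16-bit sector
			 * count (bytes 6-7).
			 */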
			do {
				uint64_t lba = bp1->bio_pblkno;
				int count = bp1->bio_bcount /
				    softc->params.secsize;

				bioq_remove(&softc->trim_queue, bp1);
				while (count > 0) {
					int c = min(count, 0xffff);
					int off = ranges * 8;

					req->data[off + 0] = lba & 0xff;
					req->data[off + 1] = (lba >> 8) & 0xff;
					req->data[off + 2] = (lba >> 16) & 0xff;
					req->data[off + 3] = (lba >> 24) & 0xff;
					req->data[off + 4] = (lba >> 32) & 0xff;
					req->data[off + 5] = (lba >> 40) & 0xff;
					req->data[off + 6] = c & 0xff;
					req->data[off + 7] = (c >> 8) & 0xff;
					lba += c;
					count -= c;
					ranges++;
				}
				req->bps[bps++] = bp1;
				bp1 = bioq_first(&softc->trim_queue);
				if (bp1 == NULL ||
				    bp1->bio_bcount / softc->params.secsize >
				    (softc->trim_max_ranges - ranges) * 0xffff)
					break;
			} while (1);
			cam_fill_ataio(ataio,
			    ada_retry_count,
			    adadone,
			    CAM_DIR_OUT,
			    0,
			    req->data,
			    ((ranges + 63) / 64) * 512,
			    ada_default_timeout * 1000);
			ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
			    ATA_DSM_TRIM, 0, (ranges + 63) / 64);
			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
			goto out;
		}
		/* Run regular command. */
		bp = bioq_first(&softc->bio_queue);
		if (bp == NULL) {
			xpt_release_ccb(start_ccb);
			break;
		}
		bioq_remove(&softc->bio_queue, bp);

		if ((bp->bio_flags & BIO_ORDERED) != 0
		 || (softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
			softc->flags &= ~ADA_FLAG_NEED_OTAG;
			softc->ordered_tag_count++;
			tag_code = 0;
		} else {
			tag_code = 1;
		}
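		/*
		 * tag_code == 0 keeps the request out of the NCQ path below
		 * so it acts as an ordered barrier; tag_code == 1 allows
		 * FPDMA queueing when the drive supports NCQ.
		 */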
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		{
			uint64_t lba = bp->bio_pblkno;
			uint16_t count = bp->bio_bcount / softc->params.secsize;
#ifdef ADA_TEST_FAILURE
			int fail = 0;

			/*
			 * Support the failure ioctls.  If the command is a
			 * read, and there are pending forced read errors, or
			 * if a write and pending write errors, then fail this
			 * operation with EIO.  This is useful for testing
			 * purposes.  Also, support having every Nth read fail.
			 *
			 * This is a rather blunt tool.
			 */
			if (bp->bio_cmd == BIO_READ) {
				if (softc->force_read_error) {
					softc->force_read_error--;
					fail = 1;
				}
				if (softc->periodic_read_error > 0) {
					if (++softc->periodic_read_count >=
					    softc->periodic_read_error) {
						softc->periodic_read_count = 0;
						fail = 1;
					}
				}
			} else {
				if (softc->force_write_error) {
					softc->force_write_error--;
					fail = 1;
				}
			}
			if (fail) {
				bp->bio_error = EIO;
				bp->bio_flags |= BIO_ERROR;
				biodone(bp);
				xpt_release_ccb(start_ccb);
				adaschedule(periph);
				return;
			}
#endif
			cam_fill_ataio(ataio,
			    ada_retry_count,
			    adadone,
			    bp->bio_cmd == BIO_READ ?
			        CAM_DIR_IN : CAM_DIR_OUT,
			    tag_code,
			    bp->bio_data,
			    bp->bio_bcount,
			    ada_default_timeout*1000);

			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
				if (bp->bio_cmd == BIO_READ) {
					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
					    lba, count);
				} else {
					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
					    lba, count);
				}
			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
			    (lba + count >= ATA_MAX_28BIT_LBA ||
			    count > 256)) {
				if (softc->flags & ADA_FLAG_CAN_DMA) {
					if (bp->bio_cmd == BIO_READ) {
						ata_48bit_cmd(ataio, ATA_READ_DMA48,
						    0, lba, count);
					} else {
						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
						    0, lba, count);
					}
				} else {
					if (bp->bio_cmd == BIO_READ) {
						ata_48bit_cmd(ataio, ATA_READ_MUL48,
						    0, lba, count);
					} else {
						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
						    0, lba, count);
					}
				}
			} else {
				if (count == 256)
					count = 0;
				if (softc->flags & ADA_FLAG_CAN_DMA) {
					if (bp->bio_cmd == BIO_READ) {
						ata_28bit_cmd(ataio, ATA_READ_DMA,
						    0, lba, count);
					} else {
						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
						    0, lba, count);
					}
				} else {
					if (bp->bio_cmd == BIO_READ) {
						ata_28bit_cmd(ataio, ATA_READ_MUL,
						    0, lba, count);
					} else {
						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
						    0, lba, count);
					}
				}
			}
			break;
		}
		case BIO_DELETE:
		{
			uint64_t lba = bp->bio_pblkno;
			uint16_t count = bp->bio_bcount / softc->params.secsize;

			cam_fill_ataio(ataio,
			    ada_retry_count,
			    adadone,
			    CAM_DIR_NONE,
			    0,
			    NULL,
			    0,
			    ada_default_timeout*1000);

			if (count >= 256)
				count = 0;
			ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
			break;
		}
		case BIO_FLUSH:
			cam_fill_ataio(ataio,
			    1,
			    adadone,
			    CAM_DIR_NONE,
			    0,
			    NULL,
			    0,
			    ada_default_timeout*1000);

			if (softc->flags & ADA_FLAG_CAN_48BIT)
				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
			else
				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
			break;
		}
		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
out:
		start_ccb->ccb_h.ccb_bp = bp;
		softc->outstanding_cmds++;
		xpt_action(start_ccb);

		/* May have more work to do, so ensure we stay scheduled */
		adaschedule(periph);
		break;
	}
	case ADA_STATE_WCACHE:
	{
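		/*
		 * Apply the configured write cache policy with SET FEATURES;
		 * a non-negative per-unit value overrides the global
		 * kern.cam.ada.write_cache default.
		 */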
		cam_fill_ataio(ataio,
		    1,
		    adadone,
		    CAM_DIR_NONE,
		    0,
		    NULL,
		    0,
		    ada_default_timeout*1000);

		ata_28bit_cmd(ataio, ATA_SETFEATURES, (softc->write_cache > 0 ||
		     (softc->write_cache < 0 && ada_write_cache)) ?
		    ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0);
		start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE;
		xpt_action(start_ccb);
		break;
	}
	}
}

static void
adadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ada_softc *softc;
	struct ccb_ataio *ataio;

	softc = (struct ada_softc *)periph->softc;
	ataio = &done_ccb->ataio;
	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
	case ADA_CCB_BUFFER_IO:
	case ADA_CCB_TRIM:
	{
		struct bio *bp;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;

			error = adaerror(done_ccb, 0, 0);
			if (error == ERESTART) {
				/* A retry was scheduled, so just return. */
				return;
			}
			if (error != 0) {
				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our pack as
					 * invalid.
					 */
					/*
					 * XXX See if this is really a media
					 * XXX change first?
					 */
					xpt_print(periph->path,
					    "Invalidating pack\n");
					softc->flags |= ADA_FLAG_PACK_INVALID;
				}
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = ataio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0)
					bp->bio_flags |= BIO_ERROR;
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			bp->bio_resid = ataio->resid;
			if (ataio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}
		softc->outstanding_cmds--;
		if (softc->outstanding_cmds == 0)
			softc->flags |= ADA_FLAG_WENT_IDLE;
		if ((ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) ==
		    ADA_CCB_TRIM) {
			struct trim_request *req =
			    (struct trim_request *)ataio->data_ptr;
			int i;

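			/*
			 * bps[0] is the bio attached to this CCB and is
			 * finished below; complete the other bios that were
			 * folded into the same TRIM request with the same
			 * status.
			 */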
			for (i = 1; i < softc->trim_max_ranges &&
			    req->bps[i]; i++) {
				struct bio *bp1 = req->bps[i];

				bp1->bio_resid = bp->bio_resid;
				bp1->bio_error = bp->bio_error;
				if (bp->bio_flags & BIO_ERROR)
					bp1->bio_flags |= BIO_ERROR;
				biodone(bp1);
			}
			softc->trim_running = 0;
			biodone(bp);
			adaschedule(periph);
		} else
			biodone(bp);
		break;
	}
	case ADA_CCB_WCACHE:
	{
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if (adaerror(done_ccb, 0, 0) == ERESTART) {
				return;
			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
				cam_release_devq(done_ccb->ccb_h.path,
				    /*relsim_flags*/0,
				    /*reduction*/0,
				    /*timeout*/0,
				    /*getcount_only*/0);
			}
		}

		softc->state = ADA_STATE_NORMAL;
		/*
		 * Since our peripheral may be invalidated by an error
		 * above or an external event, we must release our CCB
		 * before releasing the reference on the peripheral.
		 * The peripheral will only go away once the last reference
		 * is removed, and we need it around for the CCB release
		 * operation.
		 */
		xpt_release_ccb(done_ccb);
		cam_release_devq(periph->path,
		    RELSIM_RELEASE_RUNLEVEL, 0, CAM_RL_DEV + 1, FALSE);
		adaschedule(periph);
		cam_periph_release_locked(periph);
		return;
	}
	case ADA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case ADA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

static int
adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{

	return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
}

static void
adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct disk_params *dp = &softc->params;
	u_int64_t lbasize48;
	u_int32_t lbasize;

	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
		dp->heads = cgd->ident_data.current_heads;
		dp->secs_per_track = cgd->ident_data.current_sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
	} else {
		dp->heads = cgd->ident_data.heads;
		dp->secs_per_track = cgd->ident_data.sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
	}
	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);

	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
		dp->sectors = lbasize;

	/* use the 48bit LBA size if valid */
	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
	    lbasize48 > ATA_MAX_28BIT_LBA)
		dp->sectors = lbasize48;
}

static void
adasendorderedtag(void *arg)
{
	struct ada_softc *softc = arg;

	if (ada_send_ordered) {
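		/*
		 * If the device stayed busy through the whole interval and
		 * no ordered tag was issued, flag the next request to go out
		 * as an ordered tag so older simple-tagged commands cannot
		 * be starved.
		 */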
		if ((softc->ordered_tag_count == 0)
		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
			softc->flags |= ADA_FLAG_NEED_OTAG;
		}
		if (softc->outstanding_cmds > 0)
			softc->flags &= ~ADA_FLAG_WENT_IDLE;

		softc->ordered_tag_count = 0;
	}
	/* Queue us up again */
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);
}

/*
 * Step through all ADA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
adaflush(void)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
		union ccb ccb;

		/* If we panicked with the lock held, do not recurse here. */
		if (cam_periph_owned(periph))
			continue;
		cam_periph_lock(periph);
		softc = (struct ada_softc *)periph->softc;
		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it..
		 */
		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
			cam_periph_unlock(periph);
			continue;
		}

		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		cam_periph_unlock(periph);
	}
}

static void
adaspindown(uint8_t cmd, int flags)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

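	/*
	 * cmd is ATA_STANDBY_IMMEDIATE on shutdown or ATA_SLEEP on suspend;
	 * flags may include CAM_DEV_QFREEZE so the device queue stays
	 * frozen until adaresume() releases it.
	 */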
	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
		union ccb ccb;

		/* If we panicked with the lock held, do not recurse here. */
		if (cam_periph_owned(periph))
			continue;
		cam_periph_lock(periph);
		softc = (struct ada_softc *)periph->softc;
		/*
		 * We only spin-down the drive if it is capable of it..
		 */
		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
			cam_periph_unlock(periph);
			continue;
		}

		if (bootverbose)
			xpt_print(periph->path, "spin-down\n");

		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE | flags,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		ata_28bit_cmd(&ccb.ataio, cmd, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Spin-down disk failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		cam_periph_unlock(periph);
	}
}

static void
adashutdown(void *arg, int howto)
{

	adaflush();
	if (ada_spindown_shutdown != 0 &&
	    (howto & (RB_HALT | RB_POWEROFF)) != 0)
		adaspindown(ATA_STANDBY_IMMEDIATE, 0);
}

static void
adasuspend(void *arg)
{

	adaflush();
	if (ada_spindown_suspend != 0)
		adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE);
}

static void
adaresume(void *arg)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	if (ada_spindown_suspend == 0)
		return;

	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
		cam_periph_lock(periph);
		softc = (struct ada_softc *)periph->softc;
		/*
		 * Only drives capable of power management were put to sleep
		 * by adasuspend(), so only those need a release here.
		 */
		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
			cam_periph_unlock(periph);
			continue;
		}

		if (bootverbose)
			xpt_print(periph->path, "resume\n");

		/*
		 * Drop freeze taken due to CAM_DEV_QFREEZE flag set on
		 * sleep request.
		 */
		cam_release_devq(periph->path,
			 /*relsim_flags*/0,
			 /*openings*/0,
			 /*timeout*/0,
			 /*getcount_only*/0);

		cam_periph_unlock(periph);
	}
}

#endif /* _KERNEL */
