1/*-
2 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer,
10 *    without modification, immediately at the beginning of the file.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/cam/ata/ata_da.c 250033 2013-04-28 21:14:23Z smh $");
29
30#include "opt_ada.h"
31
32#include <sys/param.h>
33
34#ifdef _KERNEL
35#include <sys/systm.h>
36#include <sys/kernel.h>
37#include <sys/bio.h>
38#include <sys/sysctl.h>
39#include <sys/taskqueue.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/conf.h>
43#include <sys/devicestat.h>
44#include <sys/eventhandler.h>
45#include <sys/malloc.h>
46#include <sys/cons.h>
47#include <sys/reboot.h>
48#include <geom/geom_disk.h>
49#endif /* _KERNEL */
50
51#ifndef _KERNEL
52#include <stdio.h>
53#include <string.h>
54#endif /* _KERNEL */
55
56#include <cam/cam.h>
57#include <cam/cam_ccb.h>
58#include <cam/cam_periph.h>
59#include <cam/cam_xpt_periph.h>
60#include <cam/cam_sim.h>
61
62#include <cam/ata/ata_all.h>
63
64#include <machine/md_var.h>	/* geometry translation */
65
66#ifdef _KERNEL
67
68#define ATA_MAX_28BIT_LBA               268435455UL
69
70typedef enum {
71	ADA_STATE_RAHEAD,
72	ADA_STATE_WCACHE,
73	ADA_STATE_NORMAL
74} ada_state;
75
76typedef enum {
77	ADA_FLAG_CAN_48BIT	= 0x0002,
78	ADA_FLAG_CAN_FLUSHCACHE	= 0x0004,
79	ADA_FLAG_CAN_NCQ	= 0x0008,
80	ADA_FLAG_CAN_DMA	= 0x0010,
81	ADA_FLAG_NEED_OTAG	= 0x0020,
82	ADA_FLAG_WENT_IDLE	= 0x0040,
83	ADA_FLAG_CAN_TRIM	= 0x0080,
84	ADA_FLAG_OPEN		= 0x0100,
85	ADA_FLAG_SCTX_INIT	= 0x0200,
86	ADA_FLAG_CAN_CFA        = 0x0400,
87	ADA_FLAG_CAN_POWERMGT   = 0x0800,
88	ADA_FLAG_CAN_DMA48	= 0x1000
89} ada_flags;
90
91typedef enum {
92	ADA_Q_NONE		= 0x00,
93	ADA_Q_4K		= 0x01,
94} ada_quirks;
95
96typedef enum {
97	ADA_CCB_RAHEAD		= 0x01,
98	ADA_CCB_WCACHE		= 0x02,
99	ADA_CCB_BUFFER_IO	= 0x03,
100	ADA_CCB_WAITING		= 0x04,
101	ADA_CCB_DUMP		= 0x05,
102	ADA_CCB_TRIM		= 0x06,
103	ADA_CCB_TYPE_MASK	= 0x0F,
104} ada_ccb_state;
105
106/* Offsets into our private area for storing information */
107#define ccb_state	ppriv_field0
108#define ccb_bp		ppriv_ptr1
109
110struct disk_params {
111	u_int8_t  heads;
112	u_int8_t  secs_per_track;
113	u_int32_t cylinders;
114	u_int32_t secsize;	/* Number of bytes/logical sector */
115	u_int64_t sectors;	/* Total number sectors */
116};
117
118#define TRIM_MAX_BLOCKS	8
119#define TRIM_MAX_RANGES	(TRIM_MAX_BLOCKS * ATA_DSM_BLK_RANGES)
120#define TRIM_MAX_BIOS	(TRIM_MAX_RANGES * 4)
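/*
 * Each TRIM range entry is ATA_DSM_RANGE_SIZE (8) bytes: a 48-bit starting
 * LBA followed by a 16-bit sector count (see how adastart() packs them), and
 * ATA_DSM_BLK_RANGES entries fit in each 512-byte DSM block, so data[] below
 * holds TRIM_MAX_BLOCKS blocks worth of ranges.
 */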
121struct trim_request {
122	uint8_t		data[TRIM_MAX_RANGES * ATA_DSM_RANGE_SIZE];
123	struct bio	*bps[TRIM_MAX_BIOS];
124};
125
126struct ada_softc {
127	struct	 bio_queue_head bio_queue;
128	struct	 bio_queue_head trim_queue;
129	ada_state state;
130	ada_flags flags;
131	ada_quirks quirks;
132	int	 sort_io_queue;
133	int	 ordered_tag_count;
134	int	 outstanding_cmds;
135	int	 trim_max_ranges;
136	int	 trim_running;
137	int	 read_ahead;
138	int	 write_cache;
139#ifdef ADA_TEST_FAILURE
140	int      force_read_error;
141	int      force_write_error;
142	int      periodic_read_error;
143	int      periodic_read_count;
144#endif
145	struct	 disk_params params;
146	struct	 disk *disk;
147	struct task		sysctl_task;
148	struct sysctl_ctx_list	sysctl_ctx;
149	struct sysctl_oid	*sysctl_tree;
150	struct callout		sendordered_c;
151	struct trim_request	trim_req;
152};
153
154struct ada_quirk_entry {
155	struct scsi_inquiry_pattern inq_pat;
156	ada_quirks quirks;
157};
158
159static struct ada_quirk_entry ada_quirk_table[] =
160{
161	{
162		/* Hitachi Advanced Format (4k) drives */
163		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Hitachi H??????????E3*", "*" },
164		/*quirks*/ADA_Q_4K
165	},
166	{
167		/* Samsung Advanced Format (4k) drives */
168		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD155UI*", "*" },
169		/*quirks*/ADA_Q_4K
170	},
171	{
172		/* Samsung Advanced Format (4k) drives */
173		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SAMSUNG HD204UI*", "*" },
174		/*quirks*/ADA_Q_4K
175	},
176	{
177		/* Seagate Barracuda Green Advanced Format (4k) drives */
178		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DL*", "*" },
179		/*quirks*/ADA_Q_4K
180	},
181	{
182		/* Seagate Barracuda Advanced Format (4k) drives */
183		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???DM*", "*" },
184		/*quirks*/ADA_Q_4K
185	},
186	{
187		/* Seagate Barracuda Advanced Format (4k) drives */
188		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST????DM*", "*" },
189		/*quirks*/ADA_Q_4K
190	},
191	{
192		/* Seagate Momentus Advanced Format (4k) drives */
193		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500423AS*", "*" },
194		/*quirks*/ADA_Q_4K
195	},
196	{
197		/* Seagate Momentus Advanced Format (4k) drives */
198		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9500424AS*", "*" },
199		/*quirks*/ADA_Q_4K
200	},
201	{
202		/* Seagate Momentus Advanced Format (4k) drives */
203		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640423AS*", "*" },
204		/*quirks*/ADA_Q_4K
205	},
206	{
207		/* Seagate Momentus Advanced Format (4k) drives */
208		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9640424AS*", "*" },
209		/*quirks*/ADA_Q_4K
210	},
211	{
212		/* Seagate Momentus Advanced Format (4k) drives */
213		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750420AS*", "*" },
214		/*quirks*/ADA_Q_4K
215	},
216	{
217		/* Seagate Momentus Advanced Format (4k) drives */
218		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750422AS*", "*" },
219		/*quirks*/ADA_Q_4K
220	},
221	{
222		/* Seagate Momentus Advanced Format (4k) drives */
223		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST9750423AS*", "*" },
224		/*quirks*/ADA_Q_4K
225	},
226	{
227		/* Seagate Momentus Thin Advanced Format (4k) drives */
228		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "ST???LT*", "*" },
229		/*quirks*/ADA_Q_4K
230	},
231	{
232		/* WDC Caviar Green Advanced Format (4k) drives */
233		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RS*", "*" },
234		/*quirks*/ADA_Q_4K
235	},
236	{
237		/* WDC Caviar Green Advanced Format (4k) drives */
238		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD????RX*", "*" },
239		/*quirks*/ADA_Q_4K
240	},
241	{
242		/* WDC Caviar Green Advanced Format (4k) drives */
243		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RS*", "*" },
244		/*quirks*/ADA_Q_4K
245	},
246	{
247		/* WDC Caviar Green Advanced Format (4k) drives */
248		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD??????RX*", "*" },
249		/*quirks*/ADA_Q_4K
250	},
251	{
252		/* WDC Scorpio Black Advanced Format (4k) drives */
253		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PKT*", "*" },
254		/*quirks*/ADA_Q_4K
255	},
256	{
257		/* WDC Scorpio Black Advanced Format (4k) drives */
258		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PKT*", "*" },
259		/*quirks*/ADA_Q_4K
260	},
261	{
262		/* WDC Scorpio Blue Advanced Format (4k) drives */
263		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD???PVT*", "*" },
264		/*quirks*/ADA_Q_4K
265	},
266	{
267		/* WDC Scorpio Blue Advanced Format (4k) drives */
268		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "WDC WD?????PVT*", "*" },
269		/*quirks*/ADA_Q_4K
270	},
271	{
272		/*
273		 * Corsair Force 2 SSDs
274		 * 4k optimised & trim only works in 4k requests + 4k aligned
275		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
276		 * PR: 169974
277		 */
278		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair CSSD-F*", "*" },
279		/*quirks*/ADA_Q_4K
280	},
281	{
282		/*
283		 * Corsair Force 3 SSDs
284		 * 4k optimised & trim only works in 4k requests + 4k aligned
285		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
286		 * PR: 169974
287		 */
288		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "Corsair Force 3*", "*" },
289		/*quirks*/ADA_Q_4K
290	},
291	{
292		/*
293		 * OCZ Agility 3 SSDs
294		 * 4k optimised & trim only works in 4k requests + 4k aligned
295		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
296		 * PR: 169974
297		 */
298		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-AGILITY3*", "*" },
299		/*quirks*/ADA_Q_4K
300	},
301	{
302		/*
303		 * OCZ Vertex 2 SSDs (inc pro series)
304		 * 4k optimised & trim only works in 4k requests + 4k aligned
305		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
306		 * PR: 169974
307		 */
308		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ?VERTEX2*", "*" },
309		/*quirks*/ADA_Q_4K
310	},
311	{
312		/*
313		 * OCZ Vertex 3 SSDs
314		 * 4k optimised & trim only works in 4k requests + 4k aligned
315		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
316		 * PR: 169974
317		 */
318		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "OCZ-VERTEX3*", "*" },
319		/*quirks*/ADA_Q_4K
320	},
321	{
322		/*
323		 * SuperTalent TeraDrive CT SSDs
324		 * 4k optimised & trim only works in 4k requests + 4k aligned
325		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
326		 * PR: 169974
327		 */
328		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "FTM??CT25H*", "*" },
329		/*quirks*/ADA_Q_4K
330	},
331	{
332		/*
333		 * Crucial RealSSD C300 SSDs
334		 * 4k optimised
335		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
336		 * PR: 169974
337		 */
338		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "C300-CTFDDAC???MAG*",
339		"*" }, /*quirks*/ADA_Q_4K
340	},
341	{
342		/*
343		 * XceedIOPS SATA SSDs
344		 * 4k optimised
345		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
346		 * PR: 169974
347		 */
348		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "SG9XCS2D*", "*" },
349		/*quirks*/ADA_Q_4K
350	},
351	{
352		/*
353		 * Intel 330 Series SSDs
354		 * 4k optimised & trim only works in 4k requests + 4k aligned
355		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
356		 * PR: 169974
357		 */
358		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "INTEL SSDSC2ct*", "*" },
359		/*quirks*/ADA_Q_4K
360	},
361	{
362		/*
363		 * OCZ Deneva R Series SSDs
364		 * 4k optimised & trim only works in 4k requests + 4k aligned
365		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
366		 * PR: 169974
367		 */
368		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "DENRSTE251M45*", "*" },
369		/*quirks*/ADA_Q_4K
370	},
371	{
372		/*
373		 * Kingston HyperX 3k SSDs
374		 * 4k optimised & trim only works in 4k requests + 4k aligned
375		 * Submitted by: Steven Hartland <steven.hartland@multiplay.co.uk>
376		 * PR: 169974
377		 */
378		{ T_DIRECT, SIP_MEDIA_FIXED, "*", "KINGSTON SH103S3*", "*" },
379		/*quirks*/ADA_Q_4K
380	},
381	{
382		/* Default */
383		{
384		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
385		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
386		},
387		/*quirks*/0
388	},
389};
390
391static	disk_strategy_t	adastrategy;
392static	dumper_t	adadump;
393static	periph_init_t	adainit;
394static	void		adaasync(void *callback_arg, u_int32_t code,
395				struct cam_path *path, void *arg);
396static	void		adasysctlinit(void *context, int pending);
397static	periph_ctor_t	adaregister;
398static	periph_dtor_t	adacleanup;
399static	periph_start_t	adastart;
400static	periph_oninv_t	adaoninvalidate;
401static	void		adadone(struct cam_periph *periph,
402			       union ccb *done_ccb);
403static  int		adaerror(union ccb *ccb, u_int32_t cam_flags,
404				u_int32_t sense_flags);
405static void		adagetparams(struct cam_periph *periph,
406				struct ccb_getdev *cgd);
407static timeout_t	adasendorderedtag;
408static void		adashutdown(void *arg, int howto);
409static void		adasuspend(void *arg);
410static void		adaresume(void *arg);
411
412#ifndef	ADA_DEFAULT_LEGACY_ALIASES
413#define	ADA_DEFAULT_LEGACY_ALIASES	1
414#endif
415
416#ifndef ADA_DEFAULT_TIMEOUT
417#define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
418#endif
419
420#ifndef	ADA_DEFAULT_RETRY
421#define	ADA_DEFAULT_RETRY	4
422#endif
423
424#ifndef	ADA_DEFAULT_SEND_ORDERED
425#define	ADA_DEFAULT_SEND_ORDERED	1
426#endif
427
428#ifndef	ADA_DEFAULT_SPINDOWN_SHUTDOWN
429#define	ADA_DEFAULT_SPINDOWN_SHUTDOWN	1
430#endif
431
432#ifndef	ADA_DEFAULT_SPINDOWN_SUSPEND
433#define	ADA_DEFAULT_SPINDOWN_SUSPEND	1
434#endif
435
436#ifndef	ADA_DEFAULT_READ_AHEAD
437#define	ADA_DEFAULT_READ_AHEAD	1
438#endif
439
440#ifndef	ADA_DEFAULT_WRITE_CACHE
441#define	ADA_DEFAULT_WRITE_CACHE	1
442#endif
443
444#define	ADA_RA	(softc->read_ahead >= 0 ? \
445		 softc->read_ahead : ada_read_ahead)
446#define	ADA_WC	(softc->write_cache >= 0 ? \
447		 softc->write_cache : ada_write_cache)
448#define	ADA_SIO	(softc->sort_io_queue >= 0 ? \
449		 softc->sort_io_queue : cam_sort_io_queues)
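/*
 * The per-device read_ahead, write_cache and sort_io_queue softc fields are
 * tri-state: a negative value falls back to the corresponding global default
 * (ada_read_ahead, ada_write_cache or cam_sort_io_queues), while 0/1 force
 * the feature off/on for that device.
 */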
450
451/*
452 * Most platforms map firmware geometry to the actual device geometry, but
453 * some don't.  If not overridden, default to doing nothing.
454 */
455#ifndef ata_disk_firmware_geom_adjust
456#define	ata_disk_firmware_geom_adjust(disk)
457#endif
458
459static int ada_legacy_aliases = ADA_DEFAULT_LEGACY_ALIASES;
460static int ada_retry_count = ADA_DEFAULT_RETRY;
461static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
462static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;
463static int ada_spindown_shutdown = ADA_DEFAULT_SPINDOWN_SHUTDOWN;
464static int ada_spindown_suspend = ADA_DEFAULT_SPINDOWN_SUSPEND;
465static int ada_read_ahead = ADA_DEFAULT_READ_AHEAD;
466static int ada_write_cache = ADA_DEFAULT_WRITE_CACHE;
467
468static SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
469            "CAM Direct Access Disk driver");
470SYSCTL_INT(_kern_cam_ada, OID_AUTO, legacy_aliases, CTLFLAG_RW,
471           &ada_legacy_aliases, 0, "Create legacy-like device aliases");
472TUNABLE_INT("kern.cam.ada.legacy_aliases", &ada_legacy_aliases);
473SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
474           &ada_retry_count, 0, "Normal I/O retry count");
475TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
476SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
477           &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
478TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
479SYSCTL_INT(_kern_cam_ada, OID_AUTO, send_ordered, CTLFLAG_RW,
480           &ada_send_ordered, 0, "Send Ordered Tags");
481TUNABLE_INT("kern.cam.ada.send_ordered", &ada_send_ordered);
482SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_shutdown, CTLFLAG_RW,
483           &ada_spindown_shutdown, 0, "Spin down upon shutdown");
484TUNABLE_INT("kern.cam.ada.spindown_shutdown", &ada_spindown_shutdown);
485SYSCTL_INT(_kern_cam_ada, OID_AUTO, spindown_suspend, CTLFLAG_RW,
486           &ada_spindown_suspend, 0, "Spin down upon suspend");
487TUNABLE_INT("kern.cam.ada.spindown_suspend", &ada_spindown_suspend);
488SYSCTL_INT(_kern_cam_ada, OID_AUTO, read_ahead, CTLFLAG_RW,
489           &ada_read_ahead, 0, "Enable disk read-ahead");
490TUNABLE_INT("kern.cam.ada.read_ahead", &ada_read_ahead);
491SYSCTL_INT(_kern_cam_ada, OID_AUTO, write_cache, CTLFLAG_RW,
492           &ada_write_cache, 0, "Enable disk write cache");
493TUNABLE_INT("kern.cam.ada.write_cache", &ada_write_cache);
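/*
 * Illustrative example: these knobs may be set as loader tunables or changed
 * at runtime with sysctl(8), e.g. in /boot/loader.conf:
 *
 *	kern.cam.ada.write_cache="0"	# disable write caching globally
 *	kern.cam.ada.0.write_cache="1"	# but keep it enabled on ada0
 *
 * The per-unit variants are fetched in adaregister() below.
 */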
494
495/*
496 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
497 * to the default timeout, we check to see whether an ordered
498 * tagged transaction is appropriate to prevent simple tag
499 * starvation.  Since we'd like to ensure that there is at least
500 * 1/2 of the timeout length left for a starved transaction to
501 * complete after we've sent an ordered tag, we must poll at least
502 * four times in every timeout period.  This takes care of the worst
503 * case where a starved transaction starts during an interval that
504 * passes the "don't send an ordered tag" test, so it takes
505 * us two intervals to determine that a tag must be sent.
506 */
507#ifndef ADA_ORDEREDTAG_INTERVAL
508#define ADA_ORDEREDTAG_INTERVAL 4
509#endif
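/*
 * For example, with the default 30 second timeout and an interval of 4,
 * adasendorderedtag() reschedules its callout every (30 * hz) / 4 ticks,
 * i.e. roughly every 7.5 seconds.
 */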
510
511static struct periph_driver adadriver =
512{
513	adainit, "ada",
514	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
515};
516
517PERIPHDRIVER_DECLARE(ada, adadriver);
518
519static MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");
520
521static int
522adaopen(struct disk *dp)
523{
524	struct cam_periph *periph;
525	struct ada_softc *softc;
526	int error;
527
528	periph = (struct cam_periph *)dp->d_drv1;
529	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
530		return(ENXIO);
531	}
532
533	cam_periph_lock(periph);
534	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
535		cam_periph_unlock(periph);
536		cam_periph_release(periph);
537		return (error);
538	}
539
540	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
541	    ("adaopen\n"));
542
543	softc = (struct ada_softc *)periph->softc;
544	softc->flags |= ADA_FLAG_OPEN;
545
546	cam_periph_unhold(periph);
547	cam_periph_unlock(periph);
548	return (0);
549}
550
551static int
552adaclose(struct disk *dp)
553{
554	struct	cam_periph *periph;
555	struct	ada_softc *softc;
556	union ccb *ccb;
557
558	periph = (struct cam_periph *)dp->d_drv1;
559	cam_periph_lock(periph);
560	if (cam_periph_hold(periph, PRIBIO) != 0) {
561		cam_periph_unlock(periph);
562		cam_periph_release(periph);
563		return (0);
564	}
565
566	softc = (struct ada_softc *)periph->softc;
567
568	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE | CAM_DEBUG_PERIPH,
569	    ("adaclose\n"));
570
571	/* We only sync the cache if the drive is capable of it. */
572	if ((softc->flags & ADA_FLAG_CAN_FLUSHCACHE) != 0 &&
573	    (periph->flags & CAM_PERIPH_INVALID) == 0) {
574
575		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
576		cam_fill_ataio(&ccb->ataio,
577				    1,
578				    adadone,
579				    CAM_DIR_NONE,
580				    0,
581				    NULL,
582				    0,
583				    ada_default_timeout*1000);
584
585		if (softc->flags & ADA_FLAG_CAN_48BIT)
586			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
587		else
588			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
589		cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
590		    /*sense_flags*/0, softc->disk->d_devstat);
591
592		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
593			xpt_print(periph->path, "Synchronize cache failed\n");
594		xpt_release_ccb(ccb);
595	}
596
597	softc->flags &= ~ADA_FLAG_OPEN;
598	cam_periph_unhold(periph);
599	cam_periph_unlock(periph);
600	cam_periph_release(periph);
601	return (0);
602}
603
604static void
605adaschedule(struct cam_periph *periph)
606{
607	struct ada_softc *softc = (struct ada_softc *)periph->softc;
608	uint32_t prio;
609
610	if (softc->state != ADA_STATE_NORMAL)
611		return;
612
613	/* Check if cam_periph_getccb() was called. */
614	prio = periph->immediate_priority;
615
616	/* Check if we have more work to do. */
617	if (bioq_first(&softc->bio_queue) ||
618	    (!softc->trim_running && bioq_first(&softc->trim_queue))) {
619		prio = CAM_PRIORITY_NORMAL;
620	}
621
622	/* Schedule CCB if any of above is true. */
623	if (prio != CAM_PRIORITY_NONE)
624		xpt_schedule(periph, prio);
625}
626
627/*
628 * Actually translate the requested transfer into one the physical driver
629 * can understand.  The transfer is described by a bio and will include
630 * only one physical transfer.
631 */
632static void
633adastrategy(struct bio *bp)
634{
635	struct cam_periph *periph;
636	struct ada_softc *softc;
637
638	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
639	softc = (struct ada_softc *)periph->softc;
640
641	cam_periph_lock(periph);
642
643	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastrategy(%p)\n", bp));
644
645	/*
646	 * If the device has been made invalid, error out
647	 */
648	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
649		cam_periph_unlock(periph);
650		biofinish(bp, NULL, ENXIO);
651		return;
652	}
653
654	/*
655	 * Place it in the queue of disk activities for this disk
656	 */
657	if (bp->bio_cmd == BIO_DELETE &&
658	    (softc->flags & ADA_FLAG_CAN_TRIM)) {
659		if (ADA_SIO)
660		    bioq_disksort(&softc->trim_queue, bp);
661		else
662		    bioq_insert_tail(&softc->trim_queue, bp);
663	} else {
664		if (ADA_SIO)
665		    bioq_disksort(&softc->bio_queue, bp);
666		else
667		    bioq_insert_tail(&softc->bio_queue, bp);
668	}
669
670	/*
671	 * Schedule ourselves for performing the work.
672	 */
673	adaschedule(periph);
674	cam_periph_unlock(periph);
675
676	return;
677}
678
679static int
680adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
681{
682	struct	    cam_periph *periph;
683	struct	    ada_softc *softc;
684	u_int	    secsize;
685	union	    ccb ccb;
686	struct	    disk *dp;
687	uint64_t    lba;
688	uint16_t    count;
689	int	    error = 0;
690
691	dp = arg;
692	periph = dp->d_drv1;
693	softc = (struct ada_softc *)periph->softc;
694	cam_periph_lock(periph);
695	secsize = softc->params.secsize;
696	lba = offset / secsize;
697	count = length / secsize;
698
699	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
700		cam_periph_unlock(periph);
701		return (ENXIO);
702	}
703
704	if (length > 0) {
705		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
706		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
707		cam_fill_ataio(&ccb.ataio,
708		    0,
709		    adadone,
710		    CAM_DIR_OUT,
711		    0,
712		    (u_int8_t *) virtual,
713		    length,
714		    ada_default_timeout*1000);
715		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
716		    (lba + count >= ATA_MAX_28BIT_LBA ||
717		    count >= 256)) {
718			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
719			    0, lba, count);
720		} else {
721			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
722			    0, lba, count);
723		}
724		xpt_polled_action(&ccb);
725
726		error = cam_periph_error(&ccb,
727		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
728		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
729			cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0,
730			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
731		if (error != 0)
732			printf("Aborting dump due to I/O error.\n");
733
734		cam_periph_unlock(periph);
735		return (error);
736	}
737
738	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
739		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
740
741		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
742		cam_fill_ataio(&ccb.ataio,
743				    0,
744				    adadone,
745				    CAM_DIR_NONE,
746				    0,
747				    NULL,
748				    0,
749				    ada_default_timeout*1000);
750
751		if (softc->flags & ADA_FLAG_CAN_48BIT)
752			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
753		else
754			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
755		xpt_polled_action(&ccb);
756
757		error = cam_periph_error(&ccb,
758		    0, SF_NO_RECOVERY | SF_NO_RETRY, NULL);
759		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
760			cam_release_devq(ccb.ccb_h.path, /*relsim_flags*/0,
761			    /*reduction*/0, /*timeout*/0, /*getcount_only*/0);
762		if (error != 0)
763			xpt_print(periph->path, "Synchronize cache failed\n");
764	}
765	cam_periph_unlock(periph);
766	return (error);
767}
768
769static void
770adainit(void)
771{
772	cam_status status;
773
774	/*
775	 * Install a global async callback.  This callback will
776	 * receive async callbacks like "new device found".
777	 */
778	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);
779
780	if (status != CAM_REQ_CMP) {
781		printf("ada: Failed to attach master async callback "
782		       "due to status 0x%x!\n", status);
783	} else if (ada_send_ordered) {
784
785		/* Register our event handlers */
786		if ((EVENTHANDLER_REGISTER(power_suspend, adasuspend,
787					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
788		    printf("adainit: power event registration failed!\n");
789		if ((EVENTHANDLER_REGISTER(power_resume, adaresume,
790					   NULL, EVENTHANDLER_PRI_LAST)) == NULL)
791		    printf("adainit: power event registration failed!\n");
792		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
793					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
794		    printf("adainit: shutdown event registration failed!\n");
795	}
796}
797
798/*
799 * Callback from GEOM, called when it has finished cleaning up its
800 * resources.
801 */
802static void
803adadiskgonecb(struct disk *dp)
804{
805	struct cam_periph *periph;
806
807	periph = (struct cam_periph *)dp->d_drv1;
808
809	cam_periph_release(periph);
810}
811
812static void
813adaoninvalidate(struct cam_periph *periph)
814{
815	struct ada_softc *softc;
816
817	softc = (struct ada_softc *)periph->softc;
818
819	/*
820	 * De-register any async callbacks.
821	 */
822	xpt_register_async(0, adaasync, periph, periph->path);
823
824	/*
825	 * Return all queued I/O with ENXIO.
826	 * XXX Handle any transactions queued to the card
827	 *     with XPT_ABORT_CCB.
828	 */
829	bioq_flush(&softc->bio_queue, NULL, ENXIO);
830	bioq_flush(&softc->trim_queue, NULL, ENXIO);
831
832	disk_gone(softc->disk);
833	xpt_print(periph->path, "lost device\n");
834}
835
836static void
837adacleanup(struct cam_periph *periph)
838{
839	struct ada_softc *softc;
840
841	softc = (struct ada_softc *)periph->softc;
842
843	xpt_print(periph->path, "removing device entry\n");
844	cam_periph_unlock(periph);
845
846	/*
847	 * If we can't free the sysctl tree, oh well...
848	 */
849	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
850	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
851		xpt_print(periph->path, "can't remove sysctl context\n");
852	}
853
854	disk_destroy(softc->disk);
855	callout_drain(&softc->sendordered_c);
856	free(softc, M_DEVBUF);
857	cam_periph_lock(periph);
858}
859
860static void
861adaasync(void *callback_arg, u_int32_t code,
862	struct cam_path *path, void *arg)
863{
864	struct ccb_getdev cgd;
865	struct cam_periph *periph;
866	struct ada_softc *softc;
867
868	periph = (struct cam_periph *)callback_arg;
869	switch (code) {
870	case AC_FOUND_DEVICE:
871	{
872		struct ccb_getdev *cgd;
873		cam_status status;
874
875		cgd = (struct ccb_getdev *)arg;
876		if (cgd == NULL)
877			break;
878
879		if (cgd->protocol != PROTO_ATA)
880			break;
881
882		/*
883		 * Allocate a peripheral instance for
884		 * this device and start the probe
885		 * process.
886		 */
887		status = cam_periph_alloc(adaregister, adaoninvalidate,
888					  adacleanup, adastart,
889					  "ada", CAM_PERIPH_BIO,
890					  cgd->ccb_h.path, adaasync,
891					  AC_FOUND_DEVICE, cgd);
892
893		if (status != CAM_REQ_CMP
894		 && status != CAM_REQ_INPROG)
895			printf("adaasync: Unable to attach to new device "
896				"due to status 0x%x\n", status);
897		break;
898	}
899	case AC_GETDEV_CHANGED:
900	{
901		softc = (struct ada_softc *)periph->softc;
902		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
903		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
904		xpt_action((union ccb *)&cgd);
905
906		if ((cgd.ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
907		    (cgd.inq_flags & SID_DMA))
908			softc->flags |= ADA_FLAG_CAN_DMA;
909		else
910			softc->flags &= ~ADA_FLAG_CAN_DMA;
911		if (cgd.ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
912			softc->flags |= ADA_FLAG_CAN_48BIT;
913			if (cgd.inq_flags & SID_DMA48)
914				softc->flags |= ADA_FLAG_CAN_DMA48;
915			else
916				softc->flags &= ~ADA_FLAG_CAN_DMA48;
917		} else
918			softc->flags &= ~(ADA_FLAG_CAN_48BIT |
919			    ADA_FLAG_CAN_DMA48);
920		if ((cgd.ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
921		    (cgd.inq_flags & SID_DMA) && (cgd.inq_flags & SID_CmdQue))
922			softc->flags |= ADA_FLAG_CAN_NCQ;
923		else
924			softc->flags &= ~ADA_FLAG_CAN_NCQ;
925		if ((cgd.ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
926		    (cgd.inq_flags & SID_DMA))
927			softc->flags |= ADA_FLAG_CAN_TRIM;
928		else
929			softc->flags &= ~ADA_FLAG_CAN_TRIM;
930
931		cam_periph_async(periph, code, path, arg);
932		break;
933	}
934	case AC_ADVINFO_CHANGED:
935	{
936		uintptr_t buftype;
937
938		buftype = (uintptr_t)arg;
939		if (buftype == CDAI_TYPE_PHYS_PATH) {
940			struct ada_softc *softc;
941
942			softc = periph->softc;
943			disk_attr_changed(softc->disk, "GEOM::physpath",
944					  M_NOWAIT);
945		}
946		break;
947	}
948	case AC_SENT_BDR:
949	case AC_BUS_RESET:
950	{
951		softc = (struct ada_softc *)periph->softc;
952		cam_periph_async(periph, code, path, arg);
953		if (softc->state != ADA_STATE_NORMAL)
954			break;
955		xpt_setup_ccb(&cgd.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
956		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
957		xpt_action((union ccb *)&cgd);
958		if (ADA_RA >= 0 &&
959		    cgd.ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD)
960			softc->state = ADA_STATE_RAHEAD;
961		else if (ADA_WC >= 0 &&
962		    cgd.ident_data.support.command1 & ATA_SUPPORT_WRITECACHE)
963			softc->state = ADA_STATE_WCACHE;
964		else
965		    break;
966		cam_periph_acquire(periph);
967		xpt_schedule(periph, CAM_PRIORITY_DEV);
968	}
969	default:
970		cam_periph_async(periph, code, path, arg);
971		break;
972	}
973}
974
975static void
976adasysctlinit(void *context, int pending)
977{
978	struct cam_periph *periph;
979	struct ada_softc *softc;
980	char tmpstr[80], tmpstr2[80];
981
982	periph = (struct cam_periph *)context;
983
984	/* periph was held for us when this task was enqueued */
985	if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
986		cam_periph_release(periph);
987		return;
988	}
989
990	softc = (struct ada_softc *)periph->softc;
991	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
992	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);
993
994	sysctl_ctx_init(&softc->sysctl_ctx);
995	softc->flags |= ADA_FLAG_SCTX_INIT;
996	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
997		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
998		CTLFLAG_RD, 0, tmpstr);
999	if (softc->sysctl_tree == NULL) {
1000		printf("adasysctlinit: unable to allocate sysctl tree\n");
1001		cam_periph_release(periph);
1002		return;
1003	}
1004
1005	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1006		OID_AUTO, "read_ahead", CTLFLAG_RW | CTLFLAG_MPSAFE,
1007		&softc->read_ahead, 0, "Enable disk read ahead.");
1008	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1009		OID_AUTO, "write_cache", CTLFLAG_RW | CTLFLAG_MPSAFE,
1010		&softc->write_cache, 0, "Enable disk write cache.");
1011	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1012		OID_AUTO, "sort_io_queue", CTLFLAG_RW | CTLFLAG_MPSAFE,
1013		&softc->sort_io_queue, 0,
1014		"Sort IO queue to try and optimise disk access patterns");
1015#ifdef ADA_TEST_FAILURE
1016	/*
1017	 * Add a 'door bell' sysctl which allows one to set it from userland
1018	 * and cause something bad to happen.  For the moment, we only allow
1019	 * whacking the next read or write.
1020	 */
1021	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1022		OID_AUTO, "force_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1023		&softc->force_read_error, 0,
1024		"Force a read error for the next N reads.");
1025	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1026		OID_AUTO, "force_write_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1027		&softc->force_write_error, 0,
1028		"Force a write error for the next N writes.");
1029	SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree),
1030		OID_AUTO, "periodic_read_error", CTLFLAG_RW | CTLFLAG_MPSAFE,
1031		&softc->periodic_read_error, 0,
1032		"Force a read error every N reads (don't set too low).");
1033#endif
1034	cam_periph_release(periph);
1035}
1036
1037static int
1038adagetattr(struct bio *bp)
1039{
1040	int ret;
1041	struct cam_periph *periph;
1042
1043	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
1044	cam_periph_lock(periph);
1045	ret = xpt_getattr(bp->bio_data, bp->bio_length, bp->bio_attribute,
1046	    periph->path);
1047	cam_periph_unlock(periph);
1048	if (ret == 0)
1049		bp->bio_completed = bp->bio_length;
1050	return ret;
1051}
1052
1053static cam_status
1054adaregister(struct cam_periph *periph, void *arg)
1055{
1056	struct ada_softc *softc;
1057	struct ccb_pathinq cpi;
1058	struct ccb_getdev *cgd;
1059	char   announce_buf[80], buf1[32];
1060	struct disk_params *dp;
1061	caddr_t match;
1062	u_int maxio;
1063	int legacy_id, quirks;
1064
1065	cgd = (struct ccb_getdev *)arg;
1066	if (cgd == NULL) {
1067		printf("adaregister: no getdev CCB, can't register device\n");
1068		return(CAM_REQ_CMP_ERR);
1069	}
1070
1071	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
1072	    M_NOWAIT|M_ZERO);
1073
1074	if (softc == NULL) {
1075		printf("adaregister: Unable to probe new device. "
1076		    "Unable to allocate softc\n");
1077		return(CAM_REQ_CMP_ERR);
1078	}
1079
1080	bioq_init(&softc->bio_queue);
1081	bioq_init(&softc->trim_queue);
1082
1083	if ((cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA) &&
1084	    (cgd->inq_flags & SID_DMA))
1085		softc->flags |= ADA_FLAG_CAN_DMA;
1086	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) {
1087		softc->flags |= ADA_FLAG_CAN_48BIT;
1088		if (cgd->inq_flags & SID_DMA48)
1089			softc->flags |= ADA_FLAG_CAN_DMA48;
1090	}
1091	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
1092		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
1093	if (cgd->ident_data.support.command1 & ATA_SUPPORT_POWERMGT)
1094		softc->flags |= ADA_FLAG_CAN_POWERMGT;
1095	if ((cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ) &&
1096	    (cgd->inq_flags & SID_DMA) && (cgd->inq_flags & SID_CmdQue))
1097		softc->flags |= ADA_FLAG_CAN_NCQ;
1098	if ((cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) &&
1099	    (cgd->inq_flags & SID_DMA)) {
1100		softc->flags |= ADA_FLAG_CAN_TRIM;
1101		softc->trim_max_ranges = TRIM_MAX_RANGES;
1102		if (cgd->ident_data.max_dsm_blocks != 0) {
1103			softc->trim_max_ranges =
1104			    min(cgd->ident_data.max_dsm_blocks *
1105				ATA_DSM_BLK_RANGES, softc->trim_max_ranges);
1106		}
1107	}
1108	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
1109		softc->flags |= ADA_FLAG_CAN_CFA;
1110
1111	periph->softc = softc;
1112
1113	/*
1114	 * See if this device has any quirks.
1115	 */
1116	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
1117			       (caddr_t)ada_quirk_table,
1118			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
1119			       sizeof(*ada_quirk_table), ata_identify_match);
1120	if (match != NULL)
1121		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
1122	else
1123		softc->quirks = ADA_Q_NONE;
1124
1125	bzero(&cpi, sizeof(cpi));
1126	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
1127	cpi.ccb_h.func_code = XPT_PATH_INQ;
1128	xpt_action((union ccb *)&cpi);
1129
1130	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);
1131
1132	/*
1133	 * Register this media as a disk
1134	 */
1135	(void)cam_periph_hold(periph, PRIBIO);
1136	cam_periph_unlock(periph);
1137	snprintf(announce_buf, sizeof(announce_buf),
1138	    "kern.cam.ada.%d.quirks", periph->unit_number);
1139	quirks = softc->quirks;
1140	TUNABLE_INT_FETCH(announce_buf, &quirks);
1141	softc->quirks = quirks;
1142	softc->read_ahead = -1;
1143	snprintf(announce_buf, sizeof(announce_buf),
1144	    "kern.cam.ada.%d.read_ahead", periph->unit_number);
1145	TUNABLE_INT_FETCH(announce_buf, &softc->read_ahead);
1146	softc->write_cache = -1;
1147	snprintf(announce_buf, sizeof(announce_buf),
1148	    "kern.cam.ada.%d.write_cache", periph->unit_number);
1149	TUNABLE_INT_FETCH(announce_buf, &softc->write_cache);
1150	/* Disable queue sorting for non-rotational media by default. */
1151	if (cgd->ident_data.media_rotation_rate == 1)
1152		softc->sort_io_queue = 0;
1153	else
1154		softc->sort_io_queue = -1;
1155	adagetparams(periph, cgd);
1156	softc->disk = disk_alloc();
1157	softc->disk->d_devstat = devstat_new_entry(periph->periph_name,
1158			  periph->unit_number, softc->params.secsize,
1159			  DEVSTAT_ALL_SUPPORTED,
1160			  DEVSTAT_TYPE_DIRECT |
1161			  XPORT_DEVSTAT_TYPE(cpi.transport),
1162			  DEVSTAT_PRIORITY_DISK);
1163	softc->disk->d_open = adaopen;
1164	softc->disk->d_close = adaclose;
1165	softc->disk->d_strategy = adastrategy;
1166	softc->disk->d_getattr = adagetattr;
1167	softc->disk->d_dump = adadump;
1168	softc->disk->d_gone = adadiskgonecb;
1169	softc->disk->d_name = "ada";
1170	softc->disk->d_drv1 = periph;
1171	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
1172	if (maxio == 0)
1173		maxio = DFLTPHYS;	/* traditional default */
1174	else if (maxio > MAXPHYS)
1175		maxio = MAXPHYS;	/* for safety */
1176	if (softc->flags & ADA_FLAG_CAN_48BIT)
1177		maxio = min(maxio, 65536 * softc->params.secsize);
1178	else					/* 28bit ATA command limit */
1179		maxio = min(maxio, 256 * softc->params.secsize);
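	/*
	 * These caps reflect the width of the ATA sector count field:
	 * 16 bits (up to 65536 sectors) for 48-bit commands versus 8 bits
	 * (up to 256 sectors) for 28-bit commands.
	 */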
1180	softc->disk->d_maxsize = maxio;
1181	softc->disk->d_unit = periph->unit_number;
1182	softc->disk->d_flags = 0;
1183	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
1184		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
1185	if (softc->flags & ADA_FLAG_CAN_TRIM) {
1186		softc->disk->d_flags |= DISKFLAG_CANDELETE;
1187		softc->disk->d_delmaxsize = softc->params.secsize *
1188					    ATA_DSM_RANGE_MAX *
1189					    softc->trim_max_ranges;
1190	} else if ((softc->flags & ADA_FLAG_CAN_CFA) &&
1191	    !(softc->flags & ADA_FLAG_CAN_48BIT)) {
1192		softc->disk->d_flags |= DISKFLAG_CANDELETE;
1193		softc->disk->d_delmaxsize = 256 * softc->params.secsize;
1194	} else
1195		softc->disk->d_delmaxsize = maxio;
1196	if ((cpi.hba_misc & PIM_UNMAPPED) != 0)
1197		softc->disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
1198	strlcpy(softc->disk->d_descr, cgd->ident_data.model,
1199	    MIN(sizeof(softc->disk->d_descr), sizeof(cgd->ident_data.model)));
1200	strlcpy(softc->disk->d_ident, cgd->ident_data.serial,
1201	    MIN(sizeof(softc->disk->d_ident), sizeof(cgd->ident_data.serial)));
1202	softc->disk->d_hba_vendor = cpi.hba_vendor;
1203	softc->disk->d_hba_device = cpi.hba_device;
1204	softc->disk->d_hba_subvendor = cpi.hba_subvendor;
1205	softc->disk->d_hba_subdevice = cpi.hba_subdevice;
1206
1207	softc->disk->d_sectorsize = softc->params.secsize;
1208	softc->disk->d_mediasize = (off_t)softc->params.sectors *
1209	    softc->params.secsize;
1210	if (ata_physical_sector_size(&cgd->ident_data) !=
1211	    softc->params.secsize) {
1212		softc->disk->d_stripesize =
1213		    ata_physical_sector_size(&cgd->ident_data);
1214		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
1215		    ata_logical_sector_offset(&cgd->ident_data)) %
1216		    softc->disk->d_stripesize;
1217	} else if (softc->quirks & ADA_Q_4K) {
1218		softc->disk->d_stripesize = 4096;
1219		softc->disk->d_stripeoffset = 0;
1220	}
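	/*
	 * Exposing the physical sector size and offset as the stripe
	 * geometry lets GEOM and file systems align their I/O to physical
	 * sector boundaries on Advanced Format and similar drives.
	 */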
1221	softc->disk->d_fwsectors = softc->params.secs_per_track;
1222	softc->disk->d_fwheads = softc->params.heads;
1223	ata_disk_firmware_geom_adjust(softc->disk);
1224
1225	if (ada_legacy_aliases) {
1226#ifdef ATA_STATIC_ID
1227		legacy_id = xpt_path_legacy_ata_id(periph->path);
1228#else
1229		legacy_id = softc->disk->d_unit;
1230#endif
1231		if (legacy_id >= 0) {
1232			snprintf(announce_buf, sizeof(announce_buf),
1233			    "kern.devalias.%s%d",
1234			    softc->disk->d_name, softc->disk->d_unit);
1235			snprintf(buf1, sizeof(buf1),
1236			    "ad%d", legacy_id);
1237			setenv(announce_buf, buf1);
1238		}
1239	} else
1240		legacy_id = -1;
1241	/*
1242	 * Acquire a reference to the periph before we register with GEOM.
1243	 * We'll release this reference once GEOM calls us back (via
1244	 * adadiskgonecb()) telling us that our provider has been freed.
1245	 */
1246	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
1247		xpt_print(periph->path, "%s: lost periph during "
1248			  "registration!\n", __func__);
1249		cam_periph_lock(periph);
1250		return (CAM_REQ_CMP_ERR);
1251	}
1252	disk_create(softc->disk, DISK_VERSION);
1253	cam_periph_lock(periph);
1254	cam_periph_unhold(periph);
1255
1256	dp = &softc->params;
1257	snprintf(announce_buf, sizeof(announce_buf),
1258		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
1259		(uintmax_t)(((uintmax_t)dp->secsize *
1260		dp->sectors) / (1024*1024)),
1261		(uintmax_t)dp->sectors,
1262		dp->secsize, dp->heads,
1263		dp->secs_per_track, dp->cylinders);
1264	xpt_announce_periph(periph, announce_buf);
1265	if (legacy_id >= 0)
1266		printf("%s%d: Previously was known as ad%d\n",
1267		       periph->periph_name, periph->unit_number, legacy_id);
1268
1269	/*
1270	 * Create our sysctl variables, now that we know
1271	 * we have successfully attached.
1272	 */
1273	cam_periph_acquire(periph);
1274	taskqueue_enqueue(taskqueue_thread, &softc->sysctl_task);
1275
1276	/*
1277	 * Add async callbacks for bus reset and
1278	 * bus device reset calls.  I don't bother
1279	 * checking if this fails as, in most cases,
1280	 * the system will function just fine without
1281	 * them and the only alternative would be to
1282	 * not attach the device on failure.
1283	 */
1284	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE |
1285	    AC_GETDEV_CHANGED | AC_ADVINFO_CHANGED,
1286	    adaasync, periph, periph->path);
1287
1288	/*
1289	 * Schedule a periodic event to occasionally send an
1290	 * ordered tag to a device.
1291	 */
1292	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
1293	callout_reset(&softc->sendordered_c,
1294	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
1295	    adasendorderedtag, softc);
1296
1297	if (ADA_RA >= 0 &&
1298	    cgd->ident_data.support.command1 & ATA_SUPPORT_LOOKAHEAD) {
1299		softc->state = ADA_STATE_RAHEAD;
1300		cam_periph_acquire(periph);
1301		xpt_schedule(periph, CAM_PRIORITY_DEV);
1302	} else if (ADA_WC >= 0 &&
1303	    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
1304		softc->state = ADA_STATE_WCACHE;
1305		cam_periph_acquire(periph);
1306		xpt_schedule(periph, CAM_PRIORITY_DEV);
1307	} else
1308		softc->state = ADA_STATE_NORMAL;
1309
1310	return(CAM_REQ_CMP);
1311}
1312
1313static void
1314adastart(struct cam_periph *periph, union ccb *start_ccb)
1315{
1316	struct ada_softc *softc = (struct ada_softc *)periph->softc;
1317	struct ccb_ataio *ataio = &start_ccb->ataio;
1318
1319	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("adastart\n"));
1320
1321	switch (softc->state) {
1322	case ADA_STATE_NORMAL:
1323	{
1324		struct bio *bp;
1325		u_int8_t tag_code;
1326
1327		/* Execute immediate CCB if waiting. */
1328		if (periph->immediate_priority <= periph->pinfo.priority) {
1329			CAM_DEBUG(periph->path, CAM_DEBUG_SUBTRACE,
1330					("queuing for immediate ccb\n"));
1331			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
1332			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
1333					  periph_links.sle);
1334			periph->immediate_priority = CAM_PRIORITY_NONE;
1335			wakeup(&periph->ccb_list);
1336			/* Have more work to do, so ensure we stay scheduled */
1337			adaschedule(periph);
1338			break;
1339		}
1340		/* Run TRIM if not running yet. */
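		/*
		 * Queued BIO_DELETEs are coalesced into one DATA SET
		 * MANAGEMENT request: a bio that starts where the previous
		 * one ended extends the last range, otherwise new ranges are
		 * appended until the bio, range or block limits would be
		 * exceeded.
		 */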
1341		if (!softc->trim_running &&
1342		    (bp = bioq_first(&softc->trim_queue)) != 0) {
1343			struct trim_request *req = &softc->trim_req;
1344			struct bio *bp1;
1345			uint64_t lastlba = (uint64_t)-1;
1346			int bps = 0, c, lastcount = 0, off, ranges = 0;
1347
1348			softc->trim_running = 1;
1349			bzero(req, sizeof(*req));
1350			bp1 = bp;
1351			do {
1352				uint64_t lba = bp1->bio_pblkno;
1353				int count = bp1->bio_bcount /
1354				    softc->params.secsize;
1355
1356				bioq_remove(&softc->trim_queue, bp1);
1357
1358				/* Try to extend the previous range. */
1359				if (lba == lastlba) {
1360					c = min(count, ATA_DSM_RANGE_MAX - lastcount);
1361					lastcount += c;
1362					off = (ranges - 1) * ATA_DSM_RANGE_SIZE;
1363					req->data[off + 6] = lastcount & 0xff;
1364					req->data[off + 7] =
1365					    (lastcount >> 8) & 0xff;
1366					count -= c;
1367					lba += c;
1368				}
1369
1370				while (count > 0) {
1371					c = min(count, ATA_DSM_RANGE_MAX);
1372					off = ranges * ATA_DSM_RANGE_SIZE;
1373					req->data[off + 0] = lba & 0xff;
1374					req->data[off + 1] = (lba >> 8) & 0xff;
1375					req->data[off + 2] = (lba >> 16) & 0xff;
1376					req->data[off + 3] = (lba >> 24) & 0xff;
1377					req->data[off + 4] = (lba >> 32) & 0xff;
1378					req->data[off + 5] = (lba >> 40) & 0xff;
1379					req->data[off + 6] = c & 0xff;
1380					req->data[off + 7] = (c >> 8) & 0xff;
1381					lba += c;
1382					count -= c;
1383					lastcount = c;
1384					ranges++;
1385					/*
1386					 * It's the caller's responsibility to ensure the
1387					 * request will fit, so we don't need to check for
1388					 * overrun here.
1389					 */
1390				}
1391				lastlba = lba;
1392				req->bps[bps++] = bp1;
1393				bp1 = bioq_first(&softc->trim_queue);
1394				if (bps >= TRIM_MAX_BIOS ||
1395				    bp1 == NULL ||
1396				    bp1->bio_bcount / softc->params.secsize >
1397				    (softc->trim_max_ranges - ranges) *
1398				    ATA_DSM_RANGE_MAX)
1399					break;
1400			} while (1);
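			/*
			 * Both the transfer length and the sector count of
			 * the DATA SET MANAGEMENT command are expressed in
			 * whole 512-byte DSM blocks, hence the round-up of
			 * the range count to a block boundary.
			 */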
1401			cam_fill_ataio(ataio,
1402			    ada_retry_count,
1403			    adadone,
1404			    CAM_DIR_OUT,
1405			    0,
1406			    req->data,
1407			    ((ranges + ATA_DSM_BLK_RANGES - 1) /
1408			        ATA_DSM_BLK_RANGES) * ATA_DSM_BLK_SIZE,
1409			    ada_default_timeout * 1000);
1410			ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
1411			    ATA_DSM_TRIM, 0, (ranges + ATA_DSM_BLK_RANGES -
1412			    1) / ATA_DSM_BLK_RANGES);
1413			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
1414			goto out;
1415		}
1416		/* Run regular command. */
1417		bp = bioq_first(&softc->bio_queue);
1418		if (bp == NULL) {
1419			xpt_release_ccb(start_ccb);
1420			break;
1421		}
1422		bioq_remove(&softc->bio_queue, bp);
1423
1424		if ((bp->bio_flags & BIO_ORDERED) != 0
1425		 || (softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
1426			softc->flags &= ~ADA_FLAG_NEED_OTAG;
1427			softc->ordered_tag_count++;
1428			tag_code = 0;
1429		} else {
1430			tag_code = 1;
1431		}
1432		switch (bp->bio_cmd) {
1433		case BIO_READ:
1434		case BIO_WRITE:
1435		{
1436			uint64_t lba = bp->bio_pblkno;
1437			uint16_t count = bp->bio_bcount / softc->params.secsize;
1438#ifdef ADA_TEST_FAILURE
1439			int fail = 0;
1440
1441			/*
1442			 * Support the failure sysctls.  If the command is a
1443			 * read and there are pending forced read errors, or if
1444			 * it is a write and there are pending write errors, then fail this
1445			 * operation with EIO.  This is useful for testing
1446			 * purposes.  Also, support having every Nth read fail.
1447			 *
1448			 * This is a rather blunt tool.
1449			 */
1450			if (bp->bio_cmd == BIO_READ) {
1451				if (softc->force_read_error) {
1452					softc->force_read_error--;
1453					fail = 1;
1454				}
1455				if (softc->periodic_read_error > 0) {
1456					if (++softc->periodic_read_count >=
1457					    softc->periodic_read_error) {
1458						softc->periodic_read_count = 0;
1459						fail = 1;
1460					}
1461				}
1462			} else {
1463				if (softc->force_write_error) {
1464					softc->force_write_error--;
1465					fail = 1;
1466				}
1467			}
1468			if (fail) {
1469				bp->bio_error = EIO;
1470				bp->bio_flags |= BIO_ERROR;
1471				biodone(bp);
1472				xpt_release_ccb(start_ccb);
1473				adaschedule(periph);
1474				return;
1475			}
1476#endif
1477			KASSERT((bp->bio_flags & BIO_UNMAPPED) == 0 ||
1478			    round_page(bp->bio_bcount + bp->bio_ma_offset) /
1479			    PAGE_SIZE == bp->bio_ma_n,
1480			    ("Short bio %p", bp));
1481			cam_fill_ataio(ataio,
1482			    ada_retry_count,
1483			    adadone,
1484			    (bp->bio_cmd == BIO_READ ? CAM_DIR_IN :
1485				CAM_DIR_OUT) | ((bp->bio_flags & BIO_UNMAPPED)
1486				!= 0 ? CAM_DATA_BIO : 0),
1487			    tag_code,
1488			    ((bp->bio_flags & BIO_UNMAPPED) != 0) ? (void *)bp :
1489				bp->bio_data,
1490			    bp->bio_bcount,
1491			    ada_default_timeout*1000);
1492
1493			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
1494				if (bp->bio_cmd == BIO_READ) {
1495					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
1496					    lba, count);
1497				} else {
1498					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
1499					    lba, count);
1500				}
1501			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
1502			    (lba + count >= ATA_MAX_28BIT_LBA ||
1503			    count > 256)) {
1504				if (softc->flags & ADA_FLAG_CAN_DMA48) {
1505					if (bp->bio_cmd == BIO_READ) {
1506						ata_48bit_cmd(ataio, ATA_READ_DMA48,
1507						    0, lba, count);
1508					} else {
1509						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
1510						    0, lba, count);
1511					}
1512				} else {
1513					if (bp->bio_cmd == BIO_READ) {
1514						ata_48bit_cmd(ataio, ATA_READ_MUL48,
1515						    0, lba, count);
1516					} else {
1517						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
1518						    0, lba, count);
1519					}
1520				}
1521			} else {
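				/*
				 * 28-bit commands carry an 8-bit sector
				 * count, where a value of 0 means 256
				 * sectors.
				 */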
1522				if (count == 256)
1523					count = 0;
1524				if (softc->flags & ADA_FLAG_CAN_DMA) {
1525					if (bp->bio_cmd == BIO_READ) {
1526						ata_28bit_cmd(ataio, ATA_READ_DMA,
1527						    0, lba, count);
1528					} else {
1529						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
1530						    0, lba, count);
1531					}
1532				} else {
1533					if (bp->bio_cmd == BIO_READ) {
1534						ata_28bit_cmd(ataio, ATA_READ_MUL,
1535						    0, lba, count);
1536					} else {
1537						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
1538						    0, lba, count);
1539					}
1540				}
1541			}
1542			break;
1543		}
1544		case BIO_DELETE:
1545		{
1546			uint64_t lba = bp->bio_pblkno;
1547			uint16_t count = bp->bio_bcount / softc->params.secsize;
1548
1549			cam_fill_ataio(ataio,
1550			    ada_retry_count,
1551			    adadone,
1552			    CAM_DIR_NONE,
1553			    0,
1554			    NULL,
1555			    0,
1556			    ada_default_timeout*1000);
1557
1558			if (count >= 256)
1559				count = 0;
1560			ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
1561			break;
1562		}
1563		case BIO_FLUSH:
1564			cam_fill_ataio(ataio,
1565			    1,
1566			    adadone,
1567			    CAM_DIR_NONE,
1568			    0,
1569			    NULL,
1570			    0,
1571			    ada_default_timeout*1000);
1572
1573			if (softc->flags & ADA_FLAG_CAN_48BIT)
1574				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1575			else
1576				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
1577			break;
1578		}
1579		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
1580out:
1581		start_ccb->ccb_h.ccb_bp = bp;
1582		softc->outstanding_cmds++;
1583		xpt_action(start_ccb);
1584
1585		/* May have more work to do, so ensure we stay scheduled */
1586		adaschedule(periph);
1587		break;
1588	}
1589	case ADA_STATE_RAHEAD:
1590	case ADA_STATE_WCACHE:
1591	{
1592		if ((periph->flags & CAM_PERIPH_INVALID) != 0) {
1593			softc->state = ADA_STATE_NORMAL;
1594			xpt_release_ccb(start_ccb);
1595			cam_periph_release_locked(periph);
1596			return;
1597		}
1598
1599		cam_fill_ataio(ataio,
1600		    1,
1601		    adadone,
1602		    CAM_DIR_NONE,
1603		    0,
1604		    NULL,
1605		    0,
1606		    ada_default_timeout*1000);
1607
1608		if (softc->state == ADA_STATE_RAHEAD) {
1609			ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_RA ?
1610			    ATA_SF_ENAB_RCACHE : ATA_SF_DIS_RCACHE, 0, 0);
1611			start_ccb->ccb_h.ccb_state = ADA_CCB_RAHEAD;
1612		} else {
1613			ata_28bit_cmd(ataio, ATA_SETFEATURES, ADA_WC ?
1614			    ATA_SF_ENAB_WCACHE : ATA_SF_DIS_WCACHE, 0, 0);
1615			start_ccb->ccb_h.ccb_state = ADA_CCB_WCACHE;
1616		}
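		/*
		 * Freeze the device queue while the SETFEATURES command is
		 * in flight; adadone() drops this freeze via
		 * cam_release_devq().
		 */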
1617		start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
1618		xpt_action(start_ccb);
1619		break;
1620	}
1621	}
1622}
1623
1624static void
1625adadone(struct cam_periph *periph, union ccb *done_ccb)
1626{
1627	struct ada_softc *softc;
1628	struct ccb_ataio *ataio;
1629	struct ccb_getdev *cgd;
1630	struct cam_path *path;
1631
1632	softc = (struct ada_softc *)periph->softc;
1633	ataio = &done_ccb->ataio;
1634	path = done_ccb->ccb_h.path;
1635
1636	CAM_DEBUG(path, CAM_DEBUG_TRACE, ("adadone\n"));
1637
1638	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
1639	case ADA_CCB_BUFFER_IO:
1640	case ADA_CCB_TRIM:
1641	{
1642		struct bio *bp;
1643
1644		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
1645		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1646			int error;
1647
1648			error = adaerror(done_ccb, 0, 0);
1649			if (error == ERESTART) {
1650				/* A retry was scheduled, so just return. */
1651				return;
1652			}
1653			if (error != 0) {
1654				bp->bio_error = error;
1655				bp->bio_resid = bp->bio_bcount;
1656				bp->bio_flags |= BIO_ERROR;
1657			} else {
1658				bp->bio_resid = ataio->resid;
1659				bp->bio_error = 0;
1660				if (bp->bio_resid != 0)
1661					bp->bio_flags |= BIO_ERROR;
1662			}
1663			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1664				cam_release_devq(path,
1665						 /*relsim_flags*/0,
1666						 /*reduction*/0,
1667						 /*timeout*/0,
1668						 /*getcount_only*/0);
1669		} else {
1670			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
1671				panic("REQ_CMP with QFRZN");
1672			bp->bio_resid = ataio->resid;
1673			if (ataio->resid > 0)
1674				bp->bio_flags |= BIO_ERROR;
1675		}
1676		softc->outstanding_cmds--;
1677		if (softc->outstanding_cmds == 0)
1678			softc->flags |= ADA_FLAG_WENT_IDLE;
1679		if ((ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) ==
1680		    ADA_CCB_TRIM) {
1681			struct trim_request *req =
1682			    (struct trim_request *)ataio->data_ptr;
1683			int i;
1684
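			/*
			 * bps[0] is the bio attached to this CCB (ccb_bp),
			 * which is completed separately after this loop, so
			 * start at index 1.
			 */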
1685			for (i = 1; i < TRIM_MAX_BIOS && req->bps[i]; i++) {
1686				struct bio *bp1 = req->bps[i];
1687
1688				bp1->bio_resid = bp->bio_resid;
1689				bp1->bio_error = bp->bio_error;
1690				if (bp->bio_flags & BIO_ERROR)
1691					bp1->bio_flags |= BIO_ERROR;
1692				biodone(bp1);
1693			}
1694			softc->trim_running = 0;
1695			biodone(bp);
1696			adaschedule(periph);
1697		} else
1698			biodone(bp);
1699		break;
1700	}
1701	case ADA_CCB_RAHEAD:
1702	{
1703		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1704			if (adaerror(done_ccb, 0, 0) == ERESTART) {
1705out:
1706				/* Drop freeze taken due to CAM_DEV_QFREEZE */
1707				cam_release_devq(path, 0, 0, 0, FALSE);
1708				return;
1709			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1710				cam_release_devq(path,
1711				    /*relsim_flags*/0,
1712				    /*reduction*/0,
1713				    /*timeout*/0,
1714				    /*getcount_only*/0);
1715			}
1716		}
1717
1718		/*
1719		 * Since our peripheral may be invalidated by an error
1720		 * above or an external event, we must release our CCB
1721		 * before releasing the reference on the peripheral.
1722		 * The peripheral will only go away once the last reference
1723		 * is removed, and we need it around for the CCB release
1724		 * operation.
1725		 */
1726		cgd = (struct ccb_getdev *)done_ccb;
1727		xpt_setup_ccb(&cgd->ccb_h, path, CAM_PRIORITY_NORMAL);
1728		cgd->ccb_h.func_code = XPT_GDEV_TYPE;
1729		xpt_action((union ccb *)cgd);
1730		if (ADA_WC >= 0 &&
1731		    cgd->ident_data.support.command1 & ATA_SUPPORT_WRITECACHE) {
1732			softc->state = ADA_STATE_WCACHE;
1733			xpt_release_ccb(done_ccb);
1734			xpt_schedule(periph, CAM_PRIORITY_DEV);
1735			goto out;
1736		}
1737		softc->state = ADA_STATE_NORMAL;
1738		xpt_release_ccb(done_ccb);
1739		/* Drop freeze taken due to CAM_DEV_QFREEZE */
1740		cam_release_devq(path, 0, 0, 0, FALSE);
1741		adaschedule(periph);
1742		cam_periph_release_locked(periph);
1743		return;
1744	}
1745	case ADA_CCB_WCACHE:
1746	{
1747		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1748			if (adaerror(done_ccb, 0, 0) == ERESTART) {
1749				goto out;
1750			} else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
1751				cam_release_devq(path,
1752				    /*relsim_flags*/0,
1753				    /*reduction*/0,
1754				    /*timeout*/0,
1755				    /*getcount_only*/0);
1756			}
1757		}
1758
1759		softc->state = ADA_STATE_NORMAL;
1760		/*
1761		 * Since our peripheral may be invalidated by an error
1762		 * above or an external event, we must release our CCB
1763		 * before releasing the reference on the peripheral.
1764		 * The peripheral will only go away once the last reference
1765		 * is removed, and we need it around for the CCB release
1766		 * operation.
1767		 */
1768		xpt_release_ccb(done_ccb);
1769		/* Drop freeze taken due to CAM_DEV_QFREEZE */
1770		cam_release_devq(path, 0, 0, 0, FALSE);
1771		adaschedule(periph);
1772		cam_periph_release_locked(periph);
1773		return;
1774	}
1775	case ADA_CCB_WAITING:
1776	{
1777		/* Caller will release the CCB */
1778		wakeup(&done_ccb->ccb_h.cbfcnp);
1779		return;
1780	}
1781	case ADA_CCB_DUMP:
1782		/* No-op.  We're polling */
1783		return;
1784	default:
1785		break;
1786	}
1787	xpt_release_ccb(done_ccb);
1788}
1789
1790static int
1791adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
1792{
1793
1794	return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
1795}
1796
1797static void
1798adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
1799{
1800	struct ada_softc *softc = (struct ada_softc *)periph->softc;
1801	struct disk_params *dp = &softc->params;
1802	u_int64_t lbasize48;
1803	u_int32_t lbasize;
1804
1805	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
1806	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
1807		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
1808		dp->heads = cgd->ident_data.current_heads;
1809		dp->secs_per_track = cgd->ident_data.current_sectors;
1810		dp->cylinders = cgd->ident_data.cylinders;
1811		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
1812			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
1813	} else {
1814		dp->heads = cgd->ident_data.heads;
1815		dp->secs_per_track = cgd->ident_data.sectors;
1816		dp->cylinders = cgd->ident_data.cylinders;
1817		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
1818	}
1819	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
1820		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);
1821
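	/*
	 * A cylinders value of 16383 is the ATA convention for drives whose
	 * capacity exceeds what CHS can express, in which case the LBA size
	 * is authoritative.
	 */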
1822	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
1823	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
1824		dp->sectors = lbasize;
1825
1826	/* use the 48bit LBA size if valid */
1827	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
1828		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
1829		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
1830		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
1831	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
1832	    lbasize48 > ATA_MAX_28BIT_LBA)
1833		dp->sectors = lbasize48;
1834}
1835
1836static void
1837adasendorderedtag(void *arg)
1838{
1839	struct ada_softc *softc = arg;
1840
1841	if (ada_send_ordered) {
1842		if ((softc->ordered_tag_count == 0)
1843		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
1844			softc->flags |= ADA_FLAG_NEED_OTAG;
1845		}
1846		if (softc->outstanding_cmds > 0)
1847			softc->flags &= ~ADA_FLAG_WENT_IDLE;
1848
1849		softc->ordered_tag_count = 0;
1850	}
1851	/* Queue us up again */
1852	callout_reset(&softc->sendordered_c,
1853	    (ada_default_timeout * hz) / ADA_ORDEREDTAG_INTERVAL,
1854	    adasendorderedtag, softc);
1855}
1856
1857/*
1858 * Step through all ADA peripheral instances, and if the device is still open,
1859 * sync the disk cache to physical media.
1860 */
1861static void
1862adaflush(void)
1863{
1864	struct cam_periph *periph;
1865	struct ada_softc *softc;
1866	union ccb *ccb;
1867	int error;
1868
1869	CAM_PERIPH_FOREACH(periph, &adadriver) {
1870		/* If we panicked with the lock held, do not recurse here. */
1871		if (cam_periph_owned(periph))
1872			continue;
1873		cam_periph_lock(periph);
1874		softc = (struct ada_softc *)periph->softc;
1875		/*
1876		 * We only sync the cache if the drive is still open, and
1877		 * if the drive is capable of it.
1878		 */
1879		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
1880		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
1881			cam_periph_unlock(periph);
1882			continue;
1883		}
1884
1885		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1886		cam_fill_ataio(&ccb->ataio,
1887				    0,
1888				    adadone,
1889				    CAM_DIR_NONE,
1890				    0,
1891				    NULL,
1892				    0,
1893				    ada_default_timeout*1000);
1894		if (softc->flags & ADA_FLAG_CAN_48BIT)
1895			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
1896		else
1897			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
1898
1899		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
1900		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
1901		    softc->disk->d_devstat);
1902		if (error != 0)
1903			xpt_print(periph->path, "Synchronize cache failed\n");
1904		xpt_release_ccb(ccb);
1905		cam_periph_unlock(periph);
1906	}
1907}
1908
1909static void
1910adaspindown(uint8_t cmd, int flags)
1911{
1912	struct cam_periph *periph;
1913	struct ada_softc *softc;
1914	union ccb *ccb;
1915	int error;
1916
1917	CAM_PERIPH_FOREACH(periph, &adadriver) {
1918		/* If we panicked with the lock held, do not recurse here. */
1919		if (cam_periph_owned(periph))
1920			continue;
1921		cam_periph_lock(periph);
1922		softc = (struct ada_softc *)periph->softc;
1923		/*
1924		 * We only spin down the drive if it is capable of it.
1925		 */
1926		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
1927			cam_periph_unlock(periph);
1928			continue;
1929		}
1930
1931		if (bootverbose)
1932			xpt_print(periph->path, "spin-down\n");
1933
1934		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
1935		cam_fill_ataio(&ccb->ataio,
1936				    0,
1937				    adadone,
1938				    CAM_DIR_NONE | flags,
1939				    0,
1940				    NULL,
1941				    0,
1942				    ada_default_timeout*1000);
1943		ata_28bit_cmd(&ccb->ataio, cmd, 0, 0, 0);
1944
1945		error = cam_periph_runccb(ccb, adaerror, /*cam_flags*/0,
1946		    /*sense_flags*/ SF_NO_RECOVERY | SF_NO_RETRY,
1947		    softc->disk->d_devstat);
1948		if (error != 0)
1949			xpt_print(periph->path, "Spin-down disk failed\n");
1950		xpt_release_ccb(ccb);
1951		cam_periph_unlock(periph);
1952	}
1953}
1954
1955static void
1956adashutdown(void *arg, int howto)
1957{
1958
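	/*
	 * Flush write caches on every shutdown; additionally spin the disks
	 * down, if enabled, only when halting or powering off rather than
	 * rebooting.
	 */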
1959	adaflush();
1960	if (ada_spindown_shutdown != 0 &&
1961	    (howto & (RB_HALT | RB_POWEROFF)) != 0)
1962		adaspindown(ATA_STANDBY_IMMEDIATE, 0);
1963}
1964
1965static void
1966adasuspend(void *arg)
1967{
1968
1969	adaflush();
1970	if (ada_spindown_suspend != 0)
1971		adaspindown(ATA_SLEEP, CAM_DEV_QFREEZE);
1972}
1973
1974static void
1975adaresume(void *arg)
1976{
1977	struct cam_periph *periph;
1978	struct ada_softc *softc;
1979
1980	if (ada_spindown_suspend == 0)
1981		return;
1982
1983	CAM_PERIPH_FOREACH(periph, &adadriver) {
1984		cam_periph_lock(periph);
1985		softc = (struct ada_softc *)periph->softc;
1986		/*
1987		 * Only drives capable of power management were put to sleep on suspend.
1988		 */
1989		if ((softc->flags & ADA_FLAG_CAN_POWERMGT) == 0) {
1990			cam_periph_unlock(periph);
1991			continue;
1992		}
1993
1994		if (bootverbose)
1995			xpt_print(periph->path, "resume\n");
1996
1997		/*
1998		 * Drop freeze taken due to CAM_DEV_QFREEZE flag set on
1999		 * sleep request.
2000		 */
2001		cam_release_devq(periph->path,
2002			 /*relsim_flags*/0,
2003			 /*openings*/0,
2004			 /*timeout*/0,
2005			 /*getcount_only*/0);
2006
2007		cam_periph_unlock(periph);
2008	}
2009}
2010
2011#endif /* _KERNEL */
2012