/*-
 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/cam/ata/ata_da.c 200969 2009-12-24 21:54:44Z mav $");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_sim.h>

#include <cam/ata/ata_all.h>

#ifdef _KERNEL

#define ATA_MAX_28BIT_LBA               268435455UL

typedef enum {
	ADA_STATE_NORMAL
} ada_state;

typedef enum {
	ADA_FLAG_PACK_INVALID	= 0x001,
	ADA_FLAG_CAN_48BIT	= 0x002,
	ADA_FLAG_CAN_FLUSHCACHE	= 0x004,
	ADA_FLAG_CAN_NCQ	= 0x008,
	ADA_FLAG_CAN_DMA	= 0x010,
	ADA_FLAG_NEED_OTAG	= 0x020,
	ADA_FLAG_WENT_IDLE	= 0x040,
	ADA_FLAG_OPEN		= 0x100,
	ADA_FLAG_SCTX_INIT	= 0x200
} ada_flags;

typedef enum {
	ADA_Q_NONE		= 0x00
} ada_quirks;

typedef enum {
	ADA_CCB_BUFFER_IO	= 0x03,
	ADA_CCB_WAITING		= 0x04,
	ADA_CCB_DUMP		= 0x05,
	ADA_CCB_TYPE_MASK	= 0x0F,
} ada_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int8_t  secs_per_track;
	u_int32_t cylinders;
	u_int32_t secsize;	/* Number of bytes/logical sector */
	u_int64_t sectors;	/* Total number sectors */
};

struct ada_softc {
	struct	 bio_queue_head bio_queue;
	ada_state state;
	ada_flags flags;
	ada_quirks quirks;
	int	 ordered_tag_count;
	int	 outstanding_cmds;
	struct	 disk_params params;
	struct	 disk *disk;
	union	 ccb saved_ccb;
	struct task		sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;
};

struct ada_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	ada_quirks quirks;
};

static struct ada_quirk_entry ada_quirk_table[] =
{
	{
		/* Default */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0
	},
};

static	disk_strategy_t	adastrategy;
static	dumper_t	adadump;
static	periph_init_t	adainit;
static	void		adaasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		adasysctlinit(void *context, int pending);
static	periph_ctor_t	adaregister;
static	periph_dtor_t	adacleanup;
static	periph_start_t	adastart;
static	periph_oninv_t	adaoninvalidate;
static	void		adadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static  int		adaerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static void		adagetparams(struct cam_periph *periph,
				struct ccb_getdev *cgd);
static timeout_t	adasendorderedtag;
static void		adashutdown(void *arg, int howto);

#ifndef ADA_DEFAULT_TIMEOUT
#define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
#endif

#ifndef	ADA_DEFAULT_RETRY
#define	ADA_DEFAULT_RETRY	4
#endif

#ifndef	ADA_DEFAULT_SEND_ORDERED
#define	ADA_DEFAULT_SEND_ORDERED	1
#endif


static int ada_retry_count = ADA_DEFAULT_RETRY;
static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;

SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
           &ada_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
           &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
           &ada_send_ordered, 0, "Send Ordered Tags");
TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);

/*
 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * satisfies the "don't send an ordered tag" test, so it takes
 * us two intervals to determine that a tag must be sent.
 */
#ifndef ADA_ORDEREDTAG_INTERVAL
#define ADA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver adadriver =
{
	adainit, "ada",
	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(ada, adadriver);

MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");

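/*
 * Open handler for the GEOM disk: take a reference on the periph,
 * mark the unit open and clear any stale pack-invalid state.
 */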
static int
adaopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	int unit;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL) {
		return (ENXIO);
	}

	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return(ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	unit = periph->unit_number;
	softc = (struct ada_softc *)periph->softc;
	softc->flags |= ADA_FLAG_OPEN;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
	     unit));

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		/* Clear stale pack-invalid state left from an earlier invalidation. */
		softc->flags &= ~ADA_FLAG_PACK_INVALID;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}

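/*
 * Close handler for the GEOM disk: flush the drive's write cache if it
 * supports it, then drop the reference taken in adaopen().
 */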
static int
adaclose(struct disk *dp)
{
	struct	cam_periph *periph;
	struct	ada_softc *softc;
	union ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	softc = (struct ada_softc *)periph->softc;
	/* We only sync the cache if the drive is capable of it. */
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {

		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		cam_fill_ataio(&ccb->ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
		    /*sense_flags*/0, softc->disk->d_devstat);

		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		xpt_release_ccb(ccb);
	}

	softc->flags &= ~ADA_FLAG_OPEN;
	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
adastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct ada_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bioq_disksort(&softc->bio_queue, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
	cam_periph_unlock(periph);

	return;
}

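/*
 * Kernel dump routine.  Writes the supplied buffer with a polled
 * WRITE DMA (48-bit when required) command; a zero-length call at the
 * end of the dump flushes the write cache if the drive supports it.
 */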
static int
adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct	    cam_periph *periph;
	struct	    ada_softc *softc;
	u_int	    secsize;
	union	    ccb ccb;
	struct	    disk *dp;
	uint64_t    lba;
	uint16_t    count;

	dp = arg;
	periph = dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);
	softc = (struct ada_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;
	lba = offset / secsize;
	count = length / secsize;

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

	if (length > 0) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
		    0,
		    adadone,
		    CAM_DIR_OUT,
		    0,
		    (u_int8_t *) virtual,
		    length,
		    ada_default_timeout*1000);
		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
		    (lba + count >= ATA_MAX_28BIT_LBA ||
		    count >= 256)) {
			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
			    0, lba, count);
		} else {
			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
			    0, lba, count);
		}
		xpt_polled_action(&ccb);

		if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			cam_periph_unlock(periph);
			return(EIO);
		}
		cam_periph_unlock(periph);
		return(0);
	}

	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
	}
	cam_periph_unlock(periph);
	return (0);
}

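/*
 * One-time periph driver initialization.
 */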
static void
adainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("ada: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (ada_send_ordered) {

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
		    printf("adainit: shutdown event registration failed!\n");
	}
}

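/*
 * Invalidation callback: mark the pack invalid, fail all queued bios
 * with ENXIO and tell GEOM the disk is gone.
 */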
static void
adaoninvalidate(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, adaasync, periph, periph->path);

	softc->flags |= ADA_FLAG_PACK_INVALID;

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	bioq_flush(&softc->bio_queue, NULL, ENXIO);

	disk_gone(softc->disk);
	xpt_print(periph->path, "lost device\n");
}

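/*
 * Periph destructor: free the sysctl tree, the GEOM disk, the
 * ordered-tag callout and the softc itself.
 */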
static void
adacleanup(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	xpt_print(periph->path, "removing device entry\n");
	cam_periph_unlock(periph);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
		xpt_print(periph->path, "can't remove sysctl context\n");
	}

	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

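/*
 * Async event handler.  On AC_FOUND_DEVICE, create an ada periph for
 * any newly reported ATA-protocol device; other events are passed on
 * to the generic periph handler.
 */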
static void
adaasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (cgd->protocol != PROTO_ATA)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(adaregister, adaoninvalidate,
					  adacleanup, adastart,
					  "ada", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, adaasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("adaasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

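/*
 * Deferred creation of the per-unit sysctl node under kern.cam.ada,
 * run from a task context.
 */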
static void
adasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	char tmpstr[80], tmpstr2[80];

	periph = (struct cam_periph *)context;
	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return;

	softc = (struct ada_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= ADA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("adasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	cam_periph_release(periph);
}

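/*
 * Periph constructor: translate the IDENTIFY data into capability
 * flags and disk parameters, create and announce the GEOM disk, and
 * start the periodic ordered-tag callout.
 */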
static cam_status
adaregister(struct cam_periph *periph, void *arg)
{
	struct ada_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char   announce_buf[80];
	struct disk_params *dp;
	caddr_t match;
	u_int maxio;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("adaregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("adaregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("adaregister: Unable to probe new device. "
		    "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	bioq_init(&softc->bio_queue);

	if (cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA)
		softc->flags |= ADA_FLAG_CAN_DMA;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
		softc->flags |= ADA_FLAG_CAN_48BIT;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
	if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
	    cgd->inq_flags & SID_CmdQue)
		softc->flags |= ADA_FLAG_CAN_NCQ;
	softc->state = ADA_STATE_NORMAL;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
			       (caddr_t)ada_quirk_table,
			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
			       sizeof(*ada_quirk_table), ata_identify_match);
	if (match != NULL)
		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
	else
		softc->quirks = ADA_Q_NONE;

	/* Check if the SIM does not want queued commands */
	bzero(&cpi, sizeof(cpi));
	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status != CAM_REQ_CMP ||
	    (cpi.hba_inquiry & PI_TAG_ABLE) == 0)
		softc->flags &= ~ADA_FLAG_CAN_NCQ;

	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);

	/*
	 * Register this media as a disk
	 */
	mtx_unlock(periph->sim->mtx);
	adagetparams(periph, cgd);
	softc->disk = disk_alloc();
	softc->disk->d_open = adaopen;
	softc->disk->d_close = adaclose;
	softc->disk->d_strategy = adastrategy;
	softc->disk->d_dump = adadump;
	softc->disk->d_name = "ada";
	softc->disk->d_drv1 = periph;
	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
	if (maxio == 0)
		maxio = DFLTPHYS;	/* traditional default */
	else if (maxio > MAXPHYS)
		maxio = MAXPHYS;	/* for safety */
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
		maxio = min(maxio, 65536 * softc->params.secsize);
	else					/* 28bit ATA command limit */
		maxio = min(maxio, 256 * softc->params.secsize);
	softc->disk->d_maxsize = maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = 0;
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	strlcpy(softc->disk->d_ident, cgd->serial_num,
	    MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1));

	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = (off_t)softc->params.sectors *
	    softc->params.secsize;
	if (ata_physical_sector_size(&cgd->ident_data) !=
	    softc->params.secsize) {
		softc->disk->d_stripesize =
		    ata_physical_sector_size(&cgd->ident_data);
		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
		    ata_logical_sector_offset(&cgd->ident_data)) %
		    softc->disk->d_stripesize;
	}
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;

	disk_create(softc->disk, DISK_VERSION);
	mtx_lock(periph->sim->mtx);

	dp = &softc->params;
	snprintf(announce_buf, sizeof(announce_buf),
		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
		(uintmax_t)(((uintmax_t)dp->secsize *
		dp->sectors) / (1024*1024)),
		(uintmax_t)dp->sectors,
		dp->secsize, dp->heads,
		dp->secs_per_track, dp->cylinders);
	xpt_announce_periph(periph, announce_buf);
	/*
	 * Add an async callback for lost device notifications.
	 * I don't bother checking if this fails as, in most cases,
	 * the system will function just fine without it and the
	 * only alternative would be to not attach the device on
	 * failure.
	 */
	xpt_register_async(AC_LOST_DEVICE,
			   adaasync, periph, periph->path);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);

	return(CAM_REQ_CMP);
}

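/*
 * Action routine, called via xpt_schedule(): pull the next bio off the
 * queue and convert it into an ATA read, write or flush CCB, using
 * NCQ, 48-bit or 28-bit command variants as the device allows.
 */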
static void
adastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct ccb_ataio *ataio = &start_ccb->ataio;

	switch (softc->state) {
	case ADA_STATE_NORMAL:
	{
		/* Pull a buffer from the queue and get going on it */
		struct bio *bp;

		/*
		 * See if there is a buf with work for us to do.
		 */
		bp = bioq_first(&softc->bio_queue);
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			wakeup(&periph->ccb_list);
		} else if (bp == NULL) {
			xpt_release_ccb(start_ccb);
		} else {
			u_int8_t tag_code;

			bioq_remove(&softc->bio_queue, bp);

			if ((softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
				softc->flags &= ~ADA_FLAG_NEED_OTAG;
				softc->ordered_tag_count++;
				tag_code = 0;
			} else {
				tag_code = 1;
			}
			switch (bp->bio_cmd) {
			case BIO_READ:
			case BIO_WRITE:
			{
				uint64_t lba = bp->bio_pblkno;
				uint16_t count = bp->bio_bcount / softc->params.secsize;

				cam_fill_ataio(ataio,
				    ada_retry_count,
				    adadone,
				    bp->bio_cmd == BIO_READ ?
				        CAM_DIR_IN : CAM_DIR_OUT,
				    tag_code,
				    bp->bio_data,
				    bp->bio_bcount,
				    ada_default_timeout*1000);

				if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
					if (bp->bio_cmd == BIO_READ) {
						ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
						    lba, count);
					} else {
						ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
						    lba, count);
					}
				} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
				    (lba + count >= ATA_MAX_28BIT_LBA ||
				    count > 256)) {
					if (softc->flags & ADA_FLAG_CAN_DMA) {
						if (bp->bio_cmd == BIO_READ) {
							ata_48bit_cmd(ataio, ATA_READ_DMA48,
							    0, lba, count);
						} else {
							ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
							    0, lba, count);
						}
					} else {
						if (bp->bio_cmd == BIO_READ) {
							ata_48bit_cmd(ataio, ATA_READ_MUL48,
							    0, lba, count);
						} else {
							ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
							    0, lba, count);
						}
					}
				} else {
					if (count == 256)
						count = 0;
					if (softc->flags & ADA_FLAG_CAN_DMA) {
						if (bp->bio_cmd == BIO_READ) {
							ata_28bit_cmd(ataio, ATA_READ_DMA,
							    0, lba, count);
						} else {
							ata_28bit_cmd(ataio, ATA_WRITE_DMA,
							    0, lba, count);
						}
					} else {
						if (bp->bio_cmd == BIO_READ) {
							ata_28bit_cmd(ataio, ATA_READ_MUL,
							    0, lba, count);
						} else {
							ata_28bit_cmd(ataio, ATA_WRITE_MUL,
							    0, lba, count);
						}
					}
				}
			}
				break;
			case BIO_FLUSH:
				cam_fill_ataio(ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

				if (softc->flags & ADA_FLAG_CAN_48BIT)
					ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
				else
					ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
				break;
			}
			start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
			start_ccb->ccb_h.ccb_bp = bp;
			softc->outstanding_cmds++;
			xpt_action(start_ccb);
			bp = bioq_first(&softc->bio_queue);
		}

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, CAM_PRIORITY_NORMAL);
		}
		break;
	}
	}
}

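/*
 * Command completion handler: hand finished bios back to GEOM and, on
 * fatal errors, invalidate the pack and fail any queued I/O.
 */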
static void
adadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ada_softc *softc;
	struct ccb_ataio *ataio;

	softc = (struct ada_softc *)periph->softc;
	ataio = &done_ccb->ataio;
	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
	case ADA_CCB_BUFFER_IO:
	{
		struct bio *bp;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;

			error = adaerror(done_ccb, 0, 0);
			if (error == ERESTART) {
				/* A retry was scheduled, so just return. */
				return;
			}
			if (error != 0) {
				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our pack as
					 * invalid.
					 */
					/*
					 * XXX See if this is really a media
					 * XXX change first?
					 */
					xpt_print(periph->path,
					    "Invalidating pack\n");
					softc->flags |= ADA_FLAG_PACK_INVALID;
				}

				/*
				 * return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				bioq_flush(&softc->bio_queue, NULL, EIO);
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = ataio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0)
					bp->bio_flags |= BIO_ERROR;
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			bp->bio_resid = ataio->resid;
			if (ataio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}
		softc->outstanding_cmds--;
		if (softc->outstanding_cmds == 0)
			softc->flags |= ADA_FLAG_WENT_IDLE;

		biodone(bp);
		break;
	}
	case ADA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case ADA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

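/*
 * Error recovery wrapper around the generic CAM periph error handler.
 */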
static int
adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct ada_softc	  *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct ada_softc *)periph->softc;

	return(cam_periph_error(ccb, cam_flags, sense_flags,
				&softc->saved_ccb));
}

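/*
 * Fill in the disk_params structure from the IDENTIFY data, preferring
 * the 48-bit LBA size, then the 28-bit LBA size, then CHS geometry.
 */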
static void
adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct disk_params *dp = &softc->params;
	u_int64_t lbasize48;
	u_int32_t lbasize;

	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
		dp->heads = cgd->ident_data.current_heads;
		dp->secs_per_track = cgd->ident_data.current_sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
	} else {
		dp->heads = cgd->ident_data.heads;
		dp->secs_per_track = cgd->ident_data.sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
	}
	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);

	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
		dp->sectors = lbasize;

	/* use the 48bit LBA size if valid */
	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
	    lbasize48 > ATA_MAX_28BIT_LBA)
		dp->sectors = lbasize48;
}

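/*
 * Periodic callout that occasionally flags the driver to issue an
 * ordered (non-queued) command, so simple tags cannot starve a
 * request indefinitely.
 */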
static void
adasendorderedtag(void *arg)
{
	struct ada_softc *softc = arg;

	if (ada_send_ordered) {
		if ((softc->ordered_tag_count == 0)
		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
			softc->flags |= ADA_FLAG_NEED_OTAG;
		}
		if (softc->outstanding_cmds > 0)
			softc->flags &= ~ADA_FLAG_WENT_IDLE;

		softc->ordered_tag_count = 0;
	}
	/* Queue us up again */
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);
}

/*
 * Step through all ADA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
adashutdown(void * arg, int howto)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
		union ccb ccb;

		/* If we panicked with this lock held, do not recurse here. */
		if (cam_periph_owned(periph))
			continue;
		cam_periph_lock(periph);
		softc = (struct ada_softc *)periph->softc;
		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it.
		 */
		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
			cam_periph_unlock(periph);
			continue;
		}

		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		cam_periph_unlock(periph);
	}
}

#endif /* _KERNEL */
