/*-
 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/cam/ata/ata_da.c 196657 2009-08-30 15:36:56Z mav $");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_sim.h>

#include <cam/ata/ata_all.h>

#ifdef _KERNEL

#define ATA_MAX_28BIT_LBA               268435455UL

typedef enum {
	ADA_STATE_NORMAL
} ada_state;

typedef enum {
	ADA_FLAG_PACK_INVALID	= 0x001,
	ADA_FLAG_CAN_48BIT	= 0x002,
	ADA_FLAG_CAN_FLUSHCACHE	= 0x004,
	ADA_FLAG_CAN_NCQ	= 0x008,
	ADA_FLAG_TAGGED_QUEUING	= 0x010,
	ADA_FLAG_NEED_OTAG	= 0x020,
	ADA_FLAG_WENT_IDLE	= 0x040,
	ADA_FLAG_RETRY_UA	= 0x080,
	ADA_FLAG_OPEN		= 0x100,
	ADA_FLAG_SCTX_INIT	= 0x200
} ada_flags;

typedef enum {
	ADA_Q_NONE		= 0x00,
	ADA_Q_NO_SYNC_CACHE	= 0x01,
	ADA_Q_NO_6_BYTE		= 0x02,
	ADA_Q_NO_PREVENT	= 0x04
} ada_quirks;

typedef enum {
	ADA_CCB_PROBE		= 0x01,
	ADA_CCB_PROBE2		= 0x02,
	ADA_CCB_BUFFER_IO	= 0x03,
	ADA_CCB_WAITING		= 0x04,
	ADA_CCB_DUMP		= 0x05,
	ADA_CCB_TYPE_MASK	= 0x0F,
	ADA_CCB_RETRY_UA	= 0x10
} ada_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int32_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes per sector */
	u_int64_t sectors;	/* Total number of sectors */
};

struct ada_softc {
	struct	 bio_queue_head bio_queue;
	SLIST_ENTRY(ada_softc) links;
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	ada_state state;
	ada_flags flags;
	ada_quirks quirks;
	int	 ordered_tag_count;
	int	 outstanding_cmds;
	struct	 disk_params params;
	struct	 disk *disk;
	union	 ccb saved_ccb;
	struct task		sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;
};

struct ada_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	ada_quirks quirks;
};

//static struct ada_quirk_entry ada_quirk_table[] =
//{
//};

static	disk_strategy_t	adastrategy;
static	dumper_t	adadump;
static	periph_init_t	adainit;
static	void		adaasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		adasysctlinit(void *context, int pending);
static	periph_ctor_t	adaregister;
static	periph_dtor_t	adacleanup;
static	periph_start_t	adastart;
static	periph_oninv_t	adaoninvalidate;
static	void		adadone(struct cam_periph *periph,
				union ccb *done_ccb);
static	int		adaerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		adasetgeom(struct cam_periph *periph,
				struct ccb_getdev *cgd);
static	timeout_t	adasendorderedtag;
static	void		adashutdown(void *arg, int howto);

#ifndef ADA_DEFAULT_TIMEOUT
#define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
#endif

#ifndef	ADA_DEFAULT_RETRY
#define	ADA_DEFAULT_RETRY	4
#endif

#ifndef	ADA_DEFAULT_SEND_ORDERED
#define	ADA_DEFAULT_SEND_ORDERED	1
#endif

static int ada_retry_count = ADA_DEFAULT_RETRY;
static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;

SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
           &ada_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
           &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
           &ada_send_ordered, 0, "Send Ordered Tags");
TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);

/*
 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This handles the worst case,
 * where a starved transaction starts during an interval that passes
 * the "don't send an ordered tag" test, so it takes us two intervals
 * to determine that a tag must be sent.
 */
#ifndef ADA_ORDEREDTAG_INTERVAL
#define ADA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver adadriver =
{
	adainit, "ada",
	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(ada, adadriver);

MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");

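/*
 * GEOM disk open method: take a reference on the periph, mark the
 * device open and clear any stale pack-invalid state.
 */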
static int
adaopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	int unit;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL) {
		return (ENXIO);
	}

	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return(ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	unit = periph->unit_number;
	softc = (struct ada_softc *)periph->softc;
	softc->flags |= ADA_FLAG_OPEN;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
	     unit));

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		/* Clear any stale pack-invalid state. */
		softc->flags &= ~ADA_FLAG_PACK_INVALID;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}

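/*
 * GEOM disk close method: flush the drive's write cache if it is
 * capable of it, then drop the open flag and our periph reference.
 */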
static int
adaclose(struct disk *dp)
{
	struct	cam_periph *periph;
	struct	ada_softc *softc;
	union ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	softc = (struct ada_softc *)periph->softc;
	/* We only sync the cache if the drive is capable of it. */
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {

		ccb = cam_periph_getccb(periph, /*priority*/1);
		cam_fill_ataio(&ccb->ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_36bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
		    /*sense_flags*/SF_RETRY_UA,
		    softc->disk->d_devstat);

		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		xpt_release_ccb(ccb);
	}

	softc->flags &= ~ADA_FLAG_OPEN;
	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
adastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct ada_softc *)periph->softc;

	cam_periph_lock(periph);

#if 0
	/*
	 * check it's not too big a transfer for our adapter
	 */
	scsi_minphys(bp,&sd_switch);
#endif

	/*
	 * Mask interrupts so that the pack cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bioq_disksort(&softc->bio_queue, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);
	cam_periph_unlock(periph);

	return;
}

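/*
 * Kernel dump method: write dump data using polled WRITE DMA commands
 * and, when called with a zero length to finish the dump, flush the
 * drive's write cache.
 */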
static int
adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct	    cam_periph *periph;
	struct	    ada_softc *softc;
	u_int	    secsize;
	union	    ccb ccb;
	struct	    disk *dp;
	uint64_t    lba;
	uint16_t    count;

	dp = arg;
	periph = dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);
	softc = (struct ada_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;
	lba = offset / secsize;
	count = length / secsize;

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

	if (length > 0) {
		periph->flags |= CAM_PERIPH_POLLED;
		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);
		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
		    0,
		    adadone,
		    CAM_DIR_OUT,
		    0,
		    (u_int8_t *) virtual,
		    length,
		    ada_default_timeout*1000);
		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
		    (lba + count >= ATA_MAX_28BIT_LBA ||
		    count >= 256)) {
			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
			    0, lba, count);
		} else {
			ata_36bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
			    0, lba, count);
		}
		xpt_polled_action(&ccb);

		if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			cam_periph_unlock(periph);
			return(EIO);
		}
		cam_periph_unlock(periph);
		return(0);
	}

	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_36bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
	}
	periph->flags &= ~CAM_PERIPH_POLLED;
	cam_periph_unlock(periph);
	return (0);
}

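/*
 * Periph driver initialization: register the async callback that
 * attaches us to newly found ATA devices and, if ordered tags are
 * enabled, hook the shutdown-time cache flush handler.
 */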
static void
adainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * be notified of async events like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("ada: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (ada_send_ordered) {

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
		    printf("adainit: shutdown event registration failed!\n");
	}
}

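/*
 * The device is going away: mark the pack invalid, fail all queued
 * bios with ENXIO and tell GEOM the disk is gone.
 */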
static void
adaoninvalidate(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, adaasync, periph, periph->path);

	softc->flags |= ADA_FLAG_PACK_INVALID;

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	bioq_flush(&softc->bio_queue, NULL, ENXIO);

	disk_gone(softc->disk);
	xpt_print(periph->path, "lost device\n");
}

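/*
 * Final teardown once the last reference is gone: free the sysctl
 * context, destroy the disk, drain the ordered-tag callout and free
 * the softc.
 */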
static void
adacleanup(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	xpt_print(periph->path, "removing device entry\n");
	cam_periph_unlock(periph);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
		xpt_print(periph->path, "can't remove sysctl context\n");
	}

	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

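/*
 * Async event handler: attach to newly found ATA devices and mark
 * outstanding commands for retry after a bus or device reset.
 */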
static void
adaasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (cgd->protocol != PROTO_ATA)
			break;

//		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
//		    && SID_TYPE(&cgd->inq_data) != T_RBC
//		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
//			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(adaregister, adaoninvalidate,
					  adacleanup, adastart,
					  "ada", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, adaasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("adaasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct ada_softc *softc;
		struct ccb_hdr *ccbh;

		softc = (struct ada_softc *)periph->softc;
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= ADA_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= ADA_CCB_RETRY_UA;
		/* FALLTHROUGH */
	}
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

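/*
 * Deferred task that creates the per-unit sysctl tree under
 * kern.cam.ada.<unit>.
 */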
static void
adasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	char tmpstr[80], tmpstr2[80];

	periph = (struct cam_periph *)context;
	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return;

	softc = (struct ada_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= ADA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("adasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	cam_periph_release(periph);
}

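/*
 * Periph constructor: allocate the softc, detect 48-bit LBA, flush
 * cache and NCQ support from the identify data, register the GEOM
 * disk and start the ordered-tag callout.
 */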
static cam_status
adaregister(struct cam_periph *periph, void *arg)
{
	struct ada_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char   announce_buf[80];
	struct disk_params *dp;
	caddr_t match;
	u_int maxio;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("adaregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("adaregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("adaregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	LIST_INIT(&softc->pending_ccbs);
	softc->state = ADA_STATE_NORMAL;
	bioq_init(&softc->bio_queue);

	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
		softc->flags |= ADA_FLAG_CAN_48BIT;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
	if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
	    cgd->ident_data.queue >= 31)
		softc->flags |= ADA_FLAG_CAN_NCQ;
//	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
//		softc->flags |= ADA_FLAG_TAGGED_QUEUING;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
//	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
//			       (caddr_t)ada_quirk_table,
//			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
//			       sizeof(*ada_quirk_table), scsi_inquiry_match);
	match = NULL;

	if (match != NULL)
		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
	else
		softc->quirks = ADA_Q_NONE;

	/* Disable NCQ if the SIM does not support tagged commands. */
	bzero(&cpi, sizeof(cpi));
	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status != CAM_REQ_CMP ||
	    (cpi.hba_inquiry & PI_TAG_ABLE) == 0)
		softc->flags &= ~ADA_FLAG_CAN_NCQ;

	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);

	/*
	 * Register this media as a disk
	 */
	mtx_unlock(periph->sim->mtx);
	softc->disk = disk_alloc();
	softc->disk->d_open = adaopen;
	softc->disk->d_close = adaclose;
	softc->disk->d_strategy = adastrategy;
	softc->disk->d_dump = adadump;
	softc->disk->d_name = "ada";
	softc->disk->d_drv1 = periph;
	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
	if (maxio == 0)
		maxio = DFLTPHYS;	/* traditional default */
	else if (maxio > MAXPHYS)
		maxio = MAXPHYS;	/* for safety */
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
		maxio = min(maxio, 65535 * 512);
	else					/* 28bit ATA command limit */
		maxio = min(maxio, 255 * 512);
	softc->disk->d_maxsize = maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = 0;
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;

	adasetgeom(periph, cgd);
	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;
//	softc->disk->d_devstat->block_size = softc->params.secsize;
//	softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;

	disk_create(softc->disk, DISK_VERSION);
	mtx_lock(periph->sim->mtx);

	dp = &softc->params;
	snprintf(announce_buf, sizeof(announce_buf),
		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
		(uintmax_t)(((uintmax_t)dp->secsize *
		dp->sectors) / (1024*1024)),
		(uintmax_t)dp->sectors,
		dp->secsize, dp->heads,
		dp->secs_per_track, dp->cylinders);
	xpt_announce_periph(periph, announce_buf);
	if (softc->flags & ADA_FLAG_CAN_NCQ) {
		printf("%s%d: Native Command Queueing enabled\n",
		       periph->periph_name, periph->unit_number);
	}

	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_register_async(AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE,
			   adaasync, periph, periph->path);

	/*
	 * Take an exclusive refcount on the periph while adastart is called
	 * to finish the probe.  The reference will be dropped in adadone at
	 * the end of probe.
	 */
//	(void)cam_periph_hold(periph, PRIBIO);
//	xpt_schedule(periph, /*priority*/5);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);

	return(CAM_REQ_CMP);
}

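/*
 * Start routine: called with an allocated CCB; turn the next queued
 * bio (or a waiting immediate request) into an ATA read, write or
 * flush command and dispatch it.
 */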
static void
adastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	switch (softc->state) {
	case ADA_STATE_NORMAL:
	{
		/* Pull a buffer from the queue and get going on it */
		struct bio *bp;

		/*
		 * See if there is a buf with work for us to do..
		 */
		bp = bioq_first(&softc->bio_queue);
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			wakeup(&periph->ccb_list);
		} else if (bp == NULL) {
			xpt_release_ccb(start_ccb);
		} else {
			struct ccb_ataio *ataio = &start_ccb->ataio;
			u_int8_t tag_code;

			bioq_remove(&softc->bio_queue, bp);

			if ((softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
				softc->flags &= ~ADA_FLAG_NEED_OTAG;
				softc->ordered_tag_count++;
				tag_code = 0;//MSG_ORDERED_Q_TAG;
			} else {
				tag_code = 0;//MSG_SIMPLE_Q_TAG;
			}
			switch (bp->bio_cmd) {
			case BIO_READ:
			case BIO_WRITE:
			{
				uint64_t lba = bp->bio_pblkno;
				uint16_t count = bp->bio_bcount / softc->params.secsize;

				cam_fill_ataio(ataio,
				    ada_retry_count,
				    adadone,
				    bp->bio_cmd == BIO_READ ?
				        CAM_DIR_IN : CAM_DIR_OUT,
				    tag_code,
				    bp->bio_data,
				    bp->bio_bcount,
				    ada_default_timeout*1000);

				if (softc->flags & ADA_FLAG_CAN_NCQ) {
					if (bp->bio_cmd == BIO_READ) {
						ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
						    lba, count);
					} else {
						ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
						    lba, count);
					}
				} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
				    (lba + count >= ATA_MAX_28BIT_LBA ||
				    count >= 256)) {
					if (bp->bio_cmd == BIO_READ) {
						ata_48bit_cmd(ataio, ATA_READ_DMA48,
						    0, lba, count);
					} else {
						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
						    0, lba, count);
					}
				} else {
					if (bp->bio_cmd == BIO_READ) {
						ata_36bit_cmd(ataio, ATA_READ_DMA,
						    0, lba, count);
					} else {
						ata_36bit_cmd(ataio, ATA_WRITE_DMA,
						    0, lba, count);
					}
				}
			}
				break;
			case BIO_FLUSH:
				cam_fill_ataio(ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    tag_code,
				    NULL,
				    0,
				    ada_default_timeout*1000);

				if (softc->flags & ADA_FLAG_CAN_48BIT)
					ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
				else
					ata_36bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
				break;
			}
			start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;

			/*
			 * Block out any asynchronous callbacks
			 * while we touch the pending ccb list.
			 */
			LIST_INSERT_HEAD(&softc->pending_ccbs,
					 &start_ccb->ccb_h, periph_links.le);
			softc->outstanding_cmds++;

			/* We expect a unit attention from this device */
			if ((softc->flags & ADA_FLAG_RETRY_UA) != 0) {
				start_ccb->ccb_h.ccb_state |= ADA_CCB_RETRY_UA;
				softc->flags &= ~ADA_FLAG_RETRY_UA;
			}

			start_ccb->ccb_h.ccb_bp = bp;
			bp = bioq_first(&softc->bio_queue);

			xpt_action(start_ccb);
		}

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
		break;
	}
	}
}

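/*
 * Command completion handler: run error recovery if needed, complete
 * the bio and remove the request from the pending list.
 */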
static void
adadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ada_softc *softc;
	struct ccb_ataio *ataio;

	softc = (struct ada_softc *)periph->softc;
	ataio = &done_ccb->ataio;
	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
	case ADA_CCB_BUFFER_IO:
	{
		struct bio *bp;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;

			error = adaerror(done_ccb, CAM_RETRY_SELTO, 0);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {

				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our pack as
					 * invalid.
					 */
					/*
					 * XXX See if this is really a media
					 * XXX change first?
					 */
					xpt_print(periph->path,
					    "Invalidating pack\n");
					softc->flags |= ADA_FLAG_PACK_INVALID;
				}

				/*
				 * Return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				bioq_flush(&softc->bio_queue, NULL, EIO);
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = ataio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0)
					bp->bio_flags |= BIO_ERROR;
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			bp->bio_resid = ataio->resid;
			if (ataio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		softc->outstanding_cmds--;
		if (softc->outstanding_cmds == 0)
			softc->flags |= ADA_FLAG_WENT_IDLE;

		biodone(bp);
		break;
	}
	case ADA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case ADA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

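/*
 * Error callback: defer to the generic CAM periph error handling.
 */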
static int
adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct ada_softc *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct ada_softc *)periph->softc;

	return(cam_periph_error(ccb, cam_flags, sense_flags,
				&softc->saved_ccb));
}

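/*
 * Derive disk geometry and capacity from the ATA identify data,
 * preferring the 48-bit, then the 28-bit LBA size over the CHS values.
 */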
static void
adasetgeom(struct cam_periph *periph, struct ccb_getdev *cgd)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct disk_params *dp = &softc->params;
	u_int64_t lbasize48;
	u_int32_t lbasize;

	dp->secsize = 512;
	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
		dp->heads = cgd->ident_data.current_heads;
		dp->secs_per_track = cgd->ident_data.current_sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
	} else {
		dp->heads = cgd->ident_data.heads;
		dp->secs_per_track = cgd->ident_data.sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
	}
	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);

	/* Does this device need oldstyle CHS addressing? */
//	if (!ad_version(cgd->ident_data.version_major) || !lbasize)
//		atadev->flags |= ATA_D_USE_CHS;

	/* Use the 28bit LBA size if valid or bigger than the CHS mapping */
	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
		dp->sectors = lbasize;

	/* Use the 48bit LBA size if valid */
	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
	    lbasize48 > ATA_MAX_28BIT_LBA)
		dp->sectors = lbasize48;
}

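/*
 * Periodic callout: ask for an ordered tag to be inserted into the
 * command stream if the device has been busy without one.
 */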
static void
adasendorderedtag(void *arg)
{
	struct ada_softc *softc = arg;

	if (ada_send_ordered) {
		if ((softc->ordered_tag_count == 0)
		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
			softc->flags |= ADA_FLAG_NEED_OTAG;
		}
		if (softc->outstanding_cmds > 0)
			softc->flags &= ~ADA_FLAG_WENT_IDLE;

		softc->ordered_tag_count = 0;
	}
	/* Queue us up again */
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);
}

/*
 * Step through all ADA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
adashutdown(void *arg, int howto)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
		union ccb ccb;

		cam_periph_lock(periph);
		softc = (struct ada_softc *)periph->softc;
		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it.
		 */
		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
			cam_periph_unlock(periph);
			continue;
		}

		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_36bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		cam_periph_unlock(periph);
	}
}

#endif /* _KERNEL */