/* dad.c revision 8863:94039d51dda4 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */


/*
 * Direct Attached disk driver for SPARC machines.
 */

/*
 * Includes, Declarations and Local Data
 */
#include <sys/dada/dada.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/cdio.h>
#include <sys/vtoc.h>
#include <sys/dada/targets/daddef.h>
#include <sys/dada/targets/dadpriv.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kstat.h>
#include <sys/vtrace.h>
#include <sys/aio_req.h>
#include <sys/note.h>
#include <sys/cmlb.h>

/*
 * Global Error Levels for Error Reporting
 */
int dcd_error_level	= DCD_ERR_RETRYABLE;
/*
 * Local Static Data
 */

static int dcd_io_time		= DCD_IO_TIME;
static int dcd_retry_count	= DCD_RETRY_COUNT;
#ifndef lint
static int dcd_report_pfa = 1;
#endif
static int dcd_rot_delay = 4;
static int dcd_poll_busycnt = DCD_POLL_TIMEOUT;

/*
 * Local Function Prototypes
 */

static int dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p);
static int dcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p);
static int dcdstrategy(struct buf *bp);
static int dcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk);
static int dcdioctl(dev_t, int, intptr_t, int, cred_t *, int *);
static int dcdread(dev_t dev, struct uio *uio, cred_t *cred_p);
static int dcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p);
static int dcd_prop_op(dev_t, dev_info_t *, ddi_prop_op_t, int,
    char *, caddr_t, int *);
static int dcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p);
static int dcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p);


static void dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi);
static int dcd_doattach(dev_info_t *devi, int (*f)());
static int dcd_validate_geometry(struct dcd_disk *un);
static ddi_devid_t dcd_get_devid(struct dcd_disk *un);
static ddi_devid_t  dcd_create_devid(struct dcd_disk *un);
static int dcd_make_devid_from_serial(struct dcd_disk *un);
static void dcd_validate_model_serial(char *str, int *retlen, int totallen);
static int dcd_read_deviceid(struct dcd_disk *un);
static int dcd_write_deviceid(struct dcd_disk *un);
static int dcd_poll(struct dcd_pkt *pkt);
static char *dcd_rname(int reason);
static void dcd_flush_cache(struct dcd_disk *un);

static int dcd_compute_dk_capacity(struct dcd_device *devp,
    diskaddr_t *capacity);
static int dcd_send_lb_rw_cmd(dev_info_t *devinfo, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, uchar_t cmd);

static void dcdmin(struct buf *bp);

static int dcdioctl_cmd(dev_t, struct udcd_cmd *,
    enum uio_seg, enum uio_seg);

static void dcdstart(struct dcd_disk *un);
static void dcddone_and_mutex_exit(struct dcd_disk *un, struct buf *bp);
static void make_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*f)());
static void dcdudcdmin(struct buf *bp);

static int dcdrunout(caddr_t);
static int dcd_check_wp(dev_t dev);
static int dcd_unit_ready(dev_t dev);
static void dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp,
    struct dcd_disk *un);
static void dcdintr(struct dcd_pkt *pkt);
static int dcd_handle_incomplete(struct dcd_disk *un, struct buf *bp);
static void dcd_offline(struct dcd_disk *un, int bechatty);
static int dcd_ready_and_valid(dev_t dev, struct dcd_disk *un);
static void dcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt);
static void dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp);
static int dcdflushdone(struct buf *bp);

/* Function prototypes for cmlb */

static int dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
    diskaddr_t start_block, size_t reqlength, void *tg_cookie);

static int dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp);
static int dcd_lb_getinfo(dev_info_t *devi, int cmd, void *arg,
    void *tg_cookie);


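/*
 * Target-geometry ops vector passed to cmlb_attach() in dcdattach(); the
 * common label module calls back into dcd_lb_rdwr() for label I/O and
 * dcd_lb_getinfo() for capacity/geometry queries.
 */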
static cmlb_tg_ops_t dcd_lb_ops = {
	TG_DK_OPS_VERSION_1,
	dcd_lb_rdwr,
	dcd_lb_getinfo
};

/*
 * Error and Logging Functions
 */
#ifndef lint
static void clean_print(dev_info_t *dev, char *label, uint_t level,
    char *title, char *data, int len);
static void dcdrestart(void *arg);
#endif /* lint */

static int dcd_check_error(struct dcd_disk *un, struct buf *bp);

/*
 * Error statistics create/update functions
 */
static int dcd_create_errstats(struct dcd_disk *, int);



/*PRINTFLIKE4*/
extern void dcd_log(dev_info_t *, char *, uint_t, const char *, ...)
    __KPRINTFLIKE(4);
extern void makecommand(struct dcd_pkt *, int, uchar_t, uint32_t,
    uchar_t, uint32_t, uchar_t, uchar_t);


/*
 * Configuration Routines
 */
static int dcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result);
static int dcdprobe(dev_info_t *devi);
static int dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd);
static int dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd);
static int dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd);
static int dcd_dr_detach(dev_info_t *devi);
static int dcdpower(dev_info_t *devi, int component, int level);

static void *dcd_state;
static int dcd_max_instance;
static char *dcd_label = "dad";

static char *diskokay = "disk okay\n";

#if DEBUG || lint
#define	DCDDEBUG
#endif

int dcd_test_flag = 0;
/*
 * Debugging macros
 */
#ifdef	DCDDEBUG
static int dcddebug = 0;
#define	DEBUGGING	(dcddebug > 1)
#define	DAD_DEBUG	if (dcddebug == 1) dcd_log
#define	DAD_DEBUG2	if (dcddebug > 1) dcd_log
#else	/* DCDDEBUG */
#define	dcddebug		(0)
#define	DEBUGGING	(0)
#define	DAD_DEBUG	if (0) dcd_log
#define	DAD_DEBUG2	if (0) dcd_log
#endif

/*
 * We use the pkt_private area for storing the bp and retry counts.
 * XXX: Is this really useful?
 */
struct dcd_pkt_private {
	struct buf	*dcdpp_bp;
	short		 dcdpp_retry_count;
	short		 dcdpp_victim_retry_count;
};


_NOTE(SCHEME_PROTECTS_DATA("Unique per pkt", dcd_pkt_private buf))

#define	PP_LEN	(sizeof (struct dcd_pkt_private))

#define	PKT_SET_BP(pkt, bp)	\
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp = bp
#define	PKT_GET_BP(pkt) \
	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_bp)


#define	PKT_SET_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count = n

#define	PKT_GET_RETRY_CNT(pkt) \
	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count)

#define	PKT_INCR_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_retry_count += n

#define	PKT_SET_VICTIM_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
			= n

#define	PKT_GET_VICTIM_RETRY_CNT(pkt) \
	(((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count)
#define	PKT_INCR_VICTIM_RETRY_CNT(pkt, n) \
	((struct dcd_pkt_private *)pkt->pkt_private)->dcdpp_victim_retry_count \
			+= n

#define	DISK_NOT_READY_RETRY_COUNT	(dcd_retry_count / 2)


/*
 * Urk!
 */
#define	SET_BP_ERROR(bp, err)	\
	bioerror(bp, err);

#define	IOSP			KSTAT_IO_PTR(un->un_stats)
#define	IO_PARTITION_STATS	un->un_pstats[DCDPART(bp->b_edev)]
#define	IOSP_PARTITION		KSTAT_IO_PTR(IO_PARTITION_STATS)

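/*
 * Update the unit-level and per-partition I/O kstats for a transfer,
 * skipping the special command buffer (un_sbufp); the caller must hold
 * DCD_MUTEX.
 */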
#define	DCD_DO_KSTATS(un, kstat_function, bp) \
	ASSERT(mutex_owned(DCD_MUTEX)); \
	if (bp != un->un_sbufp) { \
		if (un->un_stats) { \
			kstat_function(IOSP); \
		} \
		if (IO_PARTITION_STATS) { \
			kstat_function(IOSP_PARTITION); \
		} \
	}

#define	DCD_DO_ERRSTATS(un, x) \
	if (un->un_errstats) { \
		struct dcd_errstats *dtp; \
		dtp = (struct dcd_errstats *)un->un_errstats->ks_data; \
		dtp->x.value.ui32++; \
	}
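
/*
 * Decode the minor number into partition and instance and look up the
 * per-instance soft state; makes the enclosing function return ENXIO if
 * no soft state exists.
 */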
#define	GET_SOFT_STATE(dev)						\
	struct dcd_disk *un;					\
	int instance, part;					\
	minor_t minor = getminor(dev);				\
									\
	part = minor & DCDPART_MASK;					\
	instance = minor >> DCDUNIT_SHIFT;				\
	if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL)	\
		return (ENXIO);

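/*
 * Non-zero when blkno is a multiple of 2^blknoshift, i.e. aligned to the
 * logical block boundary described by blknoshift.
 */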
#define	LOGICAL_BLOCK_ALIGN(blkno, blknoshift) \
		(((blkno) & ((1 << (blknoshift)) - 1)) == 0)

/*
 * After the following number of sectors, the cylinder number spills over
 * 0xFFFF if sectors = 63 and heads = 16.
 */
#define	NUM_SECTORS_32G	0x3EFFC10

/*
 * Configuration Data
 */

/*
 * Device driver ops vector
 */

static struct cb_ops dcd_cb_ops = {
	dcdopen,		/* open */
	dcdclose,		/* close */
	dcdstrategy,		/* strategy */
	nodev,			/* print */
	dcddump,		/* dump */
	dcdread,		/* read */
	dcdwrite,		/* write */
	dcdioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	dcd_prop_op,		/* cb_prop_op */
	0,			/* streamtab  */
	D_64BIT | D_MP | D_NEW,	/* Driver compatibility flag */
	CB_REV,			/* cb_rev */
	dcdaread,		/* async I/O read entry point */
	dcdawrite		/* async I/O write entry point */
};

static struct dev_ops dcd_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	dcdinfo,		/* info */
	nulldev,		/* identify */
	dcdprobe,		/* probe */
	dcdattach,		/* attach */
	dcddetach,		/* detach */
	dcdreset,		/* reset */
	&dcd_cb_ops,		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	dcdpower,		/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};


/*
 * This is the loadable module wrapper.
 */
#include <sys/modctl.h>

static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. This one is a driver */
	"DAD Disk Driver",	/* Name of the module. */
	&dcd_ops,	/* driver ops */
};



static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

/*
 * the dcd_attach_mutex only protects dcd_max_instance in multi-threaded
 * attach situations
 */
static kmutex_t dcd_attach_mutex;

int
_init(void)
{
	int e;

	if ((e = ddi_soft_state_init(&dcd_state, sizeof (struct dcd_disk),
	    DCD_MAXUNIT)) != 0)
		return (e);

	mutex_init(&dcd_attach_mutex, NULL, MUTEX_DRIVER, NULL);
	e = mod_install(&modlinkage);
	if (e != 0) {
		mutex_destroy(&dcd_attach_mutex);
		ddi_soft_state_fini(&dcd_state);
		return (e);
	}

	return (e);
}

int
_fini(void)
{
	int e;

	if ((e = mod_remove(&modlinkage)) != 0)
		return (e);

	ddi_soft_state_fini(&dcd_state);
	mutex_destroy(&dcd_attach_mutex);

	return (e);
}

int
_info(struct modinfo *modinfop)
{

	return (mod_info(&modlinkage, modinfop));
}

static int
dcdprobe(dev_info_t *devi)
{
	struct dcd_device *devp;
	int rval = DDI_PROBE_PARTIAL;
	int instance;

	devp = ddi_get_driver_private(devi);
	instance = ddi_get_instance(devi);

	/*
	 * Keep a count of how many disks (i.e. the highest instance number)
	 * we have.  XXX: currently not used, but it may be useful again later.
	 */
	mutex_enter(&dcd_attach_mutex);
	if (instance > dcd_max_instance)
		dcd_max_instance = instance;
	mutex_exit(&dcd_attach_mutex);

	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG, "dcdprobe:\n");

	if (ddi_get_soft_state(dcd_state, instance) != NULL)
		return (DDI_PROBE_PARTIAL);

	/*
	 * Turn around and call the utility probe routine
	 * to see whether we actually have a disk attached.
	 */

	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
	    "dcdprobe: %x\n", dcd_probe(devp, NULL_FUNC));

	switch (dcd_probe(devp, NULL_FUNC)) {
	default:
	case DCDPROBE_NORESP:
	case DCDPROBE_NONCCS:
	case DCDPROBE_NOMEM:
	case DCDPROBE_FAILURE:
	case DCDPROBE_BUSY:
		break;

	case DCDPROBE_EXISTS:
		/*
		 * Check whether it is an ATA device and then
		 * return SUCCESS.
		 */
		DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
		    "config %x\n", devp->dcd_ident->dcd_config);
		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
				rval = DDI_PROBE_SUCCESS;
			} else
				rval = DDI_PROBE_FAILURE;
		} else {
			rval = DDI_PROBE_FAILURE;
		}
		break;
	}
	dcd_unprobe(devp);

	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG,
	    "dcdprobe returns %x\n", rval);

	return (rval);
}


/*ARGSUSED*/
static int
dcdattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int instance, rval;
	struct dcd_device *devp;
	struct dcd_disk *un;
	struct diskhd *dp;
	char	*pm_comp[] =
	    { "NAME=ide-disk", "0=standby", "1=idle", "2=active" };

	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);


	devp = ddi_get_driver_private(devi);
	instance = ddi_get_instance(devi);
	DAD_DEBUG2(devp->dcd_dev, dcd_label, DCD_DEBUG, "Attach Started\n");

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		if (!(un = ddi_get_soft_state(dcd_state, instance)))
			return (DDI_FAILURE);
		mutex_enter(DCD_MUTEX);
		Restore_state(un);
		/*
		 * Restore the state which was saved to give the
		 * right state in un_last_state.
		 */
		un->un_last_state = un->un_save_state;
		un->un_throttle = 2;
		cv_broadcast(&un->un_suspend_cv);
		/*
		 * Raise the power level of the device to active.
		 */
		mutex_exit(DCD_MUTEX);
		(void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
		mutex_enter(DCD_MUTEX);

		/*
		 * start unit - if this is a low-activity device
		 * commands in queue will have to wait until new
		 * commands come in, which may take a while.
		 * Also, we specifically don't check un_ncmds
		 * because we know that there really are no
		 * commands in progress after the unit was suspended
		 * and we could have reached the throttle level, been
		 * suspended, and have no new commands coming in for
		 * a while.  Highly unlikely, but so is the low-
		 * activity disk scenario.
		 */
		dp = &un->un_utab;
		if (dp->b_actf && (dp->b_forw == NULL)) {
			dcdstart(un);
		}

		mutex_exit(DCD_MUTEX);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	if (dcd_doattach(devi, SLEEP_FUNC) == DDI_FAILURE) {
		return (DDI_FAILURE);
	}

	if (!(un = (struct dcd_disk *)
	    ddi_get_soft_state(dcd_state, instance))) {
		return (DDI_FAILURE);
	}
	devp->dcd_private = (ataopaque_t)un;

	/*
	 * Add a zero-length attribute to tell the world we support
	 * kernel ioctls (for layered drivers)
	 */
	(void) ddi_prop_create(DDI_DEV_T_NONE, devi, DDI_PROP_CANSLEEP,
	    DDI_KERNEL_IOCTL, NULL, 0);

	/*
	 * Since the dad device does not have the 'reg' property,
	 * cpr will not call its DDI_SUSPEND/DDI_RESUME entries.
	 * The following code is to tell cpr that this device
	 * does need to be suspended and resumed.
	 */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
	    "pm-hardware-state", (caddr_t)"needs-suspend-resume");

	/*
	 * Initialize power management bookkeeping;
	 * create components.  In the IDE case there is one component
	 * with three levels: active, idle, and standby.
	 */

	rval = ddi_prop_update_string_array(DDI_DEV_T_NONE,
	    devi, "pm-components", pm_comp, 4);
	if (rval == DDI_PROP_SUCCESS) {
		/*
		 * Ignore the return value of pm_raise_power.
		 * Even if we checked the return value and
		 * removed the property created above, the PM
		 * framework will not honour the change after
		 * the first call to pm_raise_power. Hence, the
		 * removal of that property does not help if
		 * pm_raise_power fails.
		 */
		(void) pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE);
	}

	ddi_report_dev(devi);

	cmlb_alloc_handle(&un->un_dklbhandle);

	if (cmlb_attach(devi,
	    &dcd_lb_ops,
	    0,
	    B_FALSE,
	    B_FALSE,
	    DDI_NT_BLOCK_CHAN,
	    CMLB_FAKE_GEOM_LABEL_IOCTLS_VTOC8,
	    un->un_dklbhandle,
	    0) != 0) {
		cmlb_free_handle(&un->un_dklbhandle);
		dcd_free_softstate(un, devi);
		return (DDI_FAILURE);
	}

	mutex_enter(DCD_MUTEX);
	(void) dcd_validate_geometry(un);

	/* Get the devid; create a devid ONLY IF we could not get one */
	if (dcd_get_devid(un) == NULL) {
		/* Create the fabricated devid */
		(void) dcd_create_devid(un);
	}
	mutex_exit(DCD_MUTEX);

	return (DDI_SUCCESS);
}

static void
dcd_free_softstate(struct dcd_disk *un, dev_info_t *devi)
{
	struct dcd_device		*devp;
	int instance = ddi_get_instance(devi);

	devp = ddi_get_driver_private(devi);

	if (un) {
		sema_destroy(&un->un_semoclose);
		cv_destroy(&un->un_sbuf_cv);
		cv_destroy(&un->un_state_cv);
		cv_destroy(&un->un_disk_busy_cv);
		cv_destroy(&un->un_suspend_cv);

		/*
		 * Deallocate command packet resources.
		 */
		if (un->un_sbufp)
			freerbuf(un->un_sbufp);
		if (un->un_dp) {
			kmem_free((caddr_t)un->un_dp, sizeof (*un->un_dp));
		}
		/*
		 * Unregister the devid and free devid resources allocated
		 */
		ddi_devid_unregister(DCD_DEVINFO);
		if (un->un_devid) {
			ddi_devid_free(un->un_devid);
			un->un_devid = NULL;
		}

		/*
		 * Delete kstats. Kstats for non CD devices are deleted
		 * in dcdclose.
		 */
		if (un->un_stats) {
			kstat_delete(un->un_stats);
		}

	}

	/*
	 * Cleanup scsi_device resources.
	 */
	ddi_soft_state_free(dcd_state, instance);
	devp->dcd_private = (ataopaque_t)0;
	/* unprobe scsi device */
	dcd_unprobe(devp);

	/* Remove properties created during attach */
	ddi_prop_remove_all(devi);
}

static int
dcddetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int instance;
	struct dcd_disk *un;
	clock_t	wait_cmds_complete;
	instance = ddi_get_instance(devi);

	if (!(un = ddi_get_soft_state(dcd_state, instance)))
		return (DDI_FAILURE);

	switch (cmd) {
	case DDI_DETACH:
		return (dcd_dr_detach(devi));

	case DDI_SUSPEND:
		mutex_enter(DCD_MUTEX);
		if (un->un_state == DCD_STATE_SUSPENDED) {
			mutex_exit(DCD_MUTEX);
			return (DDI_SUCCESS);
		}
		un->un_throttle = 0;
		/*
		 * Save the last state first
		 */
		un->un_save_state = un->un_last_state;

		New_state(un, DCD_STATE_SUSPENDED);

		/*
		 * Wait till the current operation completes. If we are
		 * in the resource wait state (with an intr outstanding)
		 * then we need to wait till the intr completes and
		 * starts the next cmd. We wait for
		 * DCD_WAIT_CMDS_COMPLETE seconds before failing the
		 * DDI_SUSPEND.
		 */
		wait_cmds_complete = ddi_get_lbolt();
		wait_cmds_complete +=
		    DCD_WAIT_CMDS_COMPLETE * drv_usectohz(1000000);

		while (un->un_ncmds) {
			if (cv_timedwait(&un->un_disk_busy_cv,
			    DCD_MUTEX, wait_cmds_complete) == -1) {
				/*
				 * Commands didn't finish in the
				 * specified time; fail the DDI_SUSPEND.
				 */
				DAD_DEBUG2(DCD_DEVINFO, dcd_label,
				    DCD_DEBUG, "dcddetach: SUSPEND "
				    "failed due to outstanding cmds\n");
				Restore_state(un);
				mutex_exit(DCD_MUTEX);
				return (DDI_FAILURE);
			}
		}
		mutex_exit(DCD_MUTEX);
		return (DDI_SUCCESS);
	}
	return (DDI_FAILURE);
}

/*
 * The reset entry point gets invoked at system shutdown time or through
 * the CPR code at system suspend.
 * It flushes the cache and expects this to be the last I/O operation to
 * the disk before system reset/power off.
 */
/*ARGSUSED*/
static int
dcdreset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
	struct dcd_disk *un;
	int instance;

	instance = ddi_get_instance(dip);

	if (!(un = ddi_get_soft_state(dcd_state, instance)))
		return (DDI_FAILURE);

	dcd_flush_cache(un);

	return (DDI_SUCCESS);
}


static int
dcd_dr_detach(dev_info_t *devi)
{
	struct dcd_device	*devp;
	struct dcd_disk		*un;

	/*
	 * Get the dcd_device structure for this instance.
	 */
	if ((devp = ddi_get_driver_private(devi)) == NULL)
		return (DDI_FAILURE);

	/*
	 * Get dcd_disk structure containing target 'private' information
	 */
	un = (struct dcd_disk *)devp->dcd_private;

	/*
	 * Verify there are NO outstanding commands issued to this device,
	 * i.e. un_ncmds == 0.
	 * It's possible to have outstanding commands through the physio
	 * code path, even though everything's closed.
	 */
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif
	mutex_enter(DCD_MUTEX);
	if (un->un_ncmds) {
		mutex_exit(DCD_MUTEX);
		_NOTE(NO_COMPETING_THREADS_NOW);
		return (DDI_FAILURE);
	}

	mutex_exit(DCD_MUTEX);

	cmlb_detach(un->un_dklbhandle, 0);
	cmlb_free_handle(&un->un_dklbhandle);


	/*
	 * Lower the power state of the device,
	 * i.e. the minimum power consumption state - standby.
	 */
	(void) pm_lower_power(DCD_DEVINFO, 0, DCD_DEVICE_STANDBY);

	_NOTE(NO_COMPETING_THREADS_NOW);

	/*
	 * At this point there are no competing threads anymore;
	 * release active MT locks and all device resources.
	 */
	dcd_free_softstate(un, devi);

	return (DDI_SUCCESS);
}

static int
dcdpower(dev_info_t *devi, int component, int level)
{
	struct dcd_pkt *pkt;
	struct dcd_disk *un;
	int	instance;
	uchar_t	cmd;


	instance = ddi_get_instance(devi);

	if (!(un = ddi_get_soft_state(dcd_state, instance)) ||
	    (DCD_DEVICE_STANDBY > level) || (level > DCD_DEVICE_ACTIVE) ||
	    component != 0) {
		return (DDI_FAILURE);
	}

	mutex_enter(DCD_MUTEX);
	/*
	 * If there are active commands for the device, or the device will
	 * be active soon, and at the same time there is a request to lower
	 * power, return failure.
	 */
	if ((un->un_ncmds) && (level != DCD_DEVICE_ACTIVE)) {
		mutex_exit(DCD_MUTEX);
		return (DDI_FAILURE);
	}

	if ((un->un_state == DCD_STATE_OFFLINE) ||
	    (un->un_state == DCD_STATE_FATAL)) {
		mutex_exit(DCD_MUTEX);
		return (DDI_FAILURE);
	}

	if (level == DCD_DEVICE_ACTIVE) {
		/*
		 * No need to fire any command, just set the state structure
		 * to indicate previous state and set the level to active
		 */
		un->un_power_level = DCD_DEVICE_ACTIVE;
		if (un->un_state == DCD_STATE_PM_SUSPENDED)
			Restore_state(un);
		mutex_exit(DCD_MUTEX);
	} else {
		pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
		    NULL, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
		    PKT_CONSISTENT, NULL_FUNC, NULL);

		if (pkt == (struct dcd_pkt *)NULL) {
			mutex_exit(DCD_MUTEX);
			return (DDI_FAILURE);
		}

		switch (level) {
		case DCD_DEVICE_IDLE:
			cmd = ATA_IDLE_IMMEDIATE;
			break;

		case DCD_DEVICE_STANDBY:
			cmd = ATA_STANDBY_IMMEDIATE;
			break;
		}

		makecommand(pkt, 0, cmd, 0, 0, 0, NO_DATA_XFER, 0);
		mutex_exit(DCD_MUTEX);
		/*
		 * Issue the appropriate command
		 */
		if ((dcd_poll(pkt)) || (SCBP_C(pkt) != STATUS_GOOD)) {
			dcd_destroy_pkt(pkt);
			return (DDI_FAILURE);
		}
		dcd_destroy_pkt(pkt);
		mutex_enter(DCD_MUTEX);
		if (un->un_state != DCD_STATE_PM_SUSPENDED)
			New_state(un, DCD_STATE_PM_SUSPENDED);
		un->un_power_level = level;
		mutex_exit(DCD_MUTEX);
	}

	return (DDI_SUCCESS);
}

static int
dcd_doattach(dev_info_t *devi, int (*canwait)())
{
	struct dcd_device *devp;
	struct dcd_disk *un = (struct dcd_disk *)0;
	int instance;
	int km_flags = (canwait != NULL_FUNC)? KM_SLEEP : KM_NOSLEEP;
	int rval;
	char *prop_template = "target%x-dcd-options";
	int options;
	char    prop_str[32];
	int target;
	diskaddr_t capacity;

	devp = ddi_get_driver_private(devi);

	/*
	 * Call the routine scsi_probe to do some of the dirty work.
	 * If the INQUIRY command succeeds, the field dcd_inq in the
	 * device structure will be filled in. The dcd_sense structure
	 * will also be allocated.
	 */

	switch (dcd_probe(devp, canwait)) {
	default:
		return (DDI_FAILURE);

	case DCDPROBE_EXISTS:
		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
				rval = DDI_SUCCESS;
			} else {
				rval = DDI_FAILURE;
				goto error;
			}
		} else {
			rval = DDI_FAILURE;
			goto error;
		}
	}


	instance = ddi_get_instance(devp->dcd_dev);

	if (ddi_soft_state_zalloc(dcd_state, instance) != DDI_SUCCESS) {
		rval = DDI_FAILURE;
		goto error;
	}

	un = ddi_get_soft_state(dcd_state, instance);

	un->un_sbufp = getrbuf(km_flags);
	if (un->un_sbufp == (struct buf *)NULL) {
		rval = DDI_FAILURE;
		goto error;
	}


	un->un_dcd = devp;
	un->un_power_level = -1;
	un->un_tgattribute.media_is_writable = 1;

	sema_init(&un->un_semoclose, 1, NULL, SEMA_DRIVER, NULL);
	cv_init(&un->un_sbuf_cv, NULL, CV_DRIVER, NULL);
	cv_init(&un->un_state_cv, NULL, CV_DRIVER, NULL);
	/* Initialize the power management condition variable */
	cv_init(&un->un_disk_busy_cv, NULL, CV_DRIVER, NULL);
	cv_init(&un->un_suspend_cv, NULL, CV_DRIVER, NULL);

	if (un->un_dp == 0) {
		/*
		 * Assume CCS drive, assume parity, but call
		 * it a CDROM if it is a RODIRECT device.
		 */
		un->un_dp = (struct dcd_drivetype *)
		    kmem_zalloc(sizeof (struct dcd_drivetype), km_flags);
		if (!un->un_dp) {
			rval = DDI_FAILURE;
			goto error;
		}
		if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
			if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
				un->un_dp->ctype = CTYPE_DISK;
			}
		} else  {
			rval = DDI_FAILURE;
			goto error;
		}
		un->un_dp->name = "CCS";
		un->un_dp->options = 0;
	}

	/*
	 * Allow I/O requests at un_secsize offset in multiple of un_secsize.
	 */
	un->un_secsize = DEV_BSIZE;

	/*
	 * If the device is not a removable media device, make sure that
	 * the device is ready (another IDENTIFY could be issued, but it
	 * is not needed).  Get the capacity from the identify data and
	 * store it here.
	 */
	if (dcd_compute_dk_capacity(devp, &capacity) == 0) {
		un->un_diskcapacity = capacity;
		un->un_lbasize = DEV_BSIZE;
	}

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Geometry Data\n");
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "cyls %x, heads %x",
	    devp->dcd_ident->dcd_fixcyls,
	    devp->dcd_ident->dcd_heads);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "sectors %x,",
	    devp->dcd_ident->dcd_sectors);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %llx\n",
	    capacity);

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "dcdprobe: drive selected\n");

	/*
	 * Check for the property target<n>-dcd-options to find the options
	 * set by the HBA driver for this target, so that we can set the
	 * unit structure variables and send commands accordingly.
	 */
	target = devp->dcd_address->da_target;
	(void) sprintf(prop_str, prop_template, target);
	options = ddi_prop_get_int(DDI_DEV_T_ANY, devi, DDI_PROP_NOTPROM,
	    prop_str, -1);
	if (options < 0) {
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "No per target properties");
	} else {
		if ((options & DCD_DMA_MODE) == DCD_DMA_MODE) {
			un->un_dp->options |= DMA_SUPPORTTED;
			un->un_dp->dma_mode = (options >> 3) & 0x03;
			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
			    "mode %x\n", un->un_dp->dma_mode);
		} else {
			un->un_dp->options &= ~DMA_SUPPORTTED;
			un->un_dp->pio_mode = options & 0x7;
			if (options & DCD_BLOCK_MODE)
				un->un_dp->options |= BLOCK_MODE;
			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
			    "mode %x\n", un->un_dp->pio_mode);
		}
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "options %x,", un->un_dp->options);
	}

	un->un_throttle = 2;
	/*
	 * set default max_xfer_size - This should depend on whether the
	 * Block mode is supported by the device or not.
	 */
	un->un_max_xfer_size = MAX_ATA_XFER_SIZE;

	/*
	 * Set write cache enable softstate
	 *
	 * WCE is only supported in ATAPI-4 or higher; for
	 * lower rev devices, must assume write cache is
	 * enabled.
	 */
	mutex_enter(DCD_MUTEX);
	un->un_write_cache_enabled = (devp->dcd_ident->dcd_majvers == 0xffff) ||
	    ((devp->dcd_ident->dcd_majvers & IDENTIFY_80_ATAPI_4) == 0) ||
	    (devp->dcd_ident->dcd_features85 & IDENTIFY_85_WCE) != 0;
	mutex_exit(DCD_MUTEX);

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "dcd_doattach returns good\n");

	return (rval);

error:
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcd_doattach failed\n");
	dcd_free_softstate(un, devi);
	return (rval);
}

#ifdef NOTNEEDED
/*
 * This routine is used to set the block mode of operation by issuing the
 * Set Block mode ata command with the maximum block mode possible
 */
int
dcd_set_multiple(struct dcd_disk *un)
{
	int status;
	struct udcd_cmd ucmd;
	struct dcd_cmd cdb;
	dev_t	dev;


	/* Zero all the required structures */
	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));

	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));

	cdb.cmd = ATA_SET_MULTIPLE;
	/*
	 * Here we should pass what needs to go into the sector count REGISTER.
	 * Even though this field indicates the number of bytes to read, we
	 * need to specify the block factor in terms of bytes so that it
	 * will be programmed by the HBA driver into the sector count register.
	 */
	cdb.size = un->un_lbasize * un->un_dp->block_factor;

	cdb.sector_num.lba_num = 0;
	cdb.address_mode = ADD_LBA_MODE;
	cdb.direction = NO_DATA_XFER;

	ucmd.udcd_flags = 0;
	ucmd.udcd_cmd = &cdb;
	ucmd.udcd_bufaddr = NULL;
	ucmd.udcd_buflen = 0;
	ucmd.udcd_flags |= UDCD_SILENT;

	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);


	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);

	return (status);
}
/*
 * The following routine is used only for setting the transfer mode
 * and it is not designed for transferring any other features subcommand.
 */
int
dcd_set_features(struct dcd_disk *un, uchar_t mode)
{
	int status;
	struct udcd_cmd ucmd;
	struct dcd_cmd cdb;
	dev_t	dev;


	/* Zero all the required structures */
	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));

	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));

	cdb.cmd = ATA_SET_FEATURES;
	/*
	 * Here we need to pass what needs to go into the sector count
	 * register.  But in the case of the SET FEATURES command the value
	 * taken by the sector count register depends on what type of
	 * subcommand is passed in the features register. Since we have
	 * defined the size to be the size in bytes, in this context it does
	 * not indicate bytes; instead it indicates the mode to be programmed.
	 */
	cdb.size = un->un_lbasize * mode;

	cdb.sector_num.lba_num = 0;
	cdb.address_mode = ADD_LBA_MODE;
	cdb.direction = NO_DATA_XFER;
	cdb.features = ATA_FEATURE_SET_MODE;
	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "size %x, features %x, cmd %x\n",
	    cdb.size, cdb.features, cdb.cmd);

	ucmd.udcd_flags = 0;
	ucmd.udcd_cmd = &cdb;
	ucmd.udcd_bufaddr = NULL;
	ucmd.udcd_buflen = 0;
	ucmd.udcd_flags |= UDCD_SILENT;

	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);

	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);

	return (status);
}
#endif

/*
 * Validate the geometry for this disk, e.g.,
 * see whether it has a valid label.
 */
static int
dcd_validate_geometry(struct dcd_disk *un)
{
	int secsize = 0;
	struct  dcd_device *devp;
	int secdiv;
	int rval;

	ASSERT(mutex_owned(DCD_MUTEX));
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "dcd_validate_geometry: started \n");

	if (un->un_lbasize < 0) {
		return (DCD_BAD_LABEL);
	}

	if (un->un_state == DCD_STATE_PM_SUSPENDED) {
		mutex_exit(DCD_MUTEX);
		if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE) !=
		    DDI_SUCCESS) {
			mutex_enter(DCD_MUTEX);
			return (DCD_BAD_LABEL);
		}
		mutex_enter(DCD_MUTEX);
	}

	secsize = un->un_secsize;

	/*
	 * take a log base 2 of sector size (sorry)
	 */
	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
		;
	un->un_secdiv = secdiv;

	/*
	 * Only DIRECT ACCESS devices will have Sun labels.
	 * CD's supposedly have a Sun label, too
	 */

	devp = un->un_dcd;

	if (((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) &&
	    (devp->dcd_ident->dcd_config & ATANON_REMOVABLE)) {
		mutex_exit(DCD_MUTEX);
		rval = cmlb_validate(un->un_dklbhandle, 0, 0);
		mutex_enter(DCD_MUTEX);
		if (rval == ENOMEM)
			return (DCD_NO_MEM_FOR_LABEL);
		else if (rval != 0)
			return (DCD_BAD_LABEL);
	} else {
		/* it should never get here. */
		return (DCD_BAD_LABEL);
	}

	/*
	 * take a log base 2 of logical block size
	 */
	secsize = un->un_lbasize;
	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
		;
	un->un_lbadiv = secdiv;

	/*
	 * take a log base 2 of the multiple of DEV_BSIZE blocks that
	 * make up one logical block
	 */
	secsize = un->un_lbasize >> DEV_BSHIFT;
	for (secdiv = 0; secsize = secsize >> 1; secdiv++)
		;
	un->un_blknoshift = secdiv;
	return (0);
}

/*
 * Unix Entry Points
 */

/* ARGSUSED3 */
static int
dcdopen(dev_t *dev_p, int flag, int otyp, cred_t *cred_p)
{
	dev_t dev = *dev_p;
	int rval = EIO;
	int partmask;
	int nodelay = (flag & (FNDELAY | FNONBLOCK));
	int i;
	char kstatname[KSTAT_STRLEN];
	diskaddr_t lblocks;
	char *partname;

	GET_SOFT_STATE(dev);

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Inside Open flag %x, otyp %x\n", flag, otyp);

	if (otyp >= OTYPCNT) {
		return (EINVAL);
	}

	partmask = 1 << part;

	/*
	 * We use a semaphore here in order to serialize
	 * open and close requests on the device.
	 */
	sema_p(&un->un_semoclose);

	mutex_enter(DCD_MUTEX);

	if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL) {
		rval = ENXIO;
		goto done;
	}

	while (un->un_state == DCD_STATE_SUSPENDED) {
		cv_wait(&un->un_suspend_cv, DCD_MUTEX);
	}

	if ((un->un_state == DCD_STATE_PM_SUSPENDED) && (!nodelay)) {
		mutex_exit(DCD_MUTEX);
		if (pm_raise_power(DCD_DEVINFO, 0, DCD_DEVICE_ACTIVE)
		    != DDI_SUCCESS) {
			mutex_enter(DCD_MUTEX);
			rval = EIO;
			goto done;
		}
		mutex_enter(DCD_MUTEX);
	}

	/*
	 * set make_dcd_cmd() flags and stat_size here since these
	 * are unlikely to change
	 */
	un->un_cmd_flags = 0;

	un->un_cmd_stat_size = 2;

	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdopen un=0x%p\n",
	    (void *)un);
	/*
	 * check for previous exclusive open
	 */
	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "exclopen=%x, flag=%x, regopen=%x\n",
	    un->un_exclopen, flag, un->un_ocmap.regopen[otyp]);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Exclusive open flag %x, partmask %x\n",
	    un->un_exclopen, partmask);

	if (un->un_exclopen & (partmask)) {
failed_exclusive:
		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
		    "exclusive open fails\n");
		rval = EBUSY;
		goto done;
	}

	if (flag & FEXCL) {
		int i;
		if (un->un_ocmap.lyropen[part]) {
			goto failed_exclusive;
		}
		for (i = 0; i < (OTYPCNT - 1); i++) {
			if (un->un_ocmap.regopen[i] & (partmask)) {
				goto failed_exclusive;
			}
		}
	}
	if (flag & FWRITE) {
		mutex_exit(DCD_MUTEX);
		if (dcd_check_wp(dev)) {
			sema_v(&un->un_semoclose);
			return (EROFS);
		}
		mutex_enter(DCD_MUTEX);
	}

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Check Write Protect handled\n");

	if (!nodelay) {
		mutex_exit(DCD_MUTEX);
		if ((rval = dcd_ready_and_valid(dev, un)) != 0) {
			rval = EIO;
		}
		(void) pm_idle_component(DCD_DEVINFO, 0);
		/*
		 * Fail if device is not ready or if the number of disk
		 * blocks is zero or negative for non CD devices.
		 */
		if (rval || cmlb_partinfo(un->un_dklbhandle,
		    part, &lblocks, NULL, &partname, NULL, 0) ||
		    lblocks <= 0) {
			rval = EIO;
			mutex_enter(DCD_MUTEX);
			goto done;
		}
		mutex_enter(DCD_MUTEX);
	}

	if (otyp == OTYP_LYR) {
		un->un_ocmap.lyropen[part]++;
	} else {
		un->un_ocmap.regopen[otyp] |= partmask;
	}

	/*
	 * set up open and exclusive open flags
	 */
	if (flag & FEXCL) {
		un->un_exclopen |= (partmask);
	}


	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "open of part %d type %d\n",
	    part, otyp);

	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
	    "Kstats getting updated\n");
	/*
	 * only create kstats for disks, CD kstats created in dcdattach
	 */
	_NOTE(NO_COMPETING_THREADS_NOW);
	mutex_exit(DCD_MUTEX);
	if (un->un_stats == (kstat_t *)0) {
		un->un_stats = kstat_create("dad", instance,
		    NULL, "disk", KSTAT_TYPE_IO, 1,
		    KSTAT_FLAG_PERSISTENT);
		if (un->un_stats) {
			un->un_stats->ks_lock = DCD_MUTEX;
			kstat_install(un->un_stats);
		}

		/*
		 * set up partition statistics for each partition
		 * with number of blocks > 0
		 */
		if (!nodelay) {
			for (i = 0; i < NDKMAP; i++) {
				if ((un->un_pstats[i] == (kstat_t *)0) &&
				    (cmlb_partinfo(un->un_dklbhandle,
				    i, &lblocks, NULL, &partname,
				    NULL, 0) == 0) && lblocks > 0) {
					(void) sprintf(kstatname, "dad%d,%s",
					    instance, partname);
					un->un_pstats[i] = kstat_create("dad",
					    instance,
					    kstatname,
					    "partition",
					    KSTAT_TYPE_IO,
					    1,
					    KSTAT_FLAG_PERSISTENT);
					if (un->un_pstats[i]) {
						un->un_pstats[i]->ks_lock =
						    DCD_MUTEX;
						kstat_install(un->un_pstats[i]);
					}
				}
			}
		}
		/*
		 * set up error kstats
		 */
		(void) dcd_create_errstats(un, instance);
	}
#ifndef lint
	_NOTE(COMPETING_THREADS_NOW);
#endif

	sema_v(&un->un_semoclose);
	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "Open success\n");
	return (0);

done:
	mutex_exit(DCD_MUTEX);
	sema_v(&un->un_semoclose);
	return (rval);

}
1455219019Sgabor
1456219019Sgabor/*
1457219019Sgabor * Test if disk is ready and has a valid geometry.
1458219019Sgabor */
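/*
 * A minimal caller sketch (illustrative only, modeled on dcdopen()
 * and dcdstrategy() below): a return value of 0 means the disk is
 * ready with a valid label, anything else is treated as a failure.
 *
 *	if (dcd_ready_and_valid(dev, un) != 0)
 *		rval = EIO;
 */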
1459219019Sgaborstatic int
1460219019Sgabordcd_ready_and_valid(dev_t dev, struct dcd_disk *un)
1461219019Sgabor{
1462219019Sgabor	int rval = 1;
1463219019Sgabor	int g_error = 0;
1464219019Sgabor
1465219019Sgabor	mutex_enter(DCD_MUTEX);
1466219019Sgabor	/*
1467219019Sgabor	 * cmds outstanding
1468219019Sgabor	 */
1469219019Sgabor	if (un->un_ncmds == 0) {
1470219019Sgabor		(void) dcd_unit_ready(dev);
1471219019Sgabor	}
1472219019Sgabor
1473219019Sgabor	/*
1474219019Sgabor	 * If the device is still not ready here, mark it offline
1475219019Sgabor	 */
1476219019Sgabor	if (un->un_state == DCD_STATE_NORMAL) {
1477219019Sgabor		rval = dcd_unit_ready(dev);
1478219019Sgabor		if (rval != 0 && rval != EACCES) {
1479219019Sgabor			dcd_offline(un, 1);
1480219019Sgabor			goto done;
1481219019Sgabor		}
1482219019Sgabor	}
1483219019Sgabor
1484219019Sgabor	if (un->un_format_in_progress == 0) {
1485219019Sgabor		g_error = dcd_validate_geometry(un);
1486219019Sgabor	}
1487219019Sgabor
1488219019Sgabor	/*
1489219019Sgabor	 * Check whether the geometry was valid. We don't check the
1490219019Sgabor	 * validity of the geometry for CD-ROMs.
1491219019Sgabor	 */
1492219019Sgabor
1493219019Sgabor	if (g_error == DCD_BAD_LABEL) {
1494219019Sgabor		rval = 1;
1495219019Sgabor		goto done;
1496219019Sgabor	}
1497219019Sgabor
1498219019Sgabor
1499219019Sgabor	/*
1500219019Sgabor	 * the state has changed; inform the media watch routines
1501219019Sgabor	 */
1502219019Sgabor	un->un_mediastate = DKIO_INSERTED;
1503219019Sgabor	cv_broadcast(&un->un_state_cv);
1504219019Sgabor	rval = 0;
1505219019Sgabor
1506219019Sgabordone:
1507219019Sgabor	mutex_exit(DCD_MUTEX);
1508219019Sgabor	return (rval);
1509219019Sgabor}
1510219019Sgabor
1511219019Sgabor
1512219019Sgabor/*ARGSUSED*/
1513219019Sgaborstatic int
1514219019Sgabordcdclose(dev_t dev, int flag, int otyp, cred_t *cred_p)
1515219019Sgabor{
1516219019Sgabor	uchar_t *cp;
1517219019Sgabor	int i;
1518219019Sgabor
1519219019Sgabor	GET_SOFT_STATE(dev);
1520219019Sgabor
1521219019Sgabor
1522219019Sgabor	if (otyp >= OTYPCNT)
1523219019Sgabor		return (ENXIO);
1524219019Sgabor
1525219019Sgabor	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1526219019Sgabor	    "close of part %d type %d\n",
1527219019Sgabor	    part, otyp);
1528219019Sgabor	sema_p(&un->un_semoclose);
1529219019Sgabor
1530219019Sgabor	mutex_enter(DCD_MUTEX);
1531219019Sgabor
1532219019Sgabor	if (un->un_exclopen & (1<<part)) {
1533219019Sgabor		un->un_exclopen &= ~(1<<part);
1534219019Sgabor	}
1535219019Sgabor
1536219019Sgabor	if (otyp == OTYP_LYR) {
1537219019Sgabor		un->un_ocmap.lyropen[part] -= 1;
1538219019Sgabor	} else {
1539219019Sgabor		un->un_ocmap.regopen[otyp] &= ~(1<<part);
1540219019Sgabor	}
1541219019Sgabor
1542219019Sgabor	cp = &un->un_ocmap.chkd[0];
1543219019Sgabor	while (cp < &un->un_ocmap.chkd[OCSIZE]) {
1544219019Sgabor		if (*cp != (uchar_t)0) {
1545219019Sgabor			break;
1546219019Sgabor		}
1547219019Sgabor		cp++;
1548219019Sgabor	}
1549219019Sgabor
1550219019Sgabor	if (cp == &un->un_ocmap.chkd[OCSIZE]) {
1551219019Sgabor		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "last close\n");
1552219019Sgabor		if (un->un_state == DCD_STATE_OFFLINE) {
1553219019Sgabor			dcd_offline(un, 1);
1554219019Sgabor		}
1555219019Sgabor
1556219019Sgabor		mutex_exit(DCD_MUTEX);
1557219019Sgabor		(void) cmlb_close(un->un_dklbhandle, 0);
1558219019Sgabor
1559219019Sgabor		_NOTE(NO_COMPETING_THREADS_NOW);
1560219019Sgabor		if (un->un_stats) {
1561219019Sgabor			kstat_delete(un->un_stats);
1562219019Sgabor			un->un_stats = 0;
1563219019Sgabor		}
1564219019Sgabor		for (i = 0; i < NDKMAP; i++) {
1565219019Sgabor			if (un->un_pstats[i]) {
1566219019Sgabor				kstat_delete(un->un_pstats[i]);
1567219019Sgabor				un->un_pstats[i] = (kstat_t *)0;
1568219019Sgabor			}
1569219019Sgabor		}
1570219019Sgabor
1571219019Sgabor		if (un->un_errstats) {
1572219019Sgabor			kstat_delete(un->un_errstats);
1573219019Sgabor			un->un_errstats = (kstat_t *)0;
1574219019Sgabor		}
1575219019Sgabor		mutex_enter(DCD_MUTEX);
1576219019Sgabor
1577219019Sgabor#ifndef lint
1578219019Sgabor		_NOTE(COMPETING_THREADS_NOW);
1579219019Sgabor#endif
1580219019Sgabor	}
1581219019Sgabor
1582219019Sgabor	mutex_exit(DCD_MUTEX);
1583219019Sgabor	sema_v(&un->un_semoclose);
1584219019Sgabor	return (0);
1585219019Sgabor}
1586219019Sgabor
1587219019Sgaborstatic void
1588219019Sgabordcd_offline(struct dcd_disk *un, int bechatty)
1589219019Sgabor{
1590219019Sgabor	if (bechatty)
1591219019Sgabor		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "offline\n");
1592219019Sgabor
1593219019Sgabor	mutex_exit(DCD_MUTEX);
1594219019Sgabor	cmlb_invalidate(un->un_dklbhandle, 0);
1595219019Sgabor	mutex_enter(DCD_MUTEX);
1596219019Sgabor}
1597219019Sgabor
1598219019Sgabor/*
1599219019Sgabor * Given the device number, return the devinfo pointer
1600219019Sgabor * from the scsi_device structure.
1601219019Sgabor */
1602219019Sgabor/*ARGSUSED*/
1603219019Sgaborstatic int
1604219019Sgabordcdinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
1605219019Sgabor{
1606219019Sgabor	dev_t dev;
1607219019Sgabor	struct dcd_disk *un;
1608219019Sgabor	int instance, error;
1609219019Sgabor
1610219019Sgabor
1611219019Sgabor	switch (infocmd) {
1612219019Sgabor	case DDI_INFO_DEVT2DEVINFO:
1613219019Sgabor		dev = (dev_t)arg;
1614219019Sgabor		instance = DCDUNIT(dev);
1615219019Sgabor		if ((un = ddi_get_soft_state(dcd_state, instance)) == NULL)
1616219019Sgabor			return (DDI_FAILURE);
1617219019Sgabor		*result = (void *) DCD_DEVINFO;
1618219019Sgabor		error = DDI_SUCCESS;
1619219019Sgabor		break;
1620219019Sgabor	case DDI_INFO_DEVT2INSTANCE:
1621219019Sgabor		dev = (dev_t)arg;
1622219019Sgabor		instance = DCDUNIT(dev);
1623219019Sgabor		*result = (void *)(uintptr_t)instance;
1624219019Sgabor		error = DDI_SUCCESS;
1625219019Sgabor		break;
1626219019Sgabor	default:
1627219019Sgabor		error = DDI_FAILURE;
1628219019Sgabor	}
1629219019Sgabor	return (error);
1630219019Sgabor}
1631219019Sgabor
1632219019Sgabor/*
1633219019Sgabor * property operation routine.	Return the number of blocks for the partition
1634219019Sgabor * in question or forward the request to the property facilities.
1635219019Sgabor */
1636219019Sgaborstatic int
1637219019Sgabordcd_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op, int mod_flags,
1638219019Sgabor    char *name, caddr_t valuep, int *lengthp)
1639219019Sgabor{
1640219019Sgabor	struct dcd_disk	*un;
1641219019Sgabor
1642219019Sgabor	if ((un = ddi_get_soft_state(dcd_state, ddi_get_instance(dip))) == NULL)
1643219019Sgabor		return (ddi_prop_op(dev, dip, prop_op, mod_flags,
1644219019Sgabor		    name, valuep, lengthp));
1645219019Sgabor
1646219019Sgabor	return (cmlb_prop_op(un->un_dklbhandle,
1647219019Sgabor	    dev, dip, prop_op, mod_flags, name, valuep, lengthp,
1648219019Sgabor	    DCDPART(dev), NULL));
1649219019Sgabor}
1650219019Sgabor
1651219019Sgabor/*
1652219019Sgabor * These routines perform raw i/o operations.
1653219019Sgabor */
1654219019Sgabor/*ARGSUSED*/
1655219019Sgaborvoid
1656219019Sgabordcduscsimin(struct buf *bp)
1657219019Sgabor{
1658219019Sgabor
1659219019Sgabor}
1660219019Sgabor
1661219019Sgabor
1662219019Sgaborstatic void
1663219019Sgabordcdmin(struct buf *bp)
1664219019Sgabor{
1665219019Sgabor	struct dcd_disk *un;
1666219019Sgabor	int instance;
1667219019Sgabor	minor_t minor = getminor(bp->b_edev);
1668219019Sgabor	instance = minor >> DCDUNIT_SHIFT;
1669219019Sgabor	un = ddi_get_soft_state(dcd_state, instance);
1670219019Sgabor
1671219019Sgabor	if (bp->b_bcount > un->un_max_xfer_size)
1672219019Sgabor		bp->b_bcount = un->un_max_xfer_size;
1673219019Sgabor}
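/*
 * dcdmin() is handed to physio()/aphysio() below as the minphys
 * routine; physio calls it for each transfer so a single request
 * never exceeds un_max_xfer_size.  Illustrative only: with an
 * un_max_xfer_size of 64 KB, a 1 MB read from the raw device is
 * carried out by physio as a series of 64 KB chunks, each of which
 * passes through dcdstrategy().
 */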
1674219019Sgabor
1675219019Sgabor
1676219019Sgabor/* ARGSUSED2 */
1677219019Sgaborstatic int
1678219019Sgabordcdread(dev_t dev, struct uio *uio, cred_t *cred_p)
1679219019Sgabor{
1680219019Sgabor	int secmask;
1681219019Sgabor	GET_SOFT_STATE(dev);
1682219019Sgabor#ifdef lint
1683219019Sgabor	part = part;
1684219019Sgabor#endif /* lint */
1685219019Sgabor	secmask = un->un_secsize - 1;
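	/*
	 * Illustrative only: with a 512-byte sector size, secmask is
	 * 0x1ff, so an offset of 0x400 (1024) passes the check below
	 * while an offset of 1000 (1000 & 0x1ff == 488) fails with
	 * EINVAL.
	 */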
1686219019Sgabor
1687219019Sgabor	if (uio->uio_loffset & ((offset_t)(secmask))) {
1688219019Sgabor		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1689219019Sgabor		    "file offset not modulo %d\n",
1690219019Sgabor		    un->un_secsize);
1691219019Sgabor		return (EINVAL);
1692219019Sgabor	} else if (uio->uio_iov->iov_len & (secmask)) {
1693219019Sgabor		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1694219019Sgabor		    "transfer length not modulo %d\n", un->un_secsize);
1695219019Sgabor		return (EINVAL);
1696219019Sgabor	}
1697219019Sgabor	return (physio(dcdstrategy, (struct buf *)0, dev, B_READ, dcdmin, uio));
1698219019Sgabor}
1699219019Sgabor
1700219019Sgabor/* ARGSUSED2 */
1701219019Sgaborstatic int
1702219019Sgabordcdaread(dev_t dev, struct aio_req *aio, cred_t *cred_p)
1703219019Sgabor{
1704219019Sgabor	int secmask;
1705219019Sgabor	struct uio *uio = aio->aio_uio;
1706219019Sgabor	GET_SOFT_STATE(dev);
1707219019Sgabor#ifdef lint
1708219019Sgabor	part = part;
1709219019Sgabor#endif /* lint */
1710219019Sgabor	secmask = un->un_secsize - 1;
1711219019Sgabor
1712219019Sgabor	if (uio->uio_loffset & ((offset_t)(secmask))) {
1713219019Sgabor		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1714219019Sgabor		    "file offset not modulo %d\n",
1715219019Sgabor		    un->un_secsize);
1716219019Sgabor		return (EINVAL);
1717219019Sgabor	} else if (uio->uio_iov->iov_len & (secmask)) {
1718219019Sgabor		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1719219019Sgabor		    "transfer length not modulo %d\n", un->un_secsize);
1720219019Sgabor		return (EINVAL);
1721219019Sgabor	}
1722219019Sgabor	return (aphysio(dcdstrategy, anocancel, dev, B_READ, dcdmin, aio));
1723219019Sgabor}
1724219019Sgabor
1725219019Sgabor/* ARGSUSED2 */
1726219019Sgaborstatic int
1727219019Sgabordcdwrite(dev_t dev, struct uio *uio, cred_t *cred_p)
1728219019Sgabor{
1729219019Sgabor	int secmask;
1730219019Sgabor	GET_SOFT_STATE(dev);
1731219019Sgabor#ifdef lint
1732219019Sgabor	part = part;
1733219019Sgabor#endif /* lint */
1734219019Sgabor	secmask = un->un_secsize - 1;
1735219019Sgabor
1736219019Sgabor	if (uio->uio_loffset & ((offset_t)(secmask))) {
1737219019Sgabor		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1738219019Sgabor		    "file offset not modulo %d\n",
1739219019Sgabor		    un->un_secsize);
1740219019Sgabor		return (EINVAL);
1741219019Sgabor	} else if (uio->uio_iov->iov_len & (secmask)) {
1742219019Sgabor		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1743219019Sgabor		    "transfer length not modulo %d\n", un->un_secsize);
1744219019Sgabor		return (EINVAL);
1745219019Sgabor	}
1746219019Sgabor	return (physio(dcdstrategy, (struct buf *)0, dev, B_WRITE, dcdmin,
1747219019Sgabor	    uio));
1748219019Sgabor}
1749219019Sgabor
1750219019Sgabor/* ARGSUSED2 */
1751219019Sgaborstatic int
1752219019Sgabordcdawrite(dev_t dev, struct aio_req *aio, cred_t *cred_p)
1753219019Sgabor{
1754219019Sgabor	int secmask;
1755219019Sgabor	struct uio *uio = aio->aio_uio;
1756219019Sgabor	GET_SOFT_STATE(dev);
1757219019Sgabor#ifdef lint
1758219019Sgabor	part = part;
1759219019Sgabor#endif /* lint */
1760219019Sgabor	secmask = un->un_secsize - 1;
1761219019Sgabor
1762219019Sgabor	if (uio->uio_loffset & ((offset_t)(secmask))) {
1763219019Sgabor		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1764219019Sgabor		    "file offset not modulo %d\n",
1765219019Sgabor		    un->un_secsize);
1766219019Sgabor		return (EINVAL);
1767219019Sgabor	} else if (uio->uio_iov->iov_len & (secmask)) {
1768219019Sgabor		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1769219019Sgabor		    "transfer length not modulo %d\n", un->un_secsize);
1770219019Sgabor		return (EINVAL);
1771219019Sgabor	}
1772219019Sgabor	return (aphysio(dcdstrategy, anocancel, dev, B_WRITE, dcdmin, aio));
1773219019Sgabor}
1774219019Sgabor
1775219019Sgabor/*
1776219019Sgabor * strategy routine
1777219019Sgabor */
1778219019Sgaborstatic int
1779219019Sgabordcdstrategy(struct buf *bp)
1780219019Sgabor{
1781219019Sgabor	struct dcd_disk *un;
1782219019Sgabor	struct diskhd *dp;
1783219019Sgabor	int i;
1784219019Sgabor	minor_t minor = getminor(bp->b_edev);
1785219019Sgabor	diskaddr_t p_lblksrt;
1786219019Sgabor	diskaddr_t lblocks;
1787219019Sgabor	diskaddr_t bn;
1788219019Sgabor
1789219019Sgabor	if ((un = ddi_get_soft_state(dcd_state,
1790219019Sgabor	    minor >> DCDUNIT_SHIFT)) == NULL ||
1791219019Sgabor	    un->un_state == DCD_STATE_DUMPING ||
1792219019Sgabor	    ((un->un_state  & DCD_STATE_FATAL) == DCD_STATE_FATAL)) {
1793219019Sgabor		SET_BP_ERROR(bp, ((un) ? ENXIO : EIO));
1794219019Sgaborerror:
1795219019Sgabor		bp->b_resid = bp->b_bcount;
1796219019Sgabor		biodone(bp);
1797219019Sgabor		return (0);
1798219019Sgabor	}
1799219019Sgabor
1800219019Sgabor	/*
1801219019Sgabor	 * If the request size (buf->b_bcount) is greater than the size
1802219019Sgabor	 * (un->un_max_xfer_size) supported by the target driver, fail
1803219019Sgabor	 * the request with an EINVAL error code.
1804219019Sgabor	 *
1805219019Sgabor	 * We are not supposed to receive requests exceeding
1806219019Sgabor	 * un->un_max_xfer_size because the caller is expected to
1807219019Sgabor	 * check the maximum size supported by this driver, either
1808219019Sgabor	 * through an ioctl or the dcdmin routine (which is private
1809219019Sgabor	 * to this driver).
1810219019Sgabor	 * But we have seen cases (like the meta driver (md)) where
1811219019Sgabor	 * dcdstrategy is called with more than the supported size, causing data corruption.
1812219019Sgabor	 */
1813219019Sgabor
1814219019Sgabor	if (bp->b_bcount > un->un_max_xfer_size) {
1815219019Sgabor		SET_BP_ERROR(bp, EINVAL);
1816219019Sgabor		goto error;
1817219019Sgabor	}
1818219019Sgabor
1819219019Sgabor	TRACE_2(TR_FAC_DADA, TR_DCDSTRATEGY_START,
1820219019Sgabor	    "dcdstrategy_start: bp 0x%p un 0x%p", bp, un);
1821219019Sgabor
1822219019Sgabor	/*
1823219019Sgabor	 * Commands may sneak in while we released the mutex in
1824219019Sgabor	 * DDI_SUSPEND, we should block new commands.
1825219019Sgabor	 */
1826219019Sgabor	mutex_enter(DCD_MUTEX);
1827219019Sgabor	while (un->un_state == DCD_STATE_SUSPENDED) {
1828219019Sgabor		cv_wait(&un->un_suspend_cv, DCD_MUTEX);
1829219019Sgabor	}
1830219019Sgabor
1831219019Sgabor	if (un->un_state == DCD_STATE_PM_SUSPENDED) {
1832219019Sgabor		mutex_exit(DCD_MUTEX);
1833219019Sgabor		(void) pm_idle_component(DCD_DEVINFO, 0);
1834219019Sgabor		if (pm_raise_power(DCD_DEVINFO, 0,
1835219019Sgabor		    DCD_DEVICE_ACTIVE) !=  DDI_SUCCESS) {
1836219019Sgabor			SET_BP_ERROR(bp, EIO);
1837219019Sgabor			goto error;
1838219019Sgabor		}
1839219019Sgabor		mutex_enter(DCD_MUTEX);
1840219019Sgabor	}
1841219019Sgabor	mutex_exit(DCD_MUTEX);
1842219019Sgabor
1843219019Sgabor	/*
1844219019Sgabor	 * Map-in the buffer in case starting address is not word aligned.
1845219019Sgabor	 */
1846219019Sgabor
1847219019Sgabor	if (((uintptr_t)bp->b_un.b_addr) & 0x1)
1848219019Sgabor		bp_mapin(bp);
1849219019Sgabor
1850219019Sgabor	bp->b_flags &= ~(B_DONE|B_ERROR);
1851219019Sgabor	bp->b_resid = 0;
1852219019Sgabor	bp->av_forw = 0;
1853219019Sgabor
1854219019Sgabor	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1855219019Sgabor	    "bp->b_bcount %lx\n", bp->b_bcount);
1856219019Sgabor
1857219019Sgabor	if (bp != un->un_sbufp) {
1858219019Sgaborvalidated:	if (cmlb_partinfo(un->un_dklbhandle,
1859219019Sgabor		    minor & DCDPART_MASK,
1860219019Sgabor		    &lblocks,
1861219019Sgabor		    &p_lblksrt,
1862219019Sgabor		    NULL,
1863219019Sgabor		    NULL,
1864219019Sgabor		    0) == 0) {
1865219019Sgabor
1866219019Sgabor			bn = dkblock(bp);
1867219019Sgabor
1868219019Sgabor			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1869219019Sgabor			    "dkblock(bp) is %llu\n", bn);
1870219019Sgabor
1871219019Sgabor			i = 0;
1872219019Sgabor			if (bn < 0) {
1873219019Sgabor				i = -1;
1874219019Sgabor			} else if (bn >= lblocks) {
1875219019Sgabor				/*
1876219019Sgabor				 * For proper comparison, file system block
1877219019Sgabor				 * number has to be scaled to actual CD
1878219019Sgabor				 * transfer size.
1879219019Sgabor				 * Since all the CDROM operations
1880219019Sgabor				 * that have Sun Labels are in the correct
1881219019Sgabor				 * block size, this will work for CDs. This
1882219019Sgabor				 * will have to change when we have different
1883219019Sgabor				 * sector sizes.
1884219019Sgabor				 *
1885219019Sgabor				 * if bn == lblocks,
1886219019Sgabor				 * Not an error, resid == count
1887219019Sgabor				 */
1888219019Sgabor				if (bn > lblocks) {
1889219019Sgabor					i = -1;
1890219019Sgabor				} else {
1891219019Sgabor					i = 1;
1892219019Sgabor				}
1893219019Sgabor			} else if (bp->b_bcount & (un->un_secsize-1)) {
1894219019Sgabor				/*
1895219019Sgabor				 * This should really be:
1896219019Sgabor				 *
1897219019Sgabor				 * ... if (bp->b_bcount & (un->un_lbasize-1))
1898219019Sgabor				 *
1899219019Sgabor				 */
1900219019Sgabor				i = -1;
1901219019Sgabor			} else {
1902219019Sgabor				if (!bp->b_bcount) {
1903219019Sgabor					printf("Warning: Zero read or write\n");
1904219019Sgabor					goto error;
1905219019Sgabor				}
1906219019Sgabor				/*
1907219019Sgabor				 * sort by absolute block number.
1908219019Sgabor				 */
1909219019Sgabor				bp->b_resid = bn;
1910219019Sgabor				bp->b_resid += p_lblksrt;
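				/*
				 * Illustrative only: a request at
				 * partition-relative block 100 on a
				 * partition starting at absolute block
				 * 2048 gets a sort key of 2148.
				 */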
1911219019Sgabor				/*
1912219019Sgabor				 * zero out av_back - this will be a signal
1913219019Sgabor				 * to dcdstart to go and fetch the resources
1914219019Sgabor				 */
1915219019Sgabor				bp->av_back = NO_PKT_ALLOCATED;
1916219019Sgabor			}
1917219019Sgabor
1918219019Sgabor			/*
1919219019Sgabor			 * Check to see whether or not we are done
1920219019Sgabor			 * (with or without errors).
1921219019Sgabor			 */
1922219019Sgabor
1923219019Sgabor			if (i != 0) {
1924219019Sgabor				if (i < 0) {
1925219019Sgabor					bp->b_flags |= B_ERROR;
1926219019Sgabor				}
1927219019Sgabor				goto error;
1928219019Sgabor			}
1929219019Sgabor		} else {
1930219019Sgabor			/*
1931219019Sgabor			 * opened in NDELAY/NONBLOCK mode?
1932219019Sgabor			 * Check if disk is ready and has a valid geometry
1933219019Sgabor			 */
1934219019Sgabor			if (dcd_ready_and_valid(bp->b_edev, un) == 0) {
1935219019Sgabor				goto validated;
1936219019Sgabor			} else {
1937219019Sgabor				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
1938219019Sgabor				    "i/o to invalid geometry\n");
1939219019Sgabor				SET_BP_ERROR(bp, EIO);
1940219019Sgabor				goto error;
1941219019Sgabor			}
1942219019Sgabor		}
1943219019Sgabor	} else if (BP_HAS_NO_PKT(bp)) {
1944219019Sgabor		struct udcd_cmd *tscmdp;
1945219019Sgabor		struct dcd_cmd *tcmdp;
1946219019Sgabor		/*
1947219019Sgabor		 * This indicates that it is a special buffer.
1948219019Sgabor		 * It could be a udcd command, so call bp_mapin just in
1949219019Sgabor		 * case it turns out to be a PIO command.
1950219019Sgabor		 */
1951219019Sgabor		tscmdp = (struct udcd_cmd *)bp->b_forw;
1952219019Sgabor		tcmdp = tscmdp->udcd_cmd;
1953219019Sgabor		if ((tcmdp->cmd != ATA_READ_DMA) && (tcmdp->cmd != 0xc9) &&
1954219019Sgabor		    (tcmdp->cmd != ATA_WRITE_DMA) && (tcmdp->cmd != 0xcb) &&
1955219019Sgabor		    (tcmdp->cmd != IDENTIFY_DMA) &&
1956219019Sgabor		    (tcmdp->cmd != ATA_FLUSH_CACHE)) {
1957219019Sgabor			bp_mapin(bp);
1958219019Sgabor		}
1959219019Sgabor	}
1960219019Sgabor
1961219019Sgabor	/*
1962219019Sgabor	 * We are doing it a bit non-standard. That is, the
1963219019Sgabor	 * head of the b_actf chain is *not* the active command-
1964219019Sgabor	 * it is just the head of the wait queue. The reason
1965219019Sgabor	 * we do this is that the head of the b_actf chain is
1966219019Sgabor	 * guaranteed to not be moved by disksort(), so that
1967219019Sgabor	 * our restart command (pointed to by
1968219019Sgabor	 * b_forw) and the head of the wait queue (b_actf) can
1969219019Sgabor	 * have resources granted without it getting lost in
1970219019Sgabor	 * the queue at some later point (where we would have
1971219019Sgabor	 * to go and look for it).
1972219019Sgabor	 */
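	/*
	 * A rough sketch of the per-unit queue described above:
	 *
	 *	dp->b_forw -> command being restarted (if any)
	 *	dp->b_actf -> head of the wait queue (never moved by
	 *		      disksort)
	 *	dp->b_actl -> tail of the wait queue
	 */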
1973219019Sgabor	mutex_enter(DCD_MUTEX);
1974219019Sgabor
1975219019Sgabor	DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
1976219019Sgabor
1977219019Sgabor	dp = &un->un_utab;
1978219019Sgabor
1979219019Sgabor	if (dp->b_actf == NULL) {
1980219019Sgabor		dp->b_actf = bp;
1981219019Sgabor		dp->b_actl = bp;
1982219019Sgabor	} else if ((un->un_state == DCD_STATE_SUSPENDED) &&
1983219019Sgabor	    bp == un->un_sbufp) {
1984219019Sgabor		bp->b_actf = dp->b_actf;
1985219019Sgabor		dp->b_actf = bp;
1986219019Sgabor	} else {
1987219019Sgabor		TRACE_3(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_START,
1988219019Sgabor		    "dcdstrategy_disksort_start: dp 0x%p bp 0x%p un 0x%p",
1989219019Sgabor		    dp, bp, un);
1990219019Sgabor		disksort(dp, bp);
1991219019Sgabor		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_DISKSORT_END,
1992219019Sgabor		    "dcdstrategy_disksort_end");
1993219019Sgabor	}
1994219019Sgabor
1995219019Sgabor	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
1996219019Sgabor	    "ncmd %x , throttle %x, forw 0x%p\n",
1997219019Sgabor	    un->un_ncmds, un->un_throttle, (void *)dp->b_forw);
1998219019Sgabor	ASSERT(un->un_ncmds >= 0);
1999219019Sgabor	ASSERT(un->un_throttle >= 0);
2000219019Sgabor	if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) {
2001219019Sgabor		dcdstart(un);
2002219019Sgabor	} else if (BP_HAS_NO_PKT(dp->b_actf)) {
2003219019Sgabor		struct buf *cmd_bp;
2004219019Sgabor
2005219019Sgabor		cmd_bp = dp->b_actf;
2006219019Sgabor		cmd_bp->av_back = ALLOCATING_PKT;
2007219019Sgabor		mutex_exit(DCD_MUTEX);
2008219019Sgabor		/*
2009219019Sgabor		 * try and map this one
2010219019Sgabor		 */
2011219019Sgabor		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_START,
2012219019Sgabor		    "dcdstrategy_small_window_call (begin)");
2013219019Sgabor
2014219019Sgabor		make_dcd_cmd(un, cmd_bp, NULL_FUNC);
2015219019Sgabor
2016219019Sgabor		TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_SMALL_WINDOW_END,
2017219019Sgabor		    "dcdstrategy_small_window_call (end)");
2018219019Sgabor
2019219019Sgabor		/*
2020219019Sgabor		 * there is a small window where the active cmd
2021219019Sgabor		 * completes before make_dcd_cmd returns.
2022219019Sgabor		 * consequently, this cmd never gets started so
2023219019Sgabor		 * we start it from here
2024219019Sgabor		 */
2025219019Sgabor		mutex_enter(DCD_MUTEX);
2026219019Sgabor		if ((un->un_ncmds < un->un_throttle) &&
2027219019Sgabor		    (dp->b_forw == NULL)) {
2028219019Sgabor			dcdstart(un);
2029219019Sgabor		}
2030219019Sgabor	}
2031219019Sgabor	mutex_exit(DCD_MUTEX);
2032219019Sgabor
2033219019Sgabordone:
2034219019Sgabor	TRACE_0(TR_FAC_DADA, TR_DCDSTRATEGY_END, "dcdstrategy_end");
2035219019Sgabor	return (0);
2036219019Sgabor}
2037219019Sgabor
2038219019Sgabor
2039219019Sgabor/*
2040219019Sgabor * Unit start and Completion
2041219019Sgabor * NOTE: we assume that the caller has at least checked for:
2042219019Sgabor *		(un->un_ncmds < un->un_throttle)
2043219019Sgabor *	if not, there is no real harm done, dcd_transport() will
2044219019Sgabor *	return BUSY
2045219019Sgabor */
2046219019Sgaborstatic void
2047219019Sgabordcdstart(struct dcd_disk *un)
2048219019Sgabor{
2049219019Sgabor	int status, sort_key;
2050219019Sgabor	struct buf *bp;
2051219019Sgabor	struct diskhd *dp;
2052219019Sgabor	uchar_t state = un->un_last_state;
2053219019Sgabor
2054219019Sgabor	TRACE_1(TR_FAC_DADA, TR_DCDSTART_START, "dcdstart_start: un 0x%p", un);
2055219019Sgabor
2056219019Sgaborretry:
2057219019Sgabor	ASSERT(mutex_owned(DCD_MUTEX));
2058219019Sgabor
2059219019Sgabor	dp = &un->un_utab;
2060219019Sgabor	if (((bp = dp->b_actf) == NULL) || (bp->av_back == ALLOCATING_PKT) ||
2061219019Sgabor	    (dp->b_forw != NULL)) {
2062219019Sgabor		TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_WORK_END,
2063219019Sgabor		    "dcdstart_end (no work)");
2064219019Sgabor		return;
2065219019Sgabor	}
2066219019Sgabor
2067219019Sgabor	/*
2068219019Sgabor	 * remove from active queue
2069219019Sgabor	 */
2070219019Sgabor	dp->b_actf = bp->b_actf;
2071219019Sgabor	bp->b_actf = 0;
2072219019Sgabor
2073219019Sgabor	/*
2074219019Sgabor	 * increment ncmds before calling dcd_transport because dcdintr
2075219019Sgabor	 * may be called before we return from dcd_transport!
2076219019Sgabor	 */
2077219019Sgabor	un->un_ncmds++;
2078219019Sgabor
2079219019Sgabor	/*
2080219019Sgabor	 * If measuring stats, mark exit from wait queue and
2081219019Sgabor	 * entrance into run 'queue' if and only if we are
2082219019Sgabor	 * going to actually start a command.
2083219019Sgabor	 * Normally the bp already has a packet at this point
2084219019Sgabor	 */
2085219019Sgabor	DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp);
2086219019Sgabor
2087219019Sgabor	mutex_exit(DCD_MUTEX);
2088219019Sgabor
2089219019Sgabor	if (BP_HAS_NO_PKT(bp)) {
2090219019Sgabor		make_dcd_cmd(un, bp, dcdrunout);
2091219019Sgabor		if (BP_HAS_NO_PKT(bp) && !(bp->b_flags & B_ERROR)) {
2092219019Sgabor			mutex_enter(DCD_MUTEX);
2093219019Sgabor			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2094219019Sgabor
2095219019Sgabor			bp->b_actf = dp->b_actf;
2096219019Sgabor			dp->b_actf = bp;
2097219019Sgabor			New_state(un, DCD_STATE_RWAIT);
2098219019Sgabor			un->un_ncmds--;
2099219019Sgabor			TRACE_0(TR_FAC_DADA, TR_DCDSTART_NO_RESOURCES_END,
2100219019Sgabor			    "dcdstart_end (No Resources)");
2101219019Sgabor			goto done;
2102219019Sgabor
2103219019Sgabor		} else if (bp->b_flags & B_ERROR) {
2104219019Sgabor			mutex_enter(DCD_MUTEX);
2105219019Sgabor			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2106219019Sgabor
2107219019Sgabor			un->un_ncmds--;
2108219019Sgabor			bp->b_resid = bp->b_bcount;
2109219019Sgabor			if (bp->b_error == 0) {
2110219019Sgabor				SET_BP_ERROR(bp, EIO);
2111219019Sgabor			}
2112219019Sgabor
2113219019Sgabor			/*
2114219019Sgabor			 * restore old state
2115219019Sgabor			 */
2116219019Sgabor			un->un_state = un->un_last_state;
2117219019Sgabor			un->un_last_state = state;
2118219019Sgabor
2119219019Sgabor			mutex_exit(DCD_MUTEX);
2120219019Sgabor
2121219019Sgabor			biodone(bp);
2122219019Sgabor			mutex_enter(DCD_MUTEX);
2123219019Sgabor			if (un->un_state == DCD_STATE_SUSPENDED) {
2124219019Sgabor				cv_broadcast(&un->un_disk_busy_cv);
2125219019Sgabor			}
2126219019Sgabor
2127219019Sgabor			if ((un->un_ncmds < un->un_throttle) &&
2128219019Sgabor			    (dp->b_forw == NULL)) {
2129219019Sgabor				goto retry;
2130219019Sgabor			} else {
2131219019Sgabor				goto done;
2132219019Sgabor			}
2133219019Sgabor		}
2134219019Sgabor	}
2135219019Sgabor
2136219019Sgabor	/*
2137219019Sgabor	 * Restore resid from the packet; b_resid had been the
2138219019Sgabor	 * disksort key.
2139219019Sgabor	 */
2140219019Sgabor	sort_key = bp->b_resid;
2141219019Sgabor	bp->b_resid = BP_PKT(bp)->pkt_resid;
2142219019Sgabor	BP_PKT(bp)->pkt_resid = 0;
2143219019Sgabor
2144219019Sgabor	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2145219019Sgabor	    "bp->b_resid %lx, pkt_resid %lx\n",
2146219019Sgabor	    bp->b_resid, BP_PKT(bp)->pkt_resid);
2147219019Sgabor
2148219019Sgabor	/*
2149219019Sgabor	 * We used to check whether or not to try and link commands here.
2150219019Sgabor	 * Since we have found that there is no performance improvement
2151219019Sgabor	 * for linked commands, this has not made much sense.
2152219019Sgabor	 */
2153219019Sgabor	if ((status = dcd_transport((struct dcd_pkt *)BP_PKT(bp)))
2154219019Sgabor	    != TRAN_ACCEPT) {
2155219019Sgabor		mutex_enter(DCD_MUTEX);
2156219019Sgabor		un->un_ncmds--;
2157219019Sgabor		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2158219019Sgabor		    "transport returned %x\n", status);
2159219019Sgabor		if (status == TRAN_BUSY) {
2160219019Sgabor			DCD_DO_ERRSTATS(un, dcd_transerrs);
2161219019Sgabor			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2162219019Sgabor			dcd_handle_tran_busy(bp, dp, un);
2163219019Sgabor			if (un->un_ncmds > 0) {
2164219019Sgabor				bp->b_resid = sort_key;
2165219019Sgabor			}
2166219019Sgabor		} else {
2167219019Sgabor			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2168219019Sgabor			mutex_exit(DCD_MUTEX);
2169219019Sgabor
2170219019Sgabor			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2171219019Sgabor			    "transport rejected (%d)\n",
2172219019Sgabor			    status);
2173219019Sgabor			SET_BP_ERROR(bp, EIO);
2174219019Sgabor			bp->b_resid = bp->b_bcount;
2175219019Sgabor			if (bp != un->un_sbufp) {
2176219019Sgabor				dcd_destroy_pkt(BP_PKT(bp));
2177219019Sgabor			}
2178219019Sgabor			biodone(bp);
2179219019Sgabor
2180219019Sgabor			mutex_enter(DCD_MUTEX);
2181219019Sgabor			if (un->un_state == DCD_STATE_SUSPENDED) {
2182219019Sgabor				cv_broadcast(&un->un_disk_busy_cv);
2183219019Sgabor			}
2184219019Sgabor			if ((un->un_ncmds < un->un_throttle) &&
2185219019Sgabor			    (dp->b_forw == NULL)) {
2186219019Sgabor					goto retry;
2187219019Sgabor			}
2188219019Sgabor		}
2189219019Sgabor	} else {
2190219019Sgabor		mutex_enter(DCD_MUTEX);
2191219019Sgabor
2192219019Sgabor		if (dp->b_actf && BP_HAS_NO_PKT(dp->b_actf)) {
2193219019Sgabor			struct buf *cmd_bp;
2194219019Sgabor
2195219019Sgabor			cmd_bp = dp->b_actf;
2196219019Sgabor			cmd_bp->av_back = ALLOCATING_PKT;
2197219019Sgabor			mutex_exit(DCD_MUTEX);
2198219019Sgabor			/*
2199219019Sgabor			 * try and map this one
2200219019Sgabor			 */
2201219019Sgabor			TRACE_0(TR_FAC_DADA, TR_DCASTART_SMALL_WINDOW_START,
2202219019Sgabor			    "dcdstart_small_window_start");
2203219019Sgabor
2204219019Sgabor			make_dcd_cmd(un, cmd_bp, NULL_FUNC);
2205219019Sgabor
2206219019Sgabor			TRACE_0(TR_FAC_DADA, TR_DCDSTART_SMALL_WINDOW_END,
2207219019Sgabor			    "dcdstart_small_window_end");
2208219019Sgabor			/*
2209219019Sgabor			 * there is a small window where the active cmd
2210219019Sgabor			 * completes before make_dcd_cmd returns.
2211219019Sgabor			 * consequently, this cmd never gets started so
2212219019Sgabor			 * we start it from here
2213219019Sgabor			 */
2214219019Sgabor			mutex_enter(DCD_MUTEX);
2215219019Sgabor			if ((un->un_ncmds < un->un_throttle) &&
2216219019Sgabor			    (dp->b_forw == NULL)) {
2217219019Sgabor				goto retry;
2218219019Sgabor			}
2219219019Sgabor		}
2220219019Sgabor	}
2221219019Sgabor
2222219019Sgabordone:
2223219019Sgabor	ASSERT(mutex_owned(DCD_MUTEX));
2224219019Sgabor	TRACE_0(TR_FAC_DADA, TR_DCDSTART_END, "dcdstart_end");
2225219019Sgabor}
2226219019Sgabor
2227219019Sgabor/*
2228219019Sgabor * make_dcd_cmd: create a pkt
2229219019Sgabor */
2230219019Sgaborstatic void
2231219019Sgabormake_dcd_cmd(struct dcd_disk *un, struct buf *bp, int (*func)())
2232219019Sgabor{
2233219019Sgabor	auto int count, com, direction;
2234219019Sgabor	struct dcd_pkt *pkt;
2235219019Sgabor	int flags, tval;
2236219019Sgabor
2237219019Sgabor	_NOTE(DATA_READABLE_WITHOUT_LOCK(dcd_disk::un_dp))
2238219019Sgabor	TRACE_3(TR_FAC_DADA, TR_MAKE_DCD_CMD_START,
2239219019Sgabor	    "make_dcd_cmd_start: un 0x%p bp 0x%p un 0x%p", un, bp, un);
2240219019Sgabor
2241219019Sgabor
2242219019Sgabor	flags = un->un_cmd_flags;
2243219019Sgabor
2244219019Sgabor	if (bp != un->un_sbufp) {
2245219019Sgabor		int partition = DCDPART(bp->b_edev);
2246219019Sgabor		diskaddr_t p_lblksrt;
2247219019Sgabor		diskaddr_t lblocks;
2248219019Sgabor		long secnt;
2249219019Sgabor		uint32_t blkno;
2250219019Sgabor		int dkl_nblk, delta;
2251219019Sgabor		long resid;
2252219019Sgabor
2253219019Sgabor		if (cmlb_partinfo(un->un_dklbhandle,
2254219019Sgabor		    partition,
2255219019Sgabor		    &lblocks,
2256219019Sgabor		    &p_lblksrt,
2257219019Sgabor		    NULL,
2258219019Sgabor		    NULL,
2259219019Sgabor		    0) != NULL) {
2260219019Sgabor			lblocks = 0;
2261219019Sgabor			p_lblksrt = 0;
2262219019Sgabor		}
2263219019Sgabor
2264219019Sgabor		dkl_nblk = (int)lblocks;
2265219019Sgabor
2266219019Sgabor		/*
2267219019Sgabor		 * Make sure we don't run off the end of a partition.
2268219019Sgabor		 *
2269219019Sgabor		 * Put this test here so that we can adjust b_bcount
2270219019Sgabor		 * to accurately reflect the actual amount we are
2271219019Sgabor		 * going to transfer.
2272219019Sgabor		 */
2273219019Sgabor
2274219019Sgabor		/*
2275219019Sgabor		 * First, compute partition-relative block number
2276219019Sgabor		 */
2277219019Sgabor		blkno = dkblock(bp);
2278219019Sgabor		secnt = (bp->b_bcount + (un->un_secsize - 1)) >> un->un_secdiv;
2279219019Sgabor		count = MIN(secnt, dkl_nblk - blkno);
2280219019Sgabor		if (count != secnt) {
2281219019Sgabor			/*
2282219019Sgabor			 * We have an overrun
2283219019Sgabor			 */
2284219019Sgabor			resid = (secnt - count) << un->un_secdiv;
2285219019Sgabor			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2286219019Sgabor			    "overrun by %ld sectors\n",
2287219019Sgabor			    secnt - count);
2288219019Sgabor			bp->b_bcount -= resid;
2289219019Sgabor		} else {
2290219019Sgabor			resid = 0;
2291219019Sgabor		}
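		/*
		 * Worked example (illustrative only, assuming 512-byte
		 * sectors, i.e. un_secdiv == 9): an 8192-byte request
		 * (secnt = 16) starting 10 blocks before the end of the
		 * partition gives count = 10, so resid = (16 - 10) << 9
		 * = 3072 and b_bcount is trimmed to 5120.
		 */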
2292219019Sgabor
2293219019Sgabor		/*
2294219019Sgabor		 * Adjust block number to absolute
2295219019Sgabor		 */
2296219019Sgabor		delta = (int)p_lblksrt;
2297219019Sgabor		blkno += delta;
2298219019Sgabor
2299219019Sgabor		mutex_enter(DCD_MUTEX);
2300219019Sgabor		/*
2301219019Sgabor		 * This is for devices having a block size different
2302219019Sgabor		 * from DEV_BSIZE (e.g. 2K CDROMs).
2303219019Sgabor		 */
2304219019Sgabor		if (un->un_lbasize != un->un_secsize) {
2305219019Sgabor			blkno >>= un->un_blknoshift;
2306219019Sgabor			count >>= un->un_blknoshift;
2307219019Sgabor		}
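		/*
		 * Illustrative only: for a device with un_lbasize 2048
		 * and un_secsize 512 (un_blknoshift presumably 2), a
		 * request at 512-byte block 100 for 8 such blocks is
		 * rescaled to device block 25 for a count of 2.
		 */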
2308219019Sgabor		mutex_exit(DCD_MUTEX);
2309219019Sgabor
2310219019Sgabor		TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_START,
2311219019Sgabor		    "make_dcd_cmd_init_pkt_call (begin)");
2312219019Sgabor		pkt = dcd_init_pkt(ROUTE, NULL, bp,
2313219019Sgabor		    (uint32_t)sizeof (struct dcd_cmd),
2314219019Sgabor		    un->un_cmd_stat_size, PP_LEN, PKT_CONSISTENT,
2315219019Sgabor		    func, (caddr_t)un);
2316219019Sgabor		TRACE_1(TR_FAC_DADA, TR_MAKE_DCD_CMD_INIT_PKT_END,
2317219019Sgabor		    "make_dcd_cmd_init_pkt_call (end): pkt 0x%p", pkt);
2318219019Sgabor		if (!pkt) {
2319219019Sgabor			bp->b_bcount += resid;
2320219019Sgabor			bp->av_back = NO_PKT_ALLOCATED;
2321219019Sgabor			TRACE_0(TR_FAC_DADA,
2322219019Sgabor			    TR_MAKE_DCD_CMD_NO_PKT_ALLOCATED1_END,
2323219019Sgabor			    "make_dcd_cmd_end (NO_PKT_ALLOCATED1)");
2324219019Sgabor			return;
2325219019Sgabor		}
2326219019Sgabor		if (bp->b_flags & B_READ) {
2327219019Sgabor			if ((un->un_dp->options & DMA_SUPPORTTED) ==
2328219019Sgabor			    DMA_SUPPORTTED) {
2329219019Sgabor				com = ATA_READ_DMA;
2330219019Sgabor			} else {
2331219019Sgabor				if (un->un_dp->options & BLOCK_MODE)
2332219019Sgabor					com = ATA_READ_MULTIPLE;
2333219019Sgabor				else
2334219019Sgabor					com = ATA_READ;
2335219019Sgabor			}
2336219019Sgabor			direction = DATA_READ;
2337219019Sgabor		} else {
2338219019Sgabor			if ((un->un_dp->options & DMA_SUPPORTTED) ==
2339219019Sgabor			    DMA_SUPPORTTED) {
2340219019Sgabor				com = ATA_WRITE_DMA;
2341219019Sgabor			} else {
2342219019Sgabor				if (un->un_dp->options & BLOCK_MODE)
2343219019Sgabor					com = ATA_WRITE_MULTIPLE;
2344219019Sgabor				else
2345219019Sgabor					com = ATA_WRITE;
2346219019Sgabor			}
2347219019Sgabor			direction = DATA_WRITE;
2348219019Sgabor		}
2349219019Sgabor
2350219019Sgabor		/*
2351219019Sgabor		 * Save the resid in the packet, temporarily until
2352219019Sgabor		 * we transport the command.
2353219019Sgabor		 */
2354219019Sgabor		pkt->pkt_resid = resid;
2355219019Sgabor
2356219019Sgabor		makecommand(pkt, flags, com, blkno, ADD_LBA_MODE,
2357219019Sgabor		    bp->b_bcount, direction, 0);
2358219019Sgabor		tval = dcd_io_time;
2359219019Sgabor	} else {
2360219019Sgabor
2361219019Sgabor		struct udcd_cmd *scmd = (struct udcd_cmd *)bp->b_forw;
2362219019Sgabor
2363219019Sgabor		/*
2364219019Sgabor		 * set options
2365219019Sgabor		 */
2366219019Sgabor		if ((scmd->udcd_flags & UDCD_SILENT) && !(DEBUGGING)) {
2367219019Sgabor			flags |= FLAG_SILENT;
2368219019Sgabor		}
2369219019Sgabor		if (scmd->udcd_flags &  UDCD_DIAGNOSE)
2370219019Sgabor			flags |= FLAG_DIAGNOSE;
2371219019Sgabor
2372219019Sgabor		if (scmd->udcd_flags & UDCD_NOINTR)
2373219019Sgabor			flags |= FLAG_NOINTR;
2374219019Sgabor
2375219019Sgabor		pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
2376219019Sgabor		    (bp->b_bcount)? bp: NULL,
2377219019Sgabor		    (uint32_t)sizeof (struct dcd_cmd),
2378219019Sgabor		    2, PP_LEN, PKT_CONSISTENT, func, (caddr_t)un);
2379219019Sgabor
2380219019Sgabor		if (!pkt) {
2381219019Sgabor			bp->av_back = NO_PKT_ALLOCATED;
2382219019Sgabor			return;
2383219019Sgabor		}
2384219019Sgabor
2385219019Sgabor		makecommand(pkt, 0, scmd->udcd_cmd->cmd,
2386219019Sgabor		    scmd->udcd_cmd->sector_num.lba_num,
2387219019Sgabor		    scmd->udcd_cmd->address_mode,
2388219019Sgabor		    scmd->udcd_cmd->size,
2389219019Sgabor		    scmd->udcd_cmd->direction, scmd->udcd_cmd->features);
2390219019Sgabor
2391219019Sgabor		pkt->pkt_flags = flags;
2392219019Sgabor		if (scmd->udcd_timeout == 0)
2393219019Sgabor			tval = dcd_io_time;
2394219019Sgabor		else
2395219019Sgabor			tval = scmd->udcd_timeout;
2396219019Sgabor		/* UDAD interface should be decided. */
2397219019Sgabor		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2398219019Sgabor		    "udcd interface\n");
2399219019Sgabor	}
2400219019Sgabor
2401219019Sgabor	pkt->pkt_comp = dcdintr;
2402219019Sgabor	pkt->pkt_time = tval;
2403219019Sgabor	PKT_SET_BP(pkt, bp);
2404219019Sgabor	bp->av_back = (struct buf *)pkt;
2405219019Sgabor
2406219019Sgabor	TRACE_0(TR_FAC_DADA, TR_MAKE_DCD_CMD_END, "make_dcd_cmd_end");
2407219019Sgabor}
2408219019Sgabor
2409219019Sgabor/*
2410219019Sgabor * Command completion processing
2411219019Sgabor */
2412219019Sgaborstatic void
2413219019Sgabordcdintr(struct dcd_pkt *pkt)
2414219019Sgabor{
2415219019Sgabor	struct dcd_disk *un;
2416219019Sgabor	struct buf *bp;
2417219019Sgabor	int action;
2418219019Sgabor	int status;
2419219019Sgabor
2420219019Sgabor	bp = PKT_GET_BP(pkt);
2421219019Sgabor	un = ddi_get_soft_state(dcd_state, DCDUNIT(bp->b_edev));
2422219019Sgabor
2423219019Sgabor	TRACE_1(TR_FAC_DADA, TR_DCDINTR_START, "dcdintr_start: un 0x%p", un);
2424219019Sgabor	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdintr\n");
2425219019Sgabor
2426219019Sgabor	mutex_enter(DCD_MUTEX);
2427219019Sgabor	un->un_ncmds--;
2428219019Sgabor	DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2429219019Sgabor	ASSERT(un->un_ncmds >= 0);
2430219019Sgabor
2431219019Sgabor	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2432219019Sgabor	    "reason %x and Status %x\n", pkt->pkt_reason, SCBP_C(pkt));
2433219019Sgabor
2434219019Sgabor	/*
2435219019Sgabor	 * do most common case first
2436219019Sgabor	 */
2437219019Sgabor	if ((pkt->pkt_reason == CMD_CMPLT) && (SCBP_C(pkt) == 0)) {
2438219019Sgabor		int com = GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp);
2439219019Sgabor
2440219019Sgabor		if (un->un_state == DCD_STATE_OFFLINE) {
2441219019Sgabor			un->un_state = un->un_last_state;
2442219019Sgabor			dcd_log(DCD_DEVINFO, dcd_label, CE_NOTE,
2443219019Sgabor			    (const char *) diskokay);
2444219019Sgabor		}
2445219019Sgabor		/*
2446219019Sgabor		 * If the command is a read or a write, and we have
2447219019Sgabor		 * a non-zero pkt_resid, that is an error. We should
2448219019Sgabor		 * attempt to retry the operation if possible.
2449219019Sgabor		 */
2450219019Sgabor		action = COMMAND_DONE;
2451219019Sgabor		if (pkt->pkt_resid && (com == ATA_READ || com == ATA_WRITE)) {
2452219019Sgabor			DCD_DO_ERRSTATS(un, dcd_harderrs);
2453219019Sgabor			if ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count) {
2454219019Sgabor				PKT_INCR_RETRY_CNT(pkt, 1);
2455219019Sgabor				action = QUE_COMMAND;
2456219019Sgabor			} else {
2457219019Sgabor				/*
2458219019Sgabor				 * If we have exhausted the retries, a
2459219019Sgabor				 * command with a residual is treated as
2460219019Sgabor				 * an error in this case.
2461219019Sgabor				 */
2462219019Sgabor				action = COMMAND_DONE_ERROR;
2463219019Sgabor			}
2464219019Sgabor			dcd_log(DCD_DEVINFO, dcd_label,
2465219019Sgabor			    CE_WARN, "incomplete %s- %s\n",
2466219019Sgabor			    (bp->b_flags & B_READ)? "read" : "write",
2467219019Sgabor			    (action == QUE_COMMAND)? "retrying" :
2468219019Sgabor			    "giving up");
2469219019Sgabor		}
2470219019Sgabor
2471219019Sgabor		/*
2472219019Sgabor		 * At this point pkt_resid reflects the residual number of
2473219019Sgabor		 * bytes that the actual command left untransferred. Add
2474219019Sgabor		 * this to b_resid, i.e. the amount this driver never got
2475219019Sgabor		 * to transfer, to get the total number of bytes not
2476219019Sgabor		 * transferred.
2477219019Sgabor		 */
2478219019Sgabor		if (action != QUE_COMMAND) {
2479219019Sgabor			bp->b_resid += pkt->pkt_resid;
2480219019Sgabor		}
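		/*
		 * Illustrative only: if make_dcd_cmd() had trimmed
		 * 3072 bytes off the request (left in b_resid by
		 * dcdstart) and the HBA reports a pkt_resid of 512,
		 * the caller sees b_resid == 3584 bytes not
		 * transferred.
		 */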
2481219019Sgabor
2482219019Sgabor	} else if (pkt->pkt_reason != CMD_CMPLT) {
2483219019Sgabor		action = dcd_handle_incomplete(un, bp);
2484219019Sgabor	}
2485219019Sgabor
2486219019Sgabor	/*
2487219019Sgabor	 * If we are in the middle of syncing or dumping, we have got
2488219019Sgabor	 * here because dcd_transport has called us explicitly after
2489219019Sgabor	 * completing the command in a polled mode. We don't want to
2490219019Sgabor	 * have a recursive call into dcd_transport again.
2491219019Sgabor	 */
2492219019Sgabor	if (ddi_in_panic() && (action == QUE_COMMAND)) {
2493219019Sgabor		action = COMMAND_DONE_ERROR;
2494219019Sgabor	}
2495219019Sgabor
2496219019Sgabor	/*
2497219019Sgabor	 * Save the pkt reason; consecutive failures are not reported
2498219019Sgabor	 * unless fatal.
2499219019Sgabor	 * Do not reset last_pkt_reason when the cmd was retried and
2500219019Sgabor	 * succeeded, because there may be more commands coming back
2501219019Sgabor	 * with the old last_pkt_reason.
2502219019Sgabor	 */
2503219019Sgabor	if ((un->un_last_pkt_reason != pkt->pkt_reason) &&
2504219019Sgabor	    ((pkt->pkt_reason != CMD_CMPLT) ||
2505219019Sgabor	    (PKT_GET_RETRY_CNT(pkt) == 0))) {
2506219019Sgabor		un->un_last_pkt_reason = pkt->pkt_reason;
2507219019Sgabor	}
2508219019Sgabor
2509219019Sgabor	switch (action) {
2510219019Sgabor	case COMMAND_DONE_ERROR:
2511219019Sgaborerror:
2512219019Sgabor		if (bp->b_resid == 0) {
2513219019Sgabor			bp->b_resid = bp->b_bcount;
2514219019Sgabor		}
2515219019Sgabor		if (bp->b_error == 0) {
2516219019Sgabor			struct	dcd_cmd *cdbp = (struct dcd_cmd *)pkt->pkt_cdbp;
2517219019Sgabor			if (cdbp->cmd == ATA_FLUSH_CACHE &&
2518219019Sgabor			    (pkt->pkt_scbp[0] & STATUS_ATA_ERR) &&
2519219019Sgabor			    (pkt->pkt_scbp[1] & ERR_ABORT)) {
2520219019Sgabor				SET_BP_ERROR(bp, ENOTSUP);
2521219019Sgabor				un->un_flush_not_supported = 1;
2522219019Sgabor			} else {
2523219019Sgabor				SET_BP_ERROR(bp, EIO);
2524219019Sgabor			}
2525219019Sgabor		}
2526219019Sgabor		bp->b_flags |= B_ERROR;
2527219019Sgabor		/*FALLTHROUGH*/
2528219019Sgabor	case COMMAND_DONE:
2529219019Sgabor		dcddone_and_mutex_exit(un, bp);
2530219019Sgabor
2531219019Sgabor		TRACE_0(TR_FAC_DADA, TR_DCDINTR_COMMAND_DONE_END,
2532219019Sgabor		    "dcdintr_end (COMMAND_DONE)");
2533219019Sgabor		return;
2534219019Sgabor
2535219019Sgabor	case QUE_COMMAND:
2536219019Sgabor		if (un->un_ncmds >= un->un_throttle) {
2537219019Sgabor			struct diskhd *dp = &un->un_utab;
2538219019Sgabor
2539219019Sgabor			bp->b_actf = dp->b_actf;
2540219019Sgabor			dp->b_actf = bp;
2541219019Sgabor
2542219019Sgabor			DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
2543219019Sgabor
2544219019Sgabor			mutex_exit(DCD_MUTEX);
2545219019Sgabor			goto exit;
2546219019Sgabor		}
2547219019Sgabor
2548219019Sgabor		un->un_ncmds++;
2549219019Sgabor		/* reset the pkt reason again */
2550219019Sgabor		pkt->pkt_reason = 0;
2551219019Sgabor		DCD_DO_KSTATS(un, kstat_runq_enter, bp);
2552219019Sgabor		mutex_exit(DCD_MUTEX);
2553219019Sgabor		if ((status = dcd_transport(BP_PKT(bp))) != TRAN_ACCEPT) {
2554219019Sgabor			struct diskhd *dp = &un->un_utab;
2555219019Sgabor
2556219019Sgabor			mutex_enter(DCD_MUTEX);
2557219019Sgabor			un->un_ncmds--;
2558219019Sgabor			if (status == TRAN_BUSY) {
2559219019Sgabor				DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
2560219019Sgabor				dcd_handle_tran_busy(bp, dp, un);
2561219019Sgabor				mutex_exit(DCD_MUTEX);
2562219019Sgabor				goto exit;
2563219019Sgabor			}
2564219019Sgabor			DCD_DO_ERRSTATS(un, dcd_transerrs);
2565219019Sgabor			DCD_DO_KSTATS(un, kstat_runq_exit, bp);
2566219019Sgabor
2567219019Sgabor			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2568219019Sgabor			    "requeue of command fails (%x)\n", status);
2569219019Sgabor			SET_BP_ERROR(bp, EIO);
2570219019Sgabor			bp->b_resid = bp->b_bcount;
2571219019Sgabor
2572219019Sgabor			dcddone_and_mutex_exit(un, bp);
2573219019Sgabor			goto exit;
2574219019Sgabor		}
2575219019Sgabor		break;
2576219019Sgabor
2577219019Sgabor	case JUST_RETURN:
2578219019Sgabor	default:
2579219019Sgabor		DCD_DO_KSTATS(un, kstat_waitq_enter, bp);
2580219019Sgabor		mutex_exit(DCD_MUTEX);
2581219019Sgabor		break;
2582219019Sgabor	}
2583219019Sgabor
2584219019Sgaborexit:
2585219019Sgabor	TRACE_0(TR_FAC_DADA, TR_DCDINTR_END, "dcdintr_end");
2586219019Sgabor}
2587219019Sgabor
2588219019Sgabor
2589219019Sgabor/*
2590219019Sgabor * Done with a command.
2591219019Sgabor */
2592219019Sgaborstatic void
2593219019Sgabordcddone_and_mutex_exit(struct dcd_disk *un, register struct buf *bp)
2594219019Sgabor{
2595219019Sgabor	struct diskhd *dp;
2596219019Sgabor
2597219019Sgabor	TRACE_1(TR_FAC_DADA, TR_DCDONE_START, "dcddone_start: un 0x%p", un);
2598219019Sgabor
2599219019Sgabor	_NOTE(LOCK_RELEASED_AS_SIDE_EFFECT(&un->un_dcd->dcd_mutex));
2600219019Sgabor
2601219019Sgabor	dp = &un->un_utab;
2602219019Sgabor	if (bp == dp->b_forw) {
2603219019Sgabor		dp->b_forw = NULL;
2604219019Sgabor	}
2605219019Sgabor
2606219019Sgabor	if (un->un_stats) {
2607219019Sgabor		ulong_t n_done = bp->b_bcount - bp->b_resid;
2608219019Sgabor		if (bp->b_flags & B_READ) {
2609219019Sgabor			IOSP->reads++;
2610219019Sgabor			IOSP->nread += n_done;
2611219019Sgabor		} else {
2612219019Sgabor			IOSP->writes++;
2613219019Sgabor			IOSP->nwritten += n_done;
2614219019Sgabor		}
2615219019Sgabor	}
2616219019Sgabor	if (IO_PARTITION_STATS) {
2617219019Sgabor		ulong_t n_done = bp->b_bcount - bp->b_resid;
2618219019Sgabor		if (bp->b_flags & B_READ) {
2619219019Sgabor			IOSP_PARTITION->reads++;
2620219019Sgabor			IOSP_PARTITION->nread += n_done;
2621219019Sgabor		} else {
2622219019Sgabor			IOSP_PARTITION->writes++;
2623219019Sgabor			IOSP_PARTITION->nwritten += n_done;
2624219019Sgabor		}
2625219019Sgabor	}
2626219019Sgabor
2627219019Sgabor	/*
2628219019Sgabor	 * Start the next one before releasing resources on this one
2629219019Sgabor	 */
2630219019Sgabor	if (un->un_state == DCD_STATE_SUSPENDED) {
2631219019Sgabor		cv_broadcast(&un->un_disk_busy_cv);
2632219019Sgabor	} else if (dp->b_actf && (un->un_ncmds < un->un_throttle) &&
2633219019Sgabor	    (dp->b_forw == NULL && un->un_state != DCD_STATE_SUSPENDED)) {
2634219019Sgabor		dcdstart(un);
2635219019Sgabor	}
2636219019Sgabor
2637219019Sgabor	mutex_exit(DCD_MUTEX);
2638219019Sgabor
2639219019Sgabor	if (bp != un->un_sbufp) {
2640219019Sgabor		dcd_destroy_pkt(BP_PKT(bp));
2641219019Sgabor		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2642219019Sgabor		    "regular done: resid %ld\n", bp->b_resid);
2643219019Sgabor	} else {
2644219019Sgabor		ASSERT(un->un_sbuf_busy);
2645219019Sgabor	}
2646219019Sgabor	TRACE_0(TR_FAC_DADA, TR_DCDDONE_BIODONE_CALL, "dcddone_biodone_call");
2647219019Sgabor
2648219019Sgabor	biodone(bp);
2649219019Sgabor
2650219019Sgabor	(void) pm_idle_component(DCD_DEVINFO, 0);
2651219019Sgabor
2652219019Sgabor	TRACE_0(TR_FAC_DADA, TR_DCDDONE_END, "dcddone end");
2653219019Sgabor}
2654219019Sgabor
2655219019Sgabor
2656219019Sgabor/*
2657219019Sgabor * reset the disk unless the transport layer has already
2658219019Sgabor * cleared the problem
2659219019Sgabor */
2660219019Sgabor#define	C1	(STAT_ATA_BUS_RESET|STAT_ATA_DEV_RESET|STAT_ATA_ABORTED)
2661219019Sgaborstatic void
2662219019Sgabordcd_reset_disk(struct dcd_disk *un, struct dcd_pkt *pkt)
2663219019Sgabor{
2664219019Sgabor
2665219019Sgabor	if ((pkt->pkt_statistics & C1) == 0) {
2666219019Sgabor		mutex_exit(DCD_MUTEX);
2667219019Sgabor		if (!dcd_reset(ROUTE, RESET_ALL)) {
2668219019Sgabor			DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2669219019Sgabor			    "Reset failed");
2670219019Sgabor		}
2671219019Sgabor		mutex_enter(DCD_MUTEX);
2672219019Sgabor	}
2673219019Sgabor}
2674219019Sgabor
2675219019Sgaborstatic int
2676219019Sgabordcd_handle_incomplete(struct dcd_disk *un, struct buf *bp)
2677219019Sgabor{
2678219019Sgabor	static char *fail = "ATA transport failed: reason '%s': %s\n";
2679219019Sgabor	static char *notresp = "disk not responding to selection\n";
2680219019Sgabor	int rval = COMMAND_DONE_ERROR;
2681219019Sgabor	int action = COMMAND_SOFT_ERROR;
2682219019Sgabor	struct dcd_pkt *pkt = BP_PKT(bp);
2683219019Sgabor	int be_chatty = (un->un_state != DCD_STATE_SUSPENDED) &&
2684219019Sgabor	    (bp != un->un_sbufp || !(pkt->pkt_flags & FLAG_SILENT));
2685219019Sgabor
2686219019Sgabor	ASSERT(mutex_owned(DCD_MUTEX));
2687219019Sgabor
2688219019Sgabor	switch (pkt->pkt_reason) {
2689219019Sgabor
2690219019Sgabor	case CMD_TIMEOUT:
2691219019Sgabor		/*
2692219019Sgabor		 * This indicates that the HBA has already reset the
2693219019Sgabor		 * device, so just indicate that the command should be retried
2694219019Sgabor		 */
2695219019Sgabor		break;
2696219019Sgabor
2697219019Sgabor	case CMD_INCOMPLETE:
2698219019Sgabor		action = dcd_check_error(un, bp);
2699219019Sgabor		DCD_DO_ERRSTATS(un, dcd_transerrs);
2700219019Sgabor		if (action == COMMAND_HARD_ERROR) {
2701219019Sgabor			(void) dcd_reset_disk(un, pkt);
2702219019Sgabor		}
2703219019Sgabor		break;
2704219019Sgabor
2705219019Sgabor	case CMD_FATAL:
2706219019Sgabor		/*
2707219019Sgabor		 * Something drastic has gone wrong
2708219019Sgabor		 */
2709219019Sgabor		break;
2710219019Sgabor	case CMD_DMA_DERR:
2711219019Sgabor	case CMD_DATA_OVR:
2712219019Sgabor		/* FALLTHROUGH */
2713219019Sgabor
2714219019Sgabor	default:
2715219019Sgabor		/*
2716219019Sgabor		 * the target may still be running the command,
2717219019Sgabor		 * so we should try and reset that target.
2718219019Sgabor		 */
2719219019Sgabor		DCD_DO_ERRSTATS(un, dcd_transerrs);
2720219019Sgabor		if ((pkt->pkt_reason != CMD_RESET) &&
2721219019Sgabor		    (pkt->pkt_reason != CMD_ABORTED)) {
2722219019Sgabor			(void) dcd_reset_disk(un, pkt);
2723219019Sgabor		}
2724219019Sgabor		break;
2725219019Sgabor	}
2726219019Sgabor
2727219019Sgabor	/*
2728219019Sgabor	 * If pkt_reason is CMD_RESET/ABORTED, chances are that this pkt got
2729219019Sgabor	 * reset/aborted because another disk on this bus caused it.
2730219019Sgabor	 * The disk that caused it, should get CMD_TIMEOUT with pkt_statistics
2731219019Sgabor	 * of STAT_TIMEOUT/STAT_DEV_RESET
2732219019Sgabor	 */
2733219019Sgabor	if ((pkt->pkt_reason == CMD_RESET) ||(pkt->pkt_reason == CMD_ABORTED)) {
2734219019Sgabor		/* To be written : XXX */
2735219019Sgabor		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2736219019Sgabor		    "Command aborted\n");
2737219019Sgabor	}
2738219019Sgabor
2739219019Sgabor	if (bp == un->un_sbufp && (pkt->pkt_flags & FLAG_DIAGNOSE)) {
2740219019Sgabor		rval = COMMAND_DONE_ERROR;
2741219019Sgabor	} else {
2742219019Sgabor		if ((rval == COMMAND_DONE_ERROR) &&
2743219019Sgabor		    (action == COMMAND_SOFT_ERROR) &&
2744219019Sgabor		    ((int)PKT_GET_RETRY_CNT(pkt) < dcd_retry_count)) {
2745219019Sgabor			PKT_INCR_RETRY_CNT(pkt, 1);
2746219019Sgabor			rval = QUE_COMMAND;
2747219019Sgabor		}
2748219019Sgabor	}
2749219019Sgabor
2750219019Sgabor	if (pkt->pkt_reason == CMD_INCOMPLETE && rval == COMMAND_DONE_ERROR) {
2751219019Sgabor		/*
2752219019Sgabor		 * Looks like someone turned off this shoebox.
2753219019Sgabor		 */
2754219019Sgabor		if (un->un_state != DCD_STATE_OFFLINE) {
2755219019Sgabor			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2756219019Sgabor			    (const char *) notresp);
2757219019Sgabor			New_state(un, DCD_STATE_OFFLINE);
2758219019Sgabor		}
2759219019Sgabor	} else if (pkt->pkt_reason == CMD_FATAL) {
2760219019Sgabor		/*
2761219019Sgabor		 * Suppressing the following message for the time being
2762219019Sgabor		 * dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2763219019Sgabor		 * (const char *) notresp);
2764219019Sgabor		 */
2765219019Sgabor		PKT_INCR_RETRY_CNT(pkt, 6);
2766219019Sgabor		rval = COMMAND_DONE_ERROR;
2767219019Sgabor		New_state(un, DCD_STATE_FATAL);
2768219019Sgabor	} else if (be_chatty) {
2769219019Sgabor		int in_panic = ddi_in_panic();
2770219019Sgabor		if (!in_panic || (rval == COMMAND_DONE_ERROR)) {
2771219019Sgabor			if (((pkt->pkt_reason != un->un_last_pkt_reason) &&
2772219019Sgabor			    (pkt->pkt_reason != CMD_RESET)) ||
2773219019Sgabor			    (rval == COMMAND_DONE_ERROR) ||
2774219019Sgabor			    (dcd_error_level == DCD_ERR_ALL)) {
2775219019Sgabor				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2776219019Sgabor				    fail, dcd_rname(pkt->pkt_reason),
2777219019Sgabor				    (rval == COMMAND_DONE_ERROR) ?
2778219019Sgabor				    "giving up": "retrying command");
2779219019Sgabor				DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2780219019Sgabor				    "retrycount=%x\n",
2781219019Sgabor				    PKT_GET_RETRY_CNT(pkt));
2782219019Sgabor			}
2783219019Sgabor		}
2784219019Sgabor	}
2785219019Sgaborerror:
2786219019Sgabor	return (rval);
2787219019Sgabor}
2788219019Sgabor
2789219019Sgaborstatic int
2790219019Sgabordcd_check_error(struct dcd_disk *un, struct buf *bp)
2791219019Sgabor{
2792219019Sgabor	struct diskhd *dp = &un->un_utab;
2793219019Sgabor	struct dcd_pkt *pkt = BP_PKT(bp);
2794219019Sgabor	int rval = 0;
2795219019Sgabor	unsigned char status;
2796219019Sgabor	unsigned char error;
2797219019Sgabor
2798219019Sgabor	TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_START, "dcd_check_error_start");
2799219019Sgabor	ASSERT(mutex_owned(DCD_MUTEX));
2800219019Sgabor
2801219019Sgabor	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
2802219019Sgabor	    "Pkt: 0x%p dp: 0x%p\n", (void *)pkt, (void *)dp);
2803219019Sgabor
2804219019Sgabor	/*
2805219019Sgabor	 * Here we need to check the status register first; only if it
2806219019Sgabor	 * indicates an error do we look at the error register.
2807219019Sgabor	 */
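	/*
	 * Illustrative only (bit values per the ATA spec, not taken
	 * from this driver's headers): pkt_scbp[0] holds the status
	 * register and pkt_scbp[1] the error register, so a status of
	 * 0x51 (DRDY | DSC | ERR) with an error of 0x04 (ABRT) ends up
	 * in the "Aborted Command" branch below.
	 */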
2808219019Sgabor
2809219019Sgabor	status = (pkt->pkt_scbp)[0];
2810219019Sgabor	if ((status & STATUS_ATA_DWF) == STATUS_ATA_DWF) {
2811219019Sgabor		/*
2812219019Sgabor		 * There has been a Device Fault - the reason for such an
2813219019Sgabor		 * error is vendor specific.
2814219019Sgabor		 * Action to be taken: indicate the error and reset the device.
2815219019Sgabor		 */
2816219019Sgabor
2817219019Sgabor		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN, "Device Fault\n");
2818219019Sgabor		rval = COMMAND_HARD_ERROR;
2819219019Sgabor	} else if ((status & STATUS_ATA_CORR) == STATUS_ATA_CORR) {
2820219019Sgabor
2821219019Sgabor		/*
2822219019Sgabor		 * The sector read or written is marginal and hence ECC
2823219019Sgabor		 * correction has been applied. Indicate that it should be
2824219019Sgabor		 * repaired; we probably need to re-assign the block based
2825219019Sgabor		 * on the bad-block mapping.
2826219019Sgabor		 */
2827219019Sgabor
2828219019Sgabor		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2829219019Sgabor		    "Soft Error on block %x\n",
2830219019Sgabor		    ((struct dcd_cmd *)pkt->pkt_cdbp)->sector_num.lba_num);
2831219019Sgabor		rval = COMMAND_SOFT_ERROR;
2832219019Sgabor	} else if ((status & STATUS_ATA_ERR) == STATUS_ATA_ERR) {
2833219019Sgabor		error = pkt->pkt_scbp[1];
2834219019Sgabor
2835219019Sgabor		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2836219019Sgabor		    "Command:0x%x,Error:0x%x,Status:0x%x\n",
2837219019Sgabor		    GETATACMD((struct dcd_cmd *)pkt->pkt_cdbp),
2838219019Sgabor		    error, status);
2839219019Sgabor		if ((error &  ERR_AMNF) == ERR_AMNF) {
			/* Address mark not found */
2841219019Sgabor			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2842219019Sgabor			    "Address Mark Not Found");
2843219019Sgabor		} else if ((error & ERR_TKONF) == ERR_TKONF) {
2844219019Sgabor			/* Track 0 Not found */
2845219019Sgabor			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2846219019Sgabor			    "Track 0 Not found \n");
2847219019Sgabor		} else if ((error & ERR_IDNF) == ERR_IDNF) {
2848219019Sgabor			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2849219019Sgabor			    " ID not found \n");
2850219019Sgabor		} else if ((error &  ERR_UNC) == ERR_UNC) {
2851219019Sgabor			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2852219019Sgabor			    "Uncorrectable data Error: Block %x\n",
2853219019Sgabor			    ((struct dcd_cmd *)pkt->pkt_cdbp)->
2854219019Sgabor			    sector_num.lba_num);
2855219019Sgabor		} else if ((error & ERR_BBK) == ERR_BBK) {
2856219019Sgabor			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2857219019Sgabor			    "Bad block detected: Block %x\n",
2858219019Sgabor			    ((struct dcd_cmd *)pkt->pkt_cdbp)->
2859219019Sgabor			    sector_num.lba_num);
2860219019Sgabor		} else if ((error & ERR_ABORT) == ERR_ABORT) {
2861219019Sgabor			/* Aborted Command */
2862219019Sgabor			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2863219019Sgabor			    " Aborted Command \n");
2864219019Sgabor		}
2865219019Sgabor		/*
2866219019Sgabor		 * Return the soft error so that the command
2867219019Sgabor		 * will be retried.
2868219019Sgabor		 */
2869219019Sgabor		rval = COMMAND_SOFT_ERROR;
2870219019Sgabor	}
2871219019Sgabor
2872219019Sgabor	TRACE_0(TR_FAC_DADA, TR_DCD_CHECK_ERROR_END, "dcd_check_error_end");
2873219019Sgabor	return (rval);
2874219019Sgabor}
2875219019Sgabor
2876219019Sgabor
2877219019Sgabor/*
2878219019Sgabor *	System Crash Dump routine
2879219019Sgabor */
2880219019Sgabor
2881219019Sgabor#define	NDUMP_RETRIES	5
2882219019Sgabor
2883219019Sgaborstatic int
2884219019Sgabordcddump(dev_t dev, caddr_t addr, daddr_t blkno, int nblk)
2885219019Sgabor{
2886219019Sgabor	struct dcd_pkt *pkt;
2887219019Sgabor	int i;
2888219019Sgabor	struct buf local, *bp;
2889219019Sgabor	int err;
2890219019Sgabor	unsigned char com;
2891219019Sgabor	diskaddr_t p_lblksrt;
2892219019Sgabor	diskaddr_t lblocks;
2893219019Sgabor
2894219019Sgabor	GET_SOFT_STATE(dev);
2895219019Sgabor#ifdef lint
2896219019Sgabor	part = part;
2897219019Sgabor#endif /* lint */
2898219019Sgabor
2899219019Sgabor	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*un))
2900219019Sgabor
2901219019Sgabor	if ((un->un_state & DCD_STATE_FATAL) == DCD_STATE_FATAL)
2902219019Sgabor		return (ENXIO);
2903219019Sgabor
2904219019Sgabor	if (cmlb_partinfo(un->un_dklbhandle, DCDPART(dev),
2905219019Sgabor	    &lblocks, &p_lblksrt, NULL, NULL, 0))
2906219019Sgabor		return (ENXIO);
2907219019Sgabor
2908219019Sgabor	if (blkno+nblk > lblocks) {
2909219019Sgabor		return (EINVAL);
2910219019Sgabor	}
2911219019Sgabor
2912219019Sgabor
2913219019Sgabor	if ((un->un_state == DCD_STATE_SUSPENDED) ||
2914219019Sgabor	    (un->un_state == DCD_STATE_PM_SUSPENDED)) {
2915219019Sgabor		if (pm_raise_power(DCD_DEVINFO, 0,
2916219019Sgabor		    DCD_DEVICE_ACTIVE) != DDI_SUCCESS) {
2917219019Sgabor			return (EIO);
2918219019Sgabor		}
2919219019Sgabor	}
2920219019Sgabor
2921219019Sgabor	/*
	 * When cpr calls dcddump, we know that dad is in
	 * a good state, so no bus reset is required.
2924219019Sgabor	 */
2925219019Sgabor	un->un_throttle = 0;
2926219019Sgabor
2927219019Sgabor	if ((un->un_state != DCD_STATE_SUSPENDED) &&
2928219019Sgabor	    (un->un_state != DCD_STATE_DUMPING)) {
2929219019Sgabor
2930219019Sgabor		New_state(un, DCD_STATE_DUMPING);
2931219019Sgabor
2932219019Sgabor		/*
2933219019Sgabor		 * Reset the bus. I'd like to not have to do this,
2934219019Sgabor		 * but this is the safest thing to do...
2935219019Sgabor		 */
2936219019Sgabor
2937219019Sgabor		if (dcd_reset(ROUTE, RESET_ALL) == 0) {
2938219019Sgabor			return (EIO);
2939219019Sgabor		}
2940219019Sgabor
2941219019Sgabor	}
2942219019Sgabor
2943219019Sgabor	blkno += p_lblksrt;
2944219019Sgabor
2945219019Sgabor	/*
2946219019Sgabor	 * It should be safe to call the allocator here without
2947219019Sgabor	 * worrying about being locked for DVMA mapping because
2948219019Sgabor	 * the address we're passed is already a DVMA mapping
2949219019Sgabor	 *
2950219019Sgabor	 * We are also not going to worry about semaphore ownership
2951219019Sgabor	 * in the dump buffer. Dumping is single threaded at present.
2952219019Sgabor	 */
2953219019Sgabor
2954219019Sgabor	bp = &local;
2955219019Sgabor	bzero((caddr_t)bp, sizeof (*bp));
2956219019Sgabor	bp->b_flags = B_BUSY;
2957219019Sgabor	bp->b_un.b_addr = addr;
2958219019Sgabor	bp->b_bcount = nblk << DEV_BSHIFT;
2959219019Sgabor	bp->b_resid = 0;
2960219019Sgabor
2961219019Sgabor	for (i = 0; i < NDUMP_RETRIES; i++) {
2962219019Sgabor		bp->b_flags &= ~B_ERROR;
2963219019Sgabor		if ((pkt = dcd_init_pkt(ROUTE, NULL, bp,
2964219019Sgabor		    (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
2965219019Sgabor		    PKT_CONSISTENT, NULL_FUNC, NULL)) != NULL) {
2966219019Sgabor			break;
2967219019Sgabor		}
2968219019Sgabor		if (i == 0) {
2969219019Sgabor			if (bp->b_flags & B_ERROR) {
2970219019Sgabor				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2971219019Sgabor				    "no resources for dumping; "
2972219019Sgabor				    "error code: 0x%x, retrying",
2973219019Sgabor				    geterror(bp));
2974219019Sgabor			} else {
2975219019Sgabor				dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
2976219019Sgabor				    "no resources for dumping; retrying");
2977219019Sgabor			}
2978219019Sgabor		} else if (i != (NDUMP_RETRIES - 1)) {
2979219019Sgabor			if (bp->b_flags & B_ERROR) {
2980219019Sgabor				dcd_log(DCD_DEVINFO, dcd_label, CE_CONT, "no "
2981219019Sgabor				    "resources for dumping; error code: 0x%x, "
2982219019Sgabor				    "retrying\n", geterror(bp));
2983219019Sgabor			}
2984219019Sgabor		} else {
2985219019Sgabor			if (bp->b_flags & B_ERROR) {
2986219019Sgabor				dcd_log(DCD_DEVINFO, dcd_label, CE_CONT,
2987219019Sgabor				    "no resources for dumping; "
2988219019Sgabor				    "error code: 0x%x, retries failed, "
2989219019Sgabor				    "giving up.\n", geterror(bp));
2990219019Sgabor			} else {
2991219019Sgabor				dcd_log(DCD_DEVINFO, dcd_label, CE_CONT,
2992219019Sgabor				    "no resources for dumping; "
2993219019Sgabor				    "retries failed, giving up.\n");
2994219019Sgabor			}
2995219019Sgabor			return (EIO);
2996219019Sgabor		}
2997219019Sgabor		delay(10);
2998219019Sgabor	}
2999219019Sgabor	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
3000219019Sgabor		com = ATA_WRITE_DMA;
3001219019Sgabor	} else {
3002219019Sgabor		if (un->un_dp->options & BLOCK_MODE)
3003219019Sgabor			com = ATA_WRITE_MULTIPLE;
3004219019Sgabor		else
3005219019Sgabor			com = ATA_WRITE;
3006219019Sgabor	}
3007219019Sgabor
3008219019Sgabor	makecommand(pkt, 0, com, blkno, ADD_LBA_MODE,
3009219019Sgabor	    (int)nblk*un->un_secsize, DATA_WRITE, 0);
3010219019Sgabor
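	/*
	 * Issue the write with polled I/O and escalate the recovery on
	 * failure: reset the target if the drive reports busy, reset the
	 * device for any other bad status, and reset the whole bus once
	 * half of the retries have been used up.
	 */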
3011219019Sgabor	for (err = EIO, i = 0; i < NDUMP_RETRIES && err == EIO; i++) {
3012219019Sgabor
3013219019Sgabor		if (dcd_poll(pkt) == 0) {
3014219019Sgabor			switch (SCBP_C(pkt)) {
3015219019Sgabor			case STATUS_GOOD:
3016219019Sgabor				if (pkt->pkt_resid == 0) {
3017219019Sgabor					err = 0;
3018219019Sgabor				}
3019219019Sgabor				break;
3020219019Sgabor			case STATUS_ATA_BUSY:
3021219019Sgabor				(void) dcd_reset(ROUTE, RESET_TARGET);
3022219019Sgabor				break;
3023219019Sgabor			default:
3024219019Sgabor				mutex_enter(DCD_MUTEX);
3025219019Sgabor				(void) dcd_reset_disk(un, pkt);
3026219019Sgabor				mutex_exit(DCD_MUTEX);
3027219019Sgabor				break;
3028219019Sgabor			}
3029219019Sgabor		} else if (i > NDUMP_RETRIES/2) {
3030219019Sgabor			(void) dcd_reset(ROUTE, RESET_ALL);
3031219019Sgabor		}
3032219019Sgabor
3033219019Sgabor	}
3034219019Sgabor	dcd_destroy_pkt(pkt);
3035219019Sgabor	return (err);
3036219019Sgabor}
3037219019Sgabor
3038219019Sgabor/*
3039219019Sgabor * This routine implements the ioctl calls.  It is called
3040219019Sgabor * from the device switch at normal priority.
3041219019Sgabor */
3042219019Sgabor/* ARGSUSED3 */
3043219019Sgaborstatic int
3044219019Sgabordcdioctl(dev_t dev, int cmd, intptr_t arg, int flag,
3045219019Sgabor	cred_t *cred_p, int *rval_p)
3046219019Sgabor{
3047219019Sgabor	auto int32_t data[512 / (sizeof (int32_t))];
3048219019Sgabor	struct dk_cinfo *info;
3049219019Sgabor	struct dk_minfo media_info;
3050219019Sgabor	struct udcd_cmd *scmd;
3051219019Sgabor	int i, err;
3052219019Sgabor	enum uio_seg uioseg = 0;
3053219019Sgabor	enum dkio_state state = 0;
3054219019Sgabor#ifdef _MULTI_DATAMODEL
3055219019Sgabor	struct dadkio_rwcmd rwcmd;
3056219019Sgabor#endif
3057219019Sgabor	struct dadkio_rwcmd32 rwcmd32;
3058219019Sgabor	struct dcd_cmd dcdcmd;
3059219019Sgabor
3060219019Sgabor	GET_SOFT_STATE(dev);
3061219019Sgabor#ifdef lint
3062219019Sgabor	part = part;
3063219019Sgabor	state = state;
3064219019Sgabor	uioseg = uioseg;
3065219019Sgabor#endif  /* lint */
3066219019Sgabor
3067219019Sgabor	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3068219019Sgabor	    "dcd_ioctl : cmd %x, arg %lx\n", cmd, arg);
3069219019Sgabor
3070219019Sgabor	bzero((caddr_t)data, sizeof (data));
3071219019Sgabor
3072219019Sgabor	switch (cmd) {
3073219019Sgabor
3074219019Sgabor#ifdef DCDDEBUG
3075219019Sgabor/*
 * The following ioctls are for testing RESET/ABORT handling.
3077219019Sgabor */
3078219019Sgabor#define	DKIOCRESET	(DKIOC|14)
3079219019Sgabor#define	DKIOCABORT	(DKIOC|15)
3080219019Sgabor
3081219019Sgabor	case DKIOCRESET:
3082219019Sgabor		if (ddi_copyin((caddr_t)arg, (caddr_t)data, 4, flag))
3083219019Sgabor			return (EFAULT);
3084219019Sgabor		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3085219019Sgabor		    "DKIOCRESET: data = 0x%x\n", data[0]);
3086219019Sgabor		if (dcd_reset(ROUTE, data[0])) {
3087219019Sgabor			return (0);
3088219019Sgabor		} else {
3089219019Sgabor			return (EIO);
3090219019Sgabor		}
3091219019Sgabor	case DKIOCABORT:
3092219019Sgabor		DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3093219019Sgabor		    "DKIOCABORT:\n");
3094219019Sgabor		if (dcd_abort(ROUTE, (struct dcd_pkt *)0)) {
3095219019Sgabor			return (0);
3096219019Sgabor		} else {
3097219019Sgabor			return (EIO);
3098219019Sgabor		}
3099219019Sgabor#endif
3100219019Sgabor
3101219019Sgabor	case DKIOCINFO:
3102219019Sgabor		/*
3103219019Sgabor		 * Controller Information
3104219019Sgabor		 */
3105219019Sgabor		info = (struct dk_cinfo *)data;
3106219019Sgabor
3107219019Sgabor		mutex_enter(DCD_MUTEX);
3108219019Sgabor		switch (un->un_dp->ctype) {
3109219019Sgabor		default:
3110219019Sgabor			info->dki_ctype = DKC_DIRECT;
3111219019Sgabor			break;
3112219019Sgabor		}
3113219019Sgabor		mutex_exit(DCD_MUTEX);
3114219019Sgabor		info->dki_cnum = ddi_get_instance(ddi_get_parent(DCD_DEVINFO));
3115219019Sgabor		(void) strcpy(info->dki_cname,
3116219019Sgabor		    ddi_get_name(ddi_get_parent(DCD_DEVINFO)));
3117219019Sgabor		/*
3118219019Sgabor		 * Unit Information
3119219019Sgabor		 */
3120219019Sgabor		info->dki_unit = ddi_get_instance(DCD_DEVINFO);
3121219019Sgabor		info->dki_slave = (Tgt(DCD_DCD_DEVP)<<3);
3122219019Sgabor		(void) strcpy(info->dki_dname, ddi_driver_name(DCD_DEVINFO));
3123219019Sgabor		info->dki_flags = DKI_FMTVOL;
3124219019Sgabor		info->dki_partition = DCDPART(dev);
3125219019Sgabor
3126219019Sgabor		/*
3127219019Sgabor		 * Max Transfer size of this device in blocks
3128219019Sgabor		 */
3129219019Sgabor		info->dki_maxtransfer = un->un_max_xfer_size / DEV_BSIZE;
3130219019Sgabor
3131219019Sgabor		/*
3132219019Sgabor		 * We can't get from here to there yet
3133219019Sgabor		 */
3134219019Sgabor		info->dki_addr = 0;
3135219019Sgabor		info->dki_space = 0;
3136219019Sgabor		info->dki_prio = 0;
3137219019Sgabor		info->dki_vec = 0;
3138219019Sgabor
3139219019Sgabor		i = sizeof (struct dk_cinfo);
3140219019Sgabor		if (ddi_copyout((caddr_t)data, (caddr_t)arg, i, flag))
3141219019Sgabor			return (EFAULT);
3142219019Sgabor		else
3143219019Sgabor			return (0);
3144219019Sgabor
3145219019Sgabor	case DKIOCGMEDIAINFO:
3146219019Sgabor		/*
		 * Since the dad target driver is used only for IDE disks,
		 * the media type can be hardcoded to DK_FIXED_DISK.
3149219019Sgabor		 */
3150219019Sgabor		media_info.dki_media_type = DK_FIXED_DISK;
3151219019Sgabor
3152219019Sgabor		mutex_enter(DCD_MUTEX);
3153219019Sgabor		media_info.dki_lbsize = un->un_lbasize;
3154219019Sgabor		media_info.dki_capacity = un->un_diskcapacity;
3155219019Sgabor		mutex_exit(DCD_MUTEX);
3156219019Sgabor
3157219019Sgabor		if (ddi_copyout(&media_info, (caddr_t)arg,
3158219019Sgabor		    sizeof (struct dk_minfo), flag))
3159219019Sgabor			return (EFAULT);
3160219019Sgabor		else
3161219019Sgabor			return (0);
3162219019Sgabor
3163219019Sgabor	case DKIOCGGEOM:
3164219019Sgabor	case DKIOCGVTOC:
3165219019Sgabor	case DKIOCGETEFI:
3166219019Sgabor
3167219019Sgabor		mutex_enter(DCD_MUTEX);
3168219019Sgabor		if (un->un_ncmds == 0) {
3169219019Sgabor			if ((err = dcd_unit_ready(dev)) != 0) {
3170219019Sgabor				mutex_exit(DCD_MUTEX);
3171219019Sgabor				return (err);
3172219019Sgabor			}
3173219019Sgabor		}
3174219019Sgabor
3175219019Sgabor		mutex_exit(DCD_MUTEX);
3176219019Sgabor		err = cmlb_ioctl(un->un_dklbhandle, dev, cmd,
3177219019Sgabor		    arg, flag, cred_p, rval_p, 0);
3178219019Sgabor		return (err);
3179219019Sgabor
3180219019Sgabor	case DKIOCGAPART:
3181219019Sgabor	case DKIOCSAPART:
3182219019Sgabor	case DKIOCSGEOM:
3183219019Sgabor	case DKIOCSVTOC:
3184219019Sgabor	case DKIOCSETEFI:
3185219019Sgabor	case DKIOCPARTITION:
3186219019Sgabor	case DKIOCPARTINFO:
3187219019Sgabor	case DKIOCGMBOOT:
3188219019Sgabor	case DKIOCSMBOOT:
3189219019Sgabor
3190219019Sgabor		err = cmlb_ioctl(un->un_dklbhandle, dev, cmd,
3191219019Sgabor		    arg, flag, cred_p, rval_p, 0);
3192219019Sgabor		return (err);
3193219019Sgabor
3194219019Sgabor	case DIOCTL_RWCMD:
3195219019Sgabor		if (drv_priv(cred_p) != 0) {
3196219019Sgabor			return (EPERM);
3197219019Sgabor		}
3198219019Sgabor
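		/*
		 * Copy in the request.  A 32-bit caller passes a
		 * dadkio_rwcmd32; a 64-bit caller's dadkio_rwcmd is
		 * converted into the 32-bit form that is used internally.
		 */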
3199219019Sgabor#ifdef _MULTI_DATAMODEL
3200219019Sgabor		switch (ddi_model_convert_from(flag & FMODELS)) {
3201219019Sgabor		case DDI_MODEL_NONE:
3202219019Sgabor			if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd,
3203219019Sgabor			    sizeof (struct dadkio_rwcmd), flag)) {
3204219019Sgabor				return (EFAULT);
3205219019Sgabor			}
3206219019Sgabor			rwcmd32.cmd = rwcmd.cmd;
3207219019Sgabor			rwcmd32.flags = rwcmd.flags;
3208219019Sgabor			rwcmd32.blkaddr = rwcmd.blkaddr;
3209219019Sgabor			rwcmd32.buflen = rwcmd.buflen;
3210219019Sgabor			rwcmd32.bufaddr = (caddr32_t)(uintptr_t)rwcmd.bufaddr;
3211219019Sgabor			break;
3212219019Sgabor		case DDI_MODEL_ILP32:
3213219019Sgabor			if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32,
3214219019Sgabor			    sizeof (struct dadkio_rwcmd32), flag)) {
3215219019Sgabor				return (EFAULT);
3216219019Sgabor			}
3217219019Sgabor			break;
3218219019Sgabor		}
3219219019Sgabor#else
3220219019Sgabor		if (ddi_copyin((caddr_t)arg, (caddr_t)&rwcmd32,
3221219019Sgabor		    sizeof (struct dadkio_rwcmd32), flag)) {
3222219019Sgabor			return (EFAULT);
3223219019Sgabor		}
3224219019Sgabor#endif
3225219019Sgabor		mutex_enter(DCD_MUTEX);
3226219019Sgabor
3227219019Sgabor		uioseg  = UIO_SYSSPACE;
3228219019Sgabor		scmd = (struct udcd_cmd *)data;
3229219019Sgabor		scmd->udcd_cmd = &dcdcmd;
3230219019Sgabor		/*
		 * Convert the dadkio_rwcmd structure to a udcd_cmd so that
		 * it can take the normal path to get the I/O done.
3233219019Sgabor		 */
3234219019Sgabor		if (rwcmd32.cmd == DADKIO_RWCMD_READ) {
3235219019Sgabor			if ((un->un_dp->options & DMA_SUPPORTTED) ==
3236219019Sgabor			    DMA_SUPPORTTED)
3237219019Sgabor				scmd->udcd_cmd->cmd = ATA_READ_DMA;
3238219019Sgabor			else
3239219019Sgabor				scmd->udcd_cmd->cmd = ATA_READ;
3240219019Sgabor			scmd->udcd_cmd->address_mode = ADD_LBA_MODE;
3241219019Sgabor			scmd->udcd_cmd->direction = DATA_READ;
3242219019Sgabor			scmd->udcd_flags |= UDCD_READ|UDCD_SILENT;
3243219019Sgabor		} else if (rwcmd32.cmd == DADKIO_RWCMD_WRITE) {
3244219019Sgabor			if ((un->un_dp->options & DMA_SUPPORTTED) ==
3245219019Sgabor			    DMA_SUPPORTTED)
3246219019Sgabor				scmd->udcd_cmd->cmd = ATA_WRITE_DMA;
3247219019Sgabor			else
3248219019Sgabor				scmd->udcd_cmd->cmd = ATA_WRITE;
3249219019Sgabor			scmd->udcd_cmd->direction = DATA_WRITE;
3250219019Sgabor			scmd->udcd_flags |= UDCD_WRITE|UDCD_SILENT;
3251219019Sgabor		} else {
3252219019Sgabor			mutex_exit(DCD_MUTEX);
3253219019Sgabor			return (EINVAL);
3254219019Sgabor		}
3255219019Sgabor
3256219019Sgabor		scmd->udcd_cmd->address_mode = ADD_LBA_MODE;
3257219019Sgabor		scmd->udcd_cmd->features = 0;
3258219019Sgabor		scmd->udcd_cmd->size = rwcmd32.buflen;
3259219019Sgabor		scmd->udcd_cmd->sector_num.lba_num = rwcmd32.blkaddr;
3260219019Sgabor		scmd->udcd_bufaddr = (caddr_t)(uintptr_t)rwcmd32.bufaddr;
3261219019Sgabor		scmd->udcd_buflen = rwcmd32.buflen;
3262219019Sgabor		scmd->udcd_timeout = (ushort_t)dcd_io_time;
3263219019Sgabor		scmd->udcd_resid = 0ULL;
3264219019Sgabor		scmd->udcd_status = 0;
3265219019Sgabor		scmd->udcd_error_reg = 0;
3266219019Sgabor		scmd->udcd_status_reg = 0;
3267219019Sgabor
3268219019Sgabor		mutex_exit(DCD_MUTEX);
3269219019Sgabor
3270219019Sgabor		i = dcdioctl_cmd(dev, scmd, UIO_SYSSPACE, UIO_USERSPACE);
3271		mutex_enter(DCD_MUTEX);
3272		/*
3273		 * After return convert the status from scmd to
3274		 * dadkio_status
3275		 */
3276		(void) dcd_translate(&(rwcmd32.status), scmd);
3277		rwcmd32.status.resid = scmd->udcd_resid;
3278		mutex_exit(DCD_MUTEX);
3279
3280#ifdef _MULTI_DATAMODEL
3281		switch (ddi_model_convert_from(flag & FMODELS)) {
3282		case DDI_MODEL_NONE: {
3283			int counter;
3284			rwcmd.status.status = rwcmd32.status.status;
3285			rwcmd.status.resid  = rwcmd32.status.resid;
3286			rwcmd.status.failed_blk_is_valid =
3287			    rwcmd32.status.failed_blk_is_valid;
3288			rwcmd.status.failed_blk = rwcmd32.status.failed_blk;
3289			rwcmd.status.fru_code_is_valid =
3290			    rwcmd32.status.fru_code_is_valid;
3291			rwcmd.status.fru_code = rwcmd32.status.fru_code;
3292			for (counter = 0;
3293			    counter < DADKIO_ERROR_INFO_LEN; counter++)
3294				rwcmd.status.add_error_info[counter] =
3295				    rwcmd32.status.add_error_info[counter];
3296			}
3297			/* Copy out the result back to the user program */
3298			if (ddi_copyout((caddr_t)&rwcmd, (caddr_t)arg,
3299			    sizeof (struct dadkio_rwcmd), flag)) {
3300				if (i != 0) {
3301					i = EFAULT;
3302				}
3303			}
3304			break;
3305		case DDI_MODEL_ILP32:
3306			/* Copy out the result back to the user program */
3307			if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg,
3308			    sizeof (struct dadkio_rwcmd32), flag)) {
3309				if (i != 0) {
3310					i = EFAULT;
3311				}
3312			}
3313			break;
3314		}
3315#else
3316		/* Copy out the result back to the user program  */
3317		if (ddi_copyout((caddr_t)&rwcmd32, (caddr_t)arg,
3318		    sizeof (struct dadkio_rwcmd32), flag)) {
3319			if (i != 0)
3320				i = EFAULT;
3321		}
3322#endif
3323		return (i);
3324
3325	case UDCDCMD:	{
3326#ifdef	_MULTI_DATAMODEL
3327		/*
3328		 * For use when a 32 bit app makes a call into a
3329		 * 64 bit ioctl
3330		 */
3331		struct udcd_cmd32	udcd_cmd_32_for_64;
3332		struct udcd_cmd32	*ucmd32 = &udcd_cmd_32_for_64;
3333		model_t			model;
3334#endif /* _MULTI_DATAMODEL */
3335
3336		if (drv_priv(cred_p) != 0) {
3337			return (EPERM);
3338		}
3339
3340		scmd = (struct udcd_cmd *)data;
3341
3342#ifdef _MULTI_DATAMODEL
3343		switch (model = ddi_model_convert_from(flag & FMODELS)) {
3344		case DDI_MODEL_ILP32:
3345			if (ddi_copyin((caddr_t)arg, ucmd32,
3346			    sizeof (struct udcd_cmd32), flag)) {
3347				return (EFAULT);
3348			}
3349			/*
3350			 * Convert the ILP32 uscsi data from the
3351			 * application to LP64 for internal use.
3352			 */
3353			udcd_cmd32toudcd_cmd(ucmd32, scmd);
3354			break;
3355		case DDI_MODEL_NONE:
3356			if (ddi_copyin((caddr_t)arg, scmd, sizeof (*scmd),
3357			    flag)) {
3358				return (EFAULT);
3359			}
3360			break;
3361		}
3362#else /* ! _MULTI_DATAMODEL */
3363		if (ddi_copyin((caddr_t)arg, (caddr_t)scmd,
3364		    sizeof (*scmd), flag)) {
3365			return (EFAULT);
3366		}
3367#endif /* ! _MULTI_DATAMODEL */
3368
3369		scmd->udcd_flags &= ~UDCD_NOINTR;
3370		uioseg = (flag & FKIOCTL)? UIO_SYSSPACE: UIO_USERSPACE;
3371
3372		i = dcdioctl_cmd(dev, scmd, uioseg, uioseg);
3373#ifdef _MULTI_DATAMODEL
3374		switch (model) {
3375		case DDI_MODEL_ILP32:
3376			/*
3377			 * Convert back to ILP32 before copyout to the
3378			 * application
3379			 */
3380			udcd_cmdtoudcd_cmd32(scmd, ucmd32);
3381			if (ddi_copyout(ucmd32, (caddr_t)arg,
3382			    sizeof (*ucmd32), flag)) {
3383				if (i != 0)
3384					i = EFAULT;
3385			}
3386			break;
3387		case DDI_MODEL_NONE:
3388			if (ddi_copyout(scmd, (caddr_t)arg, sizeof (*scmd),
3389			    flag)) {
3390				if (i != 0)
3391					i = EFAULT;
3392			}
3393			break;
3394		}
3395#else /* ! _MULTI_DATAMODE */
3396		if (ddi_copyout((caddr_t)scmd, (caddr_t)arg,
3397		    sizeof (*scmd), flag)) {
3398			if (i != 0)
3399				i = EFAULT;
3400		}
3401#endif
3402		return (i);
3403	}
3404	case DKIOCFLUSHWRITECACHE:	{
3405		struct dk_callback *dkc = (struct dk_callback *)arg;
3406		struct dcd_pkt *pkt;
3407		struct buf *bp;
3408		int is_sync = 1;
3409
3410		mutex_enter(DCD_MUTEX);
3411		if (un->un_flush_not_supported ||
3412		    ! un->un_write_cache_enabled) {
3413			i = un->un_flush_not_supported ? ENOTSUP : 0;
3414			mutex_exit(DCD_MUTEX);
3415			/*
3416			 * If a callback was requested: a callback will
3417			 * always be done if the caller saw the
3418			 * DKIOCFLUSHWRITECACHE ioctl return 0, and
3419			 * never done if the caller saw the ioctl return
3420			 * an error.
3421			 */
3422			if ((flag & FKIOCTL) && dkc != NULL &&
3423			    dkc->dkc_callback != NULL) {
3424				(*dkc->dkc_callback)(dkc->dkc_cookie, i);
3425				/*
3426				 * Did callback and reported error.
3427				 * Since we did a callback, ioctl
3428				 * should return 0.
3429				 */
3430				i = 0;
3431			}
3432			return (i);
3433		}
3434
3435		/*
3436		 * Get the special buffer
3437		 */
3438		while (un->un_sbuf_busy) {
3439			cv_wait(&un->un_sbuf_cv, DCD_MUTEX);
3440		}
3441		un->un_sbuf_busy = 1;
3442		bp  = un->un_sbufp;
3443		mutex_exit(DCD_MUTEX);
3444
3445		pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
3446		    NULL, (uint32_t)sizeof (struct dcd_cmd),
3447		    2, PP_LEN, PKT_CONSISTENT, SLEEP_FUNC, (caddr_t)un);
3448		ASSERT(pkt != NULL);
3449
3450		makecommand(pkt, un->un_cmd_flags | FLAG_SILENT,
3451		    ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0, NO_DATA_XFER, 0);
3452
3453		pkt->pkt_comp = dcdintr;
3454		pkt->pkt_time = DCD_FLUSH_TIME;
3455		PKT_SET_BP(pkt, bp);
3456
3457		bp->av_back = (struct buf *)pkt;
3458		bp->b_forw = NULL;
3459		bp->b_flags = B_BUSY;
3460		bp->b_error = 0;
3461		bp->b_edev = dev;
3462		bp->b_dev = cmpdev(dev);
3463		bp->b_bcount = 0;
3464		bp->b_blkno = 0;
3465		bp->b_un.b_addr = 0;
3466		bp->b_iodone = NULL;
3467		bp->b_list = NULL;
3468
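		/*
		 * If this is a kernel caller that supplied a dk_callback,
		 * issue the flush asynchronously: dcdflushdone() will run
		 * the callback when the command completes.  Otherwise wait
		 * for the flush synchronously with biowait().
		 */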
3469		if ((flag & FKIOCTL) && dkc != NULL &&
3470		    dkc->dkc_callback != NULL) {
3471			struct dk_callback *dkc2 = (struct dk_callback *)
3472			    kmem_zalloc(sizeof (*dkc2), KM_SLEEP);
3473			bcopy(dkc, dkc2, sizeof (*dkc2));
3474
3475			bp->b_list = (struct buf *)dkc2;
3476			bp->b_iodone = dcdflushdone;
3477			is_sync = 0;
3478		}
3479
3480		(void) dcdstrategy(bp);
3481
3482		i = 0;
3483		if (is_sync) {
3484			i = biowait(bp);
3485			(void) dcdflushdone(bp);
3486		}
3487
3488		return (i);
3489	}
3490	default:
3491		break;
3492	}
3493	return (ENOTTY);
3494}
3495
3496
3497static int
3498dcdflushdone(struct buf *bp)
3499{
3500	struct dcd_disk *un = ddi_get_soft_state(dcd_state,
3501	    DCDUNIT(bp->b_edev));
3502	struct dcd_pkt *pkt = BP_PKT(bp);
3503	struct dk_callback *dkc = (struct dk_callback *)bp->b_list;
3504
3505	ASSERT(un != NULL);
3506	ASSERT(bp == un->un_sbufp);
3507	ASSERT(pkt != NULL);
3508
3509	dcd_destroy_pkt(pkt);
3510	bp->av_back = NO_PKT_ALLOCATED;
3511
3512	if (dkc != NULL) {
3513		ASSERT(bp->b_iodone != NULL);
3514		(*dkc->dkc_callback)(dkc->dkc_cookie, geterror(bp));
3515		kmem_free(dkc, sizeof (*dkc));
3516		bp->b_iodone = NULL;
3517		bp->b_list = NULL;
3518	}
3519
3520	/*
3521	 * Tell anybody who cares that the buffer is now free
3522	 */
3523	mutex_enter(DCD_MUTEX);
3524	un->un_sbuf_busy = 0;
3525	cv_signal(&un->un_sbuf_cv);
3526	mutex_exit(DCD_MUTEX);
3527	return (0);
3528}
3529
3530/*
3531 * dcdrunout:
3532 *	the callback function for resource allocation
3533 *
3534 * XXX it would be preferable that dcdrunout() scans the whole
3535 *	list for possible candidates for dcdstart(); this avoids
3536 *	that a bp at the head of the list whose request cannot be
3537 *	satisfied is retried again and again
3538 */
3539/*ARGSUSED*/
3540static int
3541dcdrunout(caddr_t arg)
3542{
3543	int serviced;
3544	struct dcd_disk *un;
3545	struct diskhd *dp;
3546
3547	TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_START, "dcdrunout_start: arg 0x%p",
3548	    arg);
3549	serviced = 1;
3550
3551	un = (struct dcd_disk *)arg;
3552	dp = &un->un_utab;
3553
3554	/*
3555	 * We now support passing a structure to the callback
3556	 * routine.
3557	 */
3558	ASSERT(un != NULL);
3559	mutex_enter(DCD_MUTEX);
3560	if ((un->un_ncmds < un->un_throttle) && (dp->b_forw == NULL)) {
3561		dcdstart(un);
3562	}
3563	if (un->un_state == DCD_STATE_RWAIT) {
3564		serviced = 0;
3565	}
3566	mutex_exit(DCD_MUTEX);
3567	TRACE_1(TR_FAC_DADA, TR_DCDRUNOUT_END,
3568	    "dcdrunout_end: serviced %d", serviced);
3569	return (serviced);
3570}
3571
3572
3573/*
3574 * This routine called to see whether unit is (still) there. Must not
3575 * be called when un->un_sbufp is in use, and must not be called with
3576 * an unattached disk. Soft state of disk is restored to what it was
3577 * upon entry- up to caller to set the correct state.
3578 *
3579 * We enter with the disk mutex held.
3580 */
3581
3582/* ARGSUSED0 */
3583static int
3584dcd_unit_ready(dev_t dev)
3585{
3586#ifndef lint
3587	auto struct udcd_cmd dcmd, *com = &dcmd;
3588	auto struct dcd_cmd cmdblk;
3589#endif
3590	int error;
3591#ifndef lint
3592	GET_SOFT_STATE(dev);
3593#endif
3594
3595	/*
3596	 * Now that we protect the special buffer with
3597	 * a mutex, we could probably do a mutex_tryenter
3598	 * on it here and return failure if it were held...
3599	 */
3600
3601	error = 0;
3602	return (error);
3603}
3604
3605/* ARGSUSED0 */
3606int
3607dcdioctl_cmd(dev_t devp, struct udcd_cmd *in, enum uio_seg cdbspace,
3608    enum uio_seg dataspace)
3609{
3610
3611	struct buf *bp;
3612	struct	udcd_cmd *scmd;
3613	struct dcd_pkt *pkt;
3614	int	err, rw;
3615	caddr_t	cdb;
3616	int	flags = 0;
3617
3618	GET_SOFT_STATE(devp);
3619
3620#ifdef lint
3621	part = part;
3622#endif
3623
3624	/*
3625	 * Is this a request to reset the bus?
3626	 * if so, we need to do reseting.
3627	 */
3628
3629	if (in->udcd_flags & UDCD_RESET) {
3630		int flag = RESET_TARGET;
3631		err = dcd_reset(ROUTE, flag) ? 0: EIO;
3632		return (err);
3633	}
3634
3635	scmd = in;
3636
3637
3638	/* Do some sanity checks */
3639	if (scmd->udcd_buflen <= 0) {
3640		if (scmd->udcd_flags & (UDCD_READ | UDCD_WRITE)) {
3641			return (EINVAL);
3642		} else {
3643			scmd->udcd_buflen = 0;
3644		}
3645	}
3646
3647	/* Make a copy of the dcd_cmd passed  */
3648	cdb = kmem_zalloc(sizeof (struct dcd_cmd), KM_SLEEP);
3649	if (cdbspace == UIO_SYSSPACE) {
3650		flags |= FKIOCTL;
3651	}
3652
3653	if (ddi_copyin((void *)scmd->udcd_cmd, cdb, sizeof (struct dcd_cmd),
3654	    flags)) {
3655		kmem_free(cdb, sizeof (struct dcd_cmd));
3656		return (EFAULT);
3657	}
3658	scmd = (struct udcd_cmd *)kmem_alloc(sizeof (*scmd), KM_SLEEP);
3659	bcopy((caddr_t)in, (caddr_t)scmd, sizeof (*scmd));
3660	scmd->udcd_cmd = (struct dcd_cmd *)cdb;
3661	rw = (scmd->udcd_flags & UDCD_READ) ? B_READ: B_WRITE;
3662
3663
3664	/*
3665	 * Get the special buffer
3666	 */
3667
3668	mutex_enter(DCD_MUTEX);
3669	while (un->un_sbuf_busy) {
3670		if (cv_wait_sig(&un->un_sbuf_cv, DCD_MUTEX) == 0) {
3671			kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd));
3672			kmem_free((caddr_t)scmd, sizeof (*scmd));
3673			mutex_exit(DCD_MUTEX);
3674			return (EINTR);
3675		}
3676	}
3677
3678	un->un_sbuf_busy = 1;
3679	bp  = un->un_sbufp;
3680	mutex_exit(DCD_MUTEX);
3681
3682
3683	/*
3684	 * If we are going to do actual I/O, let physio do all the
3685	 * things
3686	 */
3687	DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
3688	    "dcdioctl_cmd : buflen %x\n", scmd->udcd_buflen);
3689
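	/*
	 * If the caller supplied a data buffer, let physio() lock down the
	 * pages and drive dcdstrategy() for the transfer; otherwise fill in
	 * the buf fields that physio() would normally have set up and call
	 * dcdstrategy() directly.
	 */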
3690	if (scmd->udcd_buflen) {
3691		auto struct iovec aiov;
3692		auto struct uio auio;
3693		struct uio *uio = &auio;
3694
3695		bzero((caddr_t)&auio, sizeof (struct uio));
3696		bzero((caddr_t)&aiov, sizeof (struct iovec));
3697
3698		aiov.iov_base = scmd->udcd_bufaddr;
3699		aiov.iov_len = scmd->udcd_buflen;
3700
3701		uio->uio_iov = &aiov;
3702		uio->uio_iovcnt = 1;
3703		uio->uio_resid = scmd->udcd_buflen;
3704		uio->uio_segflg = dataspace;
3705
3706		/*
3707		 * Let physio do the rest...
3708		 */
3709		bp->av_back = NO_PKT_ALLOCATED;
3710		bp->b_forw = (struct buf *)scmd;
3711		err = physio(dcdstrategy, bp, devp, rw, dcdudcdmin, uio);
3712	} else {
3713		/*
3714		 * We have to mimic what physio would do here.
3715		 */
3716		bp->av_back = NO_PKT_ALLOCATED;
3717		bp->b_forw = (struct buf *)scmd;
3718		bp->b_flags = B_BUSY | rw;
3719		bp->b_edev = devp;
3720		bp->b_dev = cmpdev(devp);
3721		bp->b_bcount = bp->b_blkno = 0;
3722		(void) dcdstrategy(bp);
3723		err = biowait(bp);
3724	}
3725
3726done:
3727	if ((pkt = BP_PKT(bp)) != NULL) {
3728		bp->av_back = NO_PKT_ALLOCATED;
3729		/* we need to update the completion status of udcd command */
3730		in->udcd_resid = bp->b_resid;
3731		in->udcd_status_reg = SCBP_C(pkt);
3732		/* XXX: we need to give error_reg also */
3733		dcd_destroy_pkt(pkt);
3734	}
3735	/*
3736	 * Tell anybody who cares that the buffer is now free
3737	 */
3738	mutex_enter(DCD_MUTEX);
3739	un->un_sbuf_busy = 0;
3740	cv_signal(&un->un_sbuf_cv);
3741	mutex_exit(DCD_MUTEX);
3742
3743	kmem_free(scmd->udcd_cmd, sizeof (struct dcd_cmd));
3744	kmem_free((caddr_t)scmd, sizeof (*scmd));
3745	return (err);
3746}
3747
3748static void
3749dcdudcdmin(struct buf *bp)
3750{
3751
3752#ifdef lint
3753	bp = bp;
3754#endif
3755
3756}
3757
3758/*
3759 * restart a cmd from timeout() context
3760 *
3761 * the cmd is expected to be in un_utab.b_forw. If this pointer is non-zero
3762 * a restart timeout request has been issued and no new timeouts should
3763 * be requested. b_forw is reset when the cmd eventually completes in
3764 * dcddone_and_mutex_exit()
3765 */
3766void
3767dcdrestart(void *arg)
3768{
3769	struct dcd_disk *un = (struct dcd_disk *)arg;
3770	struct buf *bp;
3771	int status;
3772
3773	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart\n");
3774
3775	mutex_enter(DCD_MUTEX);
3776	bp = un->un_utab.b_forw;
3777	if (bp) {
3778		un->un_ncmds++;
3779		DCD_DO_KSTATS(un, kstat_waitq_to_runq, bp);
3780	}
3781
3782
3783	if (bp) {
3784		struct dcd_pkt *pkt = BP_PKT(bp);
3785
3786		mutex_exit(DCD_MUTEX);
3787
3788		pkt->pkt_flags = 0;
3789
3790		if ((status = dcd_transport(pkt)) != TRAN_ACCEPT) {
3791			mutex_enter(DCD_MUTEX);
3792			DCD_DO_KSTATS(un, kstat_runq_back_to_waitq, bp);
3793			un->un_ncmds--;
3794			if (status == TRAN_BUSY) {
3795				/* XXX : To be checked */
3796				/*
3797				 * if (un->un_throttle > 1) {
3798				 *	ASSERT(un->un_ncmds >= 0);
3799				 *	un->un_throttle = un->un_ncmds;
3800				 * }
3801				 */
3802				un->un_reissued_timeid =
3803				    timeout(dcdrestart, (caddr_t)un,
3804				    DCD_BSY_TIMEOUT/500);
3805				mutex_exit(DCD_MUTEX);
3806				return;
3807			}
3808			DCD_DO_ERRSTATS(un, dcd_transerrs);
3809			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
3810			    "dcdrestart transport failed (%x)\n", status);
3811			bp->b_resid = bp->b_bcount;
3812			SET_BP_ERROR(bp, EIO);
3813
3814			DCD_DO_KSTATS(un, kstat_waitq_exit, bp);
3815			un->un_reissued_timeid = 0L;
3816			dcddone_and_mutex_exit(un, bp);
3817			return;
3818		}
3819		mutex_enter(DCD_MUTEX);
3820	}
3821	un->un_reissued_timeid = 0L;
3822	mutex_exit(DCD_MUTEX);
3823	DAD_DEBUG(DCD_DEVINFO, dcd_label, DCD_DEBUG, "dcdrestart done\n");
3824}
3825
3826/*
3827 * This routine gets called to reset the throttle to its saved
3828 * value wheneven we lower the throttle.
3829 */
3830void
3831dcd_reset_throttle(caddr_t arg)
3832{
3833	struct dcd_disk *un = (struct dcd_disk *)arg;
3834	struct diskhd *dp;
3835
3836	mutex_enter(DCD_MUTEX);
3837	dp = &un->un_utab;
3838
3839	/*
3840	 * start any commands that didn't start while throttling.
3841	 */
3842	if (dp->b_actf && (un->un_ncmds < un->un_throttle) &&
3843	    (dp->b_forw == NULL)) {
3844		dcdstart(un);
3845	}
3846	mutex_exit(DCD_MUTEX);
3847}
3848
3849
3850/*
3851 * This routine handles the case when a TRAN_BUSY is
3852 * returned by HBA.
3853 *
3854 * If there are some commands already in the transport, the
3855 * bp can be put back on queue and it will
3856 * be retried when the queue is emptied after command
3857 * completes. But if there is no command in the tranport
3858 * and it still return busy, we have to retry the command
3859 * after some time like 10ms.
3860 */
3861/* ARGSUSED0 */
3862static void
3863dcd_handle_tran_busy(struct buf *bp, struct diskhd *dp, struct dcd_disk *un)
3864{
3865	ASSERT(mutex_owned(DCD_MUTEX));
3866
3867
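	/*
	 * Park the bp in dp->b_forw (or push it back on the head of the
	 * wait queue if another command already occupies b_forw) and
	 * schedule dcdrestart() to reissue it after a short delay.
	 */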
3868	if (dp->b_forw == NULL || dp->b_forw == bp) {
3869		dp->b_forw = bp;
3870	} else if (dp->b_forw != bp) {
3871		bp->b_actf = dp->b_actf;
3872		dp->b_actf = bp;
3873
3874	}
3875	if (!un->un_reissued_timeid) {
3876		un->un_reissued_timeid =
3877		    timeout(dcdrestart, (caddr_t)un, DCD_BSY_TIMEOUT/500);
3878	}
3879}
3880
3881static int
3882dcd_write_deviceid(struct dcd_disk *un)
3883{
3884
3885	int 	status;
3886	diskaddr_t blk;
3887	struct udcd_cmd ucmd;
3888	struct dcd_cmd cdb;
3889	struct dk_devid	*dkdevid;
3890	uint_t *ip, chksum;
3891	int	i;
3892	dev_t	dev;
3893
3894	mutex_exit(DCD_MUTEX);
3895	if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) {
3896		mutex_enter(DCD_MUTEX);
3897		return (EINVAL);
3898	}
3899	mutex_enter(DCD_MUTEX);
3900
3901	/* Allocate the buffer */
3902	dkdevid = kmem_zalloc(un->un_secsize, KM_SLEEP);
3903
3904	/* Fill in the revision */
3905	dkdevid->dkd_rev_hi = DK_DEVID_REV_MSB;
3906	dkdevid->dkd_rev_lo = DK_DEVID_REV_LSB;
3907
3908	/* Copy in the device id */
3909	bcopy(un->un_devid, &dkdevid->dkd_devid,
3910	    ddi_devid_sizeof(un->un_devid));
3911
3912	/* Calculate the chksum */
3913	chksum = 0;
3914	ip = (uint_t *)dkdevid;
3915	for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++)
3916		chksum ^= ip[i];
3917
3918	/* Fill in the checksum */
3919	DKD_FORMCHKSUM(chksum, dkdevid);
3920
3921	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
3922	(void) bzero((caddr_t)&cdb, sizeof (struct dcd_cmd));
3923
3924	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
3925		cdb.cmd = ATA_WRITE_DMA;
3926	} else {
3927		if (un->un_dp->options & BLOCK_MODE)
3928			cdb.cmd = ATA_WRITE_MULTIPLE;
3929		else
3930			cdb.cmd = ATA_WRITE;
3931	}
3932	cdb.size = un->un_secsize;
3933	cdb.sector_num.lba_num = blk;
3934	cdb.address_mode = ADD_LBA_MODE;
3935	cdb.direction = DATA_WRITE;
3936
3937	ucmd.udcd_flags = UDCD_WRITE;
3938	ucmd.udcd_cmd =  &cdb;
3939	ucmd.udcd_bufaddr = (caddr_t)dkdevid;
3940	ucmd.udcd_buflen = un->un_secsize;
3941	ucmd.udcd_flags |= UDCD_SILENT;
3942	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
3943	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
3944	mutex_exit(DCD_MUTEX);
3945	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
3946	mutex_enter(DCD_MUTEX);
3947
3948	kmem_free(dkdevid, un->un_secsize);
3949	return (status);
3950}
3951
3952static int
3953dcd_read_deviceid(struct dcd_disk *un)
3954{
3955	int status;
3956	diskaddr_t blk;
3957	struct udcd_cmd ucmd;
3958	struct dcd_cmd cdb;
3959	struct dk_devid *dkdevid;
3960	uint_t *ip;
3961	int chksum;
3962	int i, sz;
3963	dev_t dev;
3964
3965	mutex_exit(DCD_MUTEX);
3966	if (cmlb_get_devid_block(un->un_dklbhandle, &blk, 0)) {
3967		mutex_enter(DCD_MUTEX);
3968		return (EINVAL);
3969	}
3970	mutex_enter(DCD_MUTEX);
3971
3972	dkdevid = kmem_alloc(un->un_secsize, KM_SLEEP);
3973
3974	(void) bzero((caddr_t)&ucmd, sizeof (ucmd));
3975	(void) bzero((caddr_t)&cdb, sizeof (cdb));
3976
3977	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
3978		cdb.cmd = ATA_READ_DMA;
3979	} else {
3980		if (un->un_dp->options & BLOCK_MODE)
3981			cdb.cmd = ATA_READ_MULTIPLE;
3982		else
3983			cdb.cmd = ATA_READ;
3984	}
3985	cdb.size = un->un_secsize;
3986	cdb.sector_num.lba_num = blk;
3987	cdb.address_mode = ADD_LBA_MODE;
3988	cdb.direction = DATA_READ;
3989
3990	ucmd.udcd_flags = UDCD_READ;
3991	ucmd.udcd_cmd =  &cdb;
3992	ucmd.udcd_bufaddr = (caddr_t)dkdevid;
3993	ucmd.udcd_buflen = un->un_secsize;
3994	ucmd.udcd_flags |= UDCD_SILENT;
3995	dev = makedevice(ddi_driver_major(DCD_DEVINFO),
3996	    ddi_get_instance(DCD_DEVINFO) << DCDUNIT_SHIFT);
3997	mutex_exit(DCD_MUTEX);
3998	status = dcdioctl_cmd(dev, &ucmd, UIO_SYSSPACE, UIO_SYSSPACE);
3999	mutex_enter(DCD_MUTEX);
4000
4001	if (status != 0) {
4002		kmem_free((caddr_t)dkdevid, un->un_secsize);
4003		return (status);
4004	}
4005
4006	/* Validate the revision */
4007
4008	if ((dkdevid->dkd_rev_hi != DK_DEVID_REV_MSB) ||
4009	    (dkdevid->dkd_rev_lo != DK_DEVID_REV_LSB)) {
4010		kmem_free((caddr_t)dkdevid, un->un_secsize);
4011		return (EINVAL);
4012	}
4013
4014	/* Calculate the checksum */
4015	chksum = 0;
4016	ip = (uint_t *)dkdevid;
4017	for (i = 0; i < ((un->un_secsize - sizeof (int))/sizeof (int)); i++)
4018		chksum ^= ip[i];
4019
4020	/* Compare the checksums */
4021
4022	if (DKD_GETCHKSUM(dkdevid) != chksum) {
4023		kmem_free((caddr_t)dkdevid, un->un_secsize);
4024		return (EINVAL);
4025	}
4026
4027	/* VAlidate the device id */
4028	if (ddi_devid_valid((ddi_devid_t)&dkdevid->dkd_devid) != DDI_SUCCESS) {
4029		kmem_free((caddr_t)dkdevid, un->un_secsize);
4030		return (EINVAL);
4031	}
4032
4033	/* return a copy of the device id */
4034	sz = ddi_devid_sizeof((ddi_devid_t)&dkdevid->dkd_devid);
4035	un->un_devid = (ddi_devid_t)kmem_alloc(sz, KM_SLEEP);
4036	bcopy(&dkdevid->dkd_devid, un->un_devid, sz);
4037	kmem_free((caddr_t)dkdevid, un->un_secsize);
4038
4039	return (0);
4040}
4041
4042/*
4043 * Return the device id for the device.
4044 * 1. If the device ID exists then just return it - nothing to do in that case.
4045 * 2. Build one from the drives model number and serial number.
4046 * 3. If there is a problem in building it from serial/model #, then try
4047 * to read it from the acyl region of the disk.
4048 * Note: If this function is unable to return a valid ID then the calling
4049 * point will invoke the routine to create a fabricated ID ans stor it on the
4050 * acyl region of the disk.
4051 */
4052static ddi_devid_t
4053dcd_get_devid(struct dcd_disk *un)
4054{
4055	int		rc;
4056
4057	/* If already registered, return that value */
4058	if (un->un_devid != NULL)
4059		return (un->un_devid);
4060
4061	/* Build a devid from model and serial number, if present */
4062	rc = dcd_make_devid_from_serial(un);
4063
4064	if (rc != DDI_SUCCESS) {
4065		/* Read the devid from the disk. */
4066		if (dcd_read_deviceid(un))
4067			return (NULL);
4068	}
4069
4070	(void) ddi_devid_register(DCD_DEVINFO, un->un_devid);
4071	return (un->un_devid);
4072}
4073
4074
4075static ddi_devid_t
4076dcd_create_devid(struct dcd_disk *un)
4077{
4078	if (ddi_devid_init(DCD_DEVINFO, DEVID_FAB, 0, NULL, (ddi_devid_t *)
4079	    &un->un_devid) == DDI_FAILURE)
4080		return (NULL);
4081
4082	if (dcd_write_deviceid(un)) {
4083		ddi_devid_free(un->un_devid);
4084		un->un_devid = NULL;
4085		return (NULL);
4086	}
4087
4088	(void) ddi_devid_register(DCD_DEVINFO, un->un_devid);
4089	return (un->un_devid);
4090}
4091
4092/*
4093 * Build a devid from the model and serial number, if present
4094 * Return DDI_SUCCESS or DDI_FAILURE.
4095 */
4096static int
4097dcd_make_devid_from_serial(struct dcd_disk *un)
4098{
4099	int	rc = DDI_SUCCESS;
4100	char	*hwid;
4101	char	*model;
4102	int	model_len;
4103	char	*serno;
4104	int	serno_len;
4105	int	total_len;
4106
4107	/* initialize the model and serial number information */
4108	model = un->un_dcd->dcd_ident->dcd_model;
4109	model_len = DCD_MODEL_NUMBER_LENGTH;
4110	serno = un->un_dcd->dcd_ident->dcd_drvser;
4111	serno_len = DCD_SERIAL_NUMBER_LENGTH;
4112
4113	/* Verify the model and serial number */
4114	dcd_validate_model_serial(model, &model_len, model_len);
4115	if (model_len == 0) {
4116		rc = DDI_FAILURE;
4117		goto out;
4118	}
4119	dcd_validate_model_serial(serno, &serno_len, serno_len);
4120	if (serno_len == 0) {
4121		rc = DDI_FAILURE;
4122		goto out;
4123	}
4124
4125	/*
4126	 * The device ID will be concatenation of the model number,
4127	 * the '=' separator, the serial number. Allocate
4128	 * the string and concatenate the components.
4129	 */
4130	total_len = model_len + 1 + serno_len;
4131	hwid = kmem_alloc(total_len, KM_SLEEP);
4132	bcopy((caddr_t)model, (caddr_t)hwid, model_len);
4133	bcopy((caddr_t)"=", (caddr_t)&hwid[model_len], 1);
4134	bcopy((caddr_t)serno, (caddr_t)&hwid[model_len + 1], serno_len);
4135
4136	/* Initialize the device ID, trailing NULL not included */
4137	rc = ddi_devid_init(DCD_DEVINFO, DEVID_ATA_SERIAL, total_len,
4138	    hwid, (ddi_devid_t *)&un->un_devid);
4139
4140	/* Free the allocated string */
4141	kmem_free(hwid, total_len);
4142
4143out:	return (rc);
4144}
4145
4146/*
4147 * Test for a valid model or serial number. Assume that a valid representation
4148 * contains at least one character that is neither a space, 0 digit, or NULL.
4149 * Trim trailing blanks and NULLS from returned length.
4150 */
4151static void
4152dcd_validate_model_serial(char *str, int *retlen, int totallen)
4153{
4154	char		ch;
4155	boolean_t	ret = B_FALSE;
4156	int		i;
4157	int		tb;
4158
4159	for (i = 0, tb = 0; i < totallen; i++) {
4160		ch = *str++;
4161		if ((ch != ' ') && (ch != '\0') && (ch != '0'))
4162			ret = B_TRUE;
4163		if ((ch == ' ') || (ch == '\0'))
4164			tb++;
4165		else
4166			tb = 0;
4167	}
4168
4169	if (ret == B_TRUE) {
4170		/* Atleast one non 0 or blank character. */
4171		*retlen = totallen - tb;
4172	} else {
4173		*retlen = 0;
4174	}
4175}
4176
4177#ifndef lint
4178void
4179clean_print(dev_info_t *dev, char *label, uint_t level,
4180	char *title, char *data, int len)
4181{
4182	int	i;
4183	char	buf[256];
4184
4185	(void) sprintf(buf, "%s:", title);
4186	for (i = 0; i < len; i++) {
4187		(void) sprintf(&buf[strlen(buf)], "0x%x ", (data[i] & 0xff));
4188	}
4189	(void) sprintf(&buf[strlen(buf)], "\n");
4190
4191	dcd_log(dev, label, level, "%s", buf);
4192}
4193#endif /* Not lint */
4194
4195#ifndef lint
4196/*
4197 * Print a piece of inquiry data- cleaned up for non-printable characters
4198 * and stopping at the first space character after the beginning of the
4199 * passed string;
4200 */
4201
4202void
4203inq_fill(char *p, int l, char *s)
4204{
4205	unsigned i = 0;
4206	char c;
4207
4208	while (i++ < l) {
4209		if ((c = *p++) < ' ' || c >= 0177) {
4210			c = '*';
4211		} else if (i != 1 && c == ' ') {
4212			break;
4213		}
4214		*s++ = c;
4215	}
4216	*s++ = 0;
4217}
4218#endif /* Not lint */
4219
4220char *
4221dcd_sname(uchar_t status)
4222{
4223	switch (status & STATUS_ATA_MASK) {
4224	case STATUS_GOOD:
4225		return ("good status");
4226
4227	case STATUS_ATA_BUSY:
4228		return ("busy");
4229
4230	default:
4231		return ("<unknown status>");
4232	}
4233}
4234
4235/* ARGSUSED0 */
4236char *
4237dcd_rname(int reason)
4238{
4239	static char *rnames[] = {
4240		"cmplt",
4241		"incomplete",
4242		"dma_derr",
4243		"tran_err",
4244		"reset",
4245		"aborted",
4246		"timeout",
4247		"data_ovr",
4248	};
4249	if (reason > CMD_DATA_OVR) {
4250		return ("<unknown reason>");
4251	} else {
4252		return (rnames[reason]);
4253	}
4254}
4255
4256
4257
4258/* ARGSUSED0 */
4259int
4260dcd_check_wp(dev_t dev)
4261{
4262
4263	return (0);
4264}
4265
4266/*
4267 * Create device error kstats
4268 */
4269static int
4270dcd_create_errstats(struct dcd_disk *un, int instance)
4271{
4272
4273	char kstatname[KSTAT_STRLEN];
4274
4275	if (un->un_errstats == (kstat_t *)0) {
4276		(void) sprintf(kstatname, "dad%d,error", instance);
4277		un->un_errstats = kstat_create("daderror", instance, kstatname,
4278		    "device_error", KSTAT_TYPE_NAMED,
4279		    sizeof (struct dcd_errstats)/ sizeof (kstat_named_t),
4280		    KSTAT_FLAG_PERSISTENT);
4281
4282		if (un->un_errstats) {
4283			struct dcd_errstats *dtp;
4284
4285			dtp = (struct dcd_errstats *)un->un_errstats->ks_data;
4286			kstat_named_init(&dtp->dcd_softerrs, "Soft Errors",
4287			    KSTAT_DATA_UINT32);
4288			kstat_named_init(&dtp->dcd_harderrs, "Hard Errors",
4289			    KSTAT_DATA_UINT32);
4290			kstat_named_init(&dtp->dcd_transerrs,
4291			    "Transport Errors", KSTAT_DATA_UINT32);
4292			kstat_named_init(&dtp->dcd_model, "Model",
4293			    KSTAT_DATA_CHAR);
4294			kstat_named_init(&dtp->dcd_revision, "Revision",
4295			    KSTAT_DATA_CHAR);
4296			kstat_named_init(&dtp->dcd_serial, "Serial No",
4297			    KSTAT_DATA_CHAR);
4298			kstat_named_init(&dtp->dcd_capacity, "Size",
4299			    KSTAT_DATA_ULONGLONG);
4300			kstat_named_init(&dtp->dcd_rq_media_err, "Media Error",
4301			    KSTAT_DATA_UINT32);
4302			kstat_named_init(&dtp->dcd_rq_ntrdy_err,
4303			    "Device Not Ready", KSTAT_DATA_UINT32);
4304			kstat_named_init(&dtp->dcd_rq_nodev_err, " No Device",
4305			    KSTAT_DATA_UINT32);
4306			kstat_named_init(&dtp->dcd_rq_recov_err, "Recoverable",
4307			    KSTAT_DATA_UINT32);
4308			kstat_named_init(&dtp->dcd_rq_illrq_err,
4309			    "Illegal Request", KSTAT_DATA_UINT32);
4310
4311			un->un_errstats->ks_private = un;
4312			un->un_errstats->ks_update = nulldev;
4313			kstat_install(un->un_errstats);
4314
4315			(void) strncpy(&dtp->dcd_model.value.c[0],
4316			    un->un_dcd->dcd_ident->dcd_model, 16);
4317			(void) strncpy(&dtp->dcd_serial.value.c[0],
4318			    un->un_dcd->dcd_ident->dcd_drvser, 16);
4319			(void) strncpy(&dtp->dcd_revision.value.c[0],
4320			    un->un_dcd->dcd_ident->dcd_fw, 8);
4321			dtp->dcd_capacity.value.ui64 =
4322			    (uint64_t)((uint64_t)un->un_diskcapacity *
4323			    (uint64_t)un->un_lbasize);
4324		}
4325	}
4326	return (0);
4327}
4328
4329
4330/*
4331 * This has been moved from DADA layer as this does not do anything other than
4332 * retrying the command when it is busy or it does not complete
4333 */
4334int
4335dcd_poll(struct dcd_pkt *pkt)
4336{
4337	int	busy_count, rval = -1, savef;
4338	clock_t	savet;
4339	void	(*savec)();
4340
4341
4342	/*
4343	 * Save old flags
4344	 */
4345	savef = pkt->pkt_flags;
4346	savec = pkt->pkt_comp;
4347	savet = pkt->pkt_time;
4348
4349	pkt->pkt_flags |= FLAG_NOINTR;
4350
4351
4352	/*
4353	 * Set the Pkt_comp to NULL
4354	 */
4355
4356	pkt->pkt_comp = 0;
4357
4358	/*
4359	 * Set the Pkt time for the polled command
4360	 */
4361	if (pkt->pkt_time == 0) {
4362		pkt->pkt_time = DCD_POLL_TIMEOUT;
4363	}
4364
4365
4366	/* Now transport the command */
4367	for (busy_count = 0; busy_count < dcd_poll_busycnt; busy_count++) {
4368		if ((rval = dcd_transport(pkt)) == TRAN_ACCEPT) {
4369			if (pkt->pkt_reason == CMD_INCOMPLETE &&
4370			    pkt->pkt_state == 0) {
4371				delay(100);
4372			} else if (pkt->pkt_reason  == CMD_CMPLT) {
4373				rval = 0;
4374				break;
4375			}
4376		}
4377		if (rval == TRAN_BUSY)  {
4378			delay(100);
4379			continue;
4380		}
4381	}
4382
4383	pkt->pkt_flags = savef;
4384	pkt->pkt_comp = savec;
4385	pkt->pkt_time = savet;
4386	return (rval);
4387}
4388
4389
4390void
4391dcd_translate(struct dadkio_status32 *statp, struct udcd_cmd *cmdp)
4392{
4393	if (cmdp->udcd_status_reg & STATUS_ATA_BUSY)
4394		statp->status = DADKIO_STAT_NOT_READY;
4395	else if (cmdp->udcd_status_reg & STATUS_ATA_DWF)
4396		statp->status = DADKIO_STAT_HARDWARE_ERROR;
4397	else if (cmdp->udcd_status_reg & STATUS_ATA_CORR)
4398		statp->status = DADKIO_STAT_SOFT_ERROR;
4399	else if (cmdp->udcd_status_reg & STATUS_ATA_ERR) {
4400		/*
4401		 * The error register is valid only when BSY and DRQ not set
4402		 * Assumed that HBA has checked this before it gives the data
4403		 */
4404		if (cmdp->udcd_error_reg & ERR_AMNF)
4405			statp->status = DADKIO_STAT_NOT_FORMATTED;
4406		else if (cmdp->udcd_error_reg & ERR_TKONF)
4407			statp->status = DADKIO_STAT_NOT_FORMATTED;
4408		else if (cmdp->udcd_error_reg & ERR_ABORT)
4409			statp->status = DADKIO_STAT_ILLEGAL_REQUEST;
4410		else if (cmdp->udcd_error_reg & ERR_IDNF)
4411			statp->status = DADKIO_STAT_NOT_FORMATTED;
4412		else if (cmdp->udcd_error_reg & ERR_UNC)
4413			statp->status = DADKIO_STAT_BUS_ERROR;
4414		else if (cmdp->udcd_error_reg & ERR_BBK)
4415			statp->status = DADKIO_STAT_MEDIUM_ERROR;
4416	} else
4417		statp->status = DADKIO_STAT_NO_ERROR;
4418}
4419
4420static void
4421dcd_flush_cache(struct dcd_disk *un)
4422{
4423	struct dcd_pkt *pkt;
4424	int retry_count;
4425
4426
4427	if ((pkt = dcd_init_pkt(ROUTE, NULL, NULL,
4428	    (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
4429	    PKT_CONSISTENT, NULL_FUNC, NULL)) == NULL) {
4430		return;
4431	}
4432
4433	makecommand(pkt, 0, ATA_FLUSH_CACHE, 0, ADD_LBA_MODE, 0,
4434	    NO_DATA_XFER, 0);
4435
4436	/*
4437	 * Send the command. There are chances it might fail on some
4438	 * disks since it is not a mandatory command as per ata-4. Try
4439	 * 3 times if it fails. The retry count has been randomly selected.
4440	 * There is a need for retry since as per the spec FLUSH CACHE can fail
4441	 * as a result of unrecoverable error encountered during execution
4442	 * of writing data and subsequent command should continue flushing
4443	 * cache.
4444	 */
4445	for (retry_count = 0; retry_count < 3; retry_count++) {
4446		/*
4447		 * Set the packet fields.
4448		 */
4449		pkt->pkt_comp = 0;
4450		pkt->pkt_time = DCD_POLL_TIMEOUT;
4451		pkt->pkt_flags |= FLAG_FORCENOINTR;
4452		pkt->pkt_flags |= FLAG_NOINTR;
4453		if (dcd_transport(pkt) == TRAN_ACCEPT) {
4454			if (pkt->pkt_reason  == CMD_CMPLT) {
4455				break;
4456			}
4457		}
4458		/*
4459		 * Note the wait time value of 100ms is same as in the
4460		 * dcd_poll routine.
4461		 */
4462		drv_usecwait(1000000);
4463	}
4464	(void) dcd_destroy_pkt(pkt);
4465}
4466
4467static int
4468dcd_send_lb_rw_cmd(dev_info_t *devi, void *bufaddr,
4469    diskaddr_t start_block, size_t reqlength, uchar_t cmd)
4470{
4471	struct dcd_pkt *pkt;
4472	struct buf *bp;
4473	diskaddr_t real_addr = start_block;
4474	size_t buffer_size = reqlength;
4475	uchar_t command, tmp;
4476	int i, rval = 0;
4477	struct dcd_disk *un;
4478
4479	un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi));
4480	if (un == NULL)
4481		return (ENXIO);
4482
4483	bp = dcd_alloc_consistent_buf(ROUTE, (struct buf *)NULL,
4484	    buffer_size, B_READ, NULL_FUNC, NULL);
4485	if (!bp) {
4486		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
4487		    "no bp for disk label\n");
4488		return (ENOMEM);
4489	}
4490
4491	pkt = dcd_init_pkt(ROUTE, (struct dcd_pkt *)NULL,
4492	    bp, (uint32_t)sizeof (struct dcd_cmd), 2, PP_LEN,
4493	    PKT_CONSISTENT, NULL_FUNC, NULL);
4494
4495	if (!pkt) {
4496		dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
4497		    "no memory for disk label\n");
4498		dcd_free_consistent_buf(bp);
4499		return (ENOMEM);
4500	}
4501
4502	if (cmd == TG_READ) {
4503		bzero(bp->b_un.b_addr, buffer_size);
4504		tmp = DATA_READ;
4505	} else {
4506		bcopy((caddr_t)bufaddr, bp->b_un.b_addr, buffer_size);
4507		tmp = DATA_WRITE;
4508	}
4509
4510	mutex_enter(DCD_MUTEX);
4511	if ((un->un_dp->options & DMA_SUPPORTTED) == DMA_SUPPORTTED) {
4512		if (cmd == TG_READ) {
4513			command = ATA_READ_DMA;
4514		} else {
4515			command = ATA_WRITE_DMA;
4516		}
4517	} else {
4518		if (cmd == TG_READ) {
4519			if (un->un_dp->options & BLOCK_MODE)
4520				command = ATA_READ_MULTIPLE;
4521			else
4522				command = ATA_READ;
4523		} else {
4524			if (un->un_dp->options & BLOCK_MODE)
4525				command = ATA_READ_MULTIPLE;
4526			else
4527				command = ATA_WRITE;
4528		}
4529	}
4530	mutex_exit(DCD_MUTEX);
4531	(void) makecommand(pkt, 0, command, real_addr, ADD_LBA_MODE,
4532	    buffer_size, tmp, 0);
4533
4534	for (i = 0; i < 3; i++) {
4535		if (dcd_poll(pkt) || SCBP_C(pkt) != STATUS_GOOD ||
4536		    (pkt->pkt_state & STATE_XFERRED_DATA) == 0 ||
4537		    (pkt->pkt_resid != 0)) {
4538			DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
4539			    "Status %x, state %x, resid %lx\n",
4540			    SCBP_C(pkt), pkt->pkt_state, pkt->pkt_resid);
4541			rval = EIO;
4542		} else {
4543			break;
4544		}
4545	}
4546
4547	if (rval != 0) {
4548		dcd_destroy_pkt(pkt);
4549		dcd_free_consistent_buf(bp);
4550		return (EIO);
4551	}
4552
4553	if (cmd == TG_READ) {
4554		bcopy(bp->b_un.b_addr, bufaddr, reqlength);
4555		rval = 0;
4556	}
4557
4558	dcd_destroy_pkt(pkt);
4559	dcd_free_consistent_buf(bp);
4560	return (rval);
4561}
4562
4563static int dcd_compute_dk_capacity(struct dcd_device *devp,
4564    diskaddr_t *capacity)
4565{
4566	diskaddr_t cap;
4567	diskaddr_t no_of_lbasec;
4568
4569	cap = devp->dcd_ident->dcd_fixcyls *
4570	    devp->dcd_ident->dcd_heads *
4571	    devp->dcd_ident->dcd_sectors;
4572	no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
4573	no_of_lbasec = no_of_lbasec << 16;
4574	no_of_lbasec = no_of_lbasec | devp->dcd_ident->dcd_addrsec[0];
4575
4576	if (no_of_lbasec > cap) {
4577		cap = no_of_lbasec;
4578	}
4579
4580	if (cap != ((uint32_t)-1))
4581		*capacity = cap;
4582	else
4583		return (EINVAL);
4584	return (0);
4585}
4586
4587/*ARGSUSED5*/
4588static int
4589dcd_lb_rdwr(dev_info_t *devi, uchar_t cmd, void *bufaddr,
4590    diskaddr_t start_block, size_t reqlength, void *tg_cookie)
4591{
4592	if (cmd != TG_READ && cmd != TG_WRITE)
4593		return (EINVAL);
4594
4595	return (dcd_send_lb_rw_cmd(devi, bufaddr, start_block,
4596	    reqlength, cmd));
4597}
4598
4599static int
4600dcd_lb_getphygeom(dev_info_t *devi, cmlb_geom_t *phygeomp)
4601{
4602	struct dcd_device *devp;
4603	uint32_t no_of_lbasec, capacity, calculated_cylinders;
4604
4605	devp = ddi_get_driver_private(devi);
4606
4607	if ((devp->dcd_ident->dcd_config & ATAPI_DEVICE) == 0) {
4608		if (devp->dcd_ident->dcd_config & ATANON_REMOVABLE) {
4609			phygeomp->g_ncyl = devp->dcd_ident->dcd_fixcyls - 2;
4610			phygeomp->g_acyl = 2;
4611			phygeomp->g_nhead = devp->dcd_ident->dcd_heads;
4612			phygeomp->g_nsect = devp->dcd_ident->dcd_sectors;
4613
4614			no_of_lbasec = devp->dcd_ident->dcd_addrsec[1];
4615			no_of_lbasec = no_of_lbasec << 16;
4616			no_of_lbasec = no_of_lbasec |
4617			    devp->dcd_ident->dcd_addrsec[0];
4618			capacity = devp->dcd_ident->dcd_fixcyls *
4619			    devp->dcd_ident->dcd_heads *
4620			    devp->dcd_ident->dcd_sectors;
4621			if (no_of_lbasec > capacity) {
4622				capacity = no_of_lbasec;
4623				if (capacity > NUM_SECTORS_32G) {
4624					/*
4625					 * if the capacity is greater than 32G,
4626					 * then 255 is the sectors per track.
4627					 * This should be good until 128G disk
4628					 * capacity, which is the current ATA-4
4629					 * limitation.
4630					 */
4631					phygeomp->g_nsect = 255;
4632				}
4633
4634				/*
4635				 * If the disk capacity is >= 128GB then no. of
4636				 * addressable sectors will be set to 0xfffffff
4637				 * in the IDENTIFY info. In that case set the
4638				 *  no. of pcyl to the Max. 16bit value.
4639				 */
4640
4641				calculated_cylinders = (capacity) /
4642				    (phygeomp->g_nhead * phygeomp->g_nsect);
4643				if (calculated_cylinders >= USHRT_MAX) {
4644					phygeomp->g_ncyl = USHRT_MAX - 2;
4645				} else {
4646					phygeomp->g_ncyl =
4647					    calculated_cylinders - 2;
4648				}
4649			}
4650
4651			phygeomp->g_capacity = capacity;
4652			phygeomp->g_intrlv = 0;
4653			phygeomp->g_rpm = 5400;
4654			phygeomp->g_secsize = devp->dcd_ident->dcd_secsiz;
4655
4656			return (0);
4657		} else
4658			return (ENOTSUP);
4659	} else {
4660		return (EINVAL);
4661	}
4662}
4663
4664
4665/*ARGSUSED3*/
4666static int
4667dcd_lb_getinfo(dev_info_t *devi, int cmd,  void *arg, void *tg_cookie)
4668{
4669	struct dcd_disk *un;
4670
4671	un = ddi_get_soft_state(dcd_state, ddi_get_instance(devi));
4672
4673	if (un == NULL)
4674		return (ENXIO);
4675
4676	switch (cmd) {
4677	case TG_GETPHYGEOM:
4678		return (dcd_lb_getphygeom(devi, (cmlb_geom_t *)arg));
4679
4680	case TG_GETVIRTGEOM:
4681		return (-1);
4682
4683	case TG_GETCAPACITY:
4684	case TG_GETBLOCKSIZE:
4685		mutex_enter(DCD_MUTEX);
4686		if (un->un_diskcapacity <= 0) {
4687			mutex_exit(DCD_MUTEX);
4688			dcd_log(DCD_DEVINFO, dcd_label, CE_WARN,
4689			    "invalid disk capacity\n");
4690			return (EIO);
4691		}
4692		if (cmd == TG_GETCAPACITY)
4693			*(diskaddr_t *)arg = un->un_diskcapacity;
4694		else
4695			*(uint32_t *)arg = DEV_BSIZE;
4696
4697		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG, "capacity %x\n",
4698		    un->un_diskcapacity);
4699		mutex_exit(DCD_MUTEX);
4700		return (0);
4701
4702	case TG_GETATTR:
4703		mutex_enter(DCD_MUTEX);
4704		*(tg_attribute_t *)arg = un->un_tgattribute;
4705		DAD_DEBUG2(DCD_DEVINFO, dcd_label, DCD_DEBUG,
4706		    "media_is_writable %x\n",
4707		    un->un_tgattribute.media_is_writable);
4708		mutex_exit(DCD_MUTEX);
4709		return (0);
4710	default:
4711		return (ENOTTY);
4712	}
4713}
4714