amr.c revision 7656:2621e50fdf4a
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 1999,2000 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 2002 Eric Moore
 * Copyright (c) 2002 LSI Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The party using or redistributing the source code and binary forms
 *    agrees to the disclaimer below and the terms and conditions set forth
 *    herein.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/int_types.h>
#include <sys/scsi/scsi.h>
#include <sys/dkbad.h>
#include <sys/dklabel.h>
#include <sys/dkio.h>
#include <sys/cdio.h>
#include <sys/mhd.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/scsi/targets/sddef.h>
#include <sys/debug.h>
#include <sys/pci.h>
#include <sys/ksynch.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/byteorder.h>

#include "amrreg.h"
#include "amrvar.h"

/* dynamic debug symbol */
int	amr_debug_var = 0;

#define	AMR_DELAY(cond, count, done_flag) { \
		int local_counter = 0; \
		done_flag = 1; \
		while (!(cond)) { \
			delay(drv_usectohz(100)); \
			if ((local_counter) > count) { \
				done_flag = 0; \
				break; \
			} \
			(local_counter)++; \
		} \
	}

#define	AMR_BUSYWAIT(cond, count, done_flag) { \
		int local_counter = 0; \
		done_flag = 1; \
		while (!(cond)) { \
			drv_usecwait(100); \
			if ((local_counter) > count) { \
				done_flag = 0; \
				break; \
			} \
			(local_counter)++; \
		} \
	}
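
/*
 * Both macros poll "cond" every 100 microseconds for at most "count"
 * iterations and report the outcome through "done_flag".  AMR_DELAY()
 * sleeps between probes via delay(), so it may only be used where
 * blocking is legal; AMR_BUSYWAIT() spins with drv_usecwait() and is
 * therefore also usable at interrupt time and while the system panics.
 * A typical call (a sketch; "done" is a local int):
 *
 *	AMR_DELAY((softs->mailbox->mb_busy == 0), 1000, done);
 *	if (!done)
 *		... the mailbox stayed busy for ~100ms; give up ...
 */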

/*
 * driver interfaces
 */
char _depends_on[] = "misc/scsi";

static uint_t amr_intr(caddr_t arg);
static void amr_done(struct amr_softs *softs);

static int amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
			void *arg, void **result);
static int amr_attach(dev_info_t *, ddi_attach_cmd_t);
static int amr_detach(dev_info_t *, ddi_detach_cmd_t);

static int amr_setup_mbox(struct amr_softs *softs);
static int amr_setup_sg(struct amr_softs *softs);

/*
 * Command wrappers
 */
static int amr_query_controller(struct amr_softs *softs);
static void *amr_enquiry(struct amr_softs *softs, size_t bufsize,
			uint8_t cmd, uint8_t cmdsub, uint8_t cmdqual);
static int amr_flush(struct amr_softs *softs);

/*
 * Command processing.
 */
static void amr_rw_command(struct amr_softs *softs,
			struct scsi_pkt *pkt, int lun);
static void amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp,
			unsigned int capacity);
static void amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key);
static int amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size);
static void amr_enquiry_unmapcmd(struct amr_command *ac);
static int amr_mapcmd(struct amr_command *ac, int (*callback)(), caddr_t arg);
static void amr_unmapcmd(struct amr_command *ac);

/*
 * Status monitoring
 */
static void amr_periodic(void *data);

/*
 * Interface-specific shims
 */
static int amr_poll_command(struct amr_command *ac);
static void amr_start_waiting_queue(void *softp);
static void amr_call_pkt_comp(struct amr_command *head);

/*
 * SCSI interface
 */
static int amr_setup_tran(dev_info_t *dip, struct amr_softs *softp);

/*
 * Function prototypes
 *
 * SCSA functions exported by means of the transport table
 */
static int amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd);
static int amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int amr_tran_reset(struct scsi_address *ap, int level);
static int amr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
static int amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
    int whom);
static struct scsi_pkt *amr_tran_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
    int tgtlen, int flags, int (*callback)(), caddr_t arg);
static void amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static void amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);

static ddi_dma_attr_t buffer_dma_attr = {
		DMA_ATTR_V0,	/* version of this structure */
		0,		/* lowest usable address */
		0xffffffffull,	/* highest usable address */
		0x00ffffffull,	/* maximum DMAable byte count */
		4,		/* alignment */
		1,		/* burst sizes */
		1,		/* minimum transfer */
		0xffffffffull,	/* maximum transfer */
		0xffffffffull,	/* maximum segment length */
		AMR_NSEG,	/* maximum number of segments */
		AMR_BLKSIZE,	/* granularity */
		0,		/* flags (reserved) */
};

static ddi_dma_attr_t addr_dma_attr = {
		DMA_ATTR_V0,	/* version of this structure */
		0,		/* lowest usable address */
		0xffffffffull,	/* highest usable address */
		0x7fffffff,	/* maximum DMAable byte count */
		4,		/* alignment */
		1,		/* burst sizes */
		1,		/* minimum transfer */
		0xffffffffull,	/* maximum transfer */
		0xffffffffull,	/* maximum segment length */
		1,		/* maximum number of segments */
		1,		/* granularity */
		0,		/* flags (reserved) */
};
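
/*
 * Two attribute sets are kept deliberately: buffer_dma_attr describes
 * user I/O buffers and allows up to AMR_NSEG scatter/gather segments at
 * AMR_BLKSIZE granularity, while addr_dma_attr is used for the driver's
 * own control structures (mailbox, s/g tables, enquiry buffers), which
 * the controller must see as one physically contiguous region, hence the
 * single-segment limit.
 */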


static struct dev_ops   amr_ops = {
	DEVO_REV,	/* devo_rev, */
	0,		/* refcnt  */
	amr_info,	/* info */
	nulldev,	/* identify */
	nulldev,	/* probe */
	amr_attach,	/* attach */
	amr_detach,	/* detach */
	nodev,		/* reset */
	NULL,		/* driver operations */
	(struct bus_ops *)0,	/* bus operations */
	0,		/* power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};


extern struct mod_ops mod_driverops;
static struct modldrv modldrv = {
	&mod_driverops,		/* Type of module. driver here */
	"AMR Driver",		/* Name of the module. */
	&amr_ops,		/* Driver ops vector */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

/* DMA access attributes */
static ddi_device_acc_attr_t accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STRICTORDER_ACC
};

static struct amr_softs  *amr_softstatep;

int
_init(void)
{
	int		error;

	error = ddi_soft_state_init((void *)&amr_softstatep,
	    sizeof (struct amr_softs), 0);

	if (error != 0)
		goto error_out;

	if ((error = scsi_hba_init(&modlinkage)) != 0) {
		ddi_soft_state_fini((void *)&amr_softstatep);
		goto error_out;
	}

	error = mod_install(&modlinkage);
	if (error != 0) {
		scsi_hba_fini(&modlinkage);
		ddi_soft_state_fini((void *)&amr_softstatep);
		goto error_out;
	}

	return (error);

error_out:
	cmn_err(CE_NOTE, "_init failed");
	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	int	error;

	if ((error = mod_remove(&modlinkage)) != 0) {
		return (error);
	}

	scsi_hba_fini(&modlinkage);

	ddi_soft_state_fini((void *)&amr_softstatep);
	return (error);
}


static int
amr_attach(dev_info_t *dev, ddi_attach_cmd_t cmd)
{
	struct amr_softs	*softs;
	int			error;
	uint32_t		command, i;
	int			instance;
	caddr_t			cfgaddr;

	instance = ddi_get_instance(dev);

	switch (cmd) {
		case DDI_ATTACH:
			break;

		case DDI_RESUME:
			return (DDI_FAILURE);

		default:
			return (DDI_FAILURE);
	}

	/*
	 * Initialize softs.
	 */
	if (ddi_soft_state_zalloc(amr_softstatep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	softs = ddi_get_soft_state(amr_softstatep, instance);
	softs->state |= AMR_STATE_SOFT_STATE_SETUP;

	softs->dev_info_p = dev;

	AMRDB_PRINT((CE_NOTE, "softs: %p; busy_slot addr: %p",
	    (void *)softs, (void *)&(softs->amr_busyslots)));

	if (pci_config_setup(dev, &(softs->pciconfig_handle))
	    != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_PCI_CONFIG_SETUP;

	error = ddi_regs_map_setup(dev, 1, &cfgaddr, 0, 0,
	    &accattr, &(softs->regsmap_handle));
	if (error != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_PCI_MEM_MAPPED;

	/*
	 * Read the PCI command register.
	 */
	command = pci_config_get16(softs->pciconfig_handle, PCI_CONF_COMM);

	/*
	 * Make sure we are going to be able to talk to this board.
	 */
	if ((command & PCI_COMM_MAE) == 0) {
		AMRDB_PRINT((CE_NOTE, "memory window not available"));
		goto error_out;
	}

	/* force the busmaster enable bit on */
	if (!(command & PCI_COMM_ME)) {
		command |= PCI_COMM_ME;
		pci_config_put16(softs->pciconfig_handle,
		    PCI_CONF_COMM, command);
		command = pci_config_get16(softs->pciconfig_handle,
		    PCI_CONF_COMM);
		if (!(command & PCI_COMM_ME))
			goto error_out;
	}

	/*
	 * Allocate and connect our interrupt.
	 */
	if (ddi_intr_hilevel(dev, 0) != 0) {
		AMRDB_PRINT((CE_NOTE,
		    "High level interrupt is not supported!"));
		goto error_out;
	}

	if (ddi_get_iblock_cookie(dev, 0, &softs->iblock_cookiep)
	    != DDI_SUCCESS) {
		goto error_out;
	}

	mutex_init(&softs->cmd_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* acquired in interrupt context */
	mutex_init(&softs->queue_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* acquired in interrupt context */
	mutex_init(&softs->periodic_mutex, NULL, MUTEX_DRIVER,
	    softs->iblock_cookiep); /* acquired in interrupt context */
	/* synchronize waits for the busy slots via this cv */
	cv_init(&softs->cmd_cv, NULL, CV_DRIVER, NULL);
	softs->state |= AMR_STATE_KMUTEX_INITED;

	/*
	 * Do bus-independent initialisation, bring controller online.
	 */
	if (amr_setup_mbox(softs) != DDI_SUCCESS)
		goto error_out;
	softs->state |= AMR_STATE_MAILBOX_SETUP;

	if (amr_setup_sg(softs) != DDI_SUCCESS)
		goto error_out;

	softs->state |= AMR_STATE_SG_TABLES_SETUP;

	if (amr_query_controller(softs) != DDI_SUCCESS)
		goto error_out;

	/*
	 * A taskq is created for dispatching the waiting queue processing
	 * threads. The number of threads equals the number of logical
	 * drives, or 1 if no logical drive is configured for this
	 * instance.
	 */
	if ((softs->amr_taskq = ddi_taskq_create(dev, "amr_taskq",
	    MAX(softs->amr_nlogdrives, 1), TASKQ_DEFAULTPRI, 0)) == NULL) {
		goto error_out;
	}
	softs->state |= AMR_STATE_TASKQ_SETUP;

	if (ddi_add_intr(dev, 0, &softs->iblock_cookiep, NULL,
	    amr_intr, (caddr_t)softs) != DDI_SUCCESS) {
		goto error_out;
	}
	softs->state |= AMR_STATE_INTR_SETUP;

	/* set up the tran interface */
	if (amr_setup_tran(softs->dev_info_p, softs) != DDI_SUCCESS) {
		AMRDB_PRINT((CE_NOTE, "setup tran failed"));
		goto error_out;
	}
	softs->state |= AMR_STATE_TRAN_SETUP;

	/* schedule a thread for periodic check */
	mutex_enter(&softs->periodic_mutex);
	softs->timeout_t = timeout(amr_periodic, (void *)softs,
	    drv_usectohz(500000*AMR_PERIODIC_TIMEOUT));
	softs->state |= AMR_STATE_TIMEOUT_ENABLED;
	mutex_exit(&softs->periodic_mutex);

	/* print firmware information in verbose mode */
	cmn_err(CE_CONT, "?MegaRaid %s %s attached.",
	    softs->amr_product_info.pi_product_name,
	    softs->amr_product_info.pi_firmware_ver);

	/* clear any interrupts */
	AMR_QCLEAR_INTR(softs);
	return (DDI_SUCCESS);

error_out:
	if (softs->state & AMR_STATE_INTR_SETUP) {
		ddi_remove_intr(dev, 0, softs->iblock_cookiep);
	}
	if (softs->state & AMR_STATE_TASKQ_SETUP) {
		ddi_taskq_destroy(softs->amr_taskq);
	}
	if (softs->state & AMR_STATE_SG_TABLES_SETUP) {
		for (i = 0; i < softs->sg_max_count; i++) {
			(void) ddi_dma_unbind_handle(
			    softs->sg_items[i].sg_handle);
			(void) ddi_dma_mem_free(
			    &((softs->sg_items[i]).sg_acc_handle));
			(void) ddi_dma_free_handle(
			    &(softs->sg_items[i].sg_handle));
		}
	}
	if (softs->state & AMR_STATE_MAILBOX_SETUP) {
		(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
		(void) ddi_dma_mem_free(&softs->mbox_acc_handle);
		(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
	}
	if (softs->state & AMR_STATE_KMUTEX_INITED) {
		mutex_destroy(&softs->queue_mutex);
		mutex_destroy(&softs->cmd_mutex);
		mutex_destroy(&softs->periodic_mutex);
		cv_destroy(&softs->cmd_cv);
	}
	if (softs->state & AMR_STATE_PCI_MEM_MAPPED)
		ddi_regs_map_free(&softs->regsmap_handle);
	if (softs->state & AMR_STATE_PCI_CONFIG_SETUP)
		pci_config_teardown(&softs->pciconfig_handle);
	if (softs->state & AMR_STATE_SOFT_STATE_SETUP)
		ddi_soft_state_free(amr_softstatep, instance);
	return (DDI_FAILURE);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 * This function is called during detach and system shutdown.
 *
 * Note that we can assume that the bufq on the controller is empty, as we won't
 * allow shutdown if any device is open.
 */
/*ARGSUSED*/
static int
amr_detach(dev_info_t *dev, ddi_detach_cmd_t cmd)
{
	struct amr_softs	*softs;
	int			instance;
	uint32_t		i, done_flag;

	instance = ddi_get_instance(dev);
	softs = ddi_get_soft_state(amr_softstatep, instance);

	/* flush the controller */
	if (amr_flush(softs) != 0) {
		AMRDB_PRINT((CE_NOTE, "device shutdown failed"));
		return (EIO);
	}

	/* release the amr timer */
	mutex_enter(&softs->periodic_mutex);
	softs->state &= ~AMR_STATE_TIMEOUT_ENABLED;
	if (softs->timeout_t) {
		(void) untimeout(softs->timeout_t);
		softs->timeout_t = 0;
	}
	mutex_exit(&softs->periodic_mutex);

	for (i = 0; i < softs->sg_max_count; i++) {
		(void) ddi_dma_unbind_handle(
		    softs->sg_items[i].sg_handle);
		(void) ddi_dma_mem_free(
		    &((softs->sg_items[i]).sg_acc_handle));
		(void) ddi_dma_free_handle(
		    &(softs->sg_items[i].sg_handle));
	}

	(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
	(void) ddi_dma_mem_free(&softs->mbox_acc_handle);
	(void) ddi_dma_free_handle(&softs->mbox_dma_handle);

	/* disconnect the interrupt handler */
	ddi_remove_intr(softs->dev_info_p, 0, softs->iblock_cookiep);

	/* wait for any in-progress interrupts to complete */
	AMR_DELAY((softs->amr_interrupts_counter == 0), 1000, done_flag);
	if (!done_flag) {
		cmn_err(CE_WARN, "Suspicious interrupts in-progress.");
	}

	ddi_taskq_destroy(softs->amr_taskq);

	(void) scsi_hba_detach(dev);
	scsi_hba_tran_free(softs->hba_tran);
	ddi_regs_map_free(&softs->regsmap_handle);
	pci_config_teardown(&softs->pciconfig_handle);

	mutex_destroy(&softs->queue_mutex);
	mutex_destroy(&softs->cmd_mutex);
	mutex_destroy(&softs->periodic_mutex);
	cv_destroy(&softs->cmd_cv);

	/* print firmware information in verbose mode */
	cmn_err(CE_NOTE, "?MegaRaid %s %s detached.",
	    softs->amr_product_info.pi_product_name,
	    softs->amr_product_info.pi_firmware_ver);

	ddi_soft_state_free(amr_softstatep, instance);

	return (DDI_SUCCESS);
}


/*ARGSUSED*/
static int
amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	struct amr_softs	*softs;
	int			instance;

	instance = ddi_get_instance(dip);

	switch (infocmd) {
		case DDI_INFO_DEVT2DEVINFO:
			softs = ddi_get_soft_state(amr_softstatep, instance);
			if (softs != NULL) {
				*result = softs->dev_info_p;
				return (DDI_SUCCESS);
			} else {
				*result = NULL;
				return (DDI_FAILURE);
			}
		case DDI_INFO_DEVT2INSTANCE:
			*(int *)result = instance;
			break;
		default:
			break;
	}
	return (DDI_SUCCESS);
}

/*
 * Take an interrupt, or be poked by other code to look for interrupt-worthy
 * status.
 */
static uint_t
amr_intr(caddr_t arg)
{
	struct amr_softs *softs = (struct amr_softs *)arg;

	softs->amr_interrupts_counter++;

	if (AMR_QGET_ODB(softs) != AMR_QODB_READY) {
		softs->amr_interrupts_counter--;
		return (DDI_INTR_UNCLAIMED);
	}

	/* collect finished commands, queue anything waiting */
	amr_done(softs);

	softs->amr_interrupts_counter--;

	return (DDI_INTR_CLAIMED);
}

/*
 * Set up the amr mailbox
 */
static int
amr_setup_mbox(struct amr_softs *softs)
{
	uint32_t	move;
	size_t		mbox_len;

	if (ddi_dma_alloc_handle(
	    softs->dev_info_p,
	    &addr_dma_attr,
	    DDI_DMA_SLEEP,
	    NULL,
	    &softs->mbox_dma_handle) != DDI_SUCCESS) {
		AMRDB_PRINT((CE_NOTE, "Cannot alloc dma handle for mailbox"));
		goto error_out;
	}

	if (ddi_dma_mem_alloc(
	    softs->mbox_dma_handle,
	    sizeof (struct amr_mailbox) + 16,
	    &accattr,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    (caddr_t *)(&softs->mbox),
	    &mbox_len,
	    &softs->mbox_acc_handle) !=
	    DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN, "Cannot alloc dma memory for mailbox"));
		goto error_out;
	}

	if (ddi_dma_addr_bind_handle(
	    softs->mbox_dma_handle,
	    NULL,
	    (caddr_t)softs->mbox,
	    mbox_len,
	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_SLEEP,
	    NULL,
	    &softs->mbox_dma_cookie,
	    &softs->mbox_dma_cookien) != DDI_DMA_MAPPED) {

		AMRDB_PRINT((CE_NOTE, "Cannot bind dma memory for mailbox"));
		goto error_out;
	}

	if (softs->mbox_dma_cookien != 1)
		goto error_out;

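	/*
	 * The mailbox was over-allocated by 16 bytes above, so there is
	 * always room to slide the structure forward to the next 16-byte
	 * boundary.  A worked example: a cookie address ending in 0x..4
	 * gives move = 16 - 4 = 12; when the address is already aligned,
	 * move is 16, which still fits within the padded allocation.
	 */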
	/* the physical address of the mailbox must be 16-byte aligned */
	move = 16 - ((uint32_t)softs->mbox_dma_cookie.dmac_address & 0xf);
	softs->mbox_phyaddr =
	    (softs->mbox_dma_cookie.dmac_address + move);

	softs->mailbox =
	    (struct amr_mailbox *)(((uintptr_t)softs->mbox) + move);

	AMRDB_PRINT((CE_NOTE, "phyaddr=%x, mailbox=%p, softs->mbox=%p, move=%x",
	    softs->mbox_phyaddr, (void *)softs->mailbox,
	    softs->mbox, move));

	return (DDI_SUCCESS);

error_out:
	if (softs->mbox_dma_cookien)
		(void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
	if (softs->mbox_acc_handle) {
		(void) ddi_dma_mem_free(&(softs->mbox_acc_handle));
		softs->mbox_acc_handle = NULL;
	}
	if (softs->mbox_dma_handle) {
		(void) ddi_dma_free_handle(&softs->mbox_dma_handle);
		softs->mbox_dma_handle = NULL;
	}

	return (DDI_FAILURE);
}

/*
 * Perform a periodic check of the controller status
 */
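/*
 * The sweep below implements pkt_time-based command timeouts: every busy
 * slot's ac_timestamp is compared against ddi_get_time(), and a command
 * whose pkt_time has elapsed is pulled off its slot and completed with
 * CMD_TIMEOUT.  Note the double check of busycmd[i]: the first, lock-free
 * test merely skips empty slots cheaply; the slot is tested again under
 * cmd_mutex before it is trusted.
 */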
static void
amr_periodic(void *data)
{
	uint32_t		i;
	struct amr_softs	*softs = (struct amr_softs *)data;
	struct scsi_pkt		*pkt;
	struct amr_command	*ac;

	for (i = 0; i < softs->sg_max_count; i++) {
		if (softs->busycmd[i] == NULL)
			continue;

		mutex_enter(&softs->cmd_mutex);

		if (softs->busycmd[i] == NULL) {
			mutex_exit(&softs->cmd_mutex);
			continue;
		}

		pkt = softs->busycmd[i]->pkt;

		if ((pkt->pkt_time != 0) &&
		    (ddi_get_time() -
		    softs->busycmd[i]->ac_timestamp >
		    pkt->pkt_time)) {

			cmn_err(CE_WARN,
			    "!timed out packet detected, "
			    "sc = %p, pkt = %p, index = %d, ac = %p",
			    (void *)softs,
			    (void *)pkt,
			    i,
			    (void *)softs->busycmd[i]);

			ac = softs->busycmd[i];
			ac->ac_next = NULL;

			/* pull command from the busy index */
			softs->busycmd[i] = NULL;
			if (softs->amr_busyslots > 0)
				softs->amr_busyslots--;
			if (softs->amr_busyslots == 0)
				cv_broadcast(&softs->cmd_cv);

			mutex_exit(&softs->cmd_mutex);

			pkt = ac->pkt;
			*pkt->pkt_scbp = 0;
			pkt->pkt_statistics |= STAT_TIMEOUT;
			pkt->pkt_reason = CMD_TIMEOUT;
			if (!(pkt->pkt_flags &
			    FLAG_NOINTR) && pkt->pkt_comp) {
				/* call pkt callback */
				(*pkt->pkt_comp)(pkt);
			}

		} else {
			mutex_exit(&softs->cmd_mutex);
		}
	}

	/* restart the amr timer */
	mutex_enter(&softs->periodic_mutex);
	if (softs->state & AMR_STATE_TIMEOUT_ENABLED)
		softs->timeout_t = timeout(amr_periodic, (void *)softs,
		    drv_usectohz(500000*AMR_PERIODIC_TIMEOUT));
	mutex_exit(&softs->periodic_mutex);
}

/*
 * Interrogate the controller for the operational parameters we require.
 */
static int
amr_query_controller(struct amr_softs *softs)
{
	struct amr_enquiry3	*aex;
	struct amr_prodinfo	*ap;
	struct amr_enquiry	*ae;
	uint32_t		ldrv;
	int			instance;

	/*
	 * If we haven't found the real limit yet, let us have a couple of
	 * commands in order to be able to probe.
	 */
	if (softs->maxio == 0)
		softs->maxio = 2;

	instance = ddi_get_instance(softs->dev_info_p);

	/*
	 * Try to issue an ENQUIRY3 command
	 */
	if ((aex = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE, AMR_CMD_CONFIG,
	    AMR_CONFIG_ENQ3, AMR_CONFIG_ENQ3_SOLICITED_FULL)) != NULL) {

		AMRDB_PRINT((CE_NOTE, "First enquiry"));

		for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
			softs->logic_drive[ldrv].al_size =
			    aex->ae_drivesize[ldrv];
			softs->logic_drive[ldrv].al_state =
			    aex->ae_drivestate[ldrv];
			softs->logic_drive[ldrv].al_properties =
			    aex->ae_driveprop[ldrv];
			AMRDB_PRINT((CE_NOTE,
			    "  drive %d: size: %d state %x properties %x\n",
			    ldrv,
			    softs->logic_drive[ldrv].al_size,
			    softs->logic_drive[ldrv].al_state,
			    softs->logic_drive[ldrv].al_properties));

			if (softs->logic_drive[ldrv].al_state ==
			    AMR_LDRV_OFFLINE)
				cmn_err(CE_NOTE,
				    "!instance %d log-drive %d is offline",
				    instance, ldrv);
			else
				softs->amr_nlogdrives++;
		}
		kmem_free(aex, AMR_ENQ_BUFFER_SIZE);

		if ((ap = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE,
		    AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0)) == NULL) {
			AMRDB_PRINT((CE_NOTE,
			    "Cannot obtain product data from controller"));
			return (EIO);
		}

		softs->maxdrives = AMR_40LD_MAXDRIVES;
		softs->maxchan = ap->ap_nschan;
		softs->maxio = ap->ap_maxio;

		bcopy(ap->ap_firmware, softs->amr_product_info.pi_firmware_ver,
		    AMR_FIRMWARE_VER_SIZE);
		softs->amr_product_info.pi_firmware_ver[AMR_FIRMWARE_VER_SIZE]
		    = 0;

		bcopy(ap->ap_product, softs->amr_product_info.pi_product_name,
		    AMR_PRODUCT_INFO_SIZE);
		softs->amr_product_info.pi_product_name[AMR_PRODUCT_INFO_SIZE]
		    = 0;

		kmem_free(ap, AMR_ENQ_BUFFER_SIZE);
		AMRDB_PRINT((CE_NOTE, "maxio=%d", softs->maxio));
	} else {

		AMRDB_PRINT((CE_NOTE,
		    "First enquiry failed, so try another way"));

		/* failed, try the 8LD ENQUIRY commands */
		if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
		    AMR_ENQ_BUFFER_SIZE, AMR_CMD_EXT_ENQUIRY2, 0, 0))
		    == NULL) {

			if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
			    AMR_ENQ_BUFFER_SIZE, AMR_CMD_ENQUIRY, 0, 0))
			    == NULL) {
				AMRDB_PRINT((CE_NOTE,
				    "Cannot obtain configuration data"));
				return (EIO);
			}
			ae->ae_signature = 0;
		}

		/*
		 * Fetch current state of logical drives.
		 */
		for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
			softs->logic_drive[ldrv].al_size =
			    ae->ae_ldrv.al_size[ldrv];
			softs->logic_drive[ldrv].al_state =
			    ae->ae_ldrv.al_state[ldrv];
			softs->logic_drive[ldrv].al_properties =
			    ae->ae_ldrv.al_properties[ldrv];
			AMRDB_PRINT((CE_NOTE,
			    "  drive %d: size %d state %x properties %x",
			    ldrv,
			    softs->logic_drive[ldrv].al_size,
			    softs->logic_drive[ldrv].al_state,
			    softs->logic_drive[ldrv].al_properties));

			if (softs->logic_drive[ldrv].al_state ==
			    AMR_LDRV_OFFLINE)
				cmn_err(CE_NOTE,
				    "!instance %d log-drive %d is offline",
				    instance, ldrv);
			else
				softs->amr_nlogdrives++;
		}

		softs->maxdrives = AMR_8LD_MAXDRIVES;
		softs->maxchan = ae->ae_adapter.aa_channels;
		softs->maxio = ae->ae_adapter.aa_maxio;
		kmem_free(ae, AMR_ENQ_BUFFER_SIZE);
	}

	/*
	 * Mark remaining drives as unused.
	 */
	for (; ldrv < AMR_MAXLD; ldrv++)
		softs->logic_drive[ldrv].al_state = AMR_LDRV_OFFLINE;

	/*
	 * Cap the maximum number of outstanding I/Os.  AMI's driver
	 * doesn't trust the controller's reported value, and lockups have
	 * been seen when we do.
	 */
	softs->maxio = MIN(softs->maxio, AMR_LIMITCMD);

	return (DDI_SUCCESS);
}

/*
 * Run a generic enquiry-style command.
 */
static void *
amr_enquiry(struct amr_softs *softs, size_t bufsize, uint8_t cmd,
				uint8_t cmdsub, uint8_t cmdqual)
{
	struct amr_command	ac;
	void			*result;

	result = NULL;

	bzero(&ac, sizeof (struct amr_command));
	ac.ac_softs = softs;

	/* set command flags */
	ac.ac_flags |= AMR_CMD_DATAOUT;

	/* build the command proper */
	ac.mailbox.mb_command	= cmd;
	ac.mailbox.mb_cmdsub	= cmdsub;
	ac.mailbox.mb_cmdqual	= cmdqual;

	if (amr_enquiry_mapcmd(&ac, bufsize) != DDI_SUCCESS)
		return (NULL);

	if (amr_poll_command(&ac) || ac.ac_status != 0) {
		AMRDB_PRINT((CE_NOTE, "cannot poll the enquiry command"));
		amr_enquiry_unmapcmd(&ac);
		return (NULL);
	}

	/* allocate the response structure */
	result = kmem_zalloc(bufsize, KM_SLEEP);

	bcopy(ac.ac_data, result, bufsize);

	amr_enquiry_unmapcmd(&ac);
	return (result);
}

/*
 * Flush the controller's internal cache, return status.
 */
static int
amr_flush(struct amr_softs *softs)
{
	struct amr_command	ac;
	int			error = 0;

	bzero(&ac, sizeof (struct amr_command));
	ac.ac_softs = softs;

	ac.ac_flags |= AMR_CMD_DATAOUT;

	/* build the command proper */
	ac.mailbox.mb_command = AMR_CMD_FLUSH;

	/*
	 * We have to poll; the system may be going down or otherwise
	 * damaged.
	 */
	if ((error = amr_poll_command(&ac)) != 0) {
		AMRDB_PRINT((CE_NOTE, "cannot poll the flush command"));
		return (error);
	}

	return (error);
}

/*
 * Take a command, submit it to the controller and wait for it to return.
 * Returns nonzero on error.  Can be safely called with interrupts enabled.
 */
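/*
 * A sketch of the polled mailbox handshake implemented below (QIDB is the
 * inbound doorbell register):
 *
 *   1. copy the command into the shared mailbox and set mb_busy;
 *   2. ring AMR_QIDB_SUBMIT and wait for mb_nstatus to change;
 *   3. latch mb_status, then wait for mb_poll to become AMR_POLL_ACK;
 *   4. write AMR_POLL_ACK back, ring AMR_QIDB_ACK and wait for the
 *      controller to clear the ACK bit.
 *
 * Each wait is bounded by AMR_DELAY(..., 1000, ...), i.e. roughly 100ms.
 */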
static int
amr_poll_command(struct amr_command *ac)
{
	struct amr_softs	*softs = ac->ac_softs;
	volatile uint32_t	done_flag;

	AMRDB_PRINT((CE_NOTE, "Amr_Poll bcopy(%p, %p, %d)",
	    (void *)&ac->mailbox,
	    (void *)softs->mailbox,
	    (uint32_t)AMR_MBOX_CMDSIZE));

	mutex_enter(&softs->cmd_mutex);

	while (softs->amr_busyslots != 0)
		cv_wait(&softs->cmd_cv, &softs->cmd_mutex);

	/*
	 * For read/write commands the scatter/gather table must be
	 * filled in; the last scatter/gather table entry is reserved
	 * for polled commands.
	 */
	if ((ac->mailbox.mb_command == AMR_CMD_LREAD) ||
	    (ac->mailbox.mb_command == AMR_CMD_LWRITE)) {
		bcopy(ac->sgtable,
		    softs->sg_items[softs->sg_max_count - 1].sg_table,
		    sizeof (struct amr_sgentry) * AMR_NSEG);

		(void) ddi_dma_sync(
		    softs->sg_items[softs->sg_max_count - 1].sg_handle,
		    0, 0, DDI_DMA_SYNC_FORDEV);

		ac->mailbox.mb_physaddr =
		    softs->sg_items[softs->sg_max_count - 1].sg_phyaddr;
	}

	bcopy(&ac->mailbox, (void *)softs->mailbox, AMR_MBOX_CMDSIZE);

	/* sync the dma memory */
	(void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);

	/* clear the poll/ack fields in the mailbox */
	softs->mailbox->mb_ident = AMR_POLL_COMMAND_ID;
	softs->mailbox->mb_nstatus = AMR_POLL_DEFAULT_NSTATUS;
	softs->mailbox->mb_status = AMR_POLL_DEFAULT_STATUS;
	softs->mailbox->mb_poll = 0;
	softs->mailbox->mb_ack = 0;
	softs->mailbox->mb_busy = 1;

	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_SUBMIT);

	/* sync the dma memory */
	(void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);

	AMR_DELAY((softs->mailbox->mb_nstatus != AMR_POLL_DEFAULT_NSTATUS),
	    1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	ac->ac_status = softs->mailbox->mb_status;

	AMR_DELAY((softs->mailbox->mb_poll == AMR_POLL_ACK), 1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	softs->mailbox->mb_poll = 0;
	softs->mailbox->mb_ack = AMR_POLL_ACK;

	/* acknowledge that we have collected the status */
	AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);

	AMR_DELAY(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK), 1000, done_flag);
	if (!done_flag) {
		mutex_exit(&softs->cmd_mutex);
		return (1);
	}

	mutex_exit(&softs->cmd_mutex);
	return (ac->ac_status != AMR_STATUS_SUCCESS);
}

/*
 * Set up the scatter/gather table
 */
static int
amr_setup_sg(struct amr_softs *softs)
{
	uint32_t		i;
	size_t			len;
	ddi_dma_cookie_t	cookie;
	uint_t			cookien;

	softs->sg_max_count = 0;

	for (i = 0; i < AMR_MAXCMD; i++) {

		/* reset the cookie count */
		cookien = 0;

		(softs->sg_items[i]).sg_handle = NULL;
		if (ddi_dma_alloc_handle(
		    softs->dev_info_p,
		    &addr_dma_attr,
		    DDI_DMA_SLEEP,
		    NULL,
		    &((softs->sg_items[i]).sg_handle)) != DDI_SUCCESS) {

			AMRDB_PRINT((CE_WARN,
			    "Cannot alloc dma handle for s/g table"));
			goto error_out;
		}

		if (ddi_dma_mem_alloc((softs->sg_items[i]).sg_handle,
		    sizeof (struct amr_sgentry) * AMR_NSEG,
		    &accattr,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP, NULL,
		    (caddr_t *)(&(softs->sg_items[i]).sg_table),
		    &len,
		    &(softs->sg_items[i]).sg_acc_handle)
		    != DDI_SUCCESS) {

			AMRDB_PRINT((CE_WARN,
			    "Cannot allocate DMA memory"));
			goto error_out;
		}

		if (ddi_dma_addr_bind_handle(
		    (softs->sg_items[i]).sg_handle,
		    NULL,
		    (caddr_t)((softs->sg_items[i]).sg_table),
		    len,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP,
		    NULL,
		    &cookie,
		    &cookien) != DDI_DMA_MAPPED) {

			AMRDB_PRINT((CE_WARN,
			    "Cannot bind communication area for s/g table"));
			goto error_out;
		}

		if (cookien != 1)
			goto error_out;

		softs->sg_items[i].sg_phyaddr = cookie.dmac_address;
		softs->sg_max_count++;
	}

	return (DDI_SUCCESS);

error_out:
	/*
	 * Couldn't allocate/initialize all of the sg table entries.
	 * Clean up the partially-initialized entry before returning.
	 */
	if (cookien) {
		(void) ddi_dma_unbind_handle((softs->sg_items[i]).sg_handle);
	}
	if ((softs->sg_items[i]).sg_acc_handle) {
		(void) ddi_dma_mem_free(&((softs->sg_items[i]).sg_acc_handle));
		(softs->sg_items[i]).sg_acc_handle = NULL;
	}
	if ((softs->sg_items[i]).sg_handle) {
		(void) ddi_dma_free_handle(&((softs->sg_items[i]).sg_handle));
		(softs->sg_items[i]).sg_handle = NULL;
	}

	/*
	 * At least two sg table entries are needed. One is for regular data
	 * I/O commands, the other is for poll I/O commands.
	 */
	return (softs->sg_max_count > 1 ? DDI_SUCCESS : DDI_FAILURE);
}

/*
 * Map/unmap (ac)'s data in the controller's addressable space as required.
 *
 * These functions may be safely called multiple times on a given command.
 */
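/*
 * DDI DMA background for the code below: a bind may describe a transfer
 * as several "windows" (when DDI_DMA_PARTIAL is set), each of which is a
 * run of "cookies", i.e. physically contiguous chunks.  amr_setup_dmamap()
 * consumes up to AMR_NSEG cookies of the current window per call, while
 * amr_mapcmd() advances through the windows with ddi_dma_getwin() and uses
 * AMR_LAST_COOKIE_TAG to mark that the current window has been consumed.
 */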
static void
amr_setup_dmamap(struct amr_command *ac, ddi_dma_cookie_t *buffer_dma_cookiep,
		int nsegments)
{
	struct amr_sgentry	*sg;
	uint32_t		i, size;

	sg = ac->sgtable;

	size = 0;

	ac->mailbox.mb_nsgelem = (uint8_t)nsegments;
	for (i = 0; i < nsegments; i++, sg++) {
		sg->sg_addr = buffer_dma_cookiep->dmac_address;
		sg->sg_count = buffer_dma_cookiep->dmac_size;
		size += sg->sg_count;

		/*
		 * Fetch the next cookie unless this was the last cookie
		 * of the current window.
		 */
		if ((ac->current_cookie + i + 1) != ac->num_of_cookie)
			ddi_dma_nextcookie(ac->buffer_dma_handle,
			    buffer_dma_cookiep);
	}

	ac->transfer_size = size;
	ac->data_transfered += size;
}


/*
 * Map the amr command for enquiry, allocate the DMA resource
 */
static int
amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size)
{
	struct amr_softs	*softs = ac->ac_softs;
	size_t			len;
	uint_t			dma_flags;

	AMRDB_PRINT((CE_NOTE, "Amr_enquiry_mapcmd called, ac=%p, flags=%x",
	    (void *)ac, ac->ac_flags));

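	/*
	 * Note on naming: AMR_CMD_DATAOUT/DATAIN give the direction as
	 * seen from the controller, so DATAOUT (controller to host)
	 * maps to DDI_DMA_READ and DATAIN maps to DDI_DMA_WRITE.
	 * amr_mapcmd() below follows the same convention.
	 */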
	if (ac->ac_flags & AMR_CMD_DATAOUT) {
		dma_flags = DDI_DMA_READ;
	} else {
		dma_flags = DDI_DMA_WRITE;
	}

	dma_flags |= DDI_DMA_CONSISTENT;

	/* process the DMA by address bind mode */
	if (ddi_dma_alloc_handle(softs->dev_info_p,
	    &addr_dma_attr, DDI_DMA_SLEEP, NULL,
	    &ac->buffer_dma_handle) !=
	    DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN,
		    "Cannot allocate addr DMA tag"));
		goto error_out;
	}

	if (ddi_dma_mem_alloc(ac->buffer_dma_handle,
	    data_size,
	    &accattr,
	    dma_flags,
	    DDI_DMA_SLEEP,
	    NULL,
	    (caddr_t *)&ac->ac_data,
	    &len,
	    &ac->buffer_acc_handle) !=
	    DDI_SUCCESS) {

		AMRDB_PRINT((CE_WARN,
		    "Cannot allocate DMA memory"));
		goto error_out;
	}

	if ((ddi_dma_addr_bind_handle(
	    ac->buffer_dma_handle,
	    NULL, ac->ac_data, len, dma_flags,
	    DDI_DMA_SLEEP, NULL, &ac->buffer_dma_cookie,
	    &ac->num_of_cookie)) != DDI_DMA_MAPPED) {

		AMRDB_PRINT((CE_WARN,
		    "Cannot bind addr for dma"));
		goto error_out;
	}

	ac->ac_dataphys = (&ac->buffer_dma_cookie)->dmac_address;

	((struct amr_mailbox *)&(ac->mailbox))->mb_param = 0;
	ac->mailbox.mb_nsgelem = 0;
	ac->mailbox.mb_physaddr = ac->ac_dataphys;

	ac->ac_flags |= AMR_CMD_MAPPED;

	return (DDI_SUCCESS);

error_out:
	if (ac->num_of_cookie)
		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
	if (ac->buffer_acc_handle) {
		ddi_dma_mem_free(&ac->buffer_acc_handle);
		ac->buffer_acc_handle = NULL;
	}
	if (ac->buffer_dma_handle) {
		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
		ac->buffer_dma_handle = NULL;
	}

	return (DDI_FAILURE);
}

/*
 * Unmap the amr command for enquiry, free the DMA resource
 */
static void
amr_enquiry_unmapcmd(struct amr_command *ac)
{
	AMRDB_PRINT((CE_NOTE, "Amr_enquiry_unmapcmd called, ac=%p",
	    (void *)ac));

	/* if the command involved data at all and was mapped */
	if ((ac->ac_flags & AMR_CMD_MAPPED) && ac->ac_data) {
		if (ac->buffer_dma_handle)
			(void) ddi_dma_unbind_handle(
			    ac->buffer_dma_handle);
		if (ac->buffer_acc_handle) {
			ddi_dma_mem_free(&ac->buffer_acc_handle);
			ac->buffer_acc_handle = NULL;
		}
		if (ac->buffer_dma_handle) {
			(void) ddi_dma_free_handle(
			    &ac->buffer_dma_handle);
			ac->buffer_dma_handle = NULL;
		}
	}

	ac->ac_flags &= ~AMR_CMD_MAPPED;
}

/*
 * Map the amr command, allocate the DMA resource
 */
static int
amr_mapcmd(struct amr_command *ac, int (*callback)(), caddr_t arg)
{
	uint_t	dma_flags;
	off_t	off;
	size_t	len;
	int	error;
	int	(*cb)(caddr_t);

	AMRDB_PRINT((CE_NOTE, "Amr_mapcmd called, ac=%p, flags=%x",
	    (void *)ac, ac->ac_flags));

	if (ac->ac_flags & AMR_CMD_DATAOUT) {
		dma_flags = DDI_DMA_READ;
	} else {
		dma_flags = DDI_DMA_WRITE;
	}

	if (ac->ac_flags & AMR_CMD_PKT_CONSISTENT) {
		dma_flags |= DDI_DMA_CONSISTENT;
	}
	if (ac->ac_flags & AMR_CMD_PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	if ((!(ac->ac_flags & AMR_CMD_MAPPED)) && (ac->ac_buf == NULL)) {
		ac->ac_flags |= AMR_CMD_MAPPED;
		return (DDI_SUCCESS);
	}

	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* if the command involves data at all, and hasn't been mapped */
	if (!(ac->ac_flags & AMR_CMD_MAPPED)) {
		/* process the DMA using buffer bind mode */
		error = ddi_dma_buf_bind_handle(ac->buffer_dma_handle,
		    ac->ac_buf,
		    dma_flags,
		    cb,
		    arg,
		    &ac->buffer_dma_cookie,
		    &ac->num_of_cookie);
		switch (error) {
		case DDI_DMA_PARTIAL_MAP:
			if (ddi_dma_numwin(ac->buffer_dma_handle,
			    &ac->num_of_win) == DDI_FAILURE) {

				AMRDB_PRINT((CE_WARN,
				    "Cannot get dma num win"));
				(void) ddi_dma_unbind_handle(
				    ac->buffer_dma_handle);
				(void) ddi_dma_free_handle(
				    &ac->buffer_dma_handle);
				ac->buffer_dma_handle = NULL;
				return (DDI_FAILURE);
			}
			ac->current_win = 0;
			break;

		case DDI_DMA_MAPPED:
			ac->num_of_win = 1;
			ac->current_win = 0;
			break;

		default:
			AMRDB_PRINT((CE_WARN,
			    "Cannot bind buf for dma"));

			(void) ddi_dma_free_handle(
			    &ac->buffer_dma_handle);
			ac->buffer_dma_handle = NULL;
			return (DDI_FAILURE);
		}

		ac->current_cookie = 0;

		ac->ac_flags |= AMR_CMD_MAPPED;
	} else if (ac->current_cookie == AMR_LAST_COOKIE_TAG) {
		/* get the next window */
		ac->current_win++;
		(void) ddi_dma_getwin(ac->buffer_dma_handle,
		    ac->current_win, &off, &len,
		    &ac->buffer_dma_cookie,
		    &ac->num_of_cookie);
		ac->current_cookie = 0;
	}

	if ((ac->num_of_cookie - ac->current_cookie) > AMR_NSEG) {
		amr_setup_dmamap(ac, &ac->buffer_dma_cookie, AMR_NSEG);
		ac->current_cookie += AMR_NSEG;
	} else {
		amr_setup_dmamap(ac, &ac->buffer_dma_cookie,
		    ac->num_of_cookie - ac->current_cookie);
		ac->current_cookie = AMR_LAST_COOKIE_TAG;
	}

	return (DDI_SUCCESS);
}

/*
 * Unmap the amr command, free the DMA resource
 */
static void
amr_unmapcmd(struct amr_command *ac)
{
	AMRDB_PRINT((CE_NOTE, "Amr_unmapcmd called, ac=%p",
	    (void *)ac));

	/* if the command involved data at all and was mapped */
	if ((ac->ac_flags & AMR_CMD_MAPPED) &&
	    ac->ac_buf && ac->buffer_dma_handle)
		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);

	ac->ac_flags &= ~AMR_CMD_MAPPED;
}

static int
amr_setup_tran(dev_info_t *dip, struct amr_softs *softp)
{
	softp->hba_tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);

	/*
	 * hba_private always points to the amr_softs struct
	 */
	softp->hba_tran->tran_hba_private	= softp;
	softp->hba_tran->tran_tgt_init		= amr_tran_tgt_init;
	softp->hba_tran->tran_tgt_probe		= scsi_hba_probe;
	softp->hba_tran->tran_start		= amr_tran_start;
	softp->hba_tran->tran_reset		= amr_tran_reset;
	softp->hba_tran->tran_getcap		= amr_tran_getcap;
	softp->hba_tran->tran_setcap		= amr_tran_setcap;
	softp->hba_tran->tran_init_pkt		= amr_tran_init_pkt;
	softp->hba_tran->tran_destroy_pkt	= amr_tran_destroy_pkt;
	softp->hba_tran->tran_dmafree		= amr_tran_dmafree;
	softp->hba_tran->tran_sync_pkt		= amr_tran_sync_pkt;
	softp->hba_tran->tran_abort		= NULL;
	softp->hba_tran->tran_tgt_free		= NULL;
	softp->hba_tran->tran_quiesce		= NULL;
	softp->hba_tran->tran_unquiesce		= NULL;
	softp->hba_tran->tran_sd		= NULL;

	if (scsi_hba_attach_setup(dip, &buffer_dma_attr, softp->hba_tran,
	    SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
		scsi_hba_tran_free(softp->hba_tran);
		softp->hba_tran = NULL;
		return (DDI_FAILURE);
	} else {
		return (DDI_SUCCESS);
	}
}
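
/*
 * SCSI_HBA_TRAN_CLONE asks SCSA to give each target its own copy of the
 * scsi_hba_tran_t, so per-target data could be hung off the clone; this
 * driver does not use that and keeps tran_hba_private pointing at the
 * single per-instance softstate in every copy.
 */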

/*ARGSUSED*/
static int
amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
	scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct amr_softs	*softs;
	ushort_t		target = sd->sd_address.a_target;
	uchar_t			lun = sd->sd_address.a_lun;

	softs = (struct amr_softs *)
	    (sd->sd_address.a_hba_tran->tran_hba_private);

	if ((lun == 0) && (target < AMR_MAXLD))
		if (softs->logic_drive[target].al_state != AMR_LDRV_OFFLINE)
			return (DDI_SUCCESS);

	return (DDI_FAILURE);
}

static int
amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct amr_softs	*softs;
	struct buf		*bp = NULL;
	union scsi_cdb		*cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
	int			ret;
	uint32_t		capacity;
	struct amr_command	*ac;

	AMRDB_PRINT((CE_NOTE, "amr_tran_start, cmd=%X,target=%d,lun=%d",
	    cdbp->scc_cmd, ap->a_target, ap->a_lun));

	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
	if ((ap->a_lun != 0) || (ap->a_target >= AMR_MAXLD) ||
	    (softs->logic_drive[ap->a_target].al_state ==
	    AMR_LDRV_OFFLINE)) {
		cmn_err(CE_WARN, "invalid target or lun");
		ret = TRAN_BADPKT;
		return (ret);
	}

	ac = (struct amr_command *)pkt->pkt_ha_private;
	bp = ac->ac_buf;

	AMRDB_PRINT((CE_NOTE, "scsi cmd accepted, cmd=%X", cdbp->scc_cmd));

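	/*
	 * Only reads and writes are handed to the controller.  Logical
	 * drives are not real SCSI targets, so INQUIRY, READ CAPACITY,
	 * MODE SENSE and the like are synthesized by the driver in the
	 * switch below and completed without ever reaching the hardware.
	 */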
	switch (cdbp->scc_cmd) {
	case SCMD_READ:		/* read		*/
	case SCMD_READ_G1:	/* read	g1	*/
	case SCMD_READ_BUFFER:	/* read buffer	*/
	case SCMD_WRITE:	/* write	*/
	case SCMD_WRITE_G1:	/* write g1	*/
	case SCMD_WRITE_BUFFER:	/* write buffer	*/
		amr_rw_command(softs, pkt, ap->a_target);

		if (pkt->pkt_flags & FLAG_NOINTR) {
			(void) amr_poll_command(ac);
			pkt->pkt_state |= (STATE_GOT_BUS
			    | STATE_GOT_TARGET
			    | STATE_SENT_CMD
			    | STATE_XFERRED_DATA);
			*pkt->pkt_scbp = 0;
			pkt->pkt_statistics |= STAT_SYNC;
			pkt->pkt_reason = CMD_CMPLT;
		} else {
			mutex_enter(&softs->queue_mutex);
			if (softs->waiting_q_head == NULL) {
				ac->ac_prev = NULL;
				ac->ac_next = NULL;
				softs->waiting_q_head = ac;
				softs->waiting_q_tail = ac;
			} else {
				ac->ac_next = NULL;
				ac->ac_prev = softs->waiting_q_tail;
				softs->waiting_q_tail->ac_next = ac;
				softs->waiting_q_tail = ac;
			}
			mutex_exit(&softs->queue_mutex);
			amr_start_waiting_queue((void *)softs);
		}
		ret = TRAN_ACCEPT;
		break;

	case SCMD_INQUIRY: /* inquiry */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			struct scsi_inquiry inqp;
			uint8_t *sinq_p = (uint8_t *)&inqp;

			bzero(&inqp, sizeof (struct scsi_inquiry));

			if (((char *)cdbp)[1] || ((char *)cdbp)[2]) {
				/*
				 * EVPD inquiries and page codes are
				 * not supported
				 */
				sinq_p[1] = 0xFF;
				sinq_p[2] = 0x0;
			} else {
				inqp.inq_len = AMR_INQ_ADDITIONAL_LEN;
				inqp.inq_ansi = AMR_INQ_ANSI_VER;
				inqp.inq_rdf = AMR_INQ_RESP_DATA_FORMAT;
				/* enable tagged queueing */
				inqp.inq_cmdque = 1;
				bcopy("MegaRaid", inqp.inq_vid,
				    sizeof (inqp.inq_vid));
				bcopy(softs->amr_product_info.pi_product_name,
				    inqp.inq_pid,
				    AMR_PRODUCT_INFO_SIZE);
				bcopy(softs->amr_product_info.pi_firmware_ver,
				    inqp.inq_revision,
				    AMR_FIRMWARE_VER_SIZE);
			}

			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bcopy(&inqp, bp->b_un.b_addr,
			    sizeof (struct scsi_inquiry));

			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			(*pkt->pkt_comp)(pkt);
		break;

	case SCMD_READ_CAPACITY: /* read capacity */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			struct scsi_capacity cp;

			capacity = softs->logic_drive[ap->a_target].al_size
			    - 1;
			cp.capacity = BE_32(capacity);
			cp.lbasize = BE_32(512);

			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bcopy(&cp, bp->b_un.b_addr, 8);
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD
		    | STATE_XFERRED_DATA);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			(*pkt->pkt_comp)(pkt);
		break;

	case SCMD_MODE_SENSE:		/* mode sense */
	case SCMD_MODE_SENSE_G1:	/* mode sense g1 */
		amr_unmapcmd(ac);

		capacity = softs->logic_drive[ap->a_target].al_size - 1;
		amr_mode_sense(cdbp, bp, capacity);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD
		    | STATE_XFERRED_DATA);
		*pkt->pkt_scbp = 0;
		ret = TRAN_ACCEPT;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			(*pkt->pkt_comp)(pkt);
		break;

	case SCMD_TEST_UNIT_READY:	/* test unit ready */
	case SCMD_REQUEST_SENSE:	/* request sense */
	case SCMD_FORMAT:		/* format */
	case SCMD_START_STOP:		/* start stop */
	case SCMD_SYNCHRONIZE_CACHE:	/* synchronize cache */
		if (bp && bp->b_un.b_addr && bp->b_bcount) {
			amr_unmapcmd(ac);

			if (bp->b_flags & (B_PHYS | B_PAGEIO))
				bp_mapin(bp);
			bzero(bp->b_un.b_addr, bp->b_bcount);

			pkt->pkt_state |= STATE_XFERRED_DATA;
		}
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_state |= (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD);
		ret = TRAN_ACCEPT;
		*pkt->pkt_scbp = 0;
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			(*pkt->pkt_comp)(pkt);
		break;

	default: /* any other commands */
		amr_unmapcmd(ac);
		pkt->pkt_reason = CMD_INCOMPLETE;
		pkt->pkt_state = (STATE_GOT_BUS
		    | STATE_GOT_TARGET
		    | STATE_SENT_CMD
		    | STATE_GOT_STATUS
		    | STATE_ARQ_DONE);
		ret = TRAN_ACCEPT;
		*pkt->pkt_scbp = 0;
		amr_set_arq_data(pkt, KEY_ILLEGAL_REQUEST);
		if (!(pkt->pkt_flags & FLAG_NOINTR))
			(*pkt->pkt_comp)(pkt);
		break;
	}

	return (ret);
}

/*
 * tran_reset() is expected to reset the bus/target/adapter in support of
 * fault recovery, according to the "level" argument. However, LSI has
 * confirmed that these HBA cards do not support any command to reset the
 * bus/target/adapter/channel.
 *
 * If tran_reset() returns FAILURE to sd, the system will not continue to
 * dump core, yet a core dump is a crucial aid in analyzing a panic. As a
 * workaround we return a fake SUCCESS to sd during panic, which forces the
 * system to keep dumping core, although the dump may be flawed in some
 * situations because in-flight commands keep DMAing data into memory. The
 * workaround may also fail if the panic was caused by the HBA itself, so
 * it is not a good model for implementing tran_reset(); the proper
 * approach would be to send a reset command to the adapter.
 */
/*ARGSUSED*/
static int
amr_tran_reset(struct scsi_address *ap, int level)
{
	struct amr_softs	*softs;
	volatile uint32_t	done_flag;

	if (ddi_in_panic()) {
		softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);

		/* drain any outstanding commands, acknowledging each batch */
		while (softs->amr_busyslots > 0) {
			AMR_DELAY((softs->mailbox->mb_busy == 0),
			    AMR_RETRYCOUNT, done_flag);
			if (!done_flag) {
				/*
				 * The commands did not complete; report
				 * the problem and give up.
				 */
				cmn_err(CE_WARN,
				    "AMR command is not completed");
				return (0);
			}

			AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);

			/* wait for the acknowledgement from the hardware */
			AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
			    AMR_RETRYCOUNT, done_flag);
			if (!done_flag) {
				/*
				 * The hardware never acknowledged; report
				 * the problem and give up.
				 */
				cmn_err(CE_WARN, "No answer from the hardware");
				return (0);
			}

			softs->amr_busyslots -= softs->mailbox->mb_nstatus;
		}

		/* flush the controller */
		(void) amr_flush(softs);

		/*
		 * If the system is in panic, tran_reset() returns a fake
		 * SUCCESS to sd so that the system can continue dumping
		 * core using polled commands. This is a workaround for
		 * dumping core during panic.
		 *
		 * Note: Some in-flight commands will continue DMAing data
		 *	 into memory while the core is dumped, which may
		 *	 leave flaws in the dump file, so a cmn_err() is
		 *	 printed to warn users. In most cases, however,
		 *	 the core file will be fine.
		 */
		cmn_err(CE_WARN, "This system contains a SCSI HBA card/driver "
		    "that doesn't support software reset. This "
		    "means that memory being used by the HBA for "
		    "DMA based reads could have been updated after "
		    "we panic'd.");
		return (1);
	} else {
		/* return failure to sd */
		return (0);
	}
}

/*ARGSUSED*/
static int
amr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
{
	struct amr_softs	*softs;

	/*
	 * We don't allow inquiring about capabilities for other targets
	 */
	if (cap == NULL || whom == 0)
		return (-1);

	softs = ((struct amr_softs *)(ap->a_hba_tran)->tran_hba_private);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_ARQ:
		return (1);
	case SCSI_CAP_GEOMETRY:
		return ((AMR_DEFAULT_HEADS << 16) | AMR_DEFAULT_CYLINDERS);
	case SCSI_CAP_SECTOR_SIZE:
		return (AMR_DEFAULT_SECTORS);
	case SCSI_CAP_TOTAL_SECTORS:
		/* number of sectors */
		return (softs->logic_drive[ap->a_target].al_size);
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_TAGGED_QING:
		return (1);
	default:
		return (-1);
	}
}

/*ARGSUSED*/
static int
amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
		int whom)
{
	/*
	 * We don't allow setting capabilities for other targets
	 */
	if (cap == NULL || whom == 0) {
		AMRDB_PRINT((CE_NOTE,
		    "Set Cap not supported, string = %s, whom=%d",
		    cap, whom));
		return (-1);
	}

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_ARQ:
		return (1);
	case SCSI_CAP_TOTAL_SECTORS:
		return (1);
	case SCSI_CAP_SECTOR_SIZE:
		return (1);
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_TAGGED_QING:
		return ((value == 1) ? 1 : 0);
	default:
		return (0);
	}
}
1841
1842static struct scsi_pkt *
1843amr_tran_init_pkt(struct scsi_address *ap,
1844    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
1845    int tgtlen, int flags, int (*callback)(), caddr_t arg)
1846{
1847	struct amr_softs	*softs;
1848	struct amr_command	*ac;
1849	uint32_t		slen;
1850
1851	softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
1852
1853	if ((ap->a_lun != 0)||(ap->a_target >= AMR_MAXLD)||
1854	    (softs->logic_drive[ap->a_target].al_state ==
1855	    AMR_LDRV_OFFLINE)) {
1856		return (NULL);
1857	}
1858
1859	if (pkt == NULL) {
1860		/* force auto request sense */
1861		slen = MAX(statuslen, sizeof (struct scsi_arq_status));
1862
1863		pkt = scsi_hba_pkt_alloc(softs->dev_info_p, ap, cmdlen,
1864		    slen, tgtlen, sizeof (struct amr_command),
1865		    callback, arg);
1866		if (pkt == NULL) {
1867			AMRDB_PRINT((CE_WARN, "scsi_hba_pkt_alloc failed"));
1868			return (NULL);
1869		}
1870		pkt->pkt_address	= *ap;
1871		pkt->pkt_comp		= (void (*)())NULL;
1872		pkt->pkt_time		= 0;
1873		pkt->pkt_resid		= 0;
1874		pkt->pkt_statistics	= 0;
1875		pkt->pkt_reason		= 0;
1876
1877		ac = (struct amr_command *)pkt->pkt_ha_private;
1878		ac->ac_buf = bp;
1879		ac->cmdlen = cmdlen;
1880		ac->ac_softs = softs;
1881		ac->pkt = pkt;
1882		ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
1883		ac->ac_flags &= ~AMR_CMD_BUSY;
1884
1885		if ((bp == NULL) || (bp->b_bcount == 0)) {
1886			return (pkt);
1887		}
1888
1889		if (ddi_dma_alloc_handle(softs->dev_info_p, &buffer_dma_attr,
1890		    DDI_DMA_SLEEP, NULL,
1891		    &ac->buffer_dma_handle) != DDI_SUCCESS) {
1892
1893			AMRDB_PRINT((CE_WARN,
1894			    "Cannot allocate buffer DMA tag"));
1895			scsi_hba_pkt_free(ap, pkt);
1896			return (NULL);
1897
1898		}
1899
1900	} else {
1901		if ((bp == NULL) || (bp->b_bcount == 0)) {
1902			return (pkt);
1903		}
1904		ac = (struct amr_command *)pkt->pkt_ha_private;
1905	}
1906
1907	ASSERT(ac != NULL);
1908
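	/*
	 * Note the controller-centric flag naming: AMR_CMD_DATAOUT marks
	 * data flowing out of the controller into host memory (a read),
	 * AMR_CMD_DATAIN the reverse; amr_rw_command() relies on this
	 * when choosing between AMR_CMD_LREAD and AMR_CMD_LWRITE.
	 */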
1909	if (bp->b_flags & B_READ) {
1910		ac->ac_flags |= AMR_CMD_DATAOUT;
1911	} else {
1912		ac->ac_flags |= AMR_CMD_DATAIN;
1913	}
1914
1915	if (flags & PKT_CONSISTENT) {
1916		ac->ac_flags |= AMR_CMD_PKT_CONSISTENT;
1917	}
1918
1919	if (flags & PKT_DMA_PARTIAL) {
1920		ac->ac_flags |= AMR_CMD_PKT_DMA_PARTIAL;
1921	}
1922
1923	if (amr_mapcmd(ac, callback, arg) != DDI_SUCCESS) {
1924		scsi_hba_pkt_free(ap, pkt);
1925		return (NULL);
1926	}
1927
1928	pkt->pkt_resid = bp->b_bcount - ac->data_transfered;
1929
1930	AMRDB_PRINT((CE_NOTE,
1931	    "init pkt, pkt_resid=%d, b_bcount=%d, data_transfered=%d",
1932	    (uint32_t)pkt->pkt_resid, (uint32_t)bp->b_bcount,
1933	    ac->data_transfered));
1934
1935	ASSERT(pkt->pkt_resid >= 0);
1936
1937	return (pkt);
1938}
1939
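/*
 * SCSA tran_destroy_pkt(9E) entry point: undo amr_tran_init_pkt() by
 * unmapping the command, freeing its DMA handle and the packet itself.
 */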
1940static void
1941amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1942{
1943	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1944
1945	amr_unmapcmd(ac);
1946
1947	if (ac->buffer_dma_handle) {
1948		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1949		ac->buffer_dma_handle = NULL;
1950	}
1951
1952	scsi_hba_pkt_free(ap, pkt);
1953	AMRDB_PRINT((CE_NOTE, "Destroy pkt called"));
1954}
1955
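/*
 * SCSA tran_sync_pkt(9E) entry point: sync the buffer DMA for the
 * device when data flows toward the controller (AMR_CMD_DATAIN) and
 * for the CPU when data flows toward the host.
 */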
1956/*ARGSUSED*/
1957static void
1958amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1959{
1960	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1961
1962	if (ac->buffer_dma_handle) {
1963		(void) ddi_dma_sync(ac->buffer_dma_handle, 0, 0,
1964		    (ac->ac_flags & AMR_CMD_DATAIN) ?
1965		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
1966	}
1967}
1968
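/*
 * SCSA tran_dmafree(9E) entry point: release the DMA binding and the
 * handle of a command whose buffer is still mapped.
 */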
1969/*ARGSUSED*/
1970static void
1971amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1972{
1973	struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1974
1975	if (ac->ac_flags & AMR_CMD_MAPPED) {
1976		(void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
1977		(void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1978		ac->buffer_dma_handle = NULL;
1979		ac->ac_flags &= ~AMR_CMD_MAPPED;
1980	}
1982}
1983
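/*
 * Translate a SCSI READ/WRITE CDB into an AMR logical drive command.
 * The block count is rounded up to whole blocks and the LBA is taken
 * from the 10-byte (group 1) or 6-byte (group 0) CDB layout. For
 * example, assuming AMR_BLKSIZE is 512 and transfer_size counts bytes,
 * a 4096-byte read at LBA 100 yields mb_blkcount = 8 and mb_lba = 100.
 */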
1984/*ARGSUSED*/
1985static void
1986amr_rw_command(struct amr_softs *softs, struct scsi_pkt *pkt, int target)
1987{
1988	struct amr_command	*ac = (struct amr_command *)pkt->pkt_ha_private;
1989	union scsi_cdb		*cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
1990	uint8_t			cmd;
1991
1992	if (ac->ac_flags & AMR_CMD_DATAOUT) {
1993		cmd = AMR_CMD_LREAD;
1994	} else {
1995		cmd = AMR_CMD_LWRITE;
1996	}
1997
1998	ac->mailbox.mb_command = cmd;
	ac->mailbox.mb_blkcount =
	    (ac->transfer_size + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
2001	ac->mailbox.mb_lba = (ac->cmdlen == 10) ?
2002	    GETG1ADDR(cdbp) : GETG0ADDR(cdbp);
2003	ac->mailbox.mb_drive = (uint8_t)target;
2004}
2005
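/*
 * Synthesize MODE SENSE page 3 (format) and page 4 (rigid disk
 * geometry) data for sd, using the same AMR_DEFAULT_* geometry that
 * tran_getcap() reports; any other page is answered with zeroed data.
 */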
2006static void
2007amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp, unsigned int capacity)
2008{
2009	uchar_t			pagecode;
2010	struct mode_format	*page3p;
2011	struct mode_geometry	*page4p;
2012	struct mode_header	*headerp;
2013	uint32_t		ncyl;
2014
2015	if (!(bp && bp->b_un.b_addr && bp->b_bcount))
2016		return;
2017
2018	if (bp->b_flags & (B_PHYS | B_PAGEIO))
2019		bp_mapin(bp);
2020
2021	pagecode = cdbp->cdb_un.sg.scsi[0];
2022	switch (pagecode) {
2023	case SD_MODE_SENSE_PAGE3_CODE:
2024		headerp = (struct mode_header *)(bp->b_un.b_addr);
2025		headerp->bdesc_length = MODE_BLK_DESC_LENGTH;
2026
2027		page3p = (struct mode_format *)((caddr_t)headerp +
2028		    MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
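		/*
		 * the AMR_DEFAULT_* names appear to be historical:
		 * AMR_DEFAULT_SECTORS is the sector size in bytes and
		 * AMR_DEFAULT_CYLINDERS the sectors-per-track count,
		 * which keeps this page consistent with tran_getcap()
		 */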
2029		page3p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE3_CODE);
2030		page3p->mode_page.length = BE_8(sizeof (struct mode_format));
2031		page3p->data_bytes_sect = BE_16(AMR_DEFAULT_SECTORS);
2032		page3p->sect_track = BE_16(AMR_DEFAULT_CYLINDERS);
2033
2034		return;
2035
2036	case SD_MODE_SENSE_PAGE4_CODE:
2037		headerp = (struct mode_header *)(bp->b_un.b_addr);
2038		headerp->bdesc_length = MODE_BLK_DESC_LENGTH;
2039
2040		page4p = (struct mode_geometry *)((caddr_t)headerp +
2041		    MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
2042		page4p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE4_CODE);
2043		page4p->mode_page.length = BE_8(sizeof (struct mode_geometry));
2044		page4p->heads = BE_8(AMR_DEFAULT_HEADS);
2045		page4p->rpm = BE_16(AMR_DEFAULT_ROTATIONS);
2046
		ncyl = capacity / (AMR_DEFAULT_HEADS * AMR_DEFAULT_CYLINDERS);
2048		page4p->cyl_lb = BE_8(ncyl & 0xff);
2049		page4p->cyl_mb = BE_8((ncyl >> 8) & 0xff);
2050		page4p->cyl_ub = BE_8((ncyl >> 16) & 0xff);
2051
2052		return;
2053	default:
2054		bzero(bp->b_un.b_addr, bp->b_bcount);
2055		return;
2056	}
2057}
2058
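/*
 * Build a synthetic auto request sense payload in the packet's scb: a
 * CHECK CONDITION status plus extended sense carrying the given key.
 */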
2059static void
2060amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key)
2061{
2062	struct scsi_arq_status *arqstat;
2063
2064	arqstat = (struct scsi_arq_status *)(pkt->pkt_scbp);
2065	arqstat->sts_status.sts_chk = 1; /* CHECK CONDITION */
2066	arqstat->sts_rqpkt_reason = CMD_CMPLT;
2067	arqstat->sts_rqpkt_resid = 0;
2068	arqstat->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2069	    STATE_SENT_CMD | STATE_XFERRED_DATA;
2070	arqstat->sts_rqpkt_statistics = 0;
2071	arqstat->sts_sensedata.es_valid = 1;
2072	arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2073	arqstat->sts_sensedata.es_key = key;
2074}
2075
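/*
 * Drain the waiting queue: claim a free slot for each command, copy its
 * scatter/gather table into the slot's shared DMA area, copy the mailbox
 * image into the controller mailbox and ring the inbound doorbell. The
 * mailbox holds a single command, so both mutexes stay held for the
 * whole pass and the routine backs off if a submission is pending.
 */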
2076static void
2077amr_start_waiting_queue(void *softp)
2078{
2079	uint32_t		slot;
2080	struct amr_command	*ac;
2081	volatile uint32_t	done_flag;
2082	struct amr_softs	*softs = (struct amr_softs *)softp;
2083
	/* hold both mutexes: only one command may be submitted at a time */
2085	mutex_enter(&softs->queue_mutex);
2086	mutex_enter(&softs->cmd_mutex);
2087
2088	while ((ac = softs->waiting_q_head) != NULL) {
		/*
		 * Find an available slot; the last slot is reserved
		 * for polled I/O commands.
		 */
2093		for (slot = 0; slot < (softs->sg_max_count - 1); slot++) {
2094			if (softs->busycmd[slot] == NULL) {
2095				if (AMR_QGET_IDB(softs) & AMR_QIDB_SUBMIT) {
					/*
					 * a submission is already pending in
					 * the mailbox; give up for now
					 */
2100					mutex_exit(&softs->cmd_mutex);
2101					mutex_exit(&softs->queue_mutex);
2102					return;
2103				}
2104
2105				ac->ac_timestamp = ddi_get_time();
2106
2107				if (!(ac->ac_flags & AMR_CMD_GOT_SLOT)) {
2108
2109					softs->busycmd[slot] = ac;
2110					ac->ac_slot = slot;
2111					softs->amr_busyslots++;
2112
2113					bcopy(ac->sgtable,
2114					    softs->sg_items[slot].sg_table,
2115					    sizeof (struct amr_sgentry) *
2116					    AMR_NSEG);
2117
2118					(void) ddi_dma_sync(
2119					    softs->sg_items[slot].sg_handle,
2120					    0, 0, DDI_DMA_SYNC_FORDEV);
2121
2122					ac->mailbox.mb_physaddr =
2123					    softs->sg_items[slot].sg_phyaddr;
2124				}
2125
2126				/* take the cmd from the queue */
2127				softs->waiting_q_head = ac->ac_next;
2128
2129				ac->mailbox.mb_ident = ac->ac_slot + 1;
2130				ac->mailbox.mb_busy = 1;
2131				ac->ac_next = NULL;
2132				ac->ac_prev = NULL;
2133				ac->ac_flags |= AMR_CMD_GOT_SLOT;
2134
2135				/* clear the poll/ack fields in the mailbox */
2136				softs->mailbox->mb_poll = 0;
2137				softs->mailbox->mb_ack = 0;
2138
2139				AMR_DELAY((softs->mailbox->mb_busy == 0),
2140				    AMR_RETRYCOUNT, done_flag);
2141				if (!done_flag) {
				/*
				 * the controller never freed the mailbox;
				 * report the problem and move on to the
				 * next command
				 */
2146					cmn_err(CE_WARN,
2147					    "AMR command is not completed");
2148					break;
2149				}
2150
2151				bcopy(&ac->mailbox, (void *)softs->mailbox,
2152				    AMR_MBOX_CMDSIZE);
2153				ac->ac_flags |= AMR_CMD_BUSY;
2154
2155				(void) ddi_dma_sync(softs->mbox_dma_handle,
2156				    0, 0, DDI_DMA_SYNC_FORDEV);
2157
2158				AMR_QPUT_IDB(softs,
2159				    softs->mbox_phyaddr | AMR_QIDB_SUBMIT);
2160
				/*
				 * the current ac has been submitted; quit
				 * the for loop and fetch the next one
				 */
2165				break;
2166			}
2167		}
2168
		/*
		 * no free slot was found, so stop draining the queue
		 * (this test assumes softs->maxio equals the scan bound
		 * softs->sg_max_count - 1)
		 */
2170		if (slot == softs->maxio)
2171			break;
2172	}
2173
	/* end of the single-submission critical section */
2175	mutex_exit(&softs->cmd_mutex);
2176	mutex_exit(&softs->queue_mutex);
2177}
2178
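/*
 * Interrupt-time completion handler: acknowledge the outbound doorbell,
 * snapshot the mailbox (whose mb_completed[] lists finished command
 * identifiers), release the matching slots, then run the completion
 * callbacks outside the command mutex and kick the waiting queue.
 */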
2179static void
2180amr_done(struct amr_softs *softs)
2181{
2183	uint32_t		i, idx;
2184	volatile uint32_t	done_flag;
2185	struct amr_mailbox	*mbox, mbsave;
2186	struct amr_command	*ac, *head, *tail;
2187
2188	head = tail = NULL;
2189
2190	AMR_QPUT_ODB(softs, AMR_QODB_READY);
2191
2192	/* acknowledge interrupt */
2193	(void) AMR_QGET_ODB(softs);
2194
2195	mutex_enter(&softs->cmd_mutex);
2196
2197	if (softs->mailbox->mb_nstatus != 0) {
2198		(void) ddi_dma_sync(softs->mbox_dma_handle,
2199		    0, 0, DDI_DMA_SYNC_FORCPU);
2200
2201		/* save mailbox, which contains a list of completed commands */
2202		bcopy((void *)(uintptr_t)(volatile void *)softs->mailbox,
2203		    &mbsave, sizeof (mbsave));
2204
2205		mbox = &mbsave;
2206
2207		AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);
2208
		/* wait for the hardware to acknowledge */
2210		AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
2211		    AMR_RETRYCOUNT, done_flag);
2212		if (!done_flag) {
			/*
			 * the acknowledge handshake did not complete; return
			 * from this interrupt and wait for the next one
			 */
2217			cmn_err(CE_WARN, "No answer from the hardware");
2218
2219			mutex_exit(&softs->cmd_mutex);
2220			return;
2221		}
2222
2223		for (i = 0; i < mbox->mb_nstatus; i++) {
2224			idx = mbox->mb_completed[i] - 1;
2225			ac = softs->busycmd[idx];
2226
2227			if (ac != NULL) {
2228				/* pull the command from the busy index */
2229				softs->busycmd[idx] = NULL;
2230				if (softs->amr_busyslots > 0)
2231					softs->amr_busyslots--;
2232				if (softs->amr_busyslots == 0)
2233					cv_broadcast(&softs->cmd_cv);
2234
2235				ac->ac_flags &= ~AMR_CMD_BUSY;
2236				ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
2237				ac->ac_status = mbox->mb_status;
2238
				/* append to the local completion list */
2240				if (head) {
2241					tail->ac_next = ac;
2242					tail = ac;
2243					tail->ac_next = NULL;
2244				} else {
2245					tail = head = ac;
2246					ac->ac_next = NULL;
2247				}
2248			} else {
2249				AMRDB_PRINT((CE_WARN,
2250				    "ac in mailbox is NULL!"));
2251			}
2252		}
2253	} else {
2254		AMRDB_PRINT((CE_WARN, "mailbox is not ready for copy out!"));
2255	}
2256
2257	mutex_exit(&softs->cmd_mutex);
2258
2259	if (head != NULL) {
2260		amr_call_pkt_comp(head);
2261	}
2262
2263	/* dispatch a thread to process the pending I/O if there is any */
2264	if ((ddi_taskq_dispatch(softs->amr_taskq, amr_start_waiting_queue,
2265	    (void *)softs, DDI_NOSLEEP)) != DDI_SUCCESS) {
2266		cmn_err(CE_WARN, "No memory available to dispatch taskq");
2267	}
2268}
2269
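/*
 * Walk a chain of completed commands and call each packet's completion
 * routine, mapping the AMR status onto SCSA pkt_reason/pkt_state and
 * faking auto request sense data for failed commands.
 */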
2270static void
2271amr_call_pkt_comp(register struct amr_command *head)
2272{
2273	register struct scsi_pkt	*pkt;
2274	register struct amr_command	*ac, *localhead;
2275
2276	localhead = head;
2277
2278	while (localhead) {
2279		ac = localhead;
2280		localhead = ac->ac_next;
2281		ac->ac_next = NULL;
2282
2283		pkt = ac->pkt;
2284		*pkt->pkt_scbp = 0;
2285
2286		if (ac->ac_status == AMR_STATUS_SUCCESS) {
2287			pkt->pkt_state |= (STATE_GOT_BUS
2288			    | STATE_GOT_TARGET
2289			    | STATE_SENT_CMD
2290			    | STATE_XFERRED_DATA);
2291			pkt->pkt_reason = CMD_CMPLT;
2292		} else {
2293			pkt->pkt_state |= STATE_GOT_BUS
2294			    | STATE_ARQ_DONE;
2295			pkt->pkt_reason = CMD_INCOMPLETE;
2296			amr_set_arq_data(pkt, KEY_HARDWARE_ERROR);
2297		}
2298
2299		if (!(pkt->pkt_flags & FLAG_NOINTR) &&
2300		    pkt->pkt_comp) {
2301			(*pkt->pkt_comp)(pkt);
2302		}
2303	}
2304}
2305