megaraid_sas.c revision 9106:ee8f18cdafb2
1/*
2 * megaraid_sas.c: source for mega_sas driver
3 *
4 * MegaRAID device driver for SAS controllers
5 * Copyright (c) 2005-2008, LSI Logic Corporation.
6 * All rights reserved.
7 *
8 * Version:
9 * Author:
10 *        	Rajesh Prabhakaran<Rajesh.Prabhakaran@lsil.com>
11 *        	Seokmann Ju
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright notice,
17 *    this list of conditions and the following disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above copyright notice,
20 *    this list of conditions and the following disclaimer in the documentation
21 *    and/or other materials provided with the distribution.
22 *
23 * 3. Neither the name of the author nor the names of its contributors may be
24 *    used to endorse or promote products derived from this software without
25 *    specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
30 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
31 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
34 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
35 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
36 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
37 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 */
40
41/*
42 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
43 * Use is subject to license terms.
44 */
45
46#include <sys/types.h>
47#include <sys/param.h>
48#include <sys/file.h>
49#include <sys/errno.h>
50#include <sys/open.h>
51#include <sys/cred.h>
52#include <sys/modctl.h>
53#include <sys/conf.h>
54#include <sys/devops.h>
55#include <sys/cmn_err.h>
56#include <sys/kmem.h>
57#include <sys/stat.h>
58#include <sys/mkdev.h>
59#include <sys/pci.h>
60#include <sys/scsi/scsi.h>
61#include <sys/ddi.h>
62#include <sys/sunddi.h>
63#include <sys/atomic.h>
64#include <sys/signal.h>
65
66#include "megaraid_sas.h"
67
68/*
69 * FMA header files
70 */
71#include <sys/ddifm.h>
72#include <sys/fm/protocol.h>
73#include <sys/fm/util.h>
74#include <sys/fm/io/ddi.h>
75
76/*
77 * Local static data
78 */
79static void	*megasas_state = NULL;
80static int 	debug_level_g = CL_ANN;
81
82#pragma weak scsi_hba_open
83#pragma weak scsi_hba_close
84#pragma weak scsi_hba_ioctl
85
86static ddi_dma_attr_t megasas_generic_dma_attr = {
87	DMA_ATTR_V0,		/* dma_attr_version */
88	0,			/* low DMA address range */
89	0xFFFFFFFFU,		/* high DMA address range */
90	0xFFFFFFFFU,		/* DMA counter register  */
91	8,			/* DMA address alignment */
92	0x07,			/* DMA burstsizes  */
93	1,			/* min DMA size */
94	0xFFFFFFFFU,		/* max DMA size */
95	0xFFFFFFFFU,		/* segment boundary */
96	MEGASAS_MAX_SGE_CNT,	/* dma_attr_sglen */
97	512,			/* granularity of device */
98	0			/* bus specific DMA flags */
99};
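
/*
 * This attribute template is copied and then specialized (dma_attr_sgllen,
 * dma_attr_align, address limits) before the DMA allocations in this file,
 * e.g. in megasas_attach() and create_mfi_frame_pool().
 */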
100
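/*
 * 16 MB cap on a single transfer; reported to target drivers through
 * SCSI_CAP_DMA_MAX in megasas_tran_getcap().
 */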
101int32_t megasas_max_cap_maxxfer = 0x1000000;
102
103/*
104 * cb_ops contains base level routines
105 */
106static struct cb_ops megasas_cb_ops = {
107	megasas_open,		/* open */
108	megasas_close,		/* close */
109	nodev,			/* strategy */
110	nodev,			/* print */
111	nodev,			/* dump */
112	nodev,			/* read */
113	nodev,			/* write */
114	megasas_ioctl,		/* ioctl */
115	nodev,			/* devmap */
116	nodev,			/* mmap */
117	nodev,			/* segmap */
118	nochpoll,		/* poll */
119	nodev,			/* cb_prop_op */
120	0,			/* streamtab  */
121	D_NEW | D_HOTPLUG,	/* cb_flag */
122	CB_REV,			/* cb_rev */
123	nodev,			/* cb_aread */
124	nodev			/* cb_awrite */
125};
126
127/*
128 * dev_ops contains configuration routines
129 */
130static struct dev_ops megasas_ops = {
131	DEVO_REV,		/* rev, */
132	0,			/* refcnt */
133	megasas_getinfo,	/* getinfo */
134	nulldev,		/* identify */
135	nulldev,		/* probe */
136	megasas_attach,		/* attach */
137	megasas_detach,		/* detach */
138	megasas_reset,		/* reset */
139	&megasas_cb_ops,	/* char/block ops */
140	NULL,			/* bus ops */
141	NULL,			/* power */
142	ddi_quiesce_not_supported,	/* devo_quiesce */
143};
144
145char _depends_on[] = "misc/scsi";
146
147static struct modldrv modldrv = {
148	&mod_driverops,		/* module type - driver */
149	MEGASAS_VERSION,
150	&megasas_ops,		/* driver ops */
151};
152
153static struct modlinkage modlinkage = {
154	MODREV_1,	/* ml_rev - must be MODREV_1 */
155	&modldrv,	/* ml_linkage */
156	NULL		/* end of driver linkage */
157};
158
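/*
 * Device access attributes: the controller's registers and MFI structures
 * are little-endian, so the access handles set up elsewhere in the driver
 * use strict little-endian, strictly ordered access.
 */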
159static struct ddi_device_acc_attr endian_attr = {
160	DDI_DEVICE_ATTR_V0,
161	DDI_STRUCTURE_LE_ACC,
162	DDI_STRICTORDER_ACC
163};
164
165
166/*
167 * ************************************************************************** *
168 *                                                                            *
169 *         common entry points - for loadable kernel modules                  *
170 *                                                                            *
171 * ************************************************************************** *
172 */
173
174/*
175 * _init - initialize a loadable module
176 * @void
177 *
178 * The driver should perform any one-time resource allocation or data
179 * initialization during driver loading in _init(). For example, the driver
180 * should initialize any mutexes global to the driver in this routine.
181 * The driver should not, however, use _init() to allocate or initialize
182 * anything that has to do with a particular instance of the device.
183 * Per-instance initialization must be done in attach().
184 */
185int
186_init(void)
187{
188	int ret;
189
190	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
191
192	ret = ddi_soft_state_init(&megasas_state,
193	    sizeof (struct megasas_instance), 0);
194
195	if (ret != 0) {
196		con_log(CL_ANN, (CE_WARN, "megaraid: could not init state"));
197		return (ret);
198	}
199
200	if ((ret = scsi_hba_init(&modlinkage)) != 0) {
201		con_log(CL_ANN, (CE_WARN, "megaraid: could not init scsi hba"));
202		ddi_soft_state_fini(&megasas_state);
203		return (ret);
204	}
205
206	ret = mod_install(&modlinkage);
207
208	if (ret != 0) {
209		con_log(CL_ANN, (CE_WARN, "megaraid: mod_install failed"));
210		scsi_hba_fini(&modlinkage);
211		ddi_soft_state_fini(&megasas_state);
212	}
213
214	return (ret);
215}
216
217/*
218 * _info - returns information about a loadable module.
219 * @void
220 *
221 * _info() is called to return module information. This is a typical entry
222 * point that performs a predefined role: it simply calls mod_info().
223 */
224int
225_info(struct modinfo *modinfop)
226{
227	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
228
229	return (mod_info(&modlinkage, modinfop));
230}
231
232/*
233 * _fini - prepare a loadable module for unloading
234 * @void
235 *
236 * In _fini(), the driver should release any resources that were allocated in
237 * _init(). The driver must remove itself from the system module list.
238 */
239int
240_fini(void)
241{
242	int ret;
243
244	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
245
246	if ((ret = mod_remove(&modlinkage)) != 0)
247		return (ret);
248
249	scsi_hba_fini(&modlinkage);
250
251	ddi_soft_state_fini(&megasas_state);
252
253	return (ret);
254}
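
/*
 * mod_remove() fails while any controller instance is still attached, so the
 * scsi_hba_fini()/ddi_soft_state_fini() teardown above runs only once every
 * instance has been detached; it mirrors the allocation order in _init().
 */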
255
256
257/*
258 * ************************************************************************** *
259 *                                                                            *
260 *               common entry points - for autoconfiguration                  *
261 *                                                                            *
262 * ************************************************************************** *
263 */
264/*
265 * attach - adds a device to the system as part of initialization
266 * @dip:
267 * @cmd:
268 *
269 * The kernel calls a driver's attach() entry point to attach an instance of
270 * a device (for MegaRAID, it is instance of a controller) or to resume
271 * operation for an instance of a device that has been suspended or has been
272 * shut down by the power management framework.
273 * The attach() entry point typically includes the following types of
274 * processing:
275 * - allocate a soft-state structure for the device instance (for MegaRAID,
276 *   controller instance)
277 * - initialize per-instance mutexes
278 * - initialize condition variables
279 * - register the device's interrupts (for MegaRAID, controller's interrupts)
280 * - map the registers and memory of the device instance (for MegaRAID,
281 *   controller instance)
282 * - create minor device nodes for the device instance (for MegaRAID,
283 *   controller instance)
284 * - report that the device instance (for MegaRAID, controller instance) has
285 *   attached
286 */
287static int
288megasas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
289{
290	int		instance_no;
291	int		nregs;
292	uint8_t		added_isr_f = 0;
293	uint8_t		added_soft_isr_f = 0;
294	uint8_t		create_devctl_node_f = 0;
295	uint8_t		create_scsi_node_f = 0;
296	uint8_t		create_ioc_node_f = 0;
297	uint8_t		tran_alloc_f = 0;
298	uint8_t 	irq;
299	uint16_t	vendor_id;
300	uint16_t	device_id;
301	uint16_t	subsysvid;
302	uint16_t	subsysid;
303	uint16_t	command;
304
305	scsi_hba_tran_t		*tran;
306	ddi_dma_attr_t  tran_dma_attr;
307	struct megasas_instance	*instance;
308
309	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
310
311	/* CONSTCOND */
312	ASSERT(NO_COMPETING_THREADS);
313
314	instance_no = ddi_get_instance(dip);
315
316	/*
317	 * Since we know that some instantiations of this device can be
318	 * plugged into slave-only SBus slots, check to see whether this is
319	 * one such.
320	 */
321	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
322		con_log(CL_ANN, (CE_WARN,
323		    "mega%d: Device in slave-only slot, unused", instance_no));
324		return (DDI_FAILURE);
325	}
326
327	switch (cmd) {
328		case DDI_ATTACH:
329			con_log(CL_DLEVEL1, (CE_NOTE, "megasas: DDI_ATTACH"));
330			/* allocate the soft state for the instance */
331			if (ddi_soft_state_zalloc(megasas_state, instance_no)
332			    != DDI_SUCCESS) {
333				con_log(CL_ANN, (CE_WARN,
334				    "mega%d: Failed to allocate soft state",
335				    instance_no));
336
337				return (DDI_FAILURE);
338			}
339
340			instance = (struct megasas_instance *)ddi_get_soft_state
341			    (megasas_state, instance_no);
342
343			if (instance == NULL) {
344				con_log(CL_ANN, (CE_WARN,
345				    "mega%d: Bad soft state", instance_no));
346
347				ddi_soft_state_free(megasas_state, instance_no);
348
349				return (DDI_FAILURE);
350			}
351
352			bzero((caddr_t)instance,
353			    sizeof (struct megasas_instance));
354
355			instance->func_ptr = kmem_zalloc(
356			    sizeof (struct megasas_func_ptr), KM_SLEEP);
357			ASSERT(instance->func_ptr);
358
359			/* Setup the PCI configuration space handles */
360			if (pci_config_setup(dip, &instance->pci_handle) !=
361			    DDI_SUCCESS) {
362				con_log(CL_ANN, (CE_WARN,
363				    "mega%d: pci config setup failed ",
364				    instance_no));
365
366				kmem_free(instance->func_ptr,
367				    sizeof (struct megasas_func_ptr));
368				ddi_soft_state_free(megasas_state, instance_no);
369
370				return (DDI_FAILURE);
371			}
372
373			if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
374				con_log(CL_ANN, (CE_WARN,
375				    "megaraid: failed to get registers."));
376
377				pci_config_teardown(&instance->pci_handle);
378				kmem_free(instance->func_ptr,
379				    sizeof (struct megasas_func_ptr));
380				ddi_soft_state_free(megasas_state, instance_no);
381
382				return (DDI_FAILURE);
383			}
384
385			vendor_id = pci_config_get16(instance->pci_handle,
386			    PCI_CONF_VENID);
387			device_id = pci_config_get16(instance->pci_handle,
388			    PCI_CONF_DEVID);
389
390			subsysvid = pci_config_get16(instance->pci_handle,
391			    PCI_CONF_SUBVENID);
392			subsysid = pci_config_get16(instance->pci_handle,
393			    PCI_CONF_SUBSYSID);
394
395			pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
396			    (pci_config_get16(instance->pci_handle,
397			    PCI_CONF_COMM) | PCI_COMM_ME));
398			irq = pci_config_get8(instance->pci_handle,
399			    PCI_CONF_ILINE);
400
401			con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
402			    "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s\n",
403			    instance_no, vendor_id, device_id, subsysvid,
404			    subsysid, irq, MEGASAS_VERSION));
405
406			/* enable bus-mastering */
407			command = pci_config_get16(instance->pci_handle,
408			    PCI_CONF_COMM);
409
410			if (!(command & PCI_COMM_ME)) {
411				command |= PCI_COMM_ME;
412
413				pci_config_put16(instance->pci_handle,
414				    PCI_CONF_COMM, command);
415
416				con_log(CL_ANN, (CE_CONT, "megaraid%d: "
417				    "enable bus-mastering\n", instance_no));
418			} else {
419				con_log(CL_DLEVEL1, (CE_CONT, "megaraid%d: "
420				"bus-mastering already set\n", instance_no));
421			}
422
423			/* initialize function pointers */
424			if ((device_id == PCI_DEVICE_ID_LSI_1078) ||
425			    (device_id == PCI_DEVICE_ID_LSI_1078DE)) {
426				con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
427				    "1078R/DE detected\n", instance_no));
428				instance->func_ptr->read_fw_status_reg =
429				    read_fw_status_reg_ppc;
430				instance->func_ptr->issue_cmd = issue_cmd_ppc;
431				instance->func_ptr->issue_cmd_in_sync_mode =
432				    issue_cmd_in_sync_mode_ppc;
433				instance->func_ptr->issue_cmd_in_poll_mode =
434				    issue_cmd_in_poll_mode_ppc;
435				instance->func_ptr->enable_intr =
436				    enable_intr_ppc;
437				instance->func_ptr->disable_intr =
438				    disable_intr_ppc;
439				instance->func_ptr->intr_ack = intr_ack_ppc;
440			} else {
441				con_log(CL_DLEVEL1, (CE_CONT, "megasas%d: "
442				    "1064/8R detected\n", instance_no));
443				instance->func_ptr->read_fw_status_reg =
444				    read_fw_status_reg_xscale;
445				instance->func_ptr->issue_cmd =
446				    issue_cmd_xscale;
447				instance->func_ptr->issue_cmd_in_sync_mode =
448				    issue_cmd_in_sync_mode_xscale;
449				instance->func_ptr->issue_cmd_in_poll_mode =
450				    issue_cmd_in_poll_mode_xscale;
451				instance->func_ptr->enable_intr =
452				    enable_intr_xscale;
453				instance->func_ptr->disable_intr =
454				    disable_intr_xscale;
455				instance->func_ptr->intr_ack =
456				    intr_ack_xscale;
457			}
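			/*
			 * From here on, all hardware access goes through this
			 * per-instance function table: the 1078R/DE parts use
			 * the "ppc" register interface, while the other
			 * supported parts (logged above as 1064/8R) use the
			 * "xscale" interface.
			 */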
458
459			instance->baseaddress = pci_config_get32(
460			    instance->pci_handle, PCI_CONF_BASE0);
461			instance->baseaddress &= 0x0fffc;
462
463			instance->dip		= dip;
464			instance->vendor_id	= vendor_id;
465			instance->device_id	= device_id;
466			instance->subsysvid	= subsysvid;
467			instance->subsysid	= subsysid;
468
469			/* Initialize FMA */
470			instance->fm_capabilities = ddi_prop_get_int(
471			    DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
472			    "fm-capable", DDI_FM_EREPORT_CAPABLE |
473			    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
474			    | DDI_FM_ERRCB_CAPABLE);
475
476			megasas_fm_init(instance);
477
478			/* setup the mfi based low level driver */
479			if (init_mfi(instance) != DDI_SUCCESS) {
480				con_log(CL_ANN, (CE_WARN, "megaraid: "
481				"could not initialize the low level driver"));
482
483				goto fail_attach;
484			}
485
486			/*
487			 * Allocate the interrupt blocking cookie.
488			 * It represents the information the framework
489			 * needs to block interrupts. This cookie will
490			 * be used by the locks shared across our ISR.
491			 * These locks must be initialized before we
492			 * register our ISR.
493			 * ddi_add_intr(9F)
494			 */
495			if (ddi_get_iblock_cookie(dip, 0,
496			    &instance->iblock_cookie) != DDI_SUCCESS) {
497
498				goto fail_attach;
499			}
500
501			if (ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_HIGH,
502			    &instance->soft_iblock_cookie) != DDI_SUCCESS) {
503
504				goto fail_attach;
505			}
506
507			/*
508			 * Initialize the driver mutexes common to
509			 * normal/high level isr
510			 */
511			if (ddi_intr_hilevel(dip, 0)) {
512				instance->isr_level = HIGH_LEVEL_INTR;
513				mutex_init(&instance->cmd_pool_mtx,
514				    "cmd_pool_mtx", MUTEX_DRIVER,
515				    instance->soft_iblock_cookie);
516				mutex_init(&instance->cmd_pend_mtx,
517				    "cmd_pend_mtx", MUTEX_DRIVER,
518				    instance->soft_iblock_cookie);
519			} else {
520				/*
521				 * Initialize the driver mutexes
522				 * specific to soft-isr
523				 */
524				instance->isr_level = NORMAL_LEVEL_INTR;
525				mutex_init(&instance->cmd_pool_mtx,
526				    "cmd_pool_mtx", MUTEX_DRIVER,
527				    instance->iblock_cookie);
528				mutex_init(&instance->cmd_pend_mtx,
529				    "cmd_pend_mtx", MUTEX_DRIVER,
530				    instance->iblock_cookie);
531			}
532
533			mutex_init(&instance->completed_pool_mtx,
534			    "completed_pool_mtx", MUTEX_DRIVER,
535			    instance->iblock_cookie);
536			mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
537			    MUTEX_DRIVER, instance->iblock_cookie);
538			mutex_init(&instance->aen_cmd_mtx, "aen_cmd_mtx",
539			    MUTEX_DRIVER, instance->iblock_cookie);
540			mutex_init(&instance->abort_cmd_mtx, "abort_cmd_mtx",
541			    MUTEX_DRIVER, instance->iblock_cookie);
542
543			cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
544			cv_init(&instance->abort_cmd_cv, NULL, CV_DRIVER, NULL);
545
546			INIT_LIST_HEAD(&instance->completed_pool_list);
547
548			/* Register our isr. */
549			if (ddi_add_intr(dip, 0, NULL, NULL, megasas_isr,
550			    (caddr_t)instance) != DDI_SUCCESS) {
551				con_log(CL_ANN, (CE_WARN,
552				    " ISR did not register"));
553
554				goto fail_attach;
555			}
556
557			added_isr_f = 1;
558
559			/* Register our soft-isr for highlevel interrupts. */
560			if (instance->isr_level == HIGH_LEVEL_INTR) {
561				if (ddi_add_softintr(dip, DDI_SOFTINT_HIGH,
562				    &instance->soft_intr_id, NULL, NULL,
563				    megasas_softintr, (caddr_t)instance) !=
564				    DDI_SUCCESS) {
565					con_log(CL_ANN, (CE_WARN,
566					    " Software ISR did not register"));
567
568					goto fail_attach;
569				}
570
571				added_soft_isr_f = 1;
572			}
573
574			/* Allocate a transport structure */
575			tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
576
577			if (tran == NULL) {
578				con_log(CL_ANN, (CE_WARN,
579				    "scsi_hba_tran_alloc failed"));
580				goto fail_attach;
581			}
582
583			tran_alloc_f = 1;
584
585			instance->tran = tran;
586
587			tran->tran_hba_private	= instance;
588			tran->tran_tgt_private 	= NULL;
589			tran->tran_tgt_init	= megasas_tran_tgt_init;
590			tran->tran_tgt_probe	= scsi_hba_probe;
591			tran->tran_tgt_free	= (void (*)())NULL;
592			tran->tran_init_pkt	= megasas_tran_init_pkt;
593			tran->tran_start	= megasas_tran_start;
594			tran->tran_abort	= megasas_tran_abort;
595			tran->tran_reset	= megasas_tran_reset;
596			tran->tran_bus_reset	= megasas_tran_bus_reset;
597			tran->tran_getcap	= megasas_tran_getcap;
598			tran->tran_setcap	= megasas_tran_setcap;
599			tran->tran_destroy_pkt	= megasas_tran_destroy_pkt;
600			tran->tran_dmafree	= megasas_tran_dmafree;
601			tran->tran_sync_pkt	= megasas_tran_sync_pkt;
602			tran->tran_reset_notify	= NULL;
603			tran->tran_quiesce	= megasas_tran_quiesce;
604			tran->tran_unquiesce	= megasas_tran_unquiesce;
605
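			/*
			 * The generic DMA attributes are specialized with the
			 * scatter-gather limit established by init_mfi()
			 * before the transport is handed to SCSA.
			 */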
606			tran_dma_attr = megasas_generic_dma_attr;
607			tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
608
609			/* Attach this instance of the hba */
610			if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
611			    != DDI_SUCCESS) {
612				con_log(CL_ANN, (CE_WARN,
613				    "scsi_hba_attach failed\n"));
614
615				goto fail_attach;
616			}
617
618			/* create devctl node for cfgadm command */
619			if (ddi_create_minor_node(dip, "devctl",
620			    S_IFCHR, INST2DEVCTL(instance_no),
621			    DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
622				con_log(CL_ANN, (CE_WARN,
623				    "megaraid: failed to create devctl node."));
624
625				goto fail_attach;
626			}
627
628			create_devctl_node_f = 1;
629
630			/* create scsi node for cfgadm command */
631			if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
632			    INST2SCSI(instance_no),
633			    DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
634			    DDI_FAILURE) {
635				con_log(CL_ANN, (CE_WARN,
636				    "megaraid: failed to create scsi node."));
637
638				goto fail_attach;
639			}
640
641			create_scsi_node_f = 1;
642
643			(void) sprintf(instance->iocnode, "%d:lsirdctl",
644			    instance_no);
645
646			/*
647			 * Create a node for applications
648			 * for issuing ioctl to the driver.
649			 */
650			if (ddi_create_minor_node(dip, instance->iocnode,
651			    S_IFCHR, INST2LSIRDCTL(instance_no),
652			    DDI_PSEUDO, 0) == DDI_FAILURE) {
653				con_log(CL_ANN, (CE_WARN,
654				    "megaraid: failed to create ioctl node."));
655
656				goto fail_attach;
657			}
658
659			create_ioc_node_f = 1;
660
661			/* enable interrupt */
662			instance->func_ptr->enable_intr(instance);
663
664			/* initiate AEN */
665			if (start_mfi_aen(instance)) {
666				con_log(CL_ANN, (CE_WARN,
667				    "megaraid: failed to initiate AEN."));
668				goto fail_initiate_aen;
669			}
670
671			con_log(CL_DLEVEL1, (CE_NOTE,
672			    "AEN started for instance %d.", instance_no));
673
674			/* Finally! We are on the air.  */
675			ddi_report_dev(dip);
676
677			if (megasas_check_acc_handle(instance->regmap_handle) !=
678			    DDI_SUCCESS) {
679				goto fail_attach;
680			}
681			if (megasas_check_acc_handle(instance->pci_handle) !=
682			    DDI_SUCCESS) {
683				goto fail_attach;
684			}
685			break;
686		case DDI_PM_RESUME:
687			con_log(CL_ANN, (CE_NOTE,
688			    "megasas: DDI_PM_RESUME"));
689			break;
690		case DDI_RESUME:
691			con_log(CL_ANN, (CE_NOTE,
692			    "megasas: DDI_RESUME"));
693			break;
694		default:
695			con_log(CL_ANN, (CE_WARN,
696			    "megasas: invalid attach cmd=%x", cmd));
697			return (DDI_FAILURE);
698	}
699
700	return (DDI_SUCCESS);
701
702fail_initiate_aen:
703fail_attach:
704	if (create_devctl_node_f) {
705		ddi_remove_minor_node(dip, "devctl");
706	}
707
708	if (create_scsi_node_f) {
709		ddi_remove_minor_node(dip, "scsi");
710	}
711
712	if (create_ioc_node_f) {
713		ddi_remove_minor_node(dip, instance->iocnode);
714	}
715
716	if (tran_alloc_f) {
717		scsi_hba_tran_free(tran);
718	}
719
720
721	if (added_soft_isr_f) {
722		ddi_remove_softintr(instance->soft_intr_id);
723	}
724
725	if (added_isr_f) {
726		ddi_remove_intr(dip, 0, instance->iblock_cookie);
727	}
728
729	megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
730	ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
731
732	megasas_fm_fini(instance);
733
734	pci_config_teardown(&instance->pci_handle);
735
736	ddi_soft_state_free(megasas_state, instance_no);
737
738	con_log(CL_ANN, (CE_NOTE,
739	    "megasas: return failure from mega_attach\n"));
740
741	return (DDI_FAILURE);
742}
743
744/*
745 * getinfo - gets device information
746 * @dip:
747 * @cmd:
748 * @arg:
749 * @resultp:
750 *
751 * The system calls getinfo() to obtain configuration information that only
752 * the driver knows. The mapping of minor numbers to device instance is
753 * entirely under the control of the driver. The system sometimes needs to ask
754 * the driver which device a particular dev_t represents.
755 * Given the device number, return the devinfo pointer from the driver's
756 * per-instance soft state.
757 */
758/*ARGSUSED*/
759static int
760megasas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,  void *arg, void **resultp)
761{
762	int	rval;
763	int	megasas_minor = getminor((dev_t)arg);
764
765	struct megasas_instance	*instance;
766
767	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
768
769	switch (cmd) {
770		case DDI_INFO_DEVT2DEVINFO:
771			instance = (struct megasas_instance *)
772			    ddi_get_soft_state(megasas_state,
773			    MINOR2INST(megasas_minor));
774
775			if (instance == NULL) {
776				*resultp = NULL;
777				rval = DDI_FAILURE;
778			} else {
779				*resultp = instance->dip;
780				rval = DDI_SUCCESS;
781			}
782			break;
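		/*
		 * DDI_INFO_DEVT2INSTANCE must return the instance number
		 * itself (cast to a pointer) and must not rely on the
		 * instance being attached.
		 */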
783		case DDI_INFO_DEVT2INSTANCE:
784			*resultp = (void *)(intptr_t)MINOR2INST(megasas_minor);
785			rval = DDI_SUCCESS;
786			break;
787		default:
788			*resultp = NULL;
789			rval = DDI_FAILURE;
790	}
791
792	return (rval);
793}
794
795/*
796 * detach - detaches a device from the system
797 * @dip: pointer to the device's dev_info structure
798 * @cmd: type of detach
799 *
800 * A driver's detach() entry point is called to detach an instance of a device
801 * that is bound to the driver. The entry point is called with the instance of
802 * the device node to be detached and with DDI_DETACH, which is specified as
803 * the cmd argument to the entry point.
804 * This routine is called during driver unload. We free all the allocated
805 * resources and call the corresponding LLD so that it can also release all
806 * its resources.
807 */
808static int
809megasas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
810{
811	int	instance_no;
812
813	struct megasas_instance	*instance;
814
815	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
816
817	/* CONSTCOND */
818	ASSERT(NO_COMPETING_THREADS);
819
820	instance_no = ddi_get_instance(dip);
821
822	instance = (struct megasas_instance *)ddi_get_soft_state(megasas_state,
823	    instance_no);
824
825	if (!instance) {
826		con_log(CL_ANN, (CE_WARN,
827		    "megasas:%d could not get instance in detach",
828		    instance_no));
829
830		return (DDI_FAILURE);
831	}
832
833	con_log(CL_ANN, (CE_NOTE,
834	    "megasas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x\n",
835	    instance_no, instance->vendor_id, instance->device_id,
836	    instance->subsysvid, instance->subsysid));
837
838	switch (cmd) {
839		case DDI_DETACH:
840			con_log(CL_ANN, (CE_NOTE,
841			    "megasas_detach: DDI_DETACH\n"));
842
843			if (scsi_hba_detach(dip) != DDI_SUCCESS) {
844				con_log(CL_ANN, (CE_WARN,
845				    "megasas:%d failed to detach",
846				    instance_no));
847
848				return (DDI_FAILURE);
849			}
850
851			scsi_hba_tran_free(instance->tran);
852
853			if (abort_aen_cmd(instance, instance->aen_cmd)) {
854				con_log(CL_ANN, (CE_WARN, "megasas_detach: "
855				    "failed to abort previous AEN command\n"));
856
857				return (DDI_FAILURE);
858			}
859
860			instance->func_ptr->disable_intr(instance);
861
862			if (instance->isr_level == HIGH_LEVEL_INTR) {
863				ddi_remove_softintr(instance->soft_intr_id);
864			}
865
866			ddi_remove_intr(dip, 0, instance->iblock_cookie);
867
868			free_space_for_mfi(instance);
869
870			megasas_fm_fini(instance);
871
872			pci_config_teardown(&instance->pci_handle);
873
874			kmem_free(instance->func_ptr,
875			    sizeof (struct megasas_func_ptr));
876
877			ddi_soft_state_free(megasas_state, instance_no);
878			break;
879		case DDI_PM_SUSPEND:
880			con_log(CL_ANN, (CE_NOTE,
881			    "megasas_detach: DDI_PM_SUSPEND\n"));
882
883			break;
884		case DDI_SUSPEND:
885			con_log(CL_ANN, (CE_NOTE,
886			    "megasas_detach: DDI_SUSPEND\n"));
887
888			break;
889		default:
890			con_log(CL_ANN, (CE_WARN,
891			    "invalid detach command:0x%x", cmd));
892			return (DDI_FAILURE);
893	}
894
895	return (DDI_SUCCESS);
896}
897
898/*
899 * ************************************************************************** *
900 *                                                                            *
901 *             common entry points - for character driver types               *
902 *                                                                            *
903 * ************************************************************************** *
904 */
905/*
906 * open - gets access to a device
907 * @dev:
908 * @openflags:
909 * @otyp:
910 * @credp:
911 *
912 * Access to a device by one or more application programs is controlled
913 * through the open() and close() entry points. The primary function of
914 * open() is to verify that the open request is allowed.
915 */
916static  int
917megasas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
918{
919	int	rval = 0;
920
921	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
922
923	/* Check root permissions */
924	if (drv_priv(credp) != 0) {
925		con_log(CL_ANN, (CE_WARN,
926		    "megaraid: Non-root ioctl access tried!"));
927		return (EPERM);
928	}
929
930	/* Verify we are being opened as a character device */
931	if (otyp != OTYP_CHR) {
932		con_log(CL_ANN, (CE_WARN,
933		    "megaraid: ioctl node must be a char node\n"));
934		return (EINVAL);
935	}
936
937	if (ddi_get_soft_state(megasas_state, MINOR2INST(getminor(*dev)))
938	    == NULL) {
939		return (ENXIO);
940	}
941
942	if (scsi_hba_open) {
943		rval = scsi_hba_open(dev, openflags, otyp, credp);
944	}
945
946	return (rval);
947}
948
949/*
950 * close - gives up access to a device
951 * @dev:
952 * @openflags:
953 * @otyp:
954 * @credp:
955 *
956 * close() should perform any cleanup necessary to finish using the minor
957 * device, and prepare the device (and driver) to be opened again.
958 */
959static  int
960megasas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
961{
962	int	rval = 0;
963
964	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
965
966	/* no need for locks! */
967
968	if (scsi_hba_close) {
969		rval = scsi_hba_close(dev, openflags, otyp, credp);
970	}
971
972	return (rval);
973}
974
975/*
976 * ioctl - performs a range of I/O commands for character drivers
977 * @dev:
978 * @cmd:
979 * @arg:
980 * @mode:
981 * @credp:
982 * @rvalp:
983 *
984 * The ioctl() routine must make sure that user data is copied into or out of the
985 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
986 * and ddi_copyout(), as appropriate.
987 * This is a wrapper routine to serialize access to the actual ioctl routine.
988 * ioctl() should return 0 on success, or the appropriate error number. The
989 * driver may also set the value returned to the calling process through rvalp.
990 */
991static int
992megasas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
993    int *rvalp)
994{
995	int	rval = 0;
996
997	struct megasas_instance	*instance;
998	struct megasas_ioctl	ioctl;
999	struct megasas_aen	aen;
1000
1001	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1002
1003	instance = ddi_get_soft_state(megasas_state, MINOR2INST(getminor(dev)));
1004
1005	if (instance == NULL) {
1006		/* invalid minor number */
1007		con_log(CL_ANN, (CE_WARN, "megaraid: adapter not found."));
1008		return (ENXIO);
1009	}
1010
1011	switch ((uint_t)cmd) {
1012		case MEGASAS_IOCTL_FIRMWARE:
1013			if (ddi_copyin((void *) arg, &ioctl,
1014			    sizeof (struct megasas_ioctl), mode)) {
1015				con_log(CL_ANN, (CE_WARN, "megasas_ioctl: "
1016				    "ERROR IOCTL copyin"));
1017				return (EFAULT);
1018			}
1019
1020			if (ioctl.control_code == MR_DRIVER_IOCTL_COMMON) {
1021				rval = handle_drv_ioctl(instance, &ioctl, mode);
1022			} else {
1023				rval = handle_mfi_ioctl(instance, &ioctl, mode);
1024			}
1025
1026			if (ddi_copyout((void *) &ioctl, (void *)arg,
1027			    (sizeof (struct megasas_ioctl) - 1), mode)) {
1028				con_log(CL_ANN, (CE_WARN,
1029				    "megasas_ioctl: copy_to_user failed\n"));
1030				rval = 1;
1031			}
1032
1033			break;
1034		case MEGASAS_IOCTL_AEN:
1035			if (ddi_copyin((void *) arg, &aen,
1036			    sizeof (struct megasas_aen), mode)) {
1037				con_log(CL_ANN, (CE_WARN,
1038				    "megasas_ioctl: ERROR AEN copyin"));
1039				return (EFAULT);
1040			}
1041
1042			rval = handle_mfi_aen(instance, &aen);
1043
1044			if (ddi_copyout((void *) &aen, (void *)arg,
1045			    sizeof (struct megasas_aen), mode)) {
1046				con_log(CL_ANN, (CE_WARN,
1047				    "megasas_ioctl: copy_to_user failed\n"));
1048				rval = 1;
1049			}
1050
1051			break;
1052		default:
1053			rval = scsi_hba_ioctl(dev, cmd, arg,
1054			    mode, credp, rvalp);
1055
1056			con_log(CL_DLEVEL1, (CE_NOTE, "megasas_ioctl: "
1057			    "scsi_hba_ioctl called, ret = %x.", rval));
1058	}
1059
1060	return (rval);
1061}
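
/*
 * A minimal user-level sketch of how the MEGASAS_IOCTL_FIRMWARE path above
 * is reached.  The /devices path and the field usage are illustrative only;
 * the real struct megasas_ioctl layout is defined in megaraid_sas.h:
 *
 *	int fd = open("/devices/pci@0,0/pci1000,60@0:0:lsirdctl", O_RDWR);
 *	struct megasas_ioctl ioc;
 *
 *	bzero(&ioc, sizeof (ioc));
 *	ioc.control_code = MR_DRIVER_IOCTL_COMMON;
 *	if (ioctl(fd, MEGASAS_IOCTL_FIRMWARE, &ioc) != 0)
 *		perror("MEGASAS_IOCTL_FIRMWARE");
 *
 * control_code == MR_DRIVER_IOCTL_COMMON is routed to handle_drv_ioctl();
 * any other value goes to handle_mfi_ioctl(), and unrecognized ioctl
 * commands fall through to scsi_hba_ioctl().
 */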
1062
1063/*
1064 * ************************************************************************** *
1065 *                                                                            *
1066 *               common entry points - for block driver types                 *
1067 *                                                                            *
1068 * ************************************************************************** *
1069 */
1070/*
1071 * reset - TBD
1072 * @dip:
1073 * @cmd:
1074 *
1075 * TBD
1076 */
1077/*ARGSUSED*/
1078static int
1079megasas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1080{
1081	int	instance_no;
1082
1083	struct megasas_instance	*instance;
1084
1085	instance_no = ddi_get_instance(dip);
1086	instance = (struct megasas_instance *)ddi_get_soft_state
1087	    (megasas_state, instance_no);
1088
1089	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1090
1091	if (!instance) {
1092		con_log(CL_ANN, (CE_WARN,
1093		    "megaraid:%d could not get adapter in reset",
1094		    instance_no));
1095		return (DDI_FAILURE);
1096	}
1097
1098	con_log(CL_ANN, (CE_NOTE, "flushing cache for instance %d ..",
1099	    instance_no));
1100
1101	flush_cache(instance);
1102
1103	return (DDI_SUCCESS);
1104}
1105
1106
1107/*
1108 * ************************************************************************** *
1109 *                                                                            *
1110 *                          entry points (SCSI HBA)                           *
1111 *                                                                            *
1112 * ************************************************************************** *
1113 */
1114/*
1115 * tran_tgt_init - initialize a target device instance
1116 * @hba_dip:
1117 * @tgt_dip:
1118 * @tran:
1119 * @sd:
1120 *
1121 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1122 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1123 * the device's address as valid and supportable for that particular HBA.
1124 * By returning DDI_FAILURE, the instance of the target driver for that device
1125 * is not probed or attached.
1126 */
1127/*ARGSUSED*/
1128static int
1129megasas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1130		scsi_hba_tran_t *tran, struct scsi_device *sd)
1131{
1132	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1133
1134	return (DDI_SUCCESS);
1135}
1136
1137/*
1138 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1139 * @ap:
1140 * @pkt:
1141 * @bp:
1142 * @cmdlen:
1143 * @statuslen:
1144 * @tgtlen:
1145 * @flags:
1146 * @callback:
1147 *
1148 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1149 * structure and DMA resources for a target driver request. The
1150 * tran_init_pkt() entry point is called when the target driver calls the
1151 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1152 * is a request to perform one or more of three possible services:
1153 *  - allocation and initialization of a scsi_pkt structure
1154 *  - allocation of DMA resources for data transfer
1155 *  - reallocation of DMA resources for the next portion of the data transfer
1156 */
1157static struct scsi_pkt *
1158megasas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1159	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1160	int flags, int (*callback)(), caddr_t arg)
1161{
1162	struct scsa_cmd	*acmd;
1163	struct megasas_instance	*instance;
1164	struct scsi_pkt	*new_pkt;
1165
1166	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1167
1168	instance = ADDR2MEGA(ap);
1169
1170	/* step #1 : pkt allocation */
1171	if (pkt == NULL) {
1172		pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1173		    tgtlen, sizeof (struct scsa_cmd), callback, arg);
1174		if (pkt == NULL) {
1175			return (NULL);
1176		}
1177
1178		acmd = PKT2CMD(pkt);
1179
1180		/*
1181		 * Initialize the new pkt - we redundantly initialize
1182		 * all the fields for illustrative purposes.
1183		 */
1184		acmd->cmd_pkt		= pkt;
1185		acmd->cmd_flags		= 0;
1186		acmd->cmd_scblen	= statuslen;
1187		acmd->cmd_cdblen	= cmdlen;
1188		acmd->cmd_dmahandle	= NULL;
1189		acmd->cmd_ncookies	= 0;
1190		acmd->cmd_cookie	= 0;
1191		acmd->cmd_cookiecnt	= 0;
1192		acmd->cmd_nwin		= 0;
1193
1194		pkt->pkt_address	= *ap;
1195		pkt->pkt_comp		= (void (*)())NULL;
1196		pkt->pkt_flags		= 0;
1197		pkt->pkt_time		= 0;
1198		pkt->pkt_resid		= 0;
1199		pkt->pkt_state		= 0;
1200		pkt->pkt_statistics	= 0;
1201		pkt->pkt_reason		= 0;
1202		new_pkt			= pkt;
1203	} else {
1204		acmd = PKT2CMD(pkt);
1205		new_pkt = NULL;
1206	}
1207
1208	/* step #2 : dma allocation/move */
1209	if (bp && bp->b_bcount != 0) {
1210		if (acmd->cmd_dmahandle == NULL) {
1211			if (megasas_dma_alloc(instance, pkt, bp, flags,
1212			    callback) == -1) {
1213				if (new_pkt) {
1214					scsi_hba_pkt_free(ap, new_pkt);
1215				}
1216
1217				return ((struct scsi_pkt *)NULL);
1218			}
1219		} else {
1220			if (megasas_dma_move(instance, pkt, bp) == -1) {
1221				return ((struct scsi_pkt *)NULL);
1222			}
1223		}
1224	}
1225
1226	return (pkt);
1227}
1228
1229/*
1230 * tran_start - transport a SCSI command to the addressed target
1231 * @ap:
1232 * @pkt:
1233 *
1234 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1235 * SCSI command to the addressed target. The SCSI command is described
1236 * entirely within the scsi_pkt structure, which the target driver allocated
1237 * through the HBA driver's tran_init_pkt() entry point. If the command
1238 * involves a data transfer, DMA resources must also have been allocated for
1239 * the scsi_pkt structure.
1240 *
1241 * Return Values :
1242 *	TRAN_BUSY - request queue is full, no more free scbs
1243 *	TRAN_ACCEPT - pkt has been submitted to the instance
1244 */
1245static int
1246megasas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
1247{
1248	uchar_t 	cmd_done = 0;
1249
1250	struct megasas_instance	*instance = ADDR2MEGA(ap);
1251	struct megasas_cmd	*cmd;
1252
1253	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d:SCSI CDB[0]=0x%x",
1254	    __func__, __LINE__, pkt->pkt_cdbp[0]));
1255
1256	pkt->pkt_reason	= CMD_CMPLT;
1257	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1258
1259	cmd = build_cmd(instance, ap, pkt, &cmd_done);
1260
1261	/*
1262	 * Check whether the command was already completed by the build_cmd()
1263	 * routine.  In that case cmd_done is set, nothing was issued to the
1264	 * firmware, and the packet is completed right here.
1265	 */
1266	if (cmd_done) {
1267		if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1268			scsi_hba_pkt_comp(pkt);
1269		}
1270		pkt->pkt_reason = CMD_CMPLT;
1271		pkt->pkt_scbp[0] = STATUS_GOOD;
1272		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1273		    | STATE_SENT_CMD;
1274		return (TRAN_ACCEPT);
1275	}
1276
1277	if (cmd == NULL) {
1278		return (TRAN_BUSY);
1279	}
1280
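	/*
	 * Interrupt-driven requests are handed to the firmware here and
	 * completed later from the ISR/soft ISR; FLAG_NOINTR requests are
	 * issued in polled mode and completed inline below.
	 */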
1281	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1282		if (instance->fw_outstanding > instance->max_fw_cmds) {
1283			con_log(CL_ANN, (CE_CONT, "megasas:Firmware busy"));
1284			return_mfi_pkt(instance, cmd);
1285			return (TRAN_BUSY);
1286		}
1287
1288		/* Synchronize the cmd frame for the controller */
1289		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1290		    DDI_DMA_SYNC_FORDEV);
1291
1292		instance->func_ptr->issue_cmd(cmd, instance);
1293
1294	} else {
1295		struct megasas_header *hdr = &cmd->frame->hdr;
1296
1297		cmd->sync_cmd = MEGASAS_TRUE;
1298
1299		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1300
1301		pkt->pkt_reason		= CMD_CMPLT;
1302		pkt->pkt_statistics	= 0;
1303		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1304
1305		switch (hdr->cmd_status) {
1306		case MFI_STAT_OK:
1307			pkt->pkt_scbp[0] = STATUS_GOOD;
1308			break;
1309
1310		case MFI_STAT_SCSI_DONE_WITH_ERROR:
1311
1312			pkt->pkt_reason	= CMD_CMPLT;
1313			pkt->pkt_statistics = 0;
1314
1315			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
1316			break;
1317
1318		case MFI_STAT_DEVICE_NOT_FOUND:
1319			pkt->pkt_reason		= CMD_DEV_GONE;
1320			pkt->pkt_statistics	= STAT_DISCON;
1321			break;
1322
1323		default:
1324			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
1325		}
1326
1327		return_mfi_pkt(instance, cmd);
1328		(void) megasas_common_check(instance, cmd);
1329
1330		scsi_hba_pkt_comp(pkt);
1331
1332	}
1333
1334	return (TRAN_ACCEPT);
1335}
1336
1337/*
1338 * tran_abort - Abort any commands that are currently in transport
1339 * @ap:
1340 * @pkt:
1341 *
1342 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1343 * commands that are currently in transport for a particular target. This entry
1344 * point is called when a target driver calls scsi_abort(). The tran_abort()
1345 * entry point should attempt to abort the command denoted by the pkt
1346 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1347 * abort all outstanding commands in the transport layer for the particular
1348 * target or logical unit.
1349 */
1350/*ARGSUSED*/
1351static int
1352megasas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1353{
1354	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1355
1356	/* aborting command not supported by H/W */
1357
1358	return (DDI_FAILURE);
1359}
1360
1361/*
1362 * tran_reset - reset either the SCSI bus or target
1363 * @ap:
1364 * @level:
1365 *
1366 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
1367 * the SCSI bus or a particular SCSI target device. This entry point is called
1368 * when a target driver calls scsi_reset(). The tran_reset() entry point must
1369 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
1370 * particular target or logical unit must be reset.
1371 */
1372/*ARGSUSED*/
1373static int
1374megasas_tran_reset(struct scsi_address *ap, int level)
1375{
1376	struct megasas_instance *instance = ADDR2MEGA(ap);
1377
1378	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1379
1380	if (wait_for_outstanding(instance)) {
1381		return (DDI_FAILURE);
1382	} else {
1383		return (DDI_SUCCESS);
1384	}
1385}
1386
1387/*
1388 * tran_bus_reset - reset the SCSI bus
1389 * @dip:
1390 * @level:
1391 *
1392 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
1393 * initialized during the HBA driver's attach(). The vector should point to
1394 * an HBA entry point that is to be called when a user initiates a bus reset.
1395 * Implementation is hardware specific. If the HBA driver cannot reset the
1396 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
1397 * or not initialize this vector.
1398 */
1399/*ARGSUSED*/
1400static int
1401megasas_tran_bus_reset(dev_info_t *dip, int level)
1402{
1403	int	instance_no = ddi_get_instance(dip);
1404
1405	struct megasas_instance	*instance = ddi_get_soft_state(megasas_state,
1406	    instance_no);
1407
1408	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1409
1410	if (wait_for_outstanding(instance)) {
1411		return (DDI_FAILURE);
1412	} else {
1413		return (DDI_SUCCESS);
1414	}
1415}
1416
1417/*
1418 * tran_getcap - get one of a set of SCSA-defined capabilities
1419 * @ap:
1420 * @cap:
1421 * @whom:
1422 *
1423 * The target driver can request the current setting of the capability for a
1424 * particular target by setting the whom parameter to nonzero. A whom value of
1425 * zero indicates a request for the current setting of the general capability
1426 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
1427 * for undefined capabilities or the current value of the requested capability.
1428 */
1429/*ARGSUSED*/
1430static int
1431megasas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
1432{
1433	int	rval = 0;
1434
1435	struct megasas_instance	*instance = ADDR2MEGA(ap);
1436
1437	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1438
1439	/* we do allow inquiring about capabilities for other targets */
1440	if (cap == NULL) {
1441		return (-1);
1442	}
1443
1444	switch (scsi_hba_lookup_capstr(cap)) {
1445		case SCSI_CAP_DMA_MAX:
1446			/* Limit to 16MB max transfer */
1447			rval = megasas_max_cap_maxxfer;
1448			break;
1449		case SCSI_CAP_MSG_OUT:
1450			rval = 1;
1451			break;
1452		case SCSI_CAP_DISCONNECT:
1453			rval = 0;
1454			break;
1455		case SCSI_CAP_SYNCHRONOUS:
1456			rval = 0;
1457			break;
1458		case SCSI_CAP_WIDE_XFER:
1459			rval = 1;
1460			break;
1461		case SCSI_CAP_TAGGED_QING:
1462			rval = 1;
1463			break;
1464		case SCSI_CAP_UNTAGGED_QING:
1465			rval = 1;
1466			break;
1467		case SCSI_CAP_PARITY:
1468			rval = 1;
1469			break;
1470		case SCSI_CAP_INITIATOR_ID:
1471			rval = instance->init_id;
1472			break;
1473		case SCSI_CAP_ARQ:
1474			rval = 1;
1475			break;
1476		case SCSI_CAP_LINKED_CMDS:
1477			rval = 0;
1478			break;
1479		case SCSI_CAP_RESET_NOTIFICATION:
1480			rval = 1;
1481			break;
1482		case SCSI_CAP_GEOMETRY:
1483			rval = -1;
1484
1485			break;
1486		default:
1487			con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
1488			    scsi_hba_lookup_capstr(cap)));
1489			rval = -1;
1490			break;
1491	}
1492
1493	return (rval);
1494}
1495
1496/*
1497 * tran_setcap - set one of a set of SCSA-defined capabilities
1498 * @ap:
1499 * @cap:
1500 * @value:
1501 * @whom:
1502 *
1503 * The target driver might request that the new value be set for a particular
1504 * target by setting the whom parameter to nonzero. A whom value of zero
1505 * means that request is to set the new value for the SCSI bus or for adapter
1506 * hardware in general.
1507 * The tran_setcap() should return the following values as appropriate:
1508 * - -1 for undefined capabilities
1509 * - 0 if the HBA driver cannot set the capability to the requested value
1510 * - 1 if the HBA driver is able to set the capability to the requested value
1511 */
1512/*ARGSUSED*/
1513static int
1514megasas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
1515{
1516	int		rval = 1;
1517
1518	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1519
1520	/* Setting bus-wide capabilities (whom == 0) is not allowed */
1521	if (cap == NULL || whom == 0) {
1522		return (-1);
1523	}
1524
1525	switch (scsi_hba_lookup_capstr(cap)) {
1526		case SCSI_CAP_DMA_MAX:
1527		case SCSI_CAP_MSG_OUT:
1528		case SCSI_CAP_PARITY:
1529		case SCSI_CAP_LINKED_CMDS:
1530		case SCSI_CAP_RESET_NOTIFICATION:
1531		case SCSI_CAP_DISCONNECT:
1532		case SCSI_CAP_SYNCHRONOUS:
1533		case SCSI_CAP_UNTAGGED_QING:
1534		case SCSI_CAP_WIDE_XFER:
1535		case SCSI_CAP_INITIATOR_ID:
1536		case SCSI_CAP_ARQ:
1537			/*
1538			 * None of these are settable via
1539			 * the capability interface.
1540			 */
1541			break;
1542		case SCSI_CAP_TAGGED_QING:
1543			rval = 1;
1544			break;
1545		case SCSI_CAP_SECTOR_SIZE:
1546			rval = 1;
1547			break;
1548
1549		case SCSI_CAP_TOTAL_SECTORS:
1550			rval = 1;
1551			break;
1552		default:
1553			rval = -1;
1554			break;
1555	}
1556
1557	return (rval);
1558}
1559
1560/*
1561 * tran_destroy_pkt - deallocate scsi_pkt structure
1562 * @ap:
1563 * @pkt:
1564 *
1565 * The tran_destroy_pkt() entry point is the HBA driver function that
1566 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
1567 * called when the target driver calls scsi_destroy_pkt(). The
1568 * tran_destroy_pkt() entry point must free any DMA resources that have been
1569 * allocated for the packet. An implicit DMA synchronization occurs if the
1570 * DMA resources are freed and any cached data remains after the completion
1571 * of the transfer.
1572 */
1573static void
1574megasas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1575{
1576	struct scsa_cmd *acmd = PKT2CMD(pkt);
1577
1578	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1579
1580	if (acmd->cmd_flags & CFLAG_DMAVALID) {
1581		acmd->cmd_flags &= ~CFLAG_DMAVALID;
1582
1583		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
1584
1585		ddi_dma_free_handle(&acmd->cmd_dmahandle);
1586
1587		acmd->cmd_dmahandle = NULL;
1588	}
1589
1590	/* free the pkt */
1591	scsi_hba_pkt_free(ap, pkt);
1592}
1593
1594/*
1595 * tran_dmafree - deallocates DMA resources
1596 * @ap:
1597 * @pkt:
1598 *
1599 * The tran_dmafree() entry point deallocates DMA resources that have been
1600 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
1601 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
1602 * free only DMA resources allocated for a scsi_pkt structure, not the
1603 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
1604 * implicitly performed.
1605 */
1606/*ARGSUSED*/
1607static void
1608megasas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1609{
1610	register struct scsa_cmd *acmd = PKT2CMD(pkt);
1611
1612	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1613
1614	if (acmd->cmd_flags & CFLAG_DMAVALID) {
1615		acmd->cmd_flags &= ~CFLAG_DMAVALID;
1616
1617		(void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
1618
1619		ddi_dma_free_handle(&acmd->cmd_dmahandle);
1620
1621		acmd->cmd_dmahandle = NULL;
1622	}
1623}
1624
1625/*
1626 * tran_sync_pkt - synchronize the DMA object allocated
1627 * @ap:
1628 * @pkt:
1629 *
1630 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
1631 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
1632 * entry point is called when the target driver calls scsi_sync_pkt(). If the
1633 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
1634 * must synchronize the CPU's view of the data. If the data transfer direction
1635 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
1636 * device's view of the data.
1637 */
1638/*ARGSUSED*/
1639static void
1640megasas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1641{
1642	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1643
1644	/*
1645	 * The ddi_dma_sync() call below is disabled (#if 0) because the ISR
1646	 * already syncs the DMA buffer for each completed I/O.
1647	 */
1648#if 0
1649	int	i;
1650
1651	register struct scsa_cmd	*acmd = PKT2CMD(pkt);
1652
1653	if (acmd->cmd_flags & CFLAG_DMAVALID) {
1654		(void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
1655		    acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
1656		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
1657	}
1658#endif
1659}
1660
1661/*ARGSUSED*/
1662static int
1663megasas_tran_quiesce(dev_info_t *dip)
1664{
1665	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1666
1667	return (1);
1668}
1669
1670/*ARGSUSED*/
1671static int
1672megasas_tran_unquiesce(dev_info_t *dip)
1673{
1674	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1675
1676	return (1);
1677}
1678
1679/*
1680 * megasas_isr(caddr_t)
1681 *
1682 * The Interrupt Service Routine
1683 *
1684 * Collect status for all completed commands and do callback
1685 *
1686 */
1687static uint_t
1688megasas_isr(struct megasas_instance *instance)
1689{
1690	int		need_softintr;
1691	uint32_t	producer;
1692	uint32_t	consumer;
1693	uint32_t	context;
1694
1695	struct megasas_cmd	*cmd;
1696
1697	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1698
1699	ASSERT(instance);
1700	if (!instance->func_ptr->intr_ack(instance)) {
1701		return (DDI_INTR_UNCLAIMED);
1702	}
1703
1704	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
1705	    0, 0, DDI_DMA_SYNC_FORCPU);
1706
1707	if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
1708	    != DDI_SUCCESS) {
1709		megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
1710		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
1711		return (DDI_INTR_UNCLAIMED);
1712	}
1713
1714	producer = *instance->producer;
1715	consumer = *instance->consumer;
1716
1717	con_log(CL_ANN1, (CE_CONT, " producer %x consumer %x ",
1718	    producer, consumer));
1719
1720	mutex_enter(&instance->completed_pool_mtx);
1721
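	/*
	 * The reply queue is a ring of (max_fw_cmds + 1) 32-bit entries
	 * shared with the firmware.  Each entry is the context (an index
	 * into cmd_list) of a completed command; walk from our consumer
	 * index up to the firmware's producer index, wrapping at the end
	 * of the ring, and queue each command for completion processing.
	 */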
1722	while (consumer != producer) {
1723		context = instance->reply_queue[consumer];
1724		cmd = instance->cmd_list[context];
1725		mlist_add_tail(&cmd->list, &instance->completed_pool_list);
1726
1727		consumer++;
1728		if (consumer == (instance->max_fw_cmds + 1)) {
1729			consumer = 0;
1730		}
1731	}
1732
1733	mutex_exit(&instance->completed_pool_mtx);
1734
1735	*instance->consumer = consumer;
1736	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
1737	    0, 0, DDI_DMA_SYNC_FORDEV);
1738
1739	if (instance->softint_running) {
1740		need_softintr = 0;
1741	} else {
1742		need_softintr = 1;
1743	}
1744
1745	if (instance->isr_level == HIGH_LEVEL_INTR) {
1746		if (need_softintr) {
1747			ddi_trigger_softintr(instance->soft_intr_id);
1748		}
1749	} else {
1750		/*
1751		 * Not a high-level interrupt, therefore call the soft level
1752		 * interrupt explicitly
1753		 */
1754		(void) megasas_softintr(instance);
1755	}
1756
1757	return (DDI_INTR_CLAIMED);
1758}
1759
1760
1761/*
1762 * ************************************************************************** *
1763 *                                                                            *
1764 *                                  libraries                                 *
1765 *                                                                            *
1766 * ************************************************************************** *
1767 */
1768/*
1769 * get_mfi_pkt : Get a command from the free pool
1770 */
1771static struct megasas_cmd *
1772get_mfi_pkt(struct megasas_instance *instance)
1773{
1774	mlist_t 		*head = &instance->cmd_pool_list;
1775	struct megasas_cmd	*cmd = NULL;
1776
1777	mutex_enter(&instance->cmd_pool_mtx);
1778	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
1779
1780	if (!mlist_empty(head)) {
1781		cmd = mlist_entry(head->next, struct megasas_cmd, list);
1782		mlist_del_init(head->next);
1783	}
1784	if (cmd != NULL)
1785		cmd->pkt = NULL;
1786	mutex_exit(&instance->cmd_pool_mtx);
1787
1788	return (cmd);
1789}
1790
1791/*
1792 * return_mfi_pkt : Return a cmd to free command pool
1793 */
1794static void
1795return_mfi_pkt(struct megasas_instance *instance, struct megasas_cmd *cmd)
1796{
1797	mutex_enter(&instance->cmd_pool_mtx);
1798	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
1799
1800	mlist_add(&cmd->list, &instance->cmd_pool_list);
1801
1802	mutex_exit(&instance->cmd_pool_mtx);
1803}
1804
1805/*
1806 * destroy_mfi_frame_pool
1807 */
1808static void
1809destroy_mfi_frame_pool(struct megasas_instance *instance)
1810{
1811	int		i;
1812	uint32_t	max_cmd = instance->max_fw_cmds;
1813
1814	struct megasas_cmd	*cmd;
1815
1816	/* return all frames to pool */
1817	for (i = 0; i < max_cmd; i++) {
1818
1819		cmd = instance->cmd_list[i];
1820
1821		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
1822			(void) mega_free_dma_obj(instance, cmd->frame_dma_obj);
1823
1824		cmd->frame_dma_obj_status  = DMA_OBJ_FREED;
1825	}
1826
1827}
1828
1829/*
1830 * create_mfi_frame_pool
1831 */
1832static int
1833create_mfi_frame_pool(struct megasas_instance *instance)
1834{
1835	int		i = 0;
1836	int		cookie_cnt;
1837	uint16_t	max_cmd;
1838	uint16_t	sge_sz;
1839	uint32_t	sgl_sz;
1840	uint32_t	tot_frame_size;
1841
1842	struct megasas_cmd	*cmd;
1843
1844	max_cmd = instance->max_fw_cmds;
1845
1846	sge_sz	= sizeof (struct megasas_sge64);
1847
1848	/* calculate the SGL area size and the total per-command frame size */
1849	sgl_sz		= sge_sz * instance->max_num_sge;
1850	tot_frame_size	= sgl_sz + MEGAMFI_FRAME_SIZE + SENSE_LENGTH;
1851
1852	con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
1853	    "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
1854
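	/*
	 * Each command gets one physically contiguous, 64-byte aligned DMA
	 * buffer (dma_attr_sgllen is forced to 1 below), holding the MFI
	 * frame and its scatter-gather list at the front and a SENSE_LENGTH
	 * sense buffer at the very end of the allocation.
	 */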
1855	while (i < max_cmd) {
1856		cmd = instance->cmd_list[i];
1857
1858		cmd->frame_dma_obj.size	= tot_frame_size;
1859		cmd->frame_dma_obj.dma_attr = megasas_generic_dma_attr;
1860		cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1861		cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1862		cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
1863		cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
1864
1865
1866		cookie_cnt = mega_alloc_dma_obj(instance, &cmd->frame_dma_obj);
1867
1868		if (cookie_cnt == -1 || cookie_cnt > 1) {
1869			con_log(CL_ANN, (CE_WARN,
1870			    "create_mfi_frame_pool: could not alloc."));
1871			return (DDI_FAILURE);
1872		}
1873
1874		bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
1875
1876		cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
1877		cmd->frame = (union megasas_frame *)cmd->frame_dma_obj.buffer;
1878		cmd->frame_phys_addr =
1879		    cmd->frame_dma_obj.dma_cookie[0].dmac_address;
1880
1881		cmd->sense = (uint8_t *)(((unsigned long)
1882		    cmd->frame_dma_obj.buffer) +
1883		    tot_frame_size - SENSE_LENGTH);
1884		cmd->sense_phys_addr =
1885		    cmd->frame_dma_obj.dma_cookie[0].dmac_address +
1886		    tot_frame_size - SENSE_LENGTH;
1887
1888		if (!cmd->frame || !cmd->sense) {
1889			con_log(CL_ANN, (CE_NOTE,
1890			    "megasas: pci_pool_alloc failed \n"));
1891
1892			return (-ENOMEM);
1893		}
1894
1895		cmd->frame->io.context = cmd->index;
1896		i++;
1897
1898		con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
1899		    cmd->frame->io.context, cmd->frame_phys_addr));
1900	}
1901
1902	return (DDI_SUCCESS);
1903}
1904
1905/*
1906 * free_additional_dma_buffer
1907 */
1908static void
1909free_additional_dma_buffer(struct megasas_instance *instance)
1910{
1911	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
1912		(void) mega_free_dma_obj(instance,
1913		    instance->mfi_internal_dma_obj);
1914		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
1915	}
1916
1917	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
1918		(void) mega_free_dma_obj(instance,
1919		    instance->mfi_evt_detail_obj);
1920		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
1921	}
1922}
1923
1924/*
1925 * alloc_additional_dma_buffer
1926 */
1927static int
1928alloc_additional_dma_buffer(struct megasas_instance *instance)
1929{
1930	uint32_t	reply_q_sz;
1931	uint32_t	internal_buf_size = PAGESIZE*2;
1932
1933	/* (max cmds + 1) reply entries, plus producer & consumer indexes */
1934	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
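	/*
	 * Layout of the internal DMA buffer set up below: the producer
	 * index at offset 0, the consumer index at offset 4, the reply
	 * queue starting at offset 8, and the remaining space used as the
	 * driver's internal data buffer.
	 */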
1935
1936	instance->mfi_internal_dma_obj.size = internal_buf_size;
1937	instance->mfi_internal_dma_obj.dma_attr	= megasas_generic_dma_attr;
1938	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1939	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
1940	    0xFFFFFFFFU;
1941	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen	= 1;
1942
1943	if (mega_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj)
1944	    != 1) {
1945		con_log(CL_ANN, (CE_WARN, "megaraid: could not alloc reply Q"));
1946		return (DDI_FAILURE);
1947	}
1948
1949	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
1950
1951	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
1952
1953	instance->producer = (uint32_t *)((unsigned long)
1954	    instance->mfi_internal_dma_obj.buffer);
1955	instance->consumer = (uint32_t *)((unsigned long)
1956	    instance->mfi_internal_dma_obj.buffer + 4);
1957	instance->reply_queue = (uint32_t *)((unsigned long)
1958	    instance->mfi_internal_dma_obj.buffer + 8);
1959	instance->internal_buf = (caddr_t)(((unsigned long)
1960	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
1961	instance->internal_buf_dmac_add =
1962	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
1963	    reply_q_sz;
1964	instance->internal_buf_size = internal_buf_size -
1965	    (reply_q_sz + 8);
1966
1967	/* allocate evt_detail */
1968	instance->mfi_evt_detail_obj.size = sizeof (struct megasas_evt_detail);
1969	instance->mfi_evt_detail_obj.dma_attr = megasas_generic_dma_attr;
1970	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1971	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1972	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
1973	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
1974
1975	if (mega_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj) != 1) {
1976		con_log(CL_ANN, (CE_WARN, "alloc_additional_dma_buffer: "
1977		    "could not data transfer buffer alloc."));
1978		return (DDI_FAILURE);
1979	}
1980
1981	bzero(instance->mfi_evt_detail_obj.buffer,
1982	    sizeof (struct megasas_evt_detail));
1983
1984	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
1985
1986	return (DDI_SUCCESS);
1987}
1988
1989/*
1990 * free_space_for_mfi
1991 */
1992static void
1993free_space_for_mfi(struct megasas_instance *instance)
1994{
1995	int		i;
1996	uint32_t	max_cmd = instance->max_fw_cmds;
1997
1998	/* already freed */
1999	if (instance->cmd_list == NULL) {
2000		return;
2001	}
2002
2003	free_additional_dma_buffer(instance);
2004
2005	/* first free the MFI frame pool */
2006	destroy_mfi_frame_pool(instance);
2007
2008	/* free all the commands in the cmd_list */
2009	for (i = 0; i < instance->max_fw_cmds; i++) {
2010		kmem_free(instance->cmd_list[i],
2011		    sizeof (struct megasas_cmd));
2012
2013		instance->cmd_list[i] = NULL;
2014	}
2015
2016	/* free the cmd_list buffer itself */
2017	kmem_free(instance->cmd_list,
2018	    sizeof (struct megasas_cmd *) * max_cmd);
2019
2020	instance->cmd_list = NULL;
2021
2022	INIT_LIST_HEAD(&instance->cmd_pool_list);
2023}
2024
2025/*
2026 * alloc_space_for_mfi
2027 */
2028static int
2029alloc_space_for_mfi(struct megasas_instance *instance)
2030{
2031	int		i;
2032	uint32_t	max_cmd;
2033	size_t		sz;
2034
2035	struct megasas_cmd	*cmd;
2036
2037	max_cmd = instance->max_fw_cmds;
2038	sz = sizeof (struct megasas_cmd *) * max_cmd;
2039
2040	/*
2041	 * instance->cmd_list is an array of struct megasas_cmd pointers.
2042	 * Allocate the dynamic array first and then allocate individual
2043	 * commands.
2044	 */
2045	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
2046	ASSERT(instance->cmd_list);
2047
2048	for (i = 0; i < max_cmd; i++) {
2049		instance->cmd_list[i] = kmem_zalloc(sizeof (struct megasas_cmd),
2050		    KM_SLEEP);
2051		ASSERT(instance->cmd_list[i]);
2052	}
2053
2054	INIT_LIST_HEAD(&instance->cmd_pool_list);
2055
2056	/* add all the commands to the command pool (instance->cmd_pool_list) */
2057	for (i = 0; i < max_cmd; i++) {
2058		cmd		= instance->cmd_list[i];
2059		cmd->index	= i;
2060
2061		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2062	}
2063
2064	/* create a frame pool and assign one frame to each cmd */
2065	if (create_mfi_frame_pool(instance)) {
2066		con_log(CL_ANN, (CE_NOTE, "error creating frame DMA pool\n"));
2067		return (DDI_FAILURE);
2068	}
2069
2070	/* allocate the reply queue and other additional DMA buffers */
2071	if (alloc_additional_dma_buffer(instance)) {
2072		con_log(CL_ANN, (CE_NOTE, "error allocating DMA buffers\n"));
2073		return (DDI_FAILURE);
2074	}
2075
2076	return (DDI_SUCCESS);
2077}
2078
2079/*
2080 * get_ctrl_info
2081 */
2082static int
2083get_ctrl_info(struct megasas_instance *instance,
2084    struct megasas_ctrl_info *ctrl_info)
2085{
2086	int	ret = 0;
2087
2088	struct megasas_cmd		*cmd;
2089	struct megasas_dcmd_frame	*dcmd;
2090	struct megasas_ctrl_info	*ci;
2091
2092	cmd = get_mfi_pkt(instance);
2093
2094	if (!cmd) {
2095		con_log(CL_ANN, (CE_WARN,
2096		    "Failed to get a cmd for ctrl info\n"));
2097		return (DDI_FAILURE);
2098	}
2099
2100	dcmd = &cmd->frame->dcmd;
2101
2102	ci = (struct megasas_ctrl_info *)instance->internal_buf;
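	/*
	 * The controller info is DMA'd into the preallocated internal
	 * buffer (set up in alloc_additional_dma_buffer), so no per-call
	 * DMA allocation is needed here.
	 */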
2103
2104	if (!ci) {
2105		con_log(CL_ANN, (CE_WARN,
2106		    "Failed to alloc mem for ctrl info\n"));
2107		return_mfi_pkt(instance, cmd);
2108		return (DDI_FAILURE);
2109	}
2110
2111	(void) memset(ci, 0, sizeof (struct megasas_ctrl_info));
2112
2113	/* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
2114	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2115
2116	dcmd->cmd			= MFI_CMD_OP_DCMD;
2117	dcmd->cmd_status		= MFI_CMD_STATUS_POLL_MODE;
2118	dcmd->sge_count			= 1;
2119	dcmd->flags			= MFI_FRAME_DIR_READ;
2120	dcmd->timeout			= 0;
2121	dcmd->data_xfer_len		= sizeof (struct megasas_ctrl_info);
2122	dcmd->opcode			= MR_DCMD_CTRL_GET_INFO;
2123	dcmd->sgl.sge32[0].phys_addr	= instance->internal_buf_dmac_add;
2124	dcmd->sgl.sge32[0].length	= sizeof (struct megasas_ctrl_info);
2125
2126	cmd->frame_count = 1;
2127
2128	if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2129		ret = 0;
2130		(void) memcpy(ctrl_info, ci, sizeof (struct megasas_ctrl_info));
2131	} else {
2132		con_log(CL_ANN, (CE_WARN, "get_ctrl_info: Ctrl info failed\n"));
2133		ret = -1;
2134	}
2135
2136	return_mfi_pkt(instance, cmd);
2137	if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2138		ret = -1;
2139	}
2140
2141	return (ret);
2142}
2143
2144/*
2145 * abort_aen_cmd
2146 */
2147static int
2148abort_aen_cmd(struct megasas_instance *instance,
2149    struct megasas_cmd *cmd_to_abort)
2150{
2151	int	ret = 0;
2152
2153	struct megasas_cmd		*cmd;
2154	struct megasas_abort_frame	*abort_fr;
2155
2156	cmd = get_mfi_pkt(instance);
2157
2158	if (!cmd) {
2159		con_log(CL_ANN, (CE_WARN,
2160		    "Failed to get a cmd for ctrl info\n"));
2161		return (DDI_FAILURE);
2162	}
2163
2164	abort_fr = &cmd->frame->abort;
2165
2166	/* prepare and issue the abort frame */
2167	abort_fr->cmd = MFI_CMD_OP_ABORT;
2168	abort_fr->cmd_status = MFI_CMD_STATUS_SYNC_MODE;
2169	abort_fr->flags = 0;
2170	abort_fr->abort_context = cmd_to_abort->index;
2171	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
2172	abort_fr->abort_mfi_phys_addr_hi = 0;
2173
2174	instance->aen_cmd->abort_aen = 1;
2175
2176	cmd->sync_cmd = MEGASAS_TRUE;
2177	cmd->frame_count = 1;
2178
2179	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
2180		con_log(CL_ANN, (CE_WARN,
2181		    "abort_aen_cmd: issue_cmd_in_sync_mode failed\n"));
2182		ret = -1;
2183	} else {
2184		ret = 0;
2185	}
2186
2187	instance->aen_cmd->abort_aen = 1;
2188	instance->aen_cmd = 0;
2189
2190	return_mfi_pkt(instance, cmd);
2191	(void) megasas_common_check(instance, cmd);
2192
2193	return (ret);
2194}
2195
2196/*
2197 * init_mfi
2198 */
2199static int
2200init_mfi(struct megasas_instance *instance)
2201{
2202	off_t				reglength;
2203	struct megasas_cmd		*cmd;
2204	struct megasas_ctrl_info	ctrl_info;
2205	struct megasas_init_frame	*init_frame;
2206	struct megasas_init_queue_info	*initq_info;
2207
2208	if ((ddi_dev_regsize(instance->dip, REGISTER_SET_IO, &reglength)
2209	    != DDI_SUCCESS) || reglength < MINIMUM_MFI_MEM_SZ) {
2210		return (DDI_FAILURE);
2211	}
2212
2213	if (reglength > DEFAULT_MFI_MEM_SZ) {
2214		reglength = DEFAULT_MFI_MEM_SZ;
2215		con_log(CL_DLEVEL1, (CE_NOTE,
2216		    "mega: register length to map is 0x%lx bytes", reglength));
2217	}
2218
2219	if (ddi_regs_map_setup(instance->dip, REGISTER_SET_IO,
2220	    &instance->regmap, 0, reglength, &endian_attr,
2221	    &instance->regmap_handle) != DDI_SUCCESS) {
2222		con_log(CL_ANN, (CE_NOTE,
2223		    "megaraid: couldn't map control registers"));
2224
2225		goto fail_mfi_reg_setup;
2226	}
2227
2228	/* we expect the FW state to be READY */
2229	if (mfi_state_transition_to_ready(instance)) {
2230		con_log(CL_ANN, (CE_WARN, "megaraid: F/W is not ready"));
2231		goto fail_ready_state;
2232	}
2233
2234	/* get various operational parameters from status register */
2235	instance->max_num_sge =
2236	    (instance->func_ptr->read_fw_status_reg(instance) &
2237	    0xFF0000) >> 0x10;
2238	/*
2239	 * Reduce the max supported cmds by 1. This is to ensure that the
2240	 * reply_q_sz (1 more than the max cmd that driver may send)
2241	 * does not exceed max cmds that the FW can support
2242	 */
2243	instance->max_fw_cmds =
2244	    instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
2245	instance->max_fw_cmds = instance->max_fw_cmds - 1;
2246
2247	instance->max_num_sge =
2248	    (instance->max_num_sge > MEGASAS_MAX_SGE_CNT) ?
2249	    MEGASAS_MAX_SGE_CNT : instance->max_num_sge;
2250
2251	/* create a pool of commands */
2252	if (alloc_space_for_mfi(instance))
2253		goto fail_alloc_fw_space;
2254
2255	/* disable interrupt for initial preparation */
2256	instance->func_ptr->disable_intr(instance);
2257
2258	/*
2259	 * Prepare an init frame. Note that the init frame points to a queue
2260	 * info structure. Each frame has its SGL allocated after the first 64
2261	 * bytes. Since this frame does not need an SGL, the SGL space is used
2262	 * for the queue info structure instead.
2263	 */
2264	cmd = get_mfi_pkt(instance);
2265
2266	init_frame = (struct megasas_init_frame *)cmd->frame;
2267	initq_info = (struct megasas_init_queue_info *)
2268	    ((unsigned long)init_frame + 64);
2269
2270	(void) memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
2271	(void) memset(initq_info, 0, sizeof (struct megasas_init_queue_info));
2272
2273	initq_info->init_flags = 0;
2274
2275	initq_info->reply_queue_entries	= instance->max_fw_cmds + 1;
2276
2277	initq_info->producer_index_phys_addr_hi	= 0;
2278	initq_info->producer_index_phys_addr_lo =
2279	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
2280
2281	initq_info->consumer_index_phys_addr_hi = 0;
2282	initq_info->consumer_index_phys_addr_lo =
2283	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4;
2284
2285	initq_info->reply_queue_start_phys_addr_hi = 0;
2286	initq_info->reply_queue_start_phys_addr_lo =
2287	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8;
2288
2289	init_frame->cmd				= MFI_CMD_OP_INIT;
2290	init_frame->cmd_status			= MFI_CMD_STATUS_POLL_MODE;
2291	init_frame->flags			= 0;
2292	init_frame->queue_info_new_phys_addr_lo	=
2293	    cmd->frame_phys_addr + 64;
2294	init_frame->queue_info_new_phys_addr_hi	= 0;
2295
2296	init_frame->data_xfer_len = sizeof (struct megasas_init_queue_info);
2297
2298	cmd->frame_count = 1;
2299
2300	/* issue the init frame in polled mode */
2301	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2302		con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
2303		goto fail_fw_init;
2304	}
2305
2306	return_mfi_pkt(instance, cmd);
2307	if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2308		goto fail_fw_init;
2309	}
2310
2311	/* gather misc FW related information */
2312	if (!get_ctrl_info(instance, &ctrl_info)) {
2313		instance->max_sectors_per_req = ctrl_info.max_request_size;
2314		con_log(CL_ANN1, (CE_NOTE, "product name %s ld present %d",
2315		    ctrl_info.product_name, ctrl_info.ld_present_count));
2316	} else {
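		/*
		 * Controller info was not available; fall back to a transfer
		 * limit derived from the SGL capacity (max_num_sge pages),
		 * expressed in 512-byte sectors.
		 */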
2317		instance->max_sectors_per_req = instance->max_num_sge *
2318		    PAGESIZE / 512;
2319	}
2320
2321	if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2322		goto fail_fw_init;
2323	}
2324
2325	return (0);
2326
2327fail_fw_init:
2328fail_alloc_fw_space:
2329
2330	free_space_for_mfi(instance);
2331
2332fail_ready_state:
2333	ddi_regs_map_free(&instance->regmap_handle);
2334
2335fail_mfi_reg_setup:
2336	return (DDI_FAILURE);
2337}
2338
2339/*
2340 * mfi_state_transition_to_ready	: Move the FW to READY state
2341 *
2342 * @reg_set			: MFI register set
2343 */
2344static int
2345mfi_state_transition_to_ready(struct megasas_instance *instance)
2346{
2347	int		i;
2348	uint8_t		max_wait;
2349	uint32_t	fw_ctrl;
2350	uint32_t	fw_state;
2351	uint32_t	cur_state;
2352
2353	fw_state =
2354	    instance->func_ptr->read_fw_status_reg(instance) & MFI_STATE_MASK;
2355	con_log(CL_ANN1, (CE_NOTE,
2356	    "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
2357
2358	while (fw_state != MFI_STATE_READY) {
2359		con_log(CL_ANN, (CE_NOTE,
2360		    "mfi_state_transition_to_ready:FW state%x", fw_state));
2361
2362		switch (fw_state) {
2363		case MFI_STATE_FAULT:
2364			con_log(CL_ANN, (CE_NOTE,
2365			    "megasas: FW in FAULT state!!"));
2366
2367			return (-ENODEV);
2368		case MFI_STATE_WAIT_HANDSHAKE:
2369			/* set the CLR bit in IMR0 */
2370			con_log(CL_ANN, (CE_NOTE,
2371			    "megasas: FW waiting for HANDSHAKE"));
2372			/*
2373			 * PCI_Hot Plug: MFI F/W requires
2374			 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
2375			 * to be set
2376			 */
2377			/* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
2378			WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
2379			    MFI_INIT_HOTPLUG, instance);
2380
2381			max_wait	= 2;
2382			cur_state	= MFI_STATE_WAIT_HANDSHAKE;
2383			break;
2384		case MFI_STATE_BOOT_MESSAGE_PENDING:
2385			/* set the CLR bit in IMR0 */
2386			con_log(CL_ANN, (CE_NOTE,
2387			    "megasas: FW state boot message pending"));
2388			/*
2389			 * PCI_Hot Plug: MFI F/W requires
2390			 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
2391			 * to be set
2392			 */
2393			WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
2394
2395			max_wait	= 10;
2396			cur_state	= MFI_STATE_BOOT_MESSAGE_PENDING;
2397			break;
2398		case MFI_STATE_OPERATIONAL:
2399			/* bring it to READY state; assuming max wait 10 secs */
2400			instance->func_ptr->disable_intr(instance);
2401			con_log(CL_ANN1, (CE_NOTE,
2402			    "megasas: FW in OPERATIONAL state"));
2403			/*
2404			 * PCI_Hot Plug: MFI F/W requires
2405			 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
2406			 * to be set
2407			 */
2408			/* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
2409			WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
2410
2411			max_wait	= 10;
2412			cur_state	= MFI_STATE_OPERATIONAL;
2413			break;
2414		case MFI_STATE_UNDEFINED:
2415			/* this state should not last for more than 2 seconds */
2416			con_log(CL_ANN, (CE_NOTE, "FW state undefined\n"));
2417
2418			max_wait	= 2;
2419			cur_state	= MFI_STATE_UNDEFINED;
2420			break;
2421		case MFI_STATE_BB_INIT:
2422			max_wait	= 2;
2423			cur_state	= MFI_STATE_BB_INIT;
2424			break;
2425		case MFI_STATE_FW_INIT:
2426			max_wait	= 2;
2427			cur_state	= MFI_STATE_FW_INIT;
2428			break;
2429		case MFI_STATE_DEVICE_SCAN:
2430			max_wait	= 10;
2431			cur_state	= MFI_STATE_DEVICE_SCAN;
2432			break;
2433		default:
2434			con_log(CL_ANN, (CE_NOTE,
2435			    "megasas: Unknown state 0x%x\n", fw_state));
2436			return (-ENODEV);
2437		}
2438
2439		/* the cur_state should not last for more than max_wait secs */
2440		for (i = 0; i < (max_wait * MILLISEC); i++) {
2441			/* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
2442			fw_state =
2443			    instance->func_ptr->read_fw_status_reg(instance) &
2444			    MFI_STATE_MASK;
2445
2446			if (fw_state == cur_state) {
2447				delay(1 * drv_usectohz(MILLISEC));
2448			} else {
2449				break;
2450			}
2451		}
2452
2453		/* return error if fw_state hasn't changed after max_wait */
2454		if (fw_state == cur_state) {
2455			con_log(CL_ANN, (CE_NOTE,
2456			    "FW state hasn't changed in %d secs\n", max_wait));
2457			return (-ENODEV);
2458		}
2459	}
2460
2461	fw_ctrl = RD_IB_DOORBELL(instance);
2462
2463	con_log(CL_ANN1, (CE_NOTE,
2464	    "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
2465
2466	/*
2467	 * Write 0xF to the doorbell register to do the following.
2468	 * - Abort all outstanding commands (bit 0).
2469	 * - Transition from OPERATIONAL to READY state (bit 1).
2470	 * - Discard (possible) low MFA posted in 64-bit mode (bit 2).
2471	 * - Set to release the FW to continue running, i.e. the BIOS handshake
2472	 *   (bit 3).
2473	 */
2474	WR_IB_DOORBELL(0xF, instance);
2475
2476	if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2477		return (-ENODEV);
2478	}
2479	return (0);
2480}
2481
2482/*
2483 * get_seq_num
2484 */
2485static int
2486get_seq_num(struct megasas_instance *instance,
2487    struct megasas_evt_log_info *eli)
2488{
2489	int	ret = 0;
2490
2491	dma_obj_t			dcmd_dma_obj;
2492	struct megasas_cmd		*cmd;
2493	struct megasas_dcmd_frame	*dcmd;
2494
2495	cmd = get_mfi_pkt(instance);
2496
2497	if (!cmd) {
2498		cmn_err(CE_WARN, "megasas: failed to get a cmd\n");
2499		return (-ENOMEM);
2500	}
2501
2502	dcmd	= &cmd->frame->dcmd;
2503
2504	/* allocate the data transfer buffer */
2505	dcmd_dma_obj.size = sizeof (struct megasas_evt_log_info);
2506	dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
2507	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2508	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2509	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
2510	dcmd_dma_obj.dma_attr.dma_attr_align = 1;
2511
2512	if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
2513		con_log(CL_ANN, (CE_WARN,
2514		    "get_seq_num: could not alloc data transfer buffer."));
		return_mfi_pkt(instance, cmd);
2515		return (DDI_FAILURE);
2516	}
2517
2518	(void) memset(dcmd_dma_obj.buffer, 0,
2519	    sizeof (struct megasas_evt_log_info));
2520
2521	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2522
2523	dcmd->cmd = MFI_CMD_OP_DCMD;
2524	dcmd->cmd_status = 0;
2525	dcmd->sge_count	= 1;
2526	dcmd->flags = MFI_FRAME_DIR_READ;
2527	dcmd->timeout = 0;
2528	dcmd->data_xfer_len = sizeof (struct megasas_evt_log_info);
2529	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
2530	dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_log_info);
2531	dcmd->sgl.sge32[0].phys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
2532
2533	cmd->sync_cmd = MEGASAS_TRUE;
2534	cmd->frame_count = 1;
2535
2536	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
2537		cmn_err(CE_WARN, "get_seq_num: "
2538		    "failed to issue MR_DCMD_CTRL_EVENT_GET_INFO\n");
2539		ret = -1;
2540	} else {
2541		/* copy the data back into the caller's buffer */
2542		bcopy(dcmd_dma_obj.buffer, eli,
2543		    sizeof (struct megasas_evt_log_info));
2544		ret = 0;
2545	}
2546
2547	if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
2548		ret = -1;
2549
2550	return_mfi_pkt(instance, cmd);
2551	if (megasas_common_check(instance, cmd) != DDI_SUCCESS) {
2552		ret = -1;
2553	}
2554	return (ret);
2555}
2556
2557/*
2558 * start_mfi_aen
2559 */
2560static int
2561start_mfi_aen(struct megasas_instance *instance)
2562{
2563	int	ret = 0;
2564
2565	struct megasas_evt_log_info	eli;
2566	union megasas_evt_class_locale	class_locale;
2567
2568	/* get the latest sequence number from FW */
2569	(void) memset(&eli, 0, sizeof (struct megasas_evt_log_info));
2570
2571	if (get_seq_num(instance, &eli)) {
2572		cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num\n");
2573		return (-1);
2574	}
2575
2576	/* register AEN with FW for latest sequence number plus 1 */
2577	class_locale.members.reserved	= 0;
2578	class_locale.members.locale	= MR_EVT_LOCALE_ALL;
2579	class_locale.members.class	= MR_EVT_CLASS_CRITICAL;
2580
2581	ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
2582	    class_locale.word);
2583
2584	if (ret) {
2585		cmn_err(CE_WARN, "start_mfi_aen: aen registration failed\n");
2586		return (-1);
2587	}
2588
2589	return (ret);
2590}
2591
2592/*
2593 * flush_cache
2594 */
2595static void
2596flush_cache(struct megasas_instance *instance)
2597{
2598	struct megasas_cmd		*cmd;
2599	struct megasas_dcmd_frame	*dcmd;
2600
2601	if (!(cmd = get_mfi_pkt(instance)))
2602		return;
2603
2604	dcmd = &cmd->frame->dcmd;
2605
2606	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2607
2608	dcmd->cmd		= MFI_CMD_OP_DCMD;
2609	dcmd->cmd_status	= 0x0;
2610	dcmd->sge_count		= 0;
2611	dcmd->flags		= MFI_FRAME_DIR_NONE;
2612	dcmd->timeout		= 0;
2613	dcmd->data_xfer_len	= 0;
2614	dcmd->opcode		= MR_DCMD_CTRL_CACHE_FLUSH;
2615	dcmd->mbox.b[0]		= MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
2616
2617	cmd->frame_count = 1;
2618
2619	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2620		cmn_err(CE_WARN,
2621		    "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH\n");
2622	}
2623	con_log(CL_DLEVEL1, (CE_NOTE, "done"));
2624	return_mfi_pkt(instance, cmd);
2625	(void) megasas_common_check(instance, cmd);
2626}
2627
2628/*
2629 * service_mfi_aen-	Completes an AEN command
2630 * @instance:			Adapter soft state
2631 * @cmd:			Command to be completed
2632 *
2633 */
2634static void
2635service_mfi_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2636{
2637	uint32_t	seq_num;
2638	struct megasas_evt_detail *evt_detail =
2639	    (struct megasas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
2640
2641	cmd->cmd_status = cmd->frame->io.cmd_status;
2642
2643	if (cmd->cmd_status == ENODATA) {
2644		cmd->cmd_status = 0;
2645	}
2646
2647	/*
2648	 * log the MFI AEN event to the sysevent queue so that
2649	 * user applications are notified
2650	 */
2651	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
2652	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
2653		int	instance_no = ddi_get_instance(instance->dip);
2654		con_log(CL_ANN, (CE_WARN,
2655		    "mega%d: Failed to log AEN event", instance_no));
2656	}
2657
2658	/* get copy of seq_num and class/locale for re-registration */
2659	seq_num = evt_detail->seq_num;
2660	seq_num++;
2661	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
2662	    sizeof (struct megasas_evt_detail));
2663
2664	cmd->frame->dcmd.cmd_status = 0x0;
2665	cmd->frame->dcmd.mbox.w[0] = seq_num;
2666
2667	instance->aen_seq_num = seq_num;
2668
2669	cmd->frame_count = 1;
2670
2671	/* Issue the aen registration frame */
2672	instance->func_ptr->issue_cmd(cmd, instance);
2673}
2674
2675/*
2676 * complete_cmd_in_sync_mode -	Completes an internal command
2677 * @instance:			Adapter soft state
2678 * @cmd:			Command to be completed
2679 *
2680 * The issue_cmd_in_sync_mode() function waits for a command to complete
2681 * after it issues a command. This function wakes up that waiting routine by
2682 * broadcasting on the int_cmd_cv condition variable.
2683 */
2684static void
2685complete_cmd_in_sync_mode(struct megasas_instance *instance,
2686    struct megasas_cmd *cmd)
2687{
2688	cmd->cmd_status = cmd->frame->io.cmd_status;
2689
2690	cmd->sync_cmd = MEGASAS_FALSE;
2691
2692	if (cmd->cmd_status == ENODATA) {
2693		cmd->cmd_status = 0;
2694	}
2695
2696	cv_broadcast(&instance->int_cmd_cv);
2697}
2698
2699/*
2700 * megasas_softintr - The Software ISR
2701 * @param arg	: HBA soft state
2702 *
2703 * called directly from the hardware interrupt handler when a high-level
2704 * interrupt is not in use; otherwise triggered as a soft interrupt
2705 */
2706static uint_t
2707megasas_softintr(struct megasas_instance *instance)
2708{
2709	struct scsi_pkt		*pkt;
2710	struct scsa_cmd		*acmd;
2711	struct megasas_cmd	*cmd;
2712	struct mlist_head	*pos, *next;
2713	mlist_t			process_list;
2714	struct megasas_header	*hdr;
2715	struct scsi_arq_status	*arqstat;
2716
2717	con_log(CL_ANN1, (CE_CONT, "megasas_softintr called"));
2718
2719	ASSERT(instance);
2720	mutex_enter(&instance->completed_pool_mtx);
2721
2722	if (mlist_empty(&instance->completed_pool_list)) {
2723		mutex_exit(&instance->completed_pool_mtx);
2724		return (DDI_INTR_UNCLAIMED);
2725	}
2726
2727	instance->softint_running = 1;
2728
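	/*
	 * Splice the completed commands onto a private list so the
	 * completed_pool lock can be dropped while they are processed.
	 */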
2729	INIT_LIST_HEAD(&process_list);
2730	mlist_splice(&instance->completed_pool_list, &process_list);
2731	INIT_LIST_HEAD(&instance->completed_pool_list);
2732
2733	mutex_exit(&instance->completed_pool_mtx);
2734
2735	/* perform all callbacks first, before releasing the SCBs */
2736	mlist_for_each_safe(pos, next, &process_list) {
2737		cmd = mlist_entry(pos, struct megasas_cmd, list);
2738
2739		/* synchronize the cmd frame so the CPU sees FW's updates */
2740		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
2741		    0, 0, DDI_DMA_SYNC_FORCPU);
2742
2743		if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
2744		    DDI_SUCCESS) {
2745			megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2746			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2747			return (DDI_INTR_UNCLAIMED);
2748		}
2749
2750		hdr = &cmd->frame->hdr;
2751
2752		/* remove the internal command from the process list */
2753		mlist_del_init(&cmd->list);
2754
2755		switch (hdr->cmd) {
2756		case MFI_CMD_OP_PD_SCSI:
2757		case MFI_CMD_OP_LD_SCSI:
2758		case MFI_CMD_OP_LD_READ:
2759		case MFI_CMD_OP_LD_WRITE:
2760			/*
2761			 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
2762			 * could have been issued either through an
2763			 * IO path or an IOCTL path. If it was via IOCTL,
2764			 * we will send it to internal completion.
2765			 */
2766			if (cmd->sync_cmd == MEGASAS_TRUE) {
2767				complete_cmd_in_sync_mode(instance, cmd);
2768				break;
2769			}
2770
2771			/* regular commands */
2772			acmd =	cmd->cmd;
2773			pkt =	CMD2PKT(acmd);
2774
2775			if (acmd->cmd_flags & CFLAG_DMAVALID) {
2776				if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2777					(void) ddi_dma_sync(acmd->cmd_dmahandle,
2778					    acmd->cmd_dma_offset,
2779					    acmd->cmd_dma_len,
2780					    DDI_DMA_SYNC_FORCPU);
2781				}
2782			}
2783
2784			pkt->pkt_reason		= CMD_CMPLT;
2785			pkt->pkt_statistics	= 0;
2786			pkt->pkt_state = STATE_GOT_BUS
2787			    | STATE_GOT_TARGET | STATE_SENT_CMD
2788			    | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2789
2790			con_log(CL_ANN1, (CE_CONT,
2791			    "CDB[0] = %x completed for %s: size %lx context %x",
2792			    pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
2793			    acmd->cmd_dmacount, hdr->context));
2794
2795			if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2796				struct scsi_inquiry	*inq;
2797
2798				if (acmd->cmd_dmacount != 0) {
2799					bp_mapin(acmd->cmd_buf);
2800					inq = (struct scsi_inquiry *)
2801					    acmd->cmd_buf->b_un.b_addr;
2802
2803					/* don't expose physical drives to OS */
2804					if (acmd->islogical &&
2805					    (hdr->cmd_status == MFI_STAT_OK)) {
2806						display_scsi_inquiry(
2807						    (caddr_t)inq);
2808					} else if ((hdr->cmd_status ==
2809					    MFI_STAT_OK) && inq->inq_dtype ==
2810					    DTYPE_DIRECT) {
2811
2812						display_scsi_inquiry(
2813						    (caddr_t)inq);
2814
2815						/* for physical disk */
2816						hdr->cmd_status =
2817						    MFI_STAT_DEVICE_NOT_FOUND;
2818					}
2819				}
2820			}
2821
2822			switch (hdr->cmd_status) {
2823			case MFI_STAT_OK:
2824				pkt->pkt_scbp[0] = STATUS_GOOD;
2825				break;
2826			case MFI_STAT_LD_CC_IN_PROGRESS:
2827			case MFI_STAT_LD_RECON_IN_PROGRESS:
2828				/* SJ - not the correct way to handle these */
2829				pkt->pkt_scbp[0] = STATUS_GOOD;
2830				break;
2831			case MFI_STAT_LD_INIT_IN_PROGRESS:
2832				con_log(CL_ANN,
2833				    (CE_WARN, "Initialization in Progress"));
2834				pkt->pkt_reason	= CMD_TRAN_ERR;
2835
2836				break;
2837			case MFI_STAT_SCSI_DONE_WITH_ERROR:
2838				con_log(CL_ANN1, (CE_CONT, "scsi_done error"));
2839
2840				pkt->pkt_reason	= CMD_CMPLT;
2841				((struct scsi_status *)
2842				    pkt->pkt_scbp)->sts_chk = 1;
2843
2844				if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2845
2846					con_log(CL_ANN,
2847					    (CE_WARN, "TEST_UNIT_READY fail"));
2848
2849				} else {
2850					pkt->pkt_state |= STATE_ARQ_DONE;
2851					arqstat = (void *)(pkt->pkt_scbp);
2852					arqstat->sts_rqpkt_reason = CMD_CMPLT;
2853					arqstat->sts_rqpkt_resid = 0;
2854					arqstat->sts_rqpkt_state |=
2855					    STATE_GOT_BUS | STATE_GOT_TARGET
2856					    | STATE_SENT_CMD
2857					    | STATE_XFERRED_DATA;
2858					*(uint8_t *)&arqstat->sts_rqpkt_status =
2859					    STATUS_GOOD;
2860
2861					bcopy(cmd->sense,
2862					    &(arqstat->sts_sensedata),
2863					    acmd->cmd_scblen -
2864					    offsetof(struct scsi_arq_status,
2865					    sts_sensedata));
2866				}
2867				break;
2868			case MFI_STAT_LD_OFFLINE:
2869			case MFI_STAT_DEVICE_NOT_FOUND:
2870				con_log(CL_ANN1, (CE_CONT,
2871				    "device not found error"));
2872				pkt->pkt_reason	= CMD_DEV_GONE;
2873				pkt->pkt_statistics  = STAT_DISCON;
2874				break;
2875			case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2876				pkt->pkt_state |= STATE_ARQ_DONE;
2877				pkt->pkt_reason	= CMD_CMPLT;
2878				((struct scsi_status *)
2879				    pkt->pkt_scbp)->sts_chk = 1;
2880
2881				arqstat = (void *)(pkt->pkt_scbp);
2882				arqstat->sts_rqpkt_reason = CMD_CMPLT;
2883				arqstat->sts_rqpkt_resid = 0;
2884				arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2885				    | STATE_GOT_TARGET | STATE_SENT_CMD
2886				    | STATE_XFERRED_DATA;
2887				*(uint8_t *)&arqstat->sts_rqpkt_status =
2888				    STATUS_GOOD;
2889
2890				arqstat->sts_sensedata.es_valid = 1;
2891				arqstat->sts_sensedata.es_key =
2892				    KEY_ILLEGAL_REQUEST;
2893				arqstat->sts_sensedata.es_class =
2894				    CLASS_EXTENDED_SENSE;
2895
2896				/*
2897				 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2898				 * ASC: 0x21; ASCQ: 0x00
2899				 */
2900				arqstat->sts_sensedata.es_add_code = 0x21;
2901				arqstat->sts_sensedata.es_qual_code = 0x00;
2902
2903				break;
2904
2905			default:
2906				con_log(CL_ANN, (CE_CONT, "Unknown status!"));
2907				pkt->pkt_reason	= CMD_TRAN_ERR;
2908
2909				break;
2910			}
2911
2912			atomic_add_16(&instance->fw_outstanding, (-1));
2913
2914			return_mfi_pkt(instance, cmd);
2915
2916			(void) megasas_common_check(instance, cmd);
2917
2918			if (acmd->cmd_dmahandle) {
2919				if (megasas_check_dma_handle(
2920				    acmd->cmd_dmahandle) != DDI_SUCCESS) {
2921					ddi_fm_service_impact(instance->dip,
2922					    DDI_SERVICE_UNAFFECTED);
2923					pkt->pkt_reason = CMD_TRAN_ERR;
2924					pkt->pkt_statistics = 0;
2925				}
2926			}
2927
2928			/* Call the callback routine */
2929			if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
2930				scsi_hba_pkt_comp(pkt);
2931			}
2932
2933			break;
2934		case MFI_CMD_OP_SMP:
2935		case MFI_CMD_OP_STP:
2936			complete_cmd_in_sync_mode(instance, cmd);
2937			break;
2938		case MFI_CMD_OP_DCMD:
2939			/* see if got an event notification */
2940			if (cmd->frame->dcmd.opcode ==
2941			    MR_DCMD_CTRL_EVENT_WAIT) {
2942				if ((instance->aen_cmd == cmd) &&
2943				    (instance->aen_cmd->abort_aen)) {
2944					con_log(CL_ANN, (CE_WARN,
2945					    "megasas_softintr: "
2946					    "aborted_aen returned"));
2947				} else {
2948					service_mfi_aen(instance, cmd);
2949
2950					atomic_add_16(&instance->fw_outstanding,
2951					    (-1));
2952				}
2953			} else {
2954				complete_cmd_in_sync_mode(instance, cmd);
2955			}
2956
2957			break;
2958		case MFI_CMD_OP_ABORT:
2959			con_log(CL_ANN, (CE_WARN, "MFI_CMD_OP_ABORT complete"));
2960			/*
2961			 * MFI_CMD_OP_ABORT successfully completed
2962			 * in the synchronous mode
2963			 */
2964			complete_cmd_in_sync_mode(instance, cmd);
2965			break;
2966		default:
2967			megasas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2968			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2969
2970			if (cmd->pkt != NULL) {
2971				pkt = cmd->pkt;
2972				if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
2973					scsi_hba_pkt_comp(pkt);
2974				}
2975			}
2976			con_log(CL_ANN, (CE_WARN, "Cmd type unknown !!"));
2977			break;
2978		}
2979	}
2980
2981	instance->softint_running = 0;
2982
2983	return (DDI_INTR_CLAIMED);
2984}
2985
2986/*
2987 * mega_alloc_dma_obj
2988 *
2989 * Allocate the memory and other resources for a DMA object.
2990 */
2991static int
2992mega_alloc_dma_obj(struct megasas_instance *instance, dma_obj_t *obj)
2993{
2994	int	i;
2995	size_t	alen = 0;
2996	uint_t	cookie_cnt;
2997
2998	i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
2999	    DDI_DMA_SLEEP, NULL, &obj->dma_handle);
3000	if (i != DDI_SUCCESS) {
3001
3002		switch (i) {
3003			case DDI_DMA_BADATTR :
3004				con_log(CL_ANN, (CE_WARN,
3005				"Failed ddi_dma_alloc_handle- Bad atrib"));
3006				break;
3007			case DDI_DMA_NORESOURCES :
3008				con_log(CL_ANN, (CE_WARN,
3009				"Failed ddi_dma_alloc_handle- No Resources"));
3010				break;
3011			default :
3012				con_log(CL_ANN, (CE_WARN,
3013				"Failed ddi_dma_alloc_handle :unknown %d", i));
3014				break;
3015		}
3016
3017		return (-1);
3018	}
3019
3020	if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &endian_attr,
3021	    DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
3022	    &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
3023	    alen < obj->size) {
3024
3025		ddi_dma_free_handle(&obj->dma_handle);
3026
3027		con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
3028
3029		return (-1);
3030	}
3031
3032	if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
3033	    obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
3034	    NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
3035
3036		ddi_dma_mem_free(&obj->acc_handle);
3037		ddi_dma_free_handle(&obj->dma_handle);
3038
3039		con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
3040
3041		return (-1);
3042	}
3043
3044	if (megasas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
3045		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
3046		return (-1);
3047	}
3048
3049	if (megasas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
3050		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
3051		return (-1);
3052	}
3053
3054	return (cookie_cnt);
3055}
3056
3057/*
3058 * mega_free_dma_obj(struct megasas_instance *, dma_obj_t)
3059 *
3060 * De-allocate the memory and other resources for a DMA object, which must
3061 * have been allocated by a previous call to mega_alloc_dma_obj()
3062 */
3063static int
3064mega_free_dma_obj(struct megasas_instance *instance, dma_obj_t obj)
3065{
3066
3067	if (megasas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
3068		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
3069		return (DDI_FAILURE);
3070	}
3071
3072	if (megasas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
3073		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
3074		return (DDI_FAILURE);
3075	}
3076
3077	(void) ddi_dma_unbind_handle(obj.dma_handle);
3078	ddi_dma_mem_free(&obj.acc_handle);
3079	ddi_dma_free_handle(&obj.dma_handle);
3080
3081	return (DDI_SUCCESS);
3082}
3083
3084/*
3085 * megasas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
3086 * int, int (*)())
3087 *
3088 * Allocate dma resources for a new scsi command
3089 */
3090static int
3091megasas_dma_alloc(struct megasas_instance *instance, struct scsi_pkt *pkt,
3092    struct buf *bp, int flags, int (*callback)())
3093{
3094	int	dma_flags;
3095	int	(*cb)(caddr_t);
3096	int	i;
3097
3098	ddi_dma_attr_t	tmp_dma_attr = megasas_generic_dma_attr;
3099	struct scsa_cmd	*acmd = PKT2CMD(pkt);
3100
3101	acmd->cmd_buf = bp;
3102
3103	if (bp->b_flags & B_READ) {
3104		acmd->cmd_flags &= ~CFLAG_DMASEND;
3105		dma_flags = DDI_DMA_READ;
3106	} else {
3107		acmd->cmd_flags |= CFLAG_DMASEND;
3108		dma_flags = DDI_DMA_WRITE;
3109	}
3110
3111	if (flags & PKT_CONSISTENT) {
3112		acmd->cmd_flags |= CFLAG_CONSISTENT;
3113		dma_flags |= DDI_DMA_CONSISTENT;
3114	}
3115
3116	if (flags & PKT_DMA_PARTIAL) {
3117		dma_flags |= DDI_DMA_PARTIAL;
3118	}
3119
3120	dma_flags |= DDI_DMA_REDZONE;
3121
3122	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
3123
3124	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
3125	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
3126
3127	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
3128	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
3129		switch (i) {
3130		case DDI_DMA_BADATTR:
3131			bioerror(bp, EFAULT);
3132			return (-1);
3133
3134		case DDI_DMA_NORESOURCES:
3135			bioerror(bp, 0);
3136			return (-1);
3137
3138		default:
3139			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
3140			    "0x%x impossible\n", i));
3141			bioerror(bp, EFAULT);
3142			return (-1);
3143		}
3144	}
3145
3146	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
3147	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);
3148
3149	switch (i) {
3150	case DDI_DMA_PARTIAL_MAP:
3151		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
3152			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
3153			    "DDI_DMA_PARTIAL_MAP impossible\n"));
3154			goto no_dma_cookies;
3155		}
3156
3157		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
3158		    DDI_FAILURE) {
3159			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed\n"));
3160			goto no_dma_cookies;
3161		}
3162
3163		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
3164		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
3165		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
3166		    DDI_FAILURE) {
3167
3168			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed\n"));
3169			goto no_dma_cookies;
3170		}
3171
3172		goto get_dma_cookies;
3173	case DDI_DMA_MAPPED:
3174		acmd->cmd_nwin = 1;
3175		acmd->cmd_dma_len = 0;
3176		acmd->cmd_dma_offset = 0;
3177
3178get_dma_cookies:
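		/*
		 * Walk the cookies of this DMA window, up to the adapter's
		 * SGE limit, accumulating the mapped byte count in
		 * cmd_dmacount.
		 */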
3179		i = 0;
3180		acmd->cmd_dmacount = 0;
3181		for (;;) {
3182			acmd->cmd_dmacount +=
3183			    acmd->cmd_dmacookies[i++].dmac_size;
3184
3185			if (i == instance->max_num_sge ||
3186			    i == acmd->cmd_ncookies)
3187				break;
3188
3189			ddi_dma_nextcookie(acmd->cmd_dmahandle,
3190			    &acmd->cmd_dmacookies[i]);
3191		}
3192
3193		acmd->cmd_cookie = i;
3194		acmd->cmd_cookiecnt = i;
3195
3196		acmd->cmd_flags |= CFLAG_DMAVALID;
3197
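		/*
		 * pkt_resid reports how much of the request could not be
		 * mapped by the cookies gathered above.
		 */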
3198		if (bp->b_bcount >= acmd->cmd_dmacount) {
3199			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
3200		} else {
3201			pkt->pkt_resid = 0;
3202		}
3203
3204		return (0);
3205	case DDI_DMA_NORESOURCES:
3206		bioerror(bp, 0);
3207		break;
3208	case DDI_DMA_NOMAPPING:
3209		bioerror(bp, EFAULT);
3210		break;
3211	case DDI_DMA_TOOBIG:
3212		bioerror(bp, EINVAL);
3213		break;
3214	case DDI_DMA_INUSE:
3215		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
3216		    " DDI_DMA_INUSE impossible\n"));
3217		break;
3218	default:
3219		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
3220		    "0x%x impossible\n", i));
3221		break;
3222	}
3223
3224no_dma_cookies:
3225	ddi_dma_free_handle(&acmd->cmd_dmahandle);
3226	acmd->cmd_dmahandle = NULL;
3227	acmd->cmd_flags &= ~CFLAG_DMAVALID;
3228	return (-1);
3229}
3230
3231/*
3232 * megasas_dma_move(struct megasas_instance *, struct scsi_pkt *, struct buf *)
3233 *
3234 * move dma resources to next dma window
3235 *
3236 */
3237static int
3238megasas_dma_move(struct megasas_instance *instance, struct scsi_pkt *pkt,
3239    struct buf *bp)
3240{
3241	int	i = 0;
3242
3243	struct scsa_cmd	*acmd = PKT2CMD(pkt);
3244
3245	/*
3246	 * If there are no more cookies remaining in this window,
3247	 * must move to the next window first.
3248	 */
3249	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
3250		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
3251			return (0);
3252		}
3253
3254		/* at last window, cannot move */
3255		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
3256			return (-1);
3257		}
3258
3259		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
3260		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
3261		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
3262		    DDI_FAILURE) {
3263			return (-1);
3264		}
3265
3266		acmd->cmd_cookie = 0;
3267	} else {
3268		/* still more cookies in this window - get the next one */
3269		ddi_dma_nextcookie(acmd->cmd_dmahandle,
3270		    &acmd->cmd_dmacookies[0]);
3271	}
3272
3273	/* get remaining cookies in this window, up to our maximum */
3274	for (;;) {
3275		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
3276		acmd->cmd_cookie++;
3277
3278		if (i == instance->max_num_sge ||
3279		    acmd->cmd_cookie == acmd->cmd_ncookies) {
3280			break;
3281		}
3282
3283		ddi_dma_nextcookie(acmd->cmd_dmahandle,
3284		    &acmd->cmd_dmacookies[i]);
3285	}
3286
3287	acmd->cmd_cookiecnt = i;
3288
3289	if (bp->b_bcount >= acmd->cmd_dmacount) {
3290		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
3291	} else {
3292		pkt->pkt_resid = 0;
3293	}
3294
3295	return (0);
3296}
3297
3298/*
3299 * build_cmd
3300 */
3301static struct megasas_cmd *
3302build_cmd(struct megasas_instance *instance, struct scsi_address *ap,
3303    struct scsi_pkt *pkt, uchar_t *cmd_done)
3304{
3305	uint16_t	flags = 0;
3306	uint32_t	i;
3307	uint32_t 	context;
3308	uint32_t	sge_bytes;
3309
3310	struct megasas_cmd		*cmd;
3311	struct megasas_sge64		*mfi_sgl;
3312	struct scsa_cmd			*acmd = PKT2CMD(pkt);
3313	struct megasas_pthru_frame 	*pthru;
3314	struct megasas_io_frame		*ldio;
3315
3316	/* find out if this is a logical or a physical drive command */
3317	acmd->islogical = MEGADRV_IS_LOGICAL(ap);
3318	acmd->device_id = MAP_DEVICE_ID(instance, ap);
3319	*cmd_done = 0;
3320
3321	/* get the command packet */
3322	if (!(cmd = get_mfi_pkt(instance))) {
3323		return (NULL);
3324	}
3325
3326	cmd->pkt = pkt;
3327	cmd->cmd = acmd;
3328
3329	/* determine the command's data direction */
3330	if (acmd->cmd_flags & CFLAG_DMASEND) {
3331		flags = MFI_FRAME_DIR_WRITE;
3332
3333		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
3334			(void) ddi_dma_sync(acmd->cmd_dmahandle,
3335			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
3336			    DDI_DMA_SYNC_FORDEV);
3337		}
3338	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
3339		flags = MFI_FRAME_DIR_READ;
3340
3341		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
3342			(void) ddi_dma_sync(acmd->cmd_dmahandle,
3343			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
3344			    DDI_DMA_SYNC_FORCPU);
3345		}
3346	} else {
3347		flags = MFI_FRAME_DIR_NONE;
3348	}
3349
3350	flags |= MFI_FRAME_SGL64;
3351
3352	switch (pkt->pkt_cdbp[0]) {
3353
3354	/*
3355	 * case SCMD_SYNCHRONIZE_CACHE:
3356	 * 	flush_cache(instance);
3357	 *	return_mfi_pkt(instance, cmd);
3358	 *	*cmd_done = 1;
3359	 *
3360	 *	return (NULL);
3361	 */
3362
3363	case SCMD_READ:
3364	case SCMD_WRITE:
3365	case SCMD_READ_G1:
3366	case SCMD_WRITE_G1:
3367		if (acmd->islogical) {
3368			ldio = (struct megasas_io_frame *)cmd->frame;
3369
3370			/*
3371			 * prepare the logical IO frame:
3372			 * the 2nd bit of the opcode is zero for all read cmds
3373			 */
3374			ldio->cmd = (pkt->pkt_cdbp[0] & 0x02) ?
3375			    MFI_CMD_OP_LD_WRITE : MFI_CMD_OP_LD_READ;
3376			ldio->cmd_status = 0x0;
3377			ldio->scsi_status = 0x0;
3378			ldio->target_id	 = acmd->device_id;
3379			ldio->timeout = 0;
3380			ldio->reserved_0 = 0;
3381			ldio->pad_0 = 0;
3382			ldio->flags = flags;
3383
3384			/* Initialize sense Information */
3385			bzero(cmd->sense, SENSE_LENGTH);
3386			ldio->sense_len = SENSE_LENGTH;
3387			ldio->sense_buf_phys_addr_hi = 0;
3388			ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
3389
3390			ldio->start_lba_hi = 0;
3391			ldio->access_byte = (acmd->cmd_cdblen != 6) ?
3392			    pkt->pkt_cdbp[1] : 0;
3393			ldio->sge_count = acmd->cmd_cookiecnt;
3394			mfi_sgl = (struct megasas_sge64	*)&ldio->sgl;
3395
3396			context = ldio->context;
3397
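			/*
			 * Decode the transfer length and starting LBA from
			 * the CDB; their offsets depend on the CDB group.
			 */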
3398			if (acmd->cmd_cdblen == CDB_GROUP0) {
3399				ldio->lba_count	= host_to_le16(
3400				    (uint16_t)(pkt->pkt_cdbp[4]));
3401
3402				ldio->start_lba_lo = host_to_le32(
3403				    ((uint32_t)(pkt->pkt_cdbp[3])) |
3404				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
3405				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
3406				    << 16));
3407			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
3408				ldio->lba_count = host_to_le16(
3409				    ((uint16_t)(pkt->pkt_cdbp[8])) |
3410				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
3411
3412				ldio->start_lba_lo = host_to_le32(
3413				    ((uint32_t)(pkt->pkt_cdbp[5])) |
3414				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3415				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3416				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3417			} else if (acmd->cmd_cdblen == CDB_GROUP2) {
3418				ldio->lba_count	 = host_to_le16(
3419				    ((uint16_t)(pkt->pkt_cdbp[9])) |
3420				    ((uint16_t)(pkt->pkt_cdbp[8]) << 8) |
3421				    ((uint16_t)(pkt->pkt_cdbp[7]) << 16) |
3422				    ((uint16_t)(pkt->pkt_cdbp[6]) << 24));
3423
3424				ldio->start_lba_lo = host_to_le32(
3425				    ((uint32_t)(pkt->pkt_cdbp[5])) |
3426				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3427				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3428				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3429			} else if (acmd->cmd_cdblen == CDB_GROUP3) {
3430				ldio->lba_count = host_to_le16(
3431				    ((uint16_t)(pkt->pkt_cdbp[13])) |
3432				    ((uint16_t)(pkt->pkt_cdbp[12]) << 8) |
3433				    ((uint16_t)(pkt->pkt_cdbp[11]) << 16) |
3434				    ((uint16_t)(pkt->pkt_cdbp[10]) << 24));
3435
3436				ldio->start_lba_lo = host_to_le32(
3437				    ((uint32_t)(pkt->pkt_cdbp[9])) |
3438				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
3439				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
3440				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
3441
3442				ldio->start_lba_hi = host_to_le32(
3443				    ((uint32_t)(pkt->pkt_cdbp[5])) |
3444				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
3445				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
3446				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
3447			}
3448
3449			break;
3450		}
3451		/* fall through for all non-read/write cmds */
3452	default:
3453		pthru	= (struct megasas_pthru_frame *)cmd->frame;
3454
3455		/* prepare the DCDB frame */
3456		pthru->cmd = (acmd->islogical) ?
3457		    MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI;
3458		pthru->cmd_status	= 0x0;
3459		pthru->scsi_status	= 0x0;
3460		pthru->target_id	= acmd->device_id;
3461		pthru->lun		= 0;
3462		pthru->cdb_len		= acmd->cmd_cdblen;
3463		pthru->timeout		= 0;
3464		pthru->flags		= flags;
3465		pthru->data_xfer_len	= acmd->cmd_dmacount;
3466		pthru->sge_count	= acmd->cmd_cookiecnt;
3467		mfi_sgl			= (struct megasas_sge64 *)&pthru->sgl;
3468
3469		bzero(cmd->sense, SENSE_LENGTH);
3470		pthru->sense_len	= SENSE_LENGTH;
3471		pthru->sense_buf_phys_addr_hi = 0;
3472		pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
3473
3474		context = pthru->context;
3475
3476		bcopy(pkt->pkt_cdbp, pthru->cdb, acmd->cmd_cdblen);
3477
3478		break;
3479	}
3480#ifdef lint
3481	context = context;
3482#endif
3483	/* bzero(mfi_sgl, sizeof (struct megasas_sge64) * MAX_SGL); */
3484
3485	/* prepare the scatter-gather list for the firmware */
3486	for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
3487		mfi_sgl->phys_addr = acmd->cmd_dmacookies[i].dmac_laddress;
3488		mfi_sgl->length    = acmd->cmd_dmacookies[i].dmac_size;
3489	}
3490
3491	sge_bytes = sizeof (struct megasas_sge64)*acmd->cmd_cookiecnt;
3492
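	/*
	 * frame_count is the number of MEGAMFI_FRAME_SIZE-byte frames this
	 * command occupies: one base frame plus enough additional frames to
	 * hold the SGL (rounded up), capped at 8 below.
	 */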
3493	cmd->frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
3494	    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) + 1;
3495
3496	if (cmd->frame_count >= 8) {
3497		cmd->frame_count = 8;
3498	}
3499
3500	return (cmd);
3501}
3502
3503/*
3504 * wait_for_outstanding -	Wait for all outstanding cmds
3505 * @instance:				Adapter soft state
3506 *
3507 * This function polls up to wait_time times, one millisecond apart, for the
3508 * FW to complete all its outstanding commands. Returns nonzero if one or
3509 * more IOs are still pending after this period.
3510 */
3511static int
3512wait_for_outstanding(struct megasas_instance *instance)
3513{
3514	int		i;
3515	uint32_t	wait_time = 90;
3516
3517	for (i = 0; i < wait_time; i++) {
3518		if (!instance->fw_outstanding) {
3519			break;
3520		}
3521
3522		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3523	}
3524
3525	if (instance->fw_outstanding) {
3526		return (1);
3527	}
3528
3529	ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VERSION);
3530
3531	return (0);
3532}
3533
3534/*
3535 * issue_mfi_pthru
3536 */
3537static int
3538issue_mfi_pthru(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3539    struct megasas_cmd *cmd, int mode)
3540{
3541	void		*ubuf;
3542	uint32_t	kphys_addr = 0;
3543	uint32_t	xferlen = 0;
3544	uint_t		model;
3545
3546	dma_obj_t			pthru_dma_obj;
3547	struct megasas_pthru_frame	*kpthru;
3548	struct megasas_pthru_frame	*pthru;
3549
3550	pthru = &cmd->frame->pthru;
3551	kpthru = (struct megasas_pthru_frame *)&ioctl->frame[0];
3552
3553	model = ddi_model_convert_from(mode & FMODELS);
3554	if (model == DDI_MODEL_ILP32) {
3555		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32"));
3556
3557		xferlen	= kpthru->sgl.sge32[0].length;
3558
3559		/* SJ! - ubuf needs to be virtual address. */
3560		ubuf	= (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
3561	} else {
3562#ifdef _ILP32
3563		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP32"));
3564		xferlen	= kpthru->sgl.sge32[0].length;
3565		/* SJ! - ubuf needs to be virtual address. */
3566		ubuf	= (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
3567#else
3568		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_pthru: DDI_MODEL_LP64"));
3569		xferlen	= kpthru->sgl.sge64[0].length;
3570		/* SJ! - ubuf needs to be virtual address. */
3571		ubuf	= (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
3572#endif
3573	}
3574
3575	if (xferlen) {
3576		/* means IOCTL requires DMA */
3577		/* allocate the data transfer buffer */
3578		pthru_dma_obj.size = xferlen;
3579		pthru_dma_obj.dma_attr = megasas_generic_dma_attr;
3580		pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3581		pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3582		pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
3583		pthru_dma_obj.dma_attr.dma_attr_align = 1;
3584
3585		/* allocate kernel buffer for DMA */
3586		if (mega_alloc_dma_obj(instance, &pthru_dma_obj) != 1) {
3587			con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3588			    "could not data transfer buffer alloc."));
3589			return (DDI_FAILURE);
3590		}
3591
3592		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3593		if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
3594			if (ddi_copyin(ubuf, (void *)pthru_dma_obj.buffer,
3595			    xferlen, mode)) {
3596				con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3597				    "copy from user space failed\n"));
3598				return (1);
3599			}
3600		}
3601
3602		kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
3603	}
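	/*
	 * When a data transfer is required, the frame handed to the FW
	 * references the kernel bounce buffer (kphys_addr); data is copied
	 * to and from the caller's buffer with ddi_copyin/ddi_copyout
	 * around the command.
	 */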
3604
3605	pthru->cmd		= kpthru->cmd;
3606	pthru->sense_len	= kpthru->sense_len;
3607	pthru->cmd_status	= kpthru->cmd_status;
3608	pthru->scsi_status	= kpthru->scsi_status;
3609	pthru->target_id	= kpthru->target_id;
3610	pthru->lun		= kpthru->lun;
3611	pthru->cdb_len		= kpthru->cdb_len;
3612	pthru->sge_count	= kpthru->sge_count;
3613	pthru->timeout		= kpthru->timeout;
3614	pthru->data_xfer_len	= kpthru->data_xfer_len;
3615
3616	pthru->sense_buf_phys_addr_hi	= 0;
3617	/* pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; */
3618	pthru->sense_buf_phys_addr_lo	= 0;
3619
3620	bcopy((void *)kpthru->cdb, (void *)pthru->cdb, pthru->cdb_len);
3621
3622	pthru->flags			= kpthru->flags & ~MFI_FRAME_SGL64;
3623	pthru->sgl.sge32[0].length	= xferlen;
3624	pthru->sgl.sge32[0].phys_addr	= kphys_addr;
3625
3626	cmd->sync_cmd = MEGASAS_TRUE;
3627	cmd->frame_count = 1;
3628
3629	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3630		con_log(CL_ANN, (CE_WARN,
3631		    "issue_mfi_pthru: fw_ioctl failed\n"));
3632	} else {
3633		if (xferlen && (kpthru->flags & MFI_FRAME_DIR_READ)) {
3634
3635			if (ddi_copyout(pthru_dma_obj.buffer, ubuf,
3636			    xferlen, mode)) {
3637				con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
3638				    "copy to user space failed\n"));
3639				return (1);
3640			}
3641		}
3642	}
3643
3644	kpthru->cmd_status = pthru->cmd_status;
3645	kpthru->scsi_status = pthru->scsi_status;
3646
3647	con_log(CL_ANN, (CE_NOTE, "issue_mfi_pthru: cmd_status %x, "
3648	    "scsi_status %x\n", pthru->cmd_status, pthru->scsi_status));
3649
3650	if (xferlen) {
3651		/* free kernel buffer */
3652		if (mega_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
3653			return (1);
3654	}
3655
3656	return (0);
3657}
3658
3659/*
3660 * issue_mfi_dcmd
3661 */
3662static int
3663issue_mfi_dcmd(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3664    struct megasas_cmd *cmd, int mode)
3665{
3666	void		*ubuf;
3667	uint32_t	kphys_addr = 0;
3668	uint32_t	xferlen = 0;
3669	uint32_t	model;
3670	dma_obj_t			dcmd_dma_obj;
3671	struct megasas_dcmd_frame	*kdcmd;
3672	struct megasas_dcmd_frame	*dcmd;
3673
3674	dcmd = &cmd->frame->dcmd;
3675	kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];
3676
3677	model = ddi_model_convert_from(mode & FMODELS);
3678	if (model == DDI_MODEL_ILP32) {
3679		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3680
3681		xferlen	= kdcmd->sgl.sge32[0].length;
3682
3683		/* SJ! - ubuf needs to be virtual address. */
3684		ubuf	= (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
3685	} else {
3688#ifdef _ILP32
3689		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
3690		xferlen	= kdcmd->sgl.sge32[0].length;
3691		/* SJ! - ubuf needs to be virtual address. */
3692		ubuf	= (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
3693#else
3694		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_dcmd: DDI_MODEL_LP64"));
3695		xferlen	= kdcmd->sgl.sge64[0].length;
3696		/* SJ! - ubuf needs to be virtual address. */
3697		ubuf	= (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
3698#endif
3699	}
3700	if (xferlen) {
3701		/* means IOCTL requires DMA */
3702		/* allocate the data transfer buffer */
3703		dcmd_dma_obj.size = xferlen;
3704		dcmd_dma_obj.dma_attr = megasas_generic_dma_attr;
3705		dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3706		dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3707		dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3708		dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3709
3710		/* allocate kernel buffer for DMA */
3711		if (mega_alloc_dma_obj(instance, &dcmd_dma_obj) != 1) {
3712			con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3713			    "could not data transfer buffer alloc."));
3714			return (DDI_FAILURE);
3715		}
3716
3717		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3718		if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
3719			if (ddi_copyin(ubuf, (void *)dcmd_dma_obj.buffer,
3720			    xferlen, mode)) {
3721				con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3722				    "copy from user space failed\n"));
3723				return (1);
3724			}
3725		}
3726
3727		kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
3728	}
3729
3730	dcmd->cmd		= kdcmd->cmd;
3731	dcmd->cmd_status	= kdcmd->cmd_status;
3732	dcmd->sge_count		= kdcmd->sge_count;
3733	dcmd->timeout		= kdcmd->timeout;
3734	dcmd->data_xfer_len	= kdcmd->data_xfer_len;
3735	dcmd->opcode		= kdcmd->opcode;
3736
3737	bcopy((void *)kdcmd->mbox.b, (void *)dcmd->mbox.b, DCMD_MBOX_SZ);
3738
3739	dcmd->flags			= kdcmd->flags & ~MFI_FRAME_SGL64;
3740	dcmd->sgl.sge32[0].length	= xferlen;
3741	dcmd->sgl.sge32[0].phys_addr	= kphys_addr;
3742
3743	cmd->sync_cmd = MEGASAS_TRUE;
3744	cmd->frame_count = 1;
3745
3746	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3747		con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed\n"));
3748	} else {
3749		if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
3750
3751			if (ddi_copyout(dcmd_dma_obj.buffer, ubuf,
3752			    xferlen, mode)) {
3753				con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
3754				    "copy to user space failed\n"));
3755				return (1);
3756			}
3757		}
3758	}
3759
3760	kdcmd->cmd_status = dcmd->cmd_status;
3761
3762	if (xferlen) {
3763		/* free kernel buffer */
3764		if (mega_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
3765			return (1);
3766	}
3767
3768	return (0);
3769}
3770
3771/*
3772 * issue_mfi_smp
3773 */
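/*
 * An MFI SMP frame carries two scatter/gather entries: sge[0] describes the
 * caller's response buffer and sge[1] the request buffer.  Both are staged
 * through single-cookie kernel DMA buffers, copied in before the frame is
 * issued in sync mode and copied back out to the caller once the firmware
 * completes it.
 */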
3774static int
3775issue_mfi_smp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
3776    struct megasas_cmd *cmd, int mode)
3777{
3778	void		*request_ubuf;
3779	void		*response_ubuf;
3780	uint32_t	request_xferlen = 0;
3781	uint32_t	response_xferlen = 0;
3782	uint_t		model;
3783	dma_obj_t			request_dma_obj;
3784	dma_obj_t			response_dma_obj;
3785	struct megasas_smp_frame	*ksmp;
3786	struct megasas_smp_frame	*smp;
3787	struct megasas_sge32		*sge32;
3788#ifndef _ILP32
3789	struct megasas_sge64		*sge64;
3790#endif
3791
3792	smp = &cmd->frame->smp;
3793	ksmp = (struct megasas_smp_frame *)&ioctl->frame[0];
3794
3795	model = ddi_model_convert_from(mode & FMODELS);
3796	if (model == DDI_MODEL_ILP32) {
3797		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
3798
3799		sge32			= &ksmp->sgl[0].sge32[0];
3800		response_xferlen	= sge32[0].length;
3801		request_xferlen		= sge32[1].length;
3802		con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
3803		    "response_xferlen = %x, request_xferlen = %x",
3804		    response_xferlen, request_xferlen));
3805
3806		/* SJ! - ubuf needs to be virtual address. */
3807
3808		response_ubuf	= (void *)(ulong_t)sge32[0].phys_addr;
3809		request_ubuf	= (void *)(ulong_t)sge32[1].phys_addr;
3810		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3811		    "response_ubuf = %p, request_ubuf = %p",
3812		    response_ubuf, request_ubuf));
3813	} else {
3814#ifdef _ILP32
3815		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_ILP32"));
3816
3817		sge32			= &ksmp->sgl[0].sge32[0];
3818		response_xferlen	= sge32[0].length;
3819		request_xferlen		= sge32[1].length;
3820		con_log(CL_ANN, (CE_NOTE, "issue_mfi_smp: "
3821		    "response_xferlen = %x, request_xferlen = %x",
3822		    response_xferlen, request_xferlen));
3823
3824		/* SJ! - ubuf needs to be virtual address. */
3825
3826		response_ubuf	= (void *)(ulong_t)sge32[0].phys_addr;
3827		request_ubuf	= (void *)(ulong_t)sge32[1].phys_addr;
3828		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3829		    "response_ubuf = %p, request_ubuf = %p",
3830		    response_ubuf, request_ubuf));
3831#else
3832		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: DDI_MODEL_LP64"));
3833
3834		sge64			= &ksmp->sgl[0].sge64[0];
3835		response_xferlen	= sge64[0].length;
3836		request_xferlen		= sge64[1].length;
3837
3838		/* SJ! - ubuf needs to be virtual address. */
3839		response_ubuf	= (void *)(ulong_t)sge64[0].phys_addr;
3840		request_ubuf	= (void *)(ulong_t)sge64[1].phys_addr;
3841#endif
3842	}
3843	if (request_xferlen) {
3844		/* means IOCTL requires DMA */
3845		/* allocate the data transfer buffer */
3846		request_dma_obj.size = request_xferlen;
3847		request_dma_obj.dma_attr = megasas_generic_dma_attr;
3848		request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3849		request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3850		request_dma_obj.dma_attr.dma_attr_sgllen = 1;
3851		request_dma_obj.dma_attr.dma_attr_align = 1;
3852
3853		/* allocate kernel buffer for DMA */
3854		if (mega_alloc_dma_obj(instance, &request_dma_obj) != 1) {
3855			con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3856			    "could not allocate data transfer buffer."));
3857			return (DDI_FAILURE);
3858		}
3859
3860		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3861		if (ddi_copyin(request_ubuf, (void *) request_dma_obj.buffer,
3862		    request_xferlen, mode)) {
3863			con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3864			    "copy from user space failed\n"));
3865			return (1);
3866		}
3867	}
3868
3869	if (response_xferlen) {
3870		/* means IOCTL requires DMA */
3871		/* allocate the data transfer buffer */
3872		response_dma_obj.size = response_xferlen;
3873		response_dma_obj.dma_attr = megasas_generic_dma_attr;
3874		response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3875		response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3876		response_dma_obj.dma_attr.dma_attr_sgllen = 1;
3877		response_dma_obj.dma_attr.dma_attr_align = 1;
3878
3879		/* allocate kernel buffer for DMA */
3880		if (mega_alloc_dma_obj(instance, &response_dma_obj) != 1) {
3881			con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3882			    "could not allocate data transfer buffer."));
3883			return (DDI_FAILURE);
3884		}
3885
3886		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
3887		if (ddi_copyin(response_ubuf, (void *) response_dma_obj.buffer,
3888		    response_xferlen, mode)) {
3889			con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3890			    "copy from user space failed\n"));
3891			return (1);
3892		}
3893	}
3894
3895	smp->cmd		= ksmp->cmd;
3896	smp->cmd_status		= ksmp->cmd_status;
3897	smp->connection_status	= ksmp->connection_status;
3898	smp->sge_count		= ksmp->sge_count;
3899	/* smp->context		= ksmp->context; */
3900	smp->timeout		= ksmp->timeout;
3901	smp->data_xfer_len	= ksmp->data_xfer_len;
3902
3903	bcopy((void *)&ksmp->sas_addr, (void *)&smp->sas_addr,
3904	    sizeof (uint64_t));
3905
3906	smp->flags		= ksmp->flags & ~MFI_FRAME_SGL64;
3907
3908	model = ddi_model_convert_from(mode & FMODELS);
3909	if (model == DDI_MODEL_ILP32) {
3910		con_log(CL_ANN1, (CE_NOTE,
3911		    "handle_drv_ioctl: DDI_MODEL_ILP32"));
3912
3913		sge32 = &smp->sgl[0].sge32[0];
3914		sge32[0].length	= response_xferlen;
3915		sge32[0].phys_addr =
3916		    response_dma_obj.dma_cookie[0].dmac_address;
3917		sge32[1].length	= request_xferlen;
3918		sge32[1].phys_addr =
3919		    request_dma_obj.dma_cookie[0].dmac_address;
3920	} else {
3921#ifdef _ILP32
3922		con_log(CL_ANN1, (CE_NOTE,
3923		    "handle_drv_ioctl: DDI_MODEL_ILP32"));
3924		sge32 = &smp->sgl[0].sge32[0];
3925		sge32[0].length	 = response_xferlen;
3926		sge32[0].phys_addr =
3927		    response_dma_obj.dma_cookie[0].dmac_address;
3928		sge32[1].length	= request_xferlen;
3929		sge32[1].phys_addr =
3930		    request_dma_obj.dma_cookie[0].dmac_address;
3931#else
3932		con_log(CL_ANN1, (CE_NOTE,
3933		    "issue_mfi_smp: DDI_MODEL_LP64"));
3934		sge64 = &smp->sgl[0].sge64[0];
3935		sge64[0].length	= response_xferlen;
3936		sge64[0].phys_addr =
3937		    response_dma_obj.dma_cookie[0].dmac_address;
3938		sge64[1].length	= request_xferlen;
3939		sge64[1].phys_addr =
3940		    request_dma_obj.dma_cookie[0].dmac_address;
3941#endif
3942	}
3943	con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: "
3944	    "smp->response_xferlen = %d, smp->request_xferlen = %d "
3945	    "smp->data_xfer_len = %d", sge32[0].length, sge32[1].length,
3946	    smp->data_xfer_len));
3947
3948	cmd->sync_cmd = MEGASAS_TRUE;
3949	cmd->frame_count = 1;
3950
3951	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3952		con_log(CL_ANN, (CE_WARN,
3953		    "issue_mfi_smp: fw_ioctl failed\n"));
3954	} else {
3955		con_log(CL_ANN1, (CE_NOTE,
3956		    "issue_mfi_smp: copy to user space\n"));
3957
3958		if (request_xferlen) {
3959			if (ddi_copyout(request_dma_obj.buffer, request_ubuf,
3960			    request_xferlen, mode)) {
3961				con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3962				    "copy to user space failed\n"));
3963				return (1);
3964			}
3965		}
3966
3967		if (response_xferlen) {
3968			if (ddi_copyout(response_dma_obj.buffer, response_ubuf,
3969			    response_xferlen, mode)) {
3970				con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
3971				    "copy to user space failed\n"));
3972				return (1);
3973			}
3974		}
3975	}
3976
3977	ksmp->cmd_status = smp->cmd_status;
3978	con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
3979	    smp->cmd_status));
3980
3981
3982	if (request_xferlen) {
3983		/* free kernel buffer */
3984		if (mega_free_dma_obj(instance, request_dma_obj) != DDI_SUCCESS)
3985			return (1);
3986	}
3987
3988	if (response_xferlen) {
3989		/* free kernel buffer */
3990		if (mega_free_dma_obj(instance, response_dma_obj) !=
3991		    DDI_SUCCESS)
3992			return (1);
3993	}
3994
3995	return (0);
3996}
3997
3998/*
3999 * issue_mfi_stp
4000 */
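/*
 * An MFI STP (SATA tunneling) frame likewise uses two scatter/gather
 * entries: sge[0] for the FIS and sge[1] for the data buffer.  Each is
 * staged through its own kernel DMA buffer, copied in before the frame is
 * issued in sync mode and copied back out afterwards.
 */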
4001static int
4002issue_mfi_stp(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4003    struct megasas_cmd *cmd, int mode)
4004{
4005	void		*fis_ubuf;
4006	void		*data_ubuf;
4007	uint32_t	fis_xferlen = 0;
4008	uint32_t	data_xferlen = 0;
4009	uint_t		model;
4010	dma_obj_t			fis_dma_obj;
4011	dma_obj_t			data_dma_obj;
4012	struct megasas_stp_frame	*kstp;
4013	struct megasas_stp_frame	*stp;
4014
4015	stp = &cmd->frame->stp;
4016	kstp = (struct megasas_stp_frame *)&ioctl->frame[0];
4017
4018	model = ddi_model_convert_from(mode & FMODELS);
4019	if (model == DDI_MODEL_ILP32) {
4020		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
4021
4022		fis_xferlen	= kstp->sgl.sge32[0].length;
4023		data_xferlen	= kstp->sgl.sge32[1].length;
4024
4025		/* SJ! - ubuf needs to be virtual address. */
4026		fis_ubuf	= (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
4027		data_ubuf	= (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
4028	}
4029	else
4030	{
4031#ifdef _ILP32
4032		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_ILP32"));
4033
4034		fis_xferlen	= kstp->sgl.sge32[0].length;
4035		data_xferlen	= kstp->sgl.sge32[1].length;
4036
4037		/* SJ! - ubuf needs to be virtual address. */
4038		fis_ubuf	= (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
4039		data_ubuf	= (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
4040#else
4041		con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: DDI_MODEL_LP64"));
4042
4043		fis_xferlen	= kstp->sgl.sge64[0].length;
4044		data_xferlen	= kstp->sgl.sge64[1].length;
4045
4046		/* SJ! - ubuf needs to be virtual address. */
4047		fis_ubuf	= (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
4048		data_ubuf	= (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
4049#endif
4050	}
4051
4052
4053	if (fis_xferlen) {
4054		con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: "
4055		    "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
4056
4057		/* means IOCTL requires DMA */
4058		/* allocate the data transfer buffer */
4059		fis_dma_obj.size = fis_xferlen;
4060		fis_dma_obj.dma_attr = megasas_generic_dma_attr;
4061		fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4062		fis_dma_obj.dma_attr.dma_attr_count_max	= 0xFFFFFFFFU;
4063		fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
4064		fis_dma_obj.dma_attr.dma_attr_align = 1;
4065
4066		/* allocate kernel buffer for DMA */
4067		if (mega_alloc_dma_obj(instance, &fis_dma_obj) != 1) {
4068			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4069			    "could not allocate data transfer buffer."));
4070			return (DDI_FAILURE);
4071		}
4072
4073		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4074		if (ddi_copyin(fis_ubuf, (void *)fis_dma_obj.buffer,
4075		    fis_xferlen, mode)) {
4076			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4077			    "copy from user space failed\n"));
4078			return (1);
4079		}
4080	}
4081
4082	if (data_xferlen) {
4083		con_log(CL_ANN, (CE_NOTE, "issue_mfi_stp: data_ubuf = %p "
4084		    "data_xferlen = %x", data_ubuf, data_xferlen));
4085
4086		/* means IOCTL requires DMA */
4087		/* allocate the data transfer buffer */
4088		data_dma_obj.size = data_xferlen;
4089		data_dma_obj.dma_attr = megasas_generic_dma_attr;
4090		data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4091		data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4092		data_dma_obj.dma_attr.dma_attr_sgllen = 1;
4093		data_dma_obj.dma_attr.dma_attr_align = 1;
4094
4095		/* allocate kernel buffer for DMA */
4096		if (mega_alloc_dma_obj(instance, &data_dma_obj) != 1) {
4097			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4098			    "could not allocate data transfer buffer."));
4099			return (DDI_FAILURE);
4100		}
4101
4102		/* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
4103		if (ddi_copyin(data_ubuf, (void *) data_dma_obj.buffer,
4104		    data_xferlen, mode)) {
4105			con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4106			    "copy from user space failed\n"));
4107			return (1);
4108		}
4109	}
4110
4111	stp->cmd = kstp->cmd;
4112	stp->cmd_status	= kstp->cmd_status;
4113	stp->connection_status = kstp->connection_status;
4114	stp->target_id = kstp->target_id;
4115	stp->sge_count = kstp->sge_count;
4116	/* stp->context = kstp->context; */
4117	stp->timeout = kstp->timeout;
4118	stp->data_xfer_len = kstp->data_xfer_len;
4119
4120	bcopy((void *)kstp->fis, (void *)stp->fis, 10);
4121
4122	stp->flags = kstp->flags & ~MFI_FRAME_SGL64;
4123	stp->stp_flags = kstp->stp_flags;
4124	stp->sgl.sge32[0].length = fis_xferlen;
4125	stp->sgl.sge32[0].phys_addr = fis_dma_obj.dma_cookie[0].dmac_address;
4126	stp->sgl.sge32[1].length = data_xferlen;
4127	stp->sgl.sge32[1].phys_addr = data_dma_obj.dma_cookie[0].dmac_address;
4128
4129	cmd->sync_cmd = MEGASAS_TRUE;
4130	cmd->frame_count = 1;
4131
4132	if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4133		con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed\n"));
4134	} else {
4135
4136		if (fis_xferlen) {
4137			if (ddi_copyout(fis_dma_obj.buffer, fis_ubuf,
4138			    fis_xferlen, mode)) {
4139				con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4140				    "copy to user space failed\n"));
4141				return (1);
4142			}
4143		}
4144
4145		if (data_xferlen) {
4146			if (ddi_copyout(data_dma_obj.buffer, data_ubuf,
4147			    data_xferlen, mode)) {
4148				con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
4149				    "copy to user space failed\n"));
4150				return (1);
4151			}
4152		}
4153	}
4154
4155	kstp->cmd_status = stp->cmd_status;
4156
4157	if (fis_xferlen) {
4158		/* free kernel buffer */
4159		if (mega_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
4160			return (1);
4161	}
4162
4163	if (data_xferlen) {
4164		/* free kernel buffer */
4165		if (mega_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
4166			return (1);
4167	}
4168
4169	return (0);
4170}
4171
4172/*
4173 * fill_up_drv_ver
4174 */
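/*
 * The strings are copied with strlen(), i.e. without their terminating NUL;
 * the memset() below zeroes the whole structure first, so the fields stay
 * NUL-terminated as long as each string is shorter than its field.
 */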
4175static void
4176fill_up_drv_ver(struct megasas_drv_ver *dv)
4177{
4178	(void) memset(dv, 0, sizeof (struct megasas_drv_ver));
4179
4180	(void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
4181	(void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
4182	(void) memcpy(dv->drv_name, "megaraid_sas", strlen("megaraid_sas"));
4183	(void) memcpy(dv->drv_ver, MEGASAS_VERSION, strlen(MEGASAS_VERSION));
4184	(void) memcpy(dv->drv_rel_date, MEGASAS_RELDATE,
4185	    strlen(MEGASAS_RELDATE));
4186}
4187
4188/*
4189 * handle_drv_ioctl
4190 */
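/*
 * Driver-private IOCTLs are serviced entirely inside the driver, with no
 * firmware round trip: currently the driver version structure and the PCI
 * bus/device/function numbers plus the config-space header of the
 * controller.
 */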
4191static int
4192handle_drv_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4193    int mode)
4194{
4195	int	i;
4196	int	rval = 0;
4197	int	*props = NULL;
4198	void	*ubuf;
4199
4200	uint8_t		*pci_conf_buf;
4201	uint32_t	xferlen;
4202	uint32_t	num_props;
4203	uint_t		model;
4204	struct megasas_dcmd_frame	*kdcmd;
4205	struct megasas_drv_ver		dv;
4206	struct megasas_pci_information	pi;
4207
4208	kdcmd = (struct megasas_dcmd_frame *)&ioctl->frame[0];
4209
4210	model = ddi_model_convert_from(mode & FMODELS);
4211	if (model == DDI_MODEL_ILP32) {
4212		con_log(CL_ANN1, (CE_NOTE,
4213		    "handle_drv_ioctl: DDI_MODEL_ILP32"));
4214
4215		xferlen	= kdcmd->sgl.sge32[0].length;
4216
4217		/* SJ! - ubuf needs to be virtual address. */
4218		ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
4219	} else {
4220#ifdef _ILP32
4221		con_log(CL_ANN1, (CE_NOTE,
4222		    "handle_drv_ioctl: DDI_MODEL_ILP32"));
4223		xferlen	= kdcmd->sgl.sge32[0].length;
4224		/* SJ! - ubuf needs to be virtual address. */
4225		ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
4226#else
4227		con_log(CL_ANN1, (CE_NOTE,
4228		    "handle_drv_ioctl: DDI_MODEL_LP64"));
4229		xferlen	= kdcmd->sgl.sge64[0].length;
4230		/* SJ! - ubuf needs to be virtual address. */
4231		ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
4232#endif
4233	}
4234	con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4235	    "dataBuf=%p size=%d bytes", ubuf, xferlen));
4236
4237	switch (kdcmd->opcode) {
4238	case MR_DRIVER_IOCTL_DRIVER_VERSION:
4239		con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4240		    "MR_DRIVER_IOCTL_DRIVER_VERSION"));
4241
4242		fill_up_drv_ver(&dv);
4243
4244		if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
4245			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4246			    "MR_DRIVER_IOCTL_DRIVER_VERSION : "
4247			    "copy to user space failed\n"));
4248			kdcmd->cmd_status = 1;
4249			rval = 1;
4250		} else {
4251			kdcmd->cmd_status = 0;
4252		}
4253		break;
4254	case MR_DRIVER_IOCTL_PCI_INFORMATION:
4255		con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
4256		    "MR_DRIVER_IOCTL_PCI_INFORMATION"));
4257
4258		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
4259		    0, "reg", &props, &num_props)) {
4260			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4261			    "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4262			    "ddi_prop_lookup_int_array failed\n"));
4263			rval = 1;
4264		} else {
4265
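			/*
			 * props[0] is the first cell of the PCI "reg"
			 * property (IEEE 1275 binding): bits 23:16 carry the
			 * bus number, bits 15:11 the device number and bits
			 * 10:8 the function number, which is what the shifts
			 * below extract.
			 */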
4266			pi.busNumber = (props[0] >> 16) & 0xFF;
4267			pi.deviceNumber = (props[0] >> 11) & 0x1f;
4268			pi.functionNumber = (props[0] >> 8) & 0x7;
4269			ddi_prop_free((void *)props);
4270		}
4271
4272		pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
4273
4274		for (i = 0; i < (sizeof (struct megasas_pci_information) -
4275		    offsetof(struct megasas_pci_information, pciHeaderInfo));
4276		    i++) {
4277			pci_conf_buf[i] =
4278			    pci_config_get8(instance->pci_handle, i);
4279		}
4280
4281		if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
4282			con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4283			    "MR_DRIVER_IOCTL_PCI_INFORMATION : "
4284			    "copy to user space failed\n"));
4285			kdcmd->cmd_status = 1;
4286			rval = 1;
4287		} else {
4288			kdcmd->cmd_status = 0;
4289		}
4290		break;
4291	default:
4292		con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
4293		    "invalid driver specific IOCTL opcode = 0x%x",
4294		    kdcmd->opcode));
4295		kdcmd->cmd_status = 1;
4296		rval = 1;
4297		break;
4298	}
4299
4300	return (rval);
4301}
4302
4303/*
4304 * handle_mfi_ioctl
4305 */
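/*
 * Pass-through path for application MFI frames: borrow a command packet
 * from the pool, dispatch on the opcode in the embedded frame header
 * (DCMD, SMP, STP or LD/PD SCSI pass-through), then return the packet and
 * run the FMA handle checks before reporting the result.
 */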
4306static int
4307handle_mfi_ioctl(struct megasas_instance *instance, struct megasas_ioctl *ioctl,
4308    int mode)
4309{
4310	int	rval = 0;
4311
4312	struct megasas_header	*hdr;
4313	struct megasas_cmd	*cmd;
4314
4315	cmd = get_mfi_pkt(instance);
4316
4317	if (!cmd) {
4318		con_log(CL_ANN, (CE_WARN, "megasas: "
4319		    "failed to get a cmd packet\n"));
4320		return (1);
4321	}
4322
4323	hdr = (struct megasas_header *)&ioctl->frame[0];
4324
4325	switch (hdr->cmd) {
4326	case MFI_CMD_OP_DCMD:
4327		rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
4328		break;
4329	case MFI_CMD_OP_SMP:
4330		rval = issue_mfi_smp(instance, ioctl, cmd, mode);
4331		break;
4332	case MFI_CMD_OP_STP:
4333		rval = issue_mfi_stp(instance, ioctl, cmd, mode);
4334		break;
4335	case MFI_CMD_OP_LD_SCSI:
4336	case MFI_CMD_OP_PD_SCSI:
4337		rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
4338		break;
4339	default:
4340		con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
4341		    "invalid mfi ioctl hdr->cmd = %d\n", hdr->cmd));
4342		rval = 1;
4343		break;
4344	}
4345
4346
4347	return_mfi_pkt(instance, cmd);
4348	if (megasas_common_check(instance, cmd) != DDI_SUCCESS)
4349		rval = 1;
4350	return (rval);
4351}
4352
4353/*
4354 * AEN
4355 */
4356static int
4357handle_mfi_aen(struct megasas_instance *instance, struct megasas_aen *aen)
4358{
4359	int	rval = 0;
4360
4361	rval = register_mfi_aen(instance, instance->aen_seq_num,
4362	    aen->class_locale_word);
4363
4364	aen->cmd_status = (uint8_t)rval;
4365
4366	return (rval);
4367}
4368
4369static int
4370register_mfi_aen(struct megasas_instance *instance, uint32_t seq_num,
4371    uint32_t class_locale_word)
4372{
4373	int	ret_val;
4374
4375	struct megasas_cmd		*cmd;
4376	struct megasas_dcmd_frame	*dcmd;
4377	union megasas_evt_class_locale	curr_aen;
4378	union megasas_evt_class_locale	prev_aen;
4379
4380	/*
4381	 * If there is an AEN pending already (aen_cmd), check whether the
4382	 * class_locale of that pending AEN is inclusive of the new
4383	 * AEN request we currently have. If it is, then we don't have
4384	 * to do anything; in other words, the events that the current
4385	 * AEN request subscribes to have already been subscribed
4386	 * to.
4387	 *
4388	 * If the old cmd is _not_ inclusive, then we have to abort
4389	 * that command, form a class_locale that is a superset of both
4390	 * the old and the current one, and re-issue it to the FW.
4391	 */
4392
4393	curr_aen.word = class_locale_word;
4394
4395	if (instance->aen_cmd) {
4396		prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1];
4397
4398		/*
4399		 * A class whose enum value is smaller is inclusive of all
4400		 * higher values. If a PROGRESS (= -1) was previously
4401		 * registered, then new registration requests for higher
4402		 * classes need not be sent to FW. They are automatically
4403		 * included.
4404		 *
4405		 * Locale numbers don't have such hierarchy. They are bitmap
4406		 * values.
4407		 */
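		/*
		 * Illustrative example: if the pending registration was
		 * (class = PROGRESS (-1), locale = 0x00ff) and the new
		 * request is (class = CRITICAL, locale = 0x0010), then
		 * PROGRESS <= CRITICAL and (0x00ff & 0x0010) ^ 0x0010 == 0,
		 * so the pending AEN already covers the request and nothing
		 * is re-issued.  Otherwise the locales are OR-ed, the
		 * smaller (more inclusive) class is kept, and the pending
		 * command is aborted and re-registered below.
		 */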
4408		if ((prev_aen.members.class <= curr_aen.members.class) &&
4409		    !((prev_aen.members.locale & curr_aen.members.locale) ^
4410		    curr_aen.members.locale)) {
4411			/*
4412			 * Previously issued event registration includes
4413			 * current request. Nothing to do.
4414			 */
4415
4416			return (0);
4417		} else {
4418			curr_aen.members.locale |= prev_aen.members.locale;
4419
4420			if (prev_aen.members.class < curr_aen.members.class)
4421				curr_aen.members.class = prev_aen.members.class;
4422
4423			ret_val = abort_aen_cmd(instance, instance->aen_cmd);
4424
4425			if (ret_val) {
4426				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
4427				    "failed to abort previous AEN command\n"));
4428
4429				return (ret_val);
4430			}
4431		}
4432	} else {
4433		curr_aen.word = class_locale_word;
4434	}
4435
4436	cmd = get_mfi_pkt(instance);
4437
4438	if (!cmd)
4439		return (-ENOMEM);
4440
4441	dcmd = &cmd->frame->dcmd;
4442
4443	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
4444	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4445
4446	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
4447	    sizeof (struct megasas_evt_detail));
4448
4449	/* Prepare DCMD for aen registration */
4450	dcmd->cmd = MFI_CMD_OP_DCMD;
4451	dcmd->cmd_status = 0x0;
4452	dcmd->sge_count = 1;
4453	dcmd->flags = MFI_FRAME_DIR_READ;
4454	dcmd->timeout = 0;
4455	dcmd->data_xfer_len = sizeof (struct megasas_evt_detail);
4456	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
4457	dcmd->mbox.w[0] = seq_num;
4458	dcmd->mbox.w[1] = curr_aen.word;
4459	dcmd->sgl.sge32[0].phys_addr =
4460	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address;
4461	dcmd->sgl.sge32[0].length = sizeof (struct megasas_evt_detail);
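	/*
	 * mbox.w[0] carries the sequence number to start reporting from and
	 * mbox.w[1] the merged class/locale word; the single SGE points at
	 * the pre-allocated mfi_evt_detail_obj buffer that the firmware is
	 * expected to fill in when a matching event occurs.
	 */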
4462
4463	instance->aen_seq_num = seq_num;
4464
4465	/*
4466	 * Store reference to the cmd used to register for AEN. When an
4467	 * application wants us to register for AEN, we have to abort this
4468	 * cmd and re-register with a new EVENT LOCALE supplied by that app
4469	 */
4470	instance->aen_cmd = cmd;
4471
4472	cmd->frame_count = 1;
4473
4474	/* Issue the aen registration frame */
4475	/* atomic_add_16 (&instance->fw_outstanding, 1); */
4476	instance->func_ptr->issue_cmd(cmd, instance);
4477
4478	return (0);
4479}
4480
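/*
 * display_scsi_inquiry
 *
 * Pretty-print standard INQUIRY data: bytes 8-15 hold the vendor
 * identification, bytes 16-31 the product identification, bytes 32-35 the
 * product revision level, byte 0 bits 4:0 the peripheral device type and
 * byte 2 bits 2:0 the ANSI SCSI version, which is exactly how the loops
 * below slice the buffer.
 */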
4481static void
4482display_scsi_inquiry(caddr_t scsi_inq)
4483{
4484#define	MAX_SCSI_DEVICE_CODE	14
4485	int		i;
4486	char		inquiry_buf[256] = {0};
4487	int		len;
4488	const char	*const scsi_device_types[] = {
4489		"Direct-Access    ",
4490		"Sequential-Access",
4491		"Printer          ",
4492		"Processor        ",
4493		"WORM             ",
4494		"CD-ROM           ",
4495		"Scanner          ",
4496		"Optical Device   ",
4497		"Medium Changer   ",
4498		"Communications   ",
4499		"Unknown          ",
4500		"Unknown          ",
4501		"Unknown          ",
4502		"Enclosure        ",
4503	};
4504
4505	len = 0;
4506
4507	len += snprintf(inquiry_buf + len, 256 - len, "  Vendor: ");
4508	for (i = 8; i < 16; i++) {
4509		len += snprintf(inquiry_buf + len, 256 - len, "%c",
4510		    scsi_inq[i]);
4511	}
4512
4513	len += snprintf(inquiry_buf + len, 256 - len, "  Model: ");
4514
4515	for (i = 16; i < 32; i++) {
4516		len += snprintf(inquiry_buf + len, 256 - len, "%c",
4517		    scsi_inq[i]);
4518	}
4519
4520	len += snprintf(inquiry_buf + len, 256 - len, "  Rev: ");
4521
4522	for (i = 32; i < 36; i++) {
4523		len += snprintf(inquiry_buf + len, 256 - len, "%c",
4524		    scsi_inq[i]);
4525	}
4526
4527	len += snprintf(inquiry_buf + len, 256 - len, "\n");
4528
4529
4530	i = scsi_inq[0] & 0x1f;
4531
4532
4533	len += snprintf(inquiry_buf + len, 256 - len, "  Type:   %s ",
4534	    i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
4535	    "Unknown          ");
4536
4537
4538	len += snprintf(inquiry_buf + len, 256 - len,
4539	    "                 ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
4540
4541	if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
4542		len += snprintf(inquiry_buf + len, 256 - len, " CCS\n");
4543	} else {
4544		len += snprintf(inquiry_buf + len, 256 - len, "\n");
4545	}
4546
4547	con_log(CL_ANN1, (CE_CONT, "%s", inquiry_buf));
4548}
4549
4550static int
4551read_fw_status_reg_xscale(struct megasas_instance *instance)
4552{
4553	return ((int)RD_OB_MSG_0(instance));
4554}
4555
4556static int
4557read_fw_status_reg_ppc(struct megasas_instance *instance)
4558{
4559	return ((int)RD_OB_SCRATCH_PAD_0(instance));
4560}
4561
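/*
 * issue_cmd_xscale/issue_cmd_ppc
 *
 * Fire-and-forget posting of a frame to the inbound queue port.  MFI frames
 * are 64-byte entities, so the xscale path posts (frame_phys_addr >> 3) with
 * (frame_count - 1) OR-ed into the freed-up low bits, while the 1078 (ppc)
 * path posts the address itself with (frame_count - 1) shifted left by one
 * and bit 0 set.
 */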
4562static void
4563issue_cmd_xscale(struct megasas_cmd *cmd, struct megasas_instance *instance)
4564{
4565	atomic_add_16(&instance->fw_outstanding, 1);
4566
4567	/* Issue the command to the FW */
4568	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4569	    (cmd->frame_count - 1), instance);
4570}
4571
4572static void
4573issue_cmd_ppc(struct megasas_cmd *cmd, struct megasas_instance *instance)
4574{
4575	atomic_add_16(&instance->fw_outstanding, 1);
4576
4577	/* Issue the command to the FW */
4578	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4579	    (((cmd->frame_count - 1) << 1) | 1), instance);
4580}
4581
4582/*
4583 * issue_cmd_in_sync_mode
4584 */
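/*
 * The frame is posted to the inbound queue port and the caller then blocks
 * on int_cmd_cv until the completion interrupt moves cmd_status off the
 * ENODATA sentinel; note that the bound compares the number of cv_wait()
 * wakeups, not elapsed time, against msecs.
 */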
4585static int
4586issue_cmd_in_sync_mode_xscale(struct megasas_instance *instance,
4587    struct megasas_cmd *cmd)
4588{
4589	int		i;
4590	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
4591
4592	cmd->cmd_status	= ENODATA;
4593
4594	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4595	    (cmd->frame_count - 1), instance);
4596
4597	mutex_enter(&instance->int_cmd_mtx);
4598
4599	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
4600		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
4601	}
4602
4603	mutex_exit(&instance->int_cmd_mtx);
4604
4605	if (i < (msecs - 1)) {
4606		return (0);
4607	} else {
4608		return (1);
4609	}
4610}
4611
4612static int
4613issue_cmd_in_sync_mode_ppc(struct megasas_instance *instance,
4614    struct megasas_cmd *cmd)
4615{
4616	int		i;
4617	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
4618
4619	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called\n"));
4620
4621	cmd->cmd_status	= ENODATA;
4622
4623	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4624	    (((cmd->frame_count - 1) << 1) | 1), instance);
4625
4626	mutex_enter(&instance->int_cmd_mtx);
4627
4628	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
4629		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
4630	}
4631
4632	mutex_exit(&instance->int_cmd_mtx);
4633
4634	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done\n"));
4635
4636	if (i < (msecs - 1)) {
4637		return (0);
4638	} else {
4639		return (1);
4640	}
4641}
4642
4643/*
4644 * issue_cmd_in_poll_mode
4645 */
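/*
 * Polled issue: cmd_status is preset to MFI_CMD_STATUS_POLL_MODE (0xFF) and
 * MFI_FRAME_DONT_POST_IN_REPLY_QUEUE is set so the firmware completes the
 * frame in place rather than through the reply queue; the driver then
 * busy-waits in 1ms steps, up to MFI_POLL_TIMEOUT_SECS seconds, for the
 * status byte to change.
 */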
4646static int
4647issue_cmd_in_poll_mode_xscale(struct megasas_instance *instance,
4648    struct megasas_cmd *cmd)
4649{
4650	int		i;
4651	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
4652	struct megasas_header *frame_hdr;
4653
4654	frame_hdr = (struct megasas_header *)cmd->frame;
4655	frame_hdr->cmd_status	= MFI_CMD_STATUS_POLL_MODE;
4656	frame_hdr->flags 	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
4657
4658	/* issue the frame using inbound queue port */
4659	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr) >> 3) |
4660	    (cmd->frame_count - 1), instance);
4661
4662	/* wait for cmd_status to change from 0xFF */
4663	for (i = 0; i < msecs && (frame_hdr->cmd_status ==
4664	    MFI_CMD_STATUS_POLL_MODE); i++) {
4665		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
4666	}
4667
4668	if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
4669		con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
4670		    "cmd polling timed out"));
4671		return (DDI_FAILURE);
4672	}
4673
4674	return (DDI_SUCCESS);
4675}
4676
4677static int
4678issue_cmd_in_poll_mode_ppc(struct megasas_instance *instance,
4679    struct megasas_cmd *cmd)
4680{
4681	int		i;
4682	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
4683	struct megasas_header *frame_hdr;
4684
4685	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called\n"));
4686
4687	frame_hdr = (struct megasas_header *)cmd->frame;
4688	frame_hdr->cmd_status	= MFI_CMD_STATUS_POLL_MODE;
4689	frame_hdr->flags 	|= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
4690
4691	/* issue the frame using inbound queue port */
4692	WR_IB_QPORT((host_to_le32(cmd->frame_phys_addr)) |
4693	    (((cmd->frame_count - 1) << 1) | 1), instance);
4694
4695	/* wait for cmd_status to change from 0xFF */
4696	for (i = 0; i < msecs && (frame_hdr->cmd_status ==
4697	    MFI_CMD_STATUS_POLL_MODE); i++) {
4698		drv_usecwait(MILLISEC); /* wait for 1000 usecs */
4699	}
4700
4701	if (frame_hdr->cmd_status == MFI_CMD_STATUS_POLL_MODE) {
4702		con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
4703		    "cmd polling timed out"));
4704		return (DDI_FAILURE);
4705	}
4706
4707	return (DDI_SUCCESS);
4708}
4709
4710static void
4711enable_intr_xscale(struct megasas_instance *instance)
4712{
4713	MFI_ENABLE_INTR(instance);
4714}
4715
4716static void
4717enable_intr_ppc(struct megasas_instance *instance)
4718{
4719	uint32_t	mask;
4720
4721	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called\n"));
4722
4723	/* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
4724	WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
4725
4726	/*
4727	 * As 1078DE is same as 1078 chip, the interrupt mask
4728	 * remains the same.
4729	 */
4730	/* WR_OB_INTR_MASK(~0x80000000, instance); */
4731	WR_OB_INTR_MASK(~(MFI_REPLY_1078_MESSAGE_INTR), instance);
4732
4733	/* dummy read to force PCI flush */
4734	mask = RD_OB_INTR_MASK(instance);
4735
4736	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
4737	    "outbound_intr_mask = 0x%x\n", mask));
4738}
4739
4740static void
4741disable_intr_xscale(struct megasas_instance *instance)
4742{
4743	MFI_DISABLE_INTR(instance);
4744}
4745
4746static void
4747disable_intr_ppc(struct megasas_instance *instance)
4748{
4749	uint32_t	mask;
4750
4751	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called\n"));
4752
4753	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
4754	    "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));
4755
4756	/* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
4757	WR_OB_INTR_MASK(OB_INTR_MASK, instance);
4758
4759	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
4760	    "outbound_intr_mask = 0x%x\n", RD_OB_INTR_MASK(instance)));
4761
4762	/* dummy read to force PCI flush */
4763	mask = RD_OB_INTR_MASK(instance);
4764#ifdef lint
4765	mask = mask;
4766#endif
4767}
4768
4769static int
4770intr_ack_xscale(struct megasas_instance *instance)
4771{
4772	uint32_t	status;
4773
4774	/* check if it is our interrupt */
4775	status = RD_OB_INTR_STATUS(instance);
4776
4777	if (!(status & MFI_OB_INTR_STATUS_MASK)) {
4778		return (DDI_INTR_UNCLAIMED);
4779	}
4780
4781	/* clear the interrupt by writing back the same value */
4782	WR_OB_INTR_STATUS(status, instance);
4783
4784	return (DDI_INTR_CLAIMED);
4785}
4786
4787static int
4788intr_ack_ppc(struct megasas_instance *instance)
4789{
4790	uint32_t	status;
4791
4792	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called\n"));
4793
4794	/* check if it is our interrupt */
4795	status = RD_OB_INTR_STATUS(instance);
4796
4797	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x\n", status));
4798
4799	/*
4800	 * As 1078DE is same as 1078 chip, the status field
4801	 * remains the same.
4802	 */
4803	if (!(status & MFI_REPLY_1078_MESSAGE_INTR)) {
4804		return (DDI_INTR_UNCLAIMED);
4805	}
4806
4807	/* clear the interrupt by writing back the same value */
4808	WR_OB_DOORBELL_CLEAR(status, instance);
4809
4810	/* dummy READ */
4811	status = RD_OB_INTR_STATUS(instance);
4812
4813	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared\n"));
4814
4815	return (DDI_INTR_CLAIMED);
4816}
4817
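/*
 * megasas_common_check
 *
 * Post-command FMA sweep: if the frame DMA handle, the internal or event
 * detail DMA handles, or the register access handle report an error, flag
 * the service impact as DDI_SERVICE_UNAFFECTED, mark the packet (if any)
 * with CMD_TRAN_ERR and return DDI_FAILURE.
 */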
4818static int
4819megasas_common_check(struct megasas_instance *instance,
4820    struct  megasas_cmd *cmd)
4821{
4822	int ret = DDI_SUCCESS;
4823
4824	if (megasas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
4825	    DDI_SUCCESS) {
4826		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4827		if (cmd->pkt != NULL) {
4828			cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4829			cmd->pkt->pkt_statistics = 0;
4830		}
4831		ret = DDI_FAILURE;
4832	}
4833	if (megasas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
4834	    != DDI_SUCCESS) {
4835		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4836		if (cmd->pkt != NULL) {
4837			cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4838			cmd->pkt->pkt_statistics = 0;
4839		}
4840		ret = DDI_FAILURE;
4841	}
4842	if (megasas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
4843	    DDI_SUCCESS) {
4844		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4845		if (cmd->pkt != NULL) {
4846			cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4847			cmd->pkt->pkt_statistics = 0;
4848		}
4849		ret = DDI_FAILURE;
4850	}
4851	if (megasas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
4852		ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4853		ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
4854		if (cmd->pkt != NULL) {
4855			cmd->pkt->pkt_reason = CMD_TRAN_ERR;
4856			cmd->pkt->pkt_statistics = 0;
4857		}
4858		ret = DDI_FAILURE;
4859	}
4860
4861	return (ret);
4862}
4863
4864/*ARGSUSED*/
4865static int
4866megasas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
4867{
4868	/*
4869	 * as the driver can always deal with an error in any dma or
4870	 * access handle, we can just return the fme_status value.
4871	 */
4872	pci_ereport_post(dip, err, NULL);
4873	return (err->fme_status);
4874}
4875
4876static void
4877megasas_fm_init(struct megasas_instance *instance)
4878{
4879	/* Need to change iblock to priority for new MSI intr */
4880	ddi_iblock_cookie_t fm_ibc;
4881
4882	/* Only register with IO Fault Services if we have some capability */
4883	if (instance->fm_capabilities) {
4884		/* Adjust access and dma attributes for FMA */
4885		endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
4886		megasas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
4887
4888		/*
4889		 * Register capabilities with IO Fault Services.
4890		 * fm_capabilities will be updated to indicate
4891		 * capabilities actually supported (not requested.)
4892		 */
4893
4894		ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
4895
4896		/*
4897		 * Initialize pci ereport capabilities if ereport
4898		 * capable (should always be.)
4899		 */
4900
4901		if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
4902		    DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4903			pci_ereport_setup(instance->dip);
4904		}
4905
4906		/*
4907		 * Register error callback if error callback capable.
4908		 */
4909		if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4910			ddi_fm_handler_register(instance->dip,
4911			    megasas_fm_error_cb, (void*) instance);
4912		}
4913	} else {
4914		endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4915		megasas_generic_dma_attr.dma_attr_flags = 0;
4916	}
4917}
4918
4919static void
4920megasas_fm_fini(struct megasas_instance *instance)
4921{
4922	/* Only unregister FMA capabilities if registered */
4923	if (instance->fm_capabilities) {
4924		/*
4925		 * Un-register error callback if error callback capable.
4926		 */
4927		if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4928			ddi_fm_handler_unregister(instance->dip);
4929		}
4930
4931		/*
4932		 * Release any resources allocated by pci_ereport_setup()
4933		 */
4934		if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
4935		    DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
4936			pci_ereport_teardown(instance->dip);
4937		}
4938
4939		/* Unregister from IO Fault Services */
4940		ddi_fm_fini(instance->dip);
4941
4942		/* Adjust access and dma attributes for FMA */
4943		endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4944		megasas_generic_dma_attr.dma_attr_flags = 0;
4945	}
4946}
4947
4948int
4949megasas_check_acc_handle(ddi_acc_handle_t handle)
4950{
4951	ddi_fm_error_t de;
4952
4953	if (handle == NULL) {
4954		return (DDI_FAILURE);
4955	}
4956
4957	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
4958
4959	return (de.fme_status);
4960}
4961
4962int
4963megasas_check_dma_handle(ddi_dma_handle_t handle)
4964{
4965	ddi_fm_error_t de;
4966
4967	if (handle == NULL) {
4968		return (DDI_FAILURE);
4969	}
4970
4971	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
4972
4973	return (de.fme_status);
4974}
4975
4976void
4977megasas_fm_ereport(struct megasas_instance *instance, char *detail)
4978{
4979	uint64_t ena;
4980	char buf[FM_MAX_CLASS];
4981
4982	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
4983	ena = fm_ena_generate(0, FM_ENA_FMT1);
4984	if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
4985		ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
4986		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
4987	}
4988}
4989