1/*
2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy
3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy
4 * Support: freebsdraid@avagotech.com
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer. 2. Redistributions
12 * in binary form must reproduce the above copyright notice, this list of
13 * conditions and the following disclaimer in the documentation and/or other
14 * materials provided with the distribution. 3. Neither the name of the
15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or
16 * promote products derived from this software without specific prior written
17 * permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing
33 * official policies,either expressed or implied, of the FreeBSD Project.
34 *
35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621
36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
37 *
38 */
39
40#include <sys/cdefs.h>
41__FBSDID("$FreeBSD: releng/10.3/sys/dev/mrsas/mrsas.c 284267 2015-06-11 14:11:41Z kadesai $");
42
43#include <dev/mrsas/mrsas.h>
44#include <dev/mrsas/mrsas_ioctl.h>
45
46#include <cam/cam.h>
47#include <cam/cam_ccb.h>
48
49#include <sys/sysctl.h>
50#include <sys/types.h>
51#include <sys/kthread.h>
52#include <sys/taskqueue.h>
53#include <sys/smp.h>
54
55
56/*
57 * Function prototypes
58 */
59static d_open_t mrsas_open;
60static d_close_t mrsas_close;
61static d_read_t mrsas_read;
62static d_write_t mrsas_write;
63static d_ioctl_t mrsas_ioctl;
64static d_poll_t mrsas_poll;
65
66static struct mrsas_mgmt_info mrsas_mgmt_info;
67static struct mrsas_ident *mrsas_find_ident(device_t);
68static int mrsas_setup_msix(struct mrsas_softc *sc);
69static int mrsas_allocate_msix(struct mrsas_softc *sc);
70static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
71static void mrsas_flush_cache(struct mrsas_softc *sc);
72static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
73static void mrsas_ocr_thread(void *arg);
74static int mrsas_get_map_info(struct mrsas_softc *sc);
75static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
76static int mrsas_sync_map_info(struct mrsas_softc *sc);
77static int mrsas_get_pd_list(struct mrsas_softc *sc);
78static int mrsas_get_ld_list(struct mrsas_softc *sc);
79static int mrsas_setup_irq(struct mrsas_softc *sc);
80static int mrsas_alloc_mem(struct mrsas_softc *sc);
81static int mrsas_init_fw(struct mrsas_softc *sc);
82static int mrsas_setup_raidmap(struct mrsas_softc *sc);
83static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
84static int mrsas_clear_intr(struct mrsas_softc *sc);
85static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
86static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
87static int
88mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
89    struct mrsas_mfi_cmd *cmd_to_abort);
90static struct mrsas_softc *
91mrsas_get_softc_instance(struct cdev *dev,
92    u_long cmd, caddr_t arg);
93u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
94u_int8_t
95mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
96    struct mrsas_mfi_cmd *mfi_cmd);
97void	mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc);
98int	mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
99int	mrsas_init_adapter(struct mrsas_softc *sc);
100int	mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
101int	mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
102int	mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
103int	mrsas_ioc_init(struct mrsas_softc *sc);
104int	mrsas_bus_scan(struct mrsas_softc *sc);
105int	mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
106int	mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
107int	mrsas_reset_ctrl(struct mrsas_softc *sc);
108int	mrsas_wait_for_outstanding(struct mrsas_softc *sc);
109int
110mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
111    struct mrsas_mfi_cmd *cmd);
112int
113mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
114    int size);
115void	mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
116void	mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
117void	mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
118void	mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
119void	mrsas_disable_intr(struct mrsas_softc *sc);
120void	mrsas_enable_intr(struct mrsas_softc *sc);
121void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
122void	mrsas_free_mem(struct mrsas_softc *sc);
123void	mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
124void	mrsas_isr(void *arg);
125void	mrsas_teardown_intr(struct mrsas_softc *sc);
126void	mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
127void	mrsas_kill_hba(struct mrsas_softc *sc);
128void	mrsas_aen_handler(struct mrsas_softc *sc);
129void
130mrsas_write_reg(struct mrsas_softc *sc, int offset,
131    u_int32_t value);
132void
133mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
134    u_int32_t req_desc_hi);
135void	mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
136void
137mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
138    struct mrsas_mfi_cmd *cmd, u_int8_t status);
139void
140mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
141    u_int8_t extStatus);
142struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
143
144MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd
145        (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
146
147extern int mrsas_cam_attach(struct mrsas_softc *sc);
148extern void mrsas_cam_detach(struct mrsas_softc *sc);
149extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
150extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
151extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
152extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
153extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
154extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
155extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
156extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
157extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map);
158extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
159extern void mrsas_xpt_release(struct mrsas_softc *sc);
160extern MRSAS_REQUEST_DESCRIPTOR_UNION *
161mrsas_get_request_desc(struct mrsas_softc *sc,
162    u_int16_t index);
163extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
164static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
165static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
166
167SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
168
169/*
170 * PCI device struct and table
171 *
172 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID to match */
	uint16_t device;	/* PCI device ID to match */
	uint16_t subvendor;	/* PCI subsystem vendor; 0xffff acts as wildcard */
	uint16_t subdevice;	/* PCI subsystem device; 0xffff acts as wildcard */
	const char *desc;	/* human-readable controller description */
}	MRSAS_CTLR_ID;
180
/*
 * Controllers handled by this driver; scanned by mrsas_find_ident().
 * The all-zero entry terminates the table.
 */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0, 0, 0, 0, NULL}
};
187
188/*
189 * Character device entry points
190 *
191 */
/* Entry points for the /dev/mrsas%u management character device. */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,		/* management/IOCTL path for userland tools */
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};
202
203MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
204
205/*
206 * In the cdevsw routines, we find our softc by using the si_drv1 member of
207 * struct cdev.  We set this variable to point to our softc in our attach
208 * routine when we create the /dev entry.
209 */
210int
211mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
212{
213	struct mrsas_softc *sc;
214
215	sc = dev->si_drv1;
216	return (0);
217}
218
219int
220mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
221{
222	struct mrsas_softc *sc;
223
224	sc = dev->si_drv1;
225	return (0);
226}
227
228int
229mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
230{
231	struct mrsas_softc *sc;
232
233	sc = dev->si_drv1;
234	return (0);
235}
236int
237mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
238{
239	struct mrsas_softc *sc;
240
241	sc = dev->si_drv1;
242	return (0);
243}
244
245/*
246 * Register Read/Write Functions
247 *
248 */
249void
250mrsas_write_reg(struct mrsas_softc *sc, int offset,
251    u_int32_t value)
252{
253	bus_space_tag_t bus_tag = sc->bus_tag;
254	bus_space_handle_t bus_handle = sc->bus_handle;
255
256	bus_space_write_4(bus_tag, bus_handle, offset, value);
257}
258
259u_int32_t
260mrsas_read_reg(struct mrsas_softc *sc, int offset)
261{
262	bus_space_tag_t bus_tag = sc->bus_tag;
263	bus_space_handle_t bus_handle = sc->bus_handle;
264
265	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
266}
267
268
269/*
270 * Interrupt Disable/Enable/Clear Functions
271 *
272 */
273void
274mrsas_disable_intr(struct mrsas_softc *sc)
275{
276	u_int32_t mask = 0xFFFFFFFF;
277	u_int32_t status;
278
279	sc->mask_interrupts = 1;
280	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
281	/* Dummy read to force pci flush */
282	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
283}
284
285void
286mrsas_enable_intr(struct mrsas_softc *sc)
287{
288	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
289	u_int32_t status;
290
291	sc->mask_interrupts = 0;
292	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
293	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
294
295	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
296	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
297}
298
/*
 * Classify and acknowledge a pending interrupt.
 * Returns 1 when the interrupt belongs to this adapter (FW state change
 * or a reply), 0 when it is not ours (shared-IRQ case).
 */
static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status, fw_status, fw_state;

	/* Read received interrupt */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/*
	 * If FW state change interrupt is received, write to it again to
	 * clear
	 */
	if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
		fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
			/* Kick the OCR (online controller reset) thread. */
			if (sc->ocr_thread_active)
				wakeup(&sc->ocr_chan);
		}
		/* Write the status back to ack, then read to flush the write. */
		mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
		mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
		return (1);
	}
	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return (0);

	/* We got a reply interrupt */
	return (1);
}
331
332/*
333 * PCI Support Functions
334 *
335 */
336static struct mrsas_ident *
337mrsas_find_ident(device_t dev)
338{
339	struct mrsas_ident *pci_device;
340
341	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
342		if ((pci_device->vendor == pci_get_vendor(dev)) &&
343		    (pci_device->device == pci_get_device(dev)) &&
344		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
345		    (pci_device->subvendor == 0xffff)) &&
346		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
347		    (pci_device->subdevice == 0xffff)))
348			return (pci_device);
349	}
350	return (NULL);
351}
352
353static int
354mrsas_probe(device_t dev)
355{
356	static u_int8_t first_ctrl = 1;
357	struct mrsas_ident *id;
358
359	if ((id = mrsas_find_ident(dev)) != NULL) {
360		if (first_ctrl) {
361			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
362			    MRSAS_VERSION);
363			first_ctrl = 0;
364		}
365		device_set_desc(dev, id->desc);
366		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
367		return (-30);
368	}
369	return (ENXIO);
370}
371
372/*
373 * mrsas_setup_sysctl:	setup sysctl values for mrsas
374 * input:				Adapter instance soft state
375 *
376 * Setup sysctl entries for mrsas driver.
377 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the context/tree newbus created for this device ... */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* ... otherwise build our own per-unit node under hw.mrsas. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	/* Read-write knobs first, then read-only status values. */
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

}
445
446/*
447 * mrsas_get_tunables:	get tunable parameters.
448 * input:				Adapter instance soft state
449 *
450 * Get tunable parameters. This will help to debug driver at boot time.
451 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug = MRSAS_FAULT;
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;

	/*
	 * Grab the global debug-level tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/*
	 * Grab the global load-balancing pending-commands tunable.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables */
	/* Per-unit debug level overrides the global one fetched above. */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}
479
480/*
 * mrsas_alloc_evt_log_info_cmd: Allocates memory to get event log information.
482 * Used to get sequence number at driver load time.
483 * input:		Adapter soft state
484 *
485 * Allocates DMAable memory for the event log info internal command.
486 */
487int
488mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
489{
490	int el_info_size;
491
492	/* Allocate get event log info command */
493	el_info_size = sizeof(struct mrsas_evt_log_info);
494	if (bus_dma_tag_create(sc->mrsas_parent_tag,
495	    1, 0,
496	    BUS_SPACE_MAXADDR_32BIT,
497	    BUS_SPACE_MAXADDR,
498	    NULL, NULL,
499	    el_info_size,
500	    1,
501	    el_info_size,
502	    BUS_DMA_ALLOCNOW,
503	    NULL, NULL,
504	    &sc->el_info_tag)) {
505		device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
506		return (ENOMEM);
507	}
508	if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
509	    BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
510		device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
511		return (ENOMEM);
512	}
513	if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
514	    sc->el_info_mem, el_info_size, mrsas_addr_cb,
515	    &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
516		device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
517		return (ENOMEM);
518	}
519	memset(sc->el_info_mem, 0, el_info_size);
520	return (0);
521}
522
523/*
 * mrsas_free_evt_log_info_cmd:	Free memory for Event log info command
525 * input:					Adapter soft state
526 *
527 * Deallocates memory for the event log info internal command.
528 */
529void
530mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
531{
532	if (sc->el_info_phys_addr)
533		bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
534	if (sc->el_info_mem != NULL)
535		bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
536	if (sc->el_info_tag != NULL)
537		bus_dma_tag_destroy(sc->el_info_tag);
538}
539
540/*
541 *  mrsas_get_seq_num:	Get latest event sequence number
542 *  @sc:				Adapter soft state
543 *  @eli:				Firmware event log sequence number information.
544 *
545 * Firmware maintains a log of all events in a non-volatile area.
546 * Driver get the sequence number using DCMD
547 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
548 */
549
550static int
551mrsas_get_seq_num(struct mrsas_softc *sc,
552    struct mrsas_evt_log_info *eli)
553{
554	struct mrsas_mfi_cmd *cmd;
555	struct mrsas_dcmd_frame *dcmd;
556
557	cmd = mrsas_get_mfi_cmd(sc);
558
559	if (!cmd) {
560		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
561		return -ENOMEM;
562	}
563	dcmd = &cmd->frame->dcmd;
564
565	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
566		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
567		mrsas_release_mfi_cmd(cmd);
568		return -ENOMEM;
569	}
570	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
571
572	dcmd->cmd = MFI_CMD_DCMD;
573	dcmd->cmd_status = 0x0;
574	dcmd->sge_count = 1;
575	dcmd->flags = MFI_FRAME_DIR_READ;
576	dcmd->timeout = 0;
577	dcmd->pad_0 = 0;
578	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
579	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
580	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
581	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
582
583	mrsas_issue_blocked_cmd(sc, cmd);
584
585	/*
586	 * Copy the data back into callers buffer
587	 */
588	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
589	mrsas_free_evt_log_info_cmd(sc);
590	mrsas_release_mfi_cmd(cmd);
591
592	return 0;
593}
594
595
596/*
597 *  mrsas_register_aen:		Register for asynchronous event notification
598 *  @sc:			Adapter soft state
599 *  @seq_num:			Starting sequence number
600 *  @class_locale:		Class of the event
601 *
602 *  This function subscribes for events beyond the @seq_num
603 *  and type @class_locale.
604 *
605 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		/* mbox.w[1] holds the class/locale the pending AEN used. */
		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset: union of locales, lowest class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort "
				    "previous AEN command\n");
				return ret_val;
			}
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;

	/* Clear the buffer the FW will fill with the next event's detail. */
	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/*
	 * NOTE(review): if aen_cmd became non-NULL again at this point
	 * (presumably another registration got in after the abort above),
	 * our freshly built command is dropped — confirm this race handling
	 * is intentional.
	 */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}
714
715/*
716 * mrsas_start_aen:	Subscribes to AEN during driver load time
717 * @instance:		Adapter soft state
718 */
719static int
720mrsas_start_aen(struct mrsas_softc *sc)
721{
722	struct mrsas_evt_log_info eli;
723	union mrsas_evt_class_locale class_locale;
724
725
726	/* Get the latest sequence number from FW */
727
728	memset(&eli, 0, sizeof(eli));
729
730	if (mrsas_get_seq_num(sc, &eli))
731		return -1;
732
733	/* Register AEN with FW for latest sequence number plus 1 */
734	class_locale.members.reserved = 0;
735	class_locale.members.locale = MR_EVT_LOCALE_ALL;
736	class_locale.members.class = MR_EVT_CLASS_DEBUG;
737
738	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
739	    class_locale.word);
740
741}
742
743/*
744 * mrsas_setup_msix:	Allocate MSI-x vectors
745 * @sc:					adapter soft state
746 */
747static int
748mrsas_setup_msix(struct mrsas_softc *sc)
749{
750	int i;
751
752	for (i = 0; i < sc->msix_vectors; i++) {
753		sc->irq_context[i].sc = sc;
754		sc->irq_context[i].MSIxIndex = i;
755		sc->irq_id[i] = i + 1;
756		sc->mrsas_irq[i] = bus_alloc_resource_any
757		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
758		    ,RF_ACTIVE);
759		if (sc->mrsas_irq[i] == NULL) {
760			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
761			goto irq_alloc_failed;
762		}
763		if (bus_setup_intr(sc->mrsas_dev,
764		    sc->mrsas_irq[i],
765		    INTR_MPSAFE | INTR_TYPE_CAM,
766		    NULL, mrsas_isr, &sc->irq_context[i],
767		    &sc->intr_handle[i])) {
768			device_printf(sc->mrsas_dev,
769			    "Cannot set up MSI-x interrupt handler\n");
770			goto irq_alloc_failed;
771		}
772	}
773	return SUCCESS;
774
775irq_alloc_failed:
776	mrsas_teardown_intr(sc);
777	return (FAIL);
778}
779
780/*
781 * mrsas_allocate_msix:		Setup MSI-x vectors
782 * @sc:						adapter soft state
783 */
784static int
785mrsas_allocate_msix(struct mrsas_softc *sc)
786{
787	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
788		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
789		    " of vectors\n", sc->msix_vectors);
790	} else {
791		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
792		goto irq_alloc_failed;
793	}
794	return SUCCESS;
795
796irq_alloc_failed:
797	mrsas_teardown_intr(sc);
798	return (FAIL);
799}
800
801/*
802 * mrsas_attach:	PCI entry point
803 * input:			pointer to device struct
804 *
805 * Performs setup of PCI and registers, initializes mutexes and linked lists,
 * registers interrupts and CAM, and initializes the adapter/controller to
807 * its proper state.
808 */
809static int
810mrsas_attach(device_t dev)
811{
812	struct mrsas_softc *sc = device_get_softc(dev);
813	uint32_t cmd, bar, error;
814
815	/* Look up our softc and initialize its fields. */
816	sc->mrsas_dev = dev;
817	sc->device_id = pci_get_device(dev);
818
819	mrsas_get_tunables(sc);
820
821	/*
822	 * Set up PCI and registers
823	 */
824	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
825	if ((cmd & PCIM_CMD_PORTEN) == 0) {
826		return (ENXIO);
827	}
828	/* Force the busmaster enable bit on. */
829	cmd |= PCIM_CMD_BUSMASTEREN;
830	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
831
832	bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
833
834	sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
835	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
836	    &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
837	    == NULL) {
838		device_printf(dev, "Cannot allocate PCI registers\n");
839		goto attach_fail;
840	}
841	sc->bus_tag = rman_get_bustag(sc->reg_res);
842	sc->bus_handle = rman_get_bushandle(sc->reg_res);
843
844	/* Intialize mutexes */
845	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
846	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
847	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
848	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
849	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
850	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
851	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
852	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
853
854	/*
855	 * Intialize a counting Semaphore to take care no. of concurrent
856	 * IOCTLs
857	 */
858	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_MFI_CMDS - 5, IOCTL_SEMA_DESCRIPTION);
859
860	/* Intialize linked list */
861	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
862	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
863
864	mrsas_atomic_set(&sc->fw_outstanding, 0);
865
866	sc->io_cmds_highwater = 0;
867
868	/* Create a /dev entry for this device. */
869	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
870	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
871	    device_get_unit(dev));
872	if (device_get_unit(dev) == 0)
873		make_dev_alias(sc->mrsas_cdev, "megaraid_sas_ioctl_node");
874	if (sc->mrsas_cdev)
875		sc->mrsas_cdev->si_drv1 = sc;
876
877	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
878	sc->UnevenSpanSupport = 0;
879
880	sc->msix_enable = 0;
881
882	/* Initialize Firmware */
883	if (mrsas_init_fw(sc) != SUCCESS) {
884		goto attach_fail_fw;
885	}
886	/* Register SCSI mid-layer */
887	if ((mrsas_cam_attach(sc) != SUCCESS)) {
888		goto attach_fail_cam;
889	}
890	/* Register IRQs */
891	if (mrsas_setup_irq(sc) != SUCCESS) {
892		goto attach_fail_irq;
893	}
894	/* Enable Interrupts */
895	mrsas_enable_intr(sc);
896
897	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
898	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
899	    device_get_unit(sc->mrsas_dev));
900	if (error) {
901		printf("Error %d starting rescan thread\n", error);
902		goto attach_fail_irq;
903	}
904	mrsas_setup_sysctl(sc);
905
906	/* Initiate AEN (Asynchronous Event Notification) */
907
908	if (mrsas_start_aen(sc)) {
909		printf("Error: start aen failed\n");
910		goto fail_start_aen;
911	}
912	/*
913	 * Add this controller to mrsas_mgmt_info structure so that it can be
914	 * exported to management applications
915	 */
916	if (device_get_unit(dev) == 0)
917		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
918
919	mrsas_mgmt_info.count++;
920	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
921	mrsas_mgmt_info.max_index++;
922
923	return (0);
924
925fail_start_aen:
926attach_fail_irq:
927	mrsas_teardown_intr(sc);
928attach_fail_cam:
929	mrsas_cam_detach(sc);
930attach_fail_fw:
931	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
932	if (sc->msix_enable == 1)
933		pci_release_msi(sc->mrsas_dev);
934	mrsas_free_mem(sc);
935	mtx_destroy(&sc->sim_lock);
936	mtx_destroy(&sc->aen_lock);
937	mtx_destroy(&sc->pci_lock);
938	mtx_destroy(&sc->io_lock);
939	mtx_destroy(&sc->ioctl_lock);
940	mtx_destroy(&sc->mpt_cmd_pool_lock);
941	mtx_destroy(&sc->mfi_cmd_pool_lock);
942	mtx_destroy(&sc->raidmap_lock);
943	/* Destroy the counting semaphore created for Ioctl */
944	sema_destroy(&sc->ioctl_count_sema);
945attach_fail:
946	destroy_dev(sc->mrsas_cdev);
947	if (sc->reg_res) {
948		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
949		    sc->reg_res_id, sc->reg_res);
950	}
951	return (ENXIO);
952}
953
954/*
955 * mrsas_detach:	De-allocates and teardown resources
956 * input:			pointer to device struct
957 *
958 * This function is the entry point for device disconnect and detach.
959 * It performs memory de-allocations, shutdown of the controller and various
960 * teardown and destroy resource functions.
961 */
static int
mrsas_detach(device_t dev)
{
	struct mrsas_softc *sc;
	int i = 0;

	sc = device_get_softc(dev);
	/* Checked by the IOCTL path (and presumably the OCR thread) to refuse new work. */
	sc->remove_in_progress = 1;

	/* Destroy the character device so no other IOCTL will be handled */
	destroy_dev(sc->mrsas_cdev);

	/*
	 * Take the instance off the instance array. Note that we will not
	 * decrement the max_index. We let this array be sparse array
	 */
	for (i = 0; i < mrsas_mgmt_info.max_index; i++) {
		if (mrsas_mgmt_info.sc_ptr[i] == sc) {
			mrsas_mgmt_info.count--;
			mrsas_mgmt_info.sc_ptr[i] = NULL;
			break;
		}
	}

	/* Wake the OCR thread so it can notice the detach and exit. */
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
	/* First wait for any in-flight controller reset (OCR) to finish... */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for ocr to be finished\n", i);
		}
		pause("mr_shutdown", hz);
	}
	i = 0;
	/* ...then wait for the OCR thread itself to terminate. */
	while (sc->ocr_thread_active) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "mrsas_ocr thread to quit ocr %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_shutdown", hz);
	}
	/* Flush controller cache and notify FW of the shutdown. */
	mrsas_flush_cache(sc);
	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
	mrsas_disable_intr(sc);
	mrsas_cam_detach(sc);
	mrsas_teardown_intr(sc);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);

	/*
	 * Wait for all the semaphores to be released, i.e. until the count
	 * is back to MRSAS_MAX_MFI_CMDS - 5 — presumably the semaphore's
	 * initial value; confirm against the sema_init() in attach.
	 */
	while (sema_value(&sc->ioctl_count_sema) != (MRSAS_MAX_MFI_CMDS - 5))
		pause("mr_shutdown", hz);

	/* Destroy the counting semaphore created for Ioctl */
	sema_destroy(&sc->ioctl_count_sema);

	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
	}
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}
1038
1039/*
1040 * mrsas_free_mem:		Frees allocated memory
1041 * input:				Adapter instance soft state
1042 *
1043 * This function is called from mrsas_detach() to free previously allocated
1044 * memory.
1045 */
void
mrsas_free_mem(struct mrsas_softc *sc)
{
	int i;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;

	/*
	 * Free RAID map memory.  Each DMA region is released in the
	 * canonical busdma order: unload map, free memory, destroy tag.
	 */
	for (i = 0; i < 2; i++) {
		if (sc->raidmap_phys_addr[i])
			bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_mem[i] != NULL)
			bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
		if (sc->raidmap_tag[i] != NULL)
			bus_dma_tag_destroy(sc->raidmap_tag[i]);

		if (sc->ld_drv_map[i] != NULL)
			free(sc->ld_drv_map[i], M_MRSAS);
	}

	/*
	 * Free version buffer memory
	 */
	if (sc->verbuf_phys_addr)
		bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
	if (sc->verbuf_mem != NULL)
		bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
	if (sc->verbuf_tag != NULL)
		bus_dma_tag_destroy(sc->verbuf_tag);


	/*
	 * Free sense buffer memory
	 */
	if (sc->sense_phys_addr)
		bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
	if (sc->sense_mem != NULL)
		bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
	if (sc->sense_tag != NULL)
		bus_dma_tag_destroy(sc->sense_tag);

	/*
	 * Free chain frame memory
	 */
	if (sc->chain_frame_phys_addr)
		bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
	if (sc->chain_frame_mem != NULL)
		bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
	if (sc->chain_frame_tag != NULL)
		bus_dma_tag_destroy(sc->chain_frame_tag);

	/*
	 * Free IO Request memory
	 */
	if (sc->io_request_phys_addr)
		bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
	if (sc->io_request_mem != NULL)
		bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
	if (sc->io_request_tag != NULL)
		bus_dma_tag_destroy(sc->io_request_tag);

	/*
	 * Free Reply Descriptor memory
	 */
	if (sc->reply_desc_phys_addr)
		bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
	if (sc->reply_desc_mem != NULL)
		bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
	if (sc->reply_desc_tag != NULL)
		bus_dma_tag_destroy(sc->reply_desc_tag);

	/*
	 * Free event detail memory
	 */
	if (sc->evt_detail_phys_addr)
		bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
	if (sc->evt_detail_mem != NULL)
		bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
	if (sc->evt_detail_tag != NULL)
		bus_dma_tag_destroy(sc->evt_detail_tag);

	/*
	 * Free MFI frames (the DMA frames; the command structs themselves
	 * are freed further below)
	 */
	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			mfi_cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, mfi_cmd);
		}
	}
	if (sc->mficmd_frame_tag != NULL)
		bus_dma_tag_destroy(sc->mficmd_frame_tag);

	/*
	 * Free MPT internal command list
	 * NOTE(review): mpt_cmd->data_dmamap is dereferenced without a NULL
	 * check on the entry; assumes every slot was fully allocated by the
	 * alloc path — verify against mrsas_alloc_mpt_cmds().
	 */
	max_cmd = sc->max_fw_cmds;
	if (sc->mpt_cmd_list) {
		for (i = 0; i < max_cmd; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
			free(sc->mpt_cmd_list[i], M_MRSAS);
		}
		free(sc->mpt_cmd_list, M_MRSAS);
		sc->mpt_cmd_list = NULL;
	}
	/*
	 * Free MFI internal command list
	 */

	if (sc->mfi_cmd_list) {
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			free(sc->mfi_cmd_list[i], M_MRSAS);
		}
		free(sc->mfi_cmd_list, M_MRSAS);
		sc->mfi_cmd_list = NULL;
	}
	/*
	 * Free request descriptor memory
	 */
	free(sc->req_desc, M_MRSAS);
	sc->req_desc = NULL;

	/*
	 * Destroy parent tag (must come after all child tags above)
	 */
	if (sc->mrsas_parent_tag != NULL)
		bus_dma_tag_destroy(sc->mrsas_parent_tag);

	/*
	 * Free ctrl_info memory
	 */
	if (sc->ctrl_info != NULL)
		free(sc->ctrl_info, M_MRSAS);
}
1184
1185/*
1186 * mrsas_teardown_intr:	Teardown interrupt
1187 * input:				Adapter instance soft state
1188 *
1189 * This function is called from mrsas_detach() to teardown and release bus
 * interrupt resource.
1191 */
1192void
1193mrsas_teardown_intr(struct mrsas_softc *sc)
1194{
1195	int i;
1196
1197	if (!sc->msix_enable) {
1198		if (sc->intr_handle[0])
1199			bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]);
1200		if (sc->mrsas_irq[0] != NULL)
1201			bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1202			    sc->irq_id[0], sc->mrsas_irq[0]);
1203		sc->intr_handle[0] = NULL;
1204	} else {
1205		for (i = 0; i < sc->msix_vectors; i++) {
1206			if (sc->intr_handle[i])
1207				bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i],
1208				    sc->intr_handle[i]);
1209
1210			if (sc->mrsas_irq[i] != NULL)
1211				bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ,
1212				    sc->irq_id[i], sc->mrsas_irq[i]);
1213
1214			sc->intr_handle[i] = NULL;
1215		}
1216		pci_release_msi(sc->mrsas_dev);
1217	}
1218
1219}
1220
1221/*
1222 * mrsas_suspend:	Suspend entry point
1223 * input:			Device struct pointer
1224 *
1225 * This function is the entry point for system suspend from the OS.
1226 */
1227static int
1228mrsas_suspend(device_t dev)
1229{
1230	struct mrsas_softc *sc;
1231
1232	sc = device_get_softc(dev);
1233	return (0);
1234}
1235
1236/*
1237 * mrsas_resume:	Resume entry point
1238 * input:			Device struct pointer
1239 *
1240 * This function is the entry point for system resume from the OS.
1241 */
1242static int
1243mrsas_resume(device_t dev)
1244{
1245	struct mrsas_softc *sc;
1246
1247	sc = device_get_softc(dev);
1248	return (0);
1249}
1250
1251/**
1252 * mrsas_get_softc_instance:    Find softc instance based on cmd type
1253 *
1254 * This function will return softc instance based on cmd type.
1255 * In some case, application fire ioctl on required management instance and
1256 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those
1257 * case, else get the softc instance from host_no provided by application in
1258 * user data.
1259 */
1260
1261static struct mrsas_softc *
1262mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg)
1263{
1264	struct mrsas_softc *sc = NULL;
1265	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
1266
1267	if (cmd == MRSAS_IOC_GET_PCI_INFO) {
1268		sc = dev->si_drv1;
1269	} else {
1270		/*
1271		 * get the Host number & the softc from data sent by the
1272		 * Application
1273		 */
1274		sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no];
1275		if ((user_ioc->host_no >= mrsas_mgmt_info.max_index) || (sc == NULL)) {
1276			if (sc == NULL)
1277				mrsas_dprint(sc, MRSAS_FAULT,
1278				    "There is no Controller number %d .\n", user_ioc->host_no);
1279			else
1280				mrsas_dprint(sc, MRSAS_FAULT,
1281				    "Invalid Controller number %d .\n", user_ioc->host_no);
1282		}
1283	}
1284
1285	return sc;
1286}
1287
1288/*
1289 * mrsas_ioctl:	IOCtl commands entry point.
1290 *
1291 * This function is the entry point for IOCtls from the OS.  It calls the
1292 * appropriate function for processing depending on the command received.
1293 */
static int
mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
{
	struct mrsas_softc *sc;
	int ret = 0, i = 0;
	MRSAS_DRV_PCI_INFORMATION *pciDrvInfo;

	/* Resolve the target controller (by cmd type / user-supplied host_no). */
	sc = mrsas_get_softc_instance(dev, cmd, arg);
	if (!sc)
		return ENOENT;

	/* Refuse new IOCTLs once detach/shutdown has started. */
	if (sc->remove_in_progress) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Driver remove or shutdown called.\n");
		return ENOENT;
	}
	/*
	 * Fast path: if no OCR (controller reset) is in progress, dispatch
	 * immediately.  The spin lock only guards the flag read.
	 */
	mtx_lock_spin(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		mtx_unlock_spin(&sc->ioctl_lock);
		goto do_ioctl;
	}
	mtx_unlock_spin(&sc->ioctl_lock);
	/* Otherwise poll (sleeping hz ticks per iteration) until it finishes. */
	while (sc->reset_in_progress) {
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "OCR to be finished %d\n", i,
			    sc->ocr_thread_active);
		}
		pause("mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH64:
#ifdef COMPAT_FREEBSD32
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH32:
#endif
		/*
		 * Decrement the Ioctl counting Semaphore before getting an
		 * mfi command
		 */
		sema_wait(&sc->ioctl_count_sema);

		ret = mrsas_passthru(sc, (void *)arg, cmd);

		/* Increment the Ioctl counting semaphore value */
		sema_post(&sc->ioctl_count_sema);

		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;

	case MRSAS_IOC_GET_PCI_INFO:
		/* Report this controller's PCI location to the application. */
		pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg;
		memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION));
		pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev);
		pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev);
		pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev);
		pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev);
		mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d,"
		    "pci device no: %d, pci function no: %d,"
		    "pci domain ID: %d\n",
		    pciDrvInfo->busNumber, pciDrvInfo->deviceNumber,
		    pciDrvInfo->functionNumber, pciDrvInfo->domainID);
		ret = 0;
		break;

	default:
		mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd);
		ret = ENOENT;
	}

	return (ret);
}
1371
1372/*
1373 * mrsas_poll:	poll entry point for mrsas driver fd
1374 *
1375 * This function is the entry point for poll from the OS.  It waits for some AEN
1376 * events to be triggered from the controller and notifies back.
1377 */
1378static int
1379mrsas_poll(struct cdev *dev, int poll_events, struct thread *td)
1380{
1381	struct mrsas_softc *sc;
1382	int revents = 0;
1383
1384	sc = dev->si_drv1;
1385
1386	if (poll_events & (POLLIN | POLLRDNORM)) {
1387		if (sc->mrsas_aen_triggered) {
1388			revents |= poll_events & (POLLIN | POLLRDNORM);
1389		}
1390	}
1391	if (revents == 0) {
1392		if (poll_events & (POLLIN | POLLRDNORM)) {
1393			mtx_lock(&sc->aen_lock);
1394			sc->mrsas_poll_waiting = 1;
1395			selrecord(td, &sc->mrsas_select);
1396			mtx_unlock(&sc->aen_lock);
1397		}
1398	}
1399	return revents;
1400}
1401
1402/*
1403 * mrsas_setup_irq:	Set up interrupt
1404 * input:			Adapter instance soft state
1405 *
1406 * This function sets up interrupts as a bus resource, with flags indicating
1407 * resource permitting contemporaneous sharing and for resource to activate
1408 * atomically.
1409 */
1410static int
1411mrsas_setup_irq(struct mrsas_softc *sc)
1412{
1413	if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS))
1414		device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n");
1415
1416	else {
1417		device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n");
1418		sc->irq_context[0].sc = sc;
1419		sc->irq_context[0].MSIxIndex = 0;
1420		sc->irq_id[0] = 0;
1421		sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev,
1422		    SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
1423		if (sc->mrsas_irq[0] == NULL) {
1424			device_printf(sc->mrsas_dev, "Cannot allocate legcay"
1425			    "interrupt\n");
1426			return (FAIL);
1427		}
1428		if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0],
1429		    INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr,
1430		    &sc->irq_context[0], &sc->intr_handle[0])) {
1431			device_printf(sc->mrsas_dev, "Cannot set up legacy"
1432			    "interrupt\n");
1433			return (FAIL);
1434		}
1435	}
1436	return (0);
1437}
1438
1439/*
1440 * mrsas_isr:	ISR entry point
1441 * input:		argument pointer
1442 *
1443 * This function is the interrupt service routine entry point.  There are two
1444 * types of interrupts, state change interrupt and response interrupt.  If an
1445 * interrupt is not ours, we just return.
1446 */
void
mrsas_isr(void *arg)
{
	struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg;
	struct mrsas_softc *sc = irq_context->sc;
	int status = 0;

	/* Interrupts are masked (e.g. during reset handling); ignore. */
	if (sc->mask_interrupts)
		return;

	/*
	 * Legacy (non-MSI-X) interrupts may be on a shared line (allocated
	 * RF_SHAREABLE in mrsas_setup_irq); check/clear the status to see
	 * whether this interrupt is ours, and bail if not.
	 */
	if (!sc->msix_vectors) {
		status = mrsas_clear_intr(sc);
		if (!status)
			return;
	}
	/* If we are resetting, bail */
	if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
		printf(" Entered into ISR when OCR is going active. \n");
		mrsas_clear_intr(sc);
		return;
	}
	/* Process for reply request and clear response interrupt */
	if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS)
		mrsas_clear_intr(sc);

	return;
}
1474
1475/*
1476 * mrsas_complete_cmd:	Process reply request
1477 * input:				Adapter instance soft state
1478 *
1479 * This function is called from mrsas_isr() to process reply request and clear
1480 * response interrupt. Processing of the reply request entails walking
1481 * through the reply descriptor array for the command request  pended from
1482 * Firmware.  We look at the Function field to determine the command type and
1483 * perform the appropriate action.  Before we return, we clear the response
1484 * interrupt.
1485 */
static int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t reply_descript_type;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id;
	int threshold_reply_count = 0;


	/* If we have a hardware error, no need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/*
	 * Point at this MSI-x queue's slice of the reply descriptor array,
	 * resuming at the index where the previous call left off.
	 */
	desc = sc->reply_desc_mem;
	desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION))
	    + sc->last_reply_idx[MSIxIndex];

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/*
	 * Find our reply descriptor for the command and process.  An entry
	 * of all 0xFFs means the slot has not been posted by FW yet.
	 */
	while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) {
		/* SMID is 1-based; mpt_cmd_list is 0-based. */
		smid = reply_desc->SMID;
		cmd_mpt = sc->mpt_cmd_list[smid - 1];
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function) {
		case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO. */
			/* Undo the load-balance accounting done at submit time. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* Fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
			mrsas_cmd_done(sc, cmd_mpt);
			/* Clear RaidContext status so the frame can be reused. */
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			mrsas_atomic_dec(&sc->fw_outstanding);
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST:	/* MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			cmd_mpt->flags = 0;
			mrsas_release_mpt_cmd(cmd_mpt);
			break;
		}

		/* Advance (and wrap) this queue's consumer index. */
		sc->last_reply_idx[MSIxIndex]++;
		if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth)
			sc->last_reply_idx[MSIxIndex] = 0;

		desc->Words = ~((uint64_t)0x00);	/* set it back to all
							 * 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor (wrap back to queue start on 0). */
		if (!sc->last_reply_idx[MSIxIndex]) {
			desc = sc->reply_desc_mem;
			desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION));
		} else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply
		 * count and still there are more replies in reply queue
		 * pending to be completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			if (sc->msix_enable) {
				if ((sc->device_id == MRSAS_INVADER) ||
				    (sc->device_id == MRSAS_FURY))
					mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
					    ((MSIxIndex & 0x7) << 24) |
					    sc->last_reply_idx[MSIxIndex]);
				else
					mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
					    sc->last_reply_idx[MSIxIndex]);
			} else
				mrsas_write_reg(sc, offsetof(mrsas_reg_set,
				    reply_post_host_index), sc->last_reply_idx[0]);

			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt by posting the final consumer index. */
	if (sc->msix_enable) {
		if ((sc->device_id == MRSAS_INVADER) ||
		    (sc->device_id == MRSAS_FURY)) {
			mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
			    ((MSIxIndex & 0x7) << 24) |
			    sc->last_reply_idx[MSIxIndex]);
		} else
			mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) |
			    sc->last_reply_idx[MSIxIndex]);
	} else
		mrsas_write_reg(sc, offsetof(mrsas_reg_set,
		    reply_post_host_index), sc->last_reply_idx[0]);

	return (0);
}
1618
1619/*
 * mrsas_map_mpt_cmd_status:	Map FW command status to CAM CCB status
 * input:						MPT command pointer, FW status and extended status
1622 *
1623 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1624 * It checks the command status and maps the appropriate CAM status for the
1625 * CCB.
1626 */
void
mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	/* NOTE(review): extStatus is currently unused in this function. */
	switch (status) {
	case MFI_STAT_OK:
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		/*
		 * NOTE(review): sense_data is the address of an embedded
		 * struct member, so the NULL check below is always true.
		 */
		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, cmd->sense, 18);
			cmd->ccb_ptr->csio.sense_len = 18;
			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		/* Nonzero LUN -> invalid LUN; LUN 0 -> device not present. */
		if (cmd->ccb_ptr->ccb_h.target_lun)
			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		/* Unknown FW status: generic error, raw status passed through. */
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		cmd->ccb_ptr->csio.scsi_status = status;
	}
	return;
}
1665
1666/*
1667 * mrsas_alloc_mem:	Allocate DMAable memory
1668 * input:			Adapter instance soft state
1669 *
1670 * This function creates the parent DMA tag and allocates DMAable memory. DMA
1671 * tag describes constraints of DMA mapping. Memory allocated is mapped into
1672 * Kernel virtual address. Callback argument is physical memory address.
1673 */
static int
mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
	          chain_frame_size, evt_detail_size, count;

	/*
	 * Allocate parent DMA tag; every other tag below derives from it.
	 * On any failure we return ENOMEM and rely on the attach failure
	 * path (mrsas_free_mem) to release whatever was allocated so far.
	 */
	if (bus_dma_tag_create(NULL,	/* parent */
	    1,				/* alignment */
	    0,				/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MRSAS_MAX_IO_SIZE,		/* maxsize */
	    MRSAS_MAX_SGL,		/* nsegments */
	    MRSAS_MAX_IO_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->mrsas_parent_tag	/* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for version buffer (single segment, below 4GB)
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    verbuf_size,
	    1,
	    verbuf_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return (ENOMEM);
	}
	/*
	 * Allocate IO Request Frames (16-byte aligned, below 4GB)
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    io_req_size,
	    1,
	    io_req_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Chain Frames (4-byte aligned, below 4GB)
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    4, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    chain_frame_size,
	    1,
	    chain_frame_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	/*
	 * Allocate Reply Descriptor Array — one reply_alloc_sz slice per
	 * MSI-x vector.  NOTE(review): this region is not bzero'ed here,
	 * unlike most others — presumably initialized elsewhere; verify.
	 */
	reply_desc_size = sc->reply_alloc_sz * count;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    16, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    reply_desc_size,
	    1,
	    reply_desc_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate Sense Buffer Array.  Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    64, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    sense_size,
	    1,
	    sense_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}
	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    evt_detail_size,
	    1,
	    evt_detail_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}
	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  This tag is serialized via io_lock
	 * through busdma_lock_mutex.
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1,
	    0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MRSAS_MAX_IO_SIZE,
	    MRSAS_MAX_SGL,
	    MRSAS_MAX_IO_SIZE,
	    BUS_DMA_ALLOCNOW,
	    busdma_lock_mutex,
	    &sc->io_lock,
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return (ENOMEM);
	}
	return (0);
}
1900
1901/*
1902 * mrsas_addr_cb:	Callback function of bus_dmamap_load()
1903 * input:			callback argument, machine dependent type
1904 * 					that describes DMA segments, number of segments, error code
1905 *
1906 * This function is for the driver to receive mapping information resultant of
1907 * the bus_dmamap_load(). The information is actually not being used, but the
1908 * address is saved anyway.
1909 */
1910void
1911mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1912{
1913	bus_addr_t *addr;
1914
1915	addr = arg;
1916	*addr = segs[0].ds_addr;
1917}
1918
1919/*
1920 * mrsas_setup_raidmap:	Set up RAID map.
1921 * input:				Adapter instance soft state
1922 *
1923 * Allocate DMA memory for the RAID maps and perform setup.
1924 */
1925static int
1926mrsas_setup_raidmap(struct mrsas_softc *sc)
1927{
1928	int i;
1929
1930	for (i = 0; i < 2; i++) {
1931		sc->ld_drv_map[i] =
1932		    (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT);
1933		/* Do Error handling */
1934		if (!sc->ld_drv_map[i]) {
1935			device_printf(sc->mrsas_dev, "Could not allocate memory for local map");
1936
1937			if (i == 1)
1938				free(sc->ld_drv_map[0], M_MRSAS);
1939			/* ABORT driver initialization */
1940			goto ABORT;
1941		}
1942	}
1943
1944	for (int i = 0; i < 2; i++) {
1945		if (bus_dma_tag_create(sc->mrsas_parent_tag,
1946		    4, 0,
1947		    BUS_SPACE_MAXADDR_32BIT,
1948		    BUS_SPACE_MAXADDR,
1949		    NULL, NULL,
1950		    sc->max_map_sz,
1951		    1,
1952		    sc->max_map_sz,
1953		    BUS_DMA_ALLOCNOW,
1954		    NULL, NULL,
1955		    &sc->raidmap_tag[i])) {
1956			device_printf(sc->mrsas_dev,
1957			    "Cannot allocate raid map tag.\n");
1958			return (ENOMEM);
1959		}
1960		if (bus_dmamem_alloc(sc->raidmap_tag[i],
1961		    (void **)&sc->raidmap_mem[i],
1962		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
1963			device_printf(sc->mrsas_dev,
1964			    "Cannot allocate raidmap memory.\n");
1965			return (ENOMEM);
1966		}
1967		bzero(sc->raidmap_mem[i], sc->max_map_sz);
1968
1969		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
1970		    sc->raidmap_mem[i], sc->max_map_sz,
1971		    mrsas_addr_cb, &sc->raidmap_phys_addr[i],
1972		    BUS_DMA_NOWAIT)) {
1973			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
1974			return (ENOMEM);
1975		}
1976		if (!sc->raidmap_mem[i]) {
1977			device_printf(sc->mrsas_dev,
1978			    "Cannot allocate memory for raid map.\n");
1979			return (ENOMEM);
1980		}
1981	}
1982
1983	if (!mrsas_get_map_info(sc))
1984		mrsas_sync_map_info(sc);
1985
1986	return (0);
1987
1988ABORT:
1989	return (1);
1990}
1991
1992/*
1993 * mrsas_init_fw:	Initialize Firmware
1994 * input:			Adapter soft state
1995 *
1996 * Calls transition_to_ready() to make sure Firmware is in operational state and
1997 * calls mrsas_init_adapter() to send IOC_INIT command to Firmware.  It
1998 * issues internal commands to get the controller info after the IOC_INIT
1999 * command response is received by Firmware.  Note:  code relating to
2000 * get_pdlist, get_ld_list and max_sectors are currently not being used, it
2001 * is left here as placeholder.
2002 */
static int
mrsas_init_fw(struct mrsas_softc *sc)
{

	int ret, loop, ocr = 0;
	u_int32_t max_sectors_1;
	u_int32_t max_sectors_2;
	u_int32_t tmp_sectors;
	u_int32_t scratch_pad_2;
	int msix_enable = 0;
	int fw_msix_count = 0;

	/* Make sure Firmware is ready */
	ret = mrsas_transition_to_ready(sc, ocr);
	if (ret != SUCCESS) {
		return (ret);
	}
	/* MSI-x index 0- reply post host index register */
	sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET;
	/*
	 * Check if MSI-X is supported while in ready state: bit 26
	 * (0x4000000, shifted down by 0x1a) of the outbound scratch pad
	 * advertises MSI-X capability.
	 */
	msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a;

	if (msix_enable) {
		scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));

		/* Check max MSI-X vectors */
		if (sc->device_id == MRSAS_TBOLT) {
			sc->msix_vectors = (scratch_pad_2
			    & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
			fw_msix_count = sc->msix_vectors;
		} else {
			/* Invader/Fury supports 96 MSI-X vectors */
			sc->msix_vectors = ((scratch_pad_2
			    & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
			    >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
			fw_msix_count = sc->msix_vectors;

			/* Each extra vector gets its own host index register, 0x10 apart. */
			for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY;
			    loop++) {
				sc->msix_reg_offset[loop] =
				    MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET +
				    (loop * 0x10);
			}
		}

		/* Don't bother allocating more MSI-X vectors than cpus */
		sc->msix_vectors = min(sc->msix_vectors,
		    mp_ncpus);

		/* Allocate MSI-x vectors */
		if (mrsas_allocate_msix(sc) == SUCCESS)
			sc->msix_enable = 1;
		else
			sc->msix_enable = 0;

		device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector,"
		    "Online CPU %d Current MSIX <%d>\n",
		    fw_msix_count, mp_ncpus, sc->msix_vectors);
	}
	/* Send IOC INIT and set up command pools; see mrsas_init_adapter(). */
	if (mrsas_init_adapter(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
		return (1);
	}
	/* Allocate internal commands for pass-thru */
	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
		return (1);
	}
	/*
	 * NOTE(review): sc->ctrl_info persists in the softc and is consumed
	 * below; it is not freed on the later error returns here — confirm
	 * the detach/teardown path releases it.
	 */
	sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
	if (!sc->ctrl_info) {
		device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n");
		return (1);
	}
	/*
	 * Get the controller info from FW, so that the MAX VD support
	 * availability can be decided.
	 */
	if (mrsas_get_ctrl_info(sc)) {
		device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n");
		return (1);
	}
	sc->secure_jbod_support =
	    (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD;

	if (sc->secure_jbod_support)
		device_printf(sc->mrsas_dev, "FW supports SED \n");

	if (mrsas_setup_raidmap(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
		return (1);
	}
	/* For pass-thru, get PD/LD list and controller info */
	memset(sc->pd_list, 0,
	    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
	mrsas_get_pd_list(sc);

	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
	mrsas_get_ld_list(sc);

	/*
	 * Compute the max allowed sectors per IO: The controller info has
	 * two limits on max sectors. Driver should use the minimum of these
	 * two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmwares ( < FW ver 30) didn't report information to
	 * calculate max_sectors_1. So the number ended up as zero always.
	 */
	tmp_sectors = 0;
	max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) *
	    sc->ctrl_info->max_strips_per_io;
	max_sectors_2 = sc->ctrl_info->max_request_size;
	tmp_sectors = min(max_sectors_1, max_sectors_2);
	/* Driver-side limit derived from SGE capacity (512-byte sectors). */
	sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;

	/* A tmp_sectors of 0 (old FW) means "no FW-reported limit" — keep ours. */
	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
		sc->max_sectors_per_req = tmp_sectors;

	sc->disableOnlineCtrlReset =
	    sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
	sc->UnevenSpanSupport =
	    sc->ctrl_info->adapterOperations2.supportUnevenSpans;
	if (sc->UnevenSpanSupport) {
		device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n",
		    sc->UnevenSpanSupport);

		/* Fast-path I/O is only usable if the RAID map validates. */
		if (MR_ValidateMapInfo(sc))
			sc->fast_path_io = 1;
		else
			sc->fast_path_io = 0;
	}
	return (0);
}
2138
2139/*
2140 * mrsas_init_adapter:	Initializes the adapter/controller
2141 * input:				Adapter soft state
2142 *
2143 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
 * ROC/controller.  The FW register is read to determine the number of
2145 * commands that is supported.  All memory allocations for IO is based on
2146 * max_cmd.  Appropriate calculations are performed in this function.
2147 */
2148int
2149mrsas_init_adapter(struct mrsas_softc *sc)
2150{
2151	uint32_t status;
2152	u_int32_t max_cmd;
2153	int ret;
2154	int i = 0;
2155
2156	/* Read FW status register */
2157	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2158
2159	/* Get operational params from status register */
2160	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
2161
2162	/* Decrement the max supported by 1, to correlate with FW */
2163	sc->max_fw_cmds = sc->max_fw_cmds - 1;
2164	max_cmd = sc->max_fw_cmds;
2165
2166	/* Determine allocation size of command frames */
2167	sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
2168	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
2169	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
2170	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
2171	sc->chain_frames_alloc_sz = 1024 * max_cmd;
2172	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2173	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
2174
2175	sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
2176	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
2177
2178	/* Used for pass thru MFI frame (DCMD) */
2179	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
2180
2181	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
2182	    sizeof(MPI2_SGE_IO_UNION)) / 16;
2183
2184	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2185
2186	for (i = 0; i < count; i++)
2187		sc->last_reply_idx[i] = 0;
2188
2189	ret = mrsas_alloc_mem(sc);
2190	if (ret != SUCCESS)
2191		return (ret);
2192
2193	ret = mrsas_alloc_mpt_cmds(sc);
2194	if (ret != SUCCESS)
2195		return (ret);
2196
2197	ret = mrsas_ioc_init(sc);
2198	if (ret != SUCCESS)
2199		return (ret);
2200
2201	return (0);
2202}
2203
2204/*
2205 * mrsas_alloc_ioc_cmd:	Allocates memory for IOC Init command
2206 * input:				Adapter soft state
2207 *
2208 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
2209 */
2210int
2211mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
2212{
2213	int ioc_init_size;
2214
2215	/* Allocate IOC INIT command */
2216	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
2217	if (bus_dma_tag_create(sc->mrsas_parent_tag,
2218	    1, 0,
2219	    BUS_SPACE_MAXADDR_32BIT,
2220	    BUS_SPACE_MAXADDR,
2221	    NULL, NULL,
2222	    ioc_init_size,
2223	    1,
2224	    ioc_init_size,
2225	    BUS_DMA_ALLOCNOW,
2226	    NULL, NULL,
2227	    &sc->ioc_init_tag)) {
2228		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
2229		return (ENOMEM);
2230	}
2231	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
2232	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
2233		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
2234		return (ENOMEM);
2235	}
2236	bzero(sc->ioc_init_mem, ioc_init_size);
2237	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
2238	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
2239	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
2240		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
2241		return (ENOMEM);
2242	}
2243	return (0);
2244}
2245
2246/*
 * mrsas_free_ioc_cmd:	Frees memory allocated for the IOC Init command
2248 * input:				Adapter soft state
2249 *
2250 * Deallocates memory of the IOC Init cmd.
2251 */
2252void
2253mrsas_free_ioc_cmd(struct mrsas_softc *sc)
2254{
2255	if (sc->ioc_init_phys_mem)
2256		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
2257	if (sc->ioc_init_mem != NULL)
2258		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
2259	if (sc->ioc_init_tag != NULL)
2260		bus_dma_tag_destroy(sc->ioc_init_tag);
2261}
2262
2263/*
2264 * mrsas_ioc_init:	Sends IOC Init command to FW
2265 * input:			Adapter soft state
2266 *
2267 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
2268 */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}
	/*
	 * Buffer layout (see mrsas_alloc_ioc_cmd): the first 1024 bytes hold
	 * the MFI init frame; the MPI2 IOC INIT request sits at offset 1024.
	 */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);

	/* The MFI frame wraps the MPI2 request and carries the polled status. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;	/* 0xFF = pending; FW overwrites on completion */
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver support Extended MSIX */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY)) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		/*
		 * NOTE(review): driver_ver_lo receives a bus_addr_t; this
		 * assumes verbuf_phys_addr fits the 32-bit field — confirm
		 * the version buffer comes from a 32-bit-bounded DMA tag.
		 */
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	/* Point the MFI frame at the MPI2 IOC INIT request (offset 1024). */
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	/* Fire the MFI frame as an MFA-type request descriptor. */
	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response.  While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* The IOC INIT buffer is single-use; release it regardless of outcome. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
2354
2355/*
2356 * mrsas_alloc_mpt_cmds:	Allocates the command packets
2357 * input:					Adapter instance soft state
2358 *
2359 * This function allocates the internal commands for IOs. Each command that is
2360 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
2361 * array is allocated with mrsas_mpt_cmd context.  The free commands are
2362 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2363 * max_fw_cmds.
2364 */
2365int
2366mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
2367{
2368	int i, j;
2369	u_int32_t max_cmd, count;
2370	struct mrsas_mpt_cmd *cmd;
2371	pMpi2ReplyDescriptorsUnion_t reply_desc;
2372	u_int32_t offset, chain_offset, sense_offset;
2373	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
2374	u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2375
2376	max_cmd = sc->max_fw_cmds;
2377
2378	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2379	if (!sc->req_desc) {
2380		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2381		return (ENOMEM);
2382	}
2383	memset(sc->req_desc, 0, sc->request_alloc_sz);
2384
2385	/*
2386	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
2387	 * Allocate the dynamic array first and then allocate individual
2388	 * commands.
2389	 */
2390	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
2391	if (!sc->mpt_cmd_list) {
2392		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2393		return (ENOMEM);
2394	}
2395	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
2396	for (i = 0; i < max_cmd; i++) {
2397		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2398		    M_MRSAS, M_NOWAIT);
2399		if (!sc->mpt_cmd_list[i]) {
2400			for (j = 0; j < i; j++)
2401				free(sc->mpt_cmd_list[j], M_MRSAS);
2402			free(sc->mpt_cmd_list, M_MRSAS);
2403			sc->mpt_cmd_list = NULL;
2404			return (ENOMEM);
2405		}
2406	}
2407
2408	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2409	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2410	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
2411	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2412	sense_base = (u_int8_t *)sc->sense_mem;
2413	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
2414	for (i = 0; i < max_cmd; i++) {
2415		cmd = sc->mpt_cmd_list[i];
2416		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2417		chain_offset = 1024 * i;
2418		sense_offset = MRSAS_SENSE_LEN * i;
2419		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2420		cmd->index = i + 1;
2421		cmd->ccb_ptr = NULL;
2422		callout_init(&cmd->cm_callout, 0);
2423		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2424		cmd->sc = sc;
2425		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2426		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2427		cmd->io_request_phys_addr = io_req_base_phys + offset;
2428		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2429		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2430		cmd->sense = sense_base + sense_offset;
2431		cmd->sense_phys_addr = sense_base_phys + sense_offset;
2432		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2433			return (FAIL);
2434		}
2435		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2436	}
2437
2438	/* Initialize reply descriptor array to 0xFFFFFFFF */
2439	reply_desc = sc->reply_desc_mem;
2440	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2441	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
2442		reply_desc->Words = MRSAS_ULONG_MAX;
2443	}
2444	return (0);
2445}
2446
2447/*
2448 * mrsas_fire_cmd:	Sends command to FW
2449 * input:			Adapter softstate
2450 * 					request descriptor address low
2451 * 					request descriptor address high
2452 *
2453 * This functions fires the command to Firmware by writing to the
2454 * inbound_low_queue_port and inbound_high_queue_port.
2455 */
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * The 64-bit request descriptor is posted as two 32-bit register
	 * writes; pci_lock keeps the low/high halves of concurrent
	 * submissions from interleaving.  The low half must be written
	 * first, as done here.
	 */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	mtx_unlock(&sc->pci_lock);
}
2467
2468/*
2469 * mrsas_transition_to_ready:  Move FW to Ready state input:
2470 * Adapter instance soft state
2471 *
 * During initialization, the FW can potentially be in any one of several
 * possible states. If the FW is in the operational or waiting-for-handshake
 * state, the driver must take steps to bring it to ready state. Otherwise, it has to
2475 * wait for the ready state.
2476 */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	/* FW state lives in the masked bits of the outbound scratch pad. */
	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/* Snapshot the full (unmasked) register to detect any change below. */
		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* During OCR a FAULTed FW is expected; otherwise give up. */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Poll doorbell bit 0 until FW acknowledges the reset. */
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs.
		 * NOTE(review): curr_abs_state is only assigned inside this
		 * loop; the comparison below relies on max_wait > 0 —
		 * confirm MRSAS_RESET_WAIT_TIME is non-zero.
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
2582
2583/*
2584 * mrsas_get_mfi_cmd:	Get a cmd from free command pool
2585 * input:				Adapter soft state
2586 *
2587 * This function removes an MFI command from the command list.
2588 */
2589struct mrsas_mfi_cmd *
2590mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2591{
2592	struct mrsas_mfi_cmd *cmd = NULL;
2593
2594	mtx_lock(&sc->mfi_cmd_pool_lock);
2595	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
2596		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2597		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2598	}
2599	mtx_unlock(&sc->mfi_cmd_pool_lock);
2600
2601	return cmd;
2602}
2603
2604/*
2605 * mrsas_ocr_thread:	Thread to handle OCR/Kill Adapter.
2606 * input:				Adapter Context.
2607 *
2608 * This function will check FW status register and flag do_timeout_reset flag.
2609 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has
2610 * trigger reset.
2611 */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	sc->ocr_thread_active = 1;
	mtx_lock(&sc->sim_lock);
	for (;;) {
		/*
		 * Sleep for mrsas_fw_fault_check_delay seconds and check the
		 * FW status (msleep(9) releases sim_lock while sleeping and
		 * re-acquires it before returning).
		 */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		/* Detach in progress: terminate the thread. */
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to shutdown from %s\n", __func__);
			break;
		}
		fw_status = mrsas_read_reg(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		/* Reset on FW fault or when an I/O timeout flagged one. */
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
			device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
			    sc->do_timedout_reset ? "IO Timeout" :
			    "FW fault detected");
			/* reset_in_progress/reset_count are guarded by the ioctl spin lock. */
			mtx_lock_spin(&sc->ioctl_lock);
			sc->reset_in_progress = 1;
			sc->reset_count++;
			mtx_unlock_spin(&sc->ioctl_lock);
			/* Freeze CAM around the reset so no new I/O is dispatched. */
			mrsas_xpt_freeze(sc);
			mrsas_reset_ctrl(sc);
			mrsas_xpt_release(sc);
			sc->reset_in_progress = 0;
			sc->do_timedout_reset = 0;
		}
	}
	mtx_unlock(&sc->sim_lock);
	sc->ocr_thread_active = 0;
	mrsas_kproc_exit(0);
}
2655
2656/*
2657 * mrsas_reset_reply_desc:	Reset Reply descriptor as part of OCR.
2658 * input:					Adapter Context.
2659 *
 * This function clears the reply descriptors so that, after OCR, neither the
 * driver nor the FW sees stale completion history.
2662 */
2663void
2664mrsas_reset_reply_desc(struct mrsas_softc *sc)
2665{
2666	int i, count;
2667	pMpi2ReplyDescriptorsUnion_t reply_desc;
2668
2669	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2670	for (i = 0; i < count; i++)
2671		sc->last_reply_idx[i] = 0;
2672
2673	reply_desc = sc->reply_desc_mem;
2674	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2675		reply_desc->Words = MRSAS_ULONG_MAX;
2676	}
2677}
2678
2679/*
2680 * mrsas_reset_ctrl:	Core function to OCR/Kill adapter.
2681 * input:				Adapter Context.
2682 *
2683 * This function will run from thread context so that it can sleep. 1. Do not
2684 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
2685 * to complete for 180 seconds. 3. If #2 does not find any outstanding
2686 * command Controller is in working state, so skip OCR. Otherwise, do
2687 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
2688 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
 * OCR, re-fire management commands and move the controller to operational state.
2690 */
int
mrsas_reset_ctrl(struct mrsas_softc *sc)
{
	int retval = SUCCESS, i, j, retry = 0;
	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
	union ccb *ccb;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	/* Step 1: never attempt OCR once the HBA has been declared dead. */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
		device_printf(sc->mrsas_dev,
		    "mrsas: Hardware critical error, returning FAIL.\n");
		return FAIL;
	}
	mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
	mrsas_disable_intr(sc);
	DELAY(1000 * 1000);

	/* Step 2: first try waiting for outstanding commands to complete. */
	if (mrsas_wait_for_outstanding(sc)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    "resetting adapter from %s.\n",
		    __func__);
		/* Commands did not drain: return them to CAM as bus-reset. */
		for (i = 0; i < sc->max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			if (mpt_cmd->ccb_ptr) {
				ccb = (union ccb *)(mpt_cmd->ccb_ptr);
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
				mrsas_cmd_done(sc, mpt_cmd);
				mrsas_atomic_dec(&sc->fw_outstanding);
			}
		}

		status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		abs_state = status_reg & MFI_STATE_MASK;
		reset_adapter = status_reg & MFI_RESET_ADAPTER;
		/* Kill the HBA if OCR is disabled or the FW won't allow a reset. */
		if (sc->disableOnlineCtrlReset ||
		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
			/* Reset not supported, kill adapter */
			mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
			mrsas_kill_hba(sc);
			retval = FAIL;
			goto out;
		}
		/* Step 3: try to reset the chip, up to MRSAS_FUSION_MAX_RESET_TRIES times. */
		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
			/* Write the MPI2 WRSEQ six-key sequence to unlock the diag registers. */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_1ST_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_2ND_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_3RD_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_4TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_5TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_6TH_KEY_VALUE);

			/* Check that the diag write enable (DRWE) bit is on */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			/* Wait up to ~10s (100 * 100ms) for DRWE to assert. */
			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 100) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Host diag unlock failed!\n");
					break;
				}
			}
			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
				continue;

			/* Send chip reset command */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
			    host_diag | HOST_DIAG_RESET_ADAPTER);
			DELAY(3000 * 1000);

			/* Make sure reset adapter bit is cleared */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			/* Wait up to ~100s (1000 * 100ms) for the reset bit to clear. */
			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 1000) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Diag reset adapter never cleared!\n");
					break;
				}
			}
			if (host_diag & HOST_DIAG_RESET_ADAPTER)
				continue;

			/* Wait for FW to advance past its early-init states. */
			abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK;
			retry = 0;

			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
				DELAY(100 * 1000);
				abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    outbound_scratch_pad)) & MFI_STATE_MASK;
			}
			if (abs_state <= MFI_STATE_FW_INIT) {
				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
				    " state = 0x%x\n", abs_state);
				continue;
			}
			/* Wait for FW to become ready */
			if (mrsas_transition_to_ready(sc, 1)) {
				mrsas_dprint(sc, MRSAS_OCR,
				    "mrsas: Failed to transition controller to ready.\n");
				continue;
			}
			/* Step 4: re-initialize reply queues and re-run IOC INIT. */
			mrsas_reset_reply_desc(sc);
			if (mrsas_ioc_init(sc)) {
				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
				continue;
			}
			/* Step 5: re-fire management (MFI pass-thru) commands that were in flight. */
			for (j = 0; j < sc->max_fw_cmds; j++) {
				mpt_cmd = sc->mpt_cmd_list[j];
				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
					/* A pending map-get is dropped; a fresh one is issued below. */
					if (mfi_cmd->frame->dcmd.opcode ==
					    MR_DCMD_LD_MAP_GET_INFO) {
						mrsas_release_mfi_cmd(mfi_cmd);
						mrsas_release_mpt_cmd(mpt_cmd);
					} else {
						req_desc = mrsas_get_request_desc(sc,
						    mfi_cmd->cmd_id.context.smid - 1);
						mrsas_dprint(sc, MRSAS_OCR,
						    "Re-fire command DCMD opcode 0x%x index %d\n ",
						    mfi_cmd->frame->dcmd.opcode, j);
						if (!req_desc)
							device_printf(sc->mrsas_dev,
							    "Cannot build MPT cmd.\n");
						else
							mrsas_fire_cmd(sc, req_desc->addr.u.low,
							    req_desc->addr.u.high);
					}
				}
			}

			/* Reset load balance info */
			memset(sc->load_balance_info, 0,
			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);

			/* Refresh controller info and the RAID map from the FW. */
			if (mrsas_get_ctrl_info(sc)) {
				mrsas_kill_hba(sc);
				retval = FAIL;
				goto out;
			}
			if (!mrsas_get_map_info(sc))
				mrsas_sync_map_info(sc);

			mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
			mrsas_enable_intr(sc);
			sc->adprecovery = MRSAS_HBA_OPERATIONAL;

			/* Adapter reset completed successfully */
			device_printf(sc->mrsas_dev, "Reset successful\n");
			retval = SUCCESS;
			goto out;
		}
		/* Reset failed, kill the adapter */
		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
		mrsas_kill_hba(sc);
		retval = FAIL;
	} else {
		/* Commands drained on their own: no reset required. */
		mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
		mrsas_enable_intr(sc);
		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	}
out:
	mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	mrsas_dprint(sc, MRSAS_OCR,
	    "Reset Exit with %d.\n", retval);
	return retval;
}
2881
2882/*
2883 * mrsas_kill_hba:	Kill HBA when OCR is not supported
2884 * input:			Adapter Context.
2885 *
2886 * This function will kill HBA when OCR is not supported.
2887 */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
	/* Mark the adapter dead so no new commands are accepted. */
	sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
	/* Sleep briefly (1000 ticks) before stopping the firmware. */
	pause("mrsas_kill_hba", 1000);
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	/* Instruct firmware to stop via the doorbell register. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush: read back the doorbell so the write is posted to hardware. */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
	/* Fail any IOCTLs still waiting on the now-dead controller. */
	mrsas_complete_outstanding_ioctls(sc);
}
2900
2901/**
2902 * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
2903 * input:			Controller softc
2904 *
2905 * Returns void
2906 */
2907void
2908mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
2909{
2910	int i;
2911	struct mrsas_mpt_cmd *cmd_mpt;
2912	struct mrsas_mfi_cmd *cmd_mfi;
2913	u_int32_t count, MSIxIndex;
2914
2915	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2916	for (i = 0; i < sc->max_fw_cmds; i++) {
2917		cmd_mpt = sc->mpt_cmd_list[i];
2918
2919		if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2920			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
2921			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
2922				for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
2923					mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
2924					    cmd_mpt->io_request->RaidContext.status);
2925			}
2926		}
2927	}
2928}
2929
2930/*
2931 * mrsas_wait_for_outstanding:	Wait for outstanding commands
2932 * input:						Adapter Context.
2933 *
2934 * This function will wait for 180 seconds for outstanding commands to be
2935 * completed.
2936 */
2937int
2938mrsas_wait_for_outstanding(struct mrsas_softc *sc)
2939{
2940	int i, outstanding, retval = 0;
2941	u_int32_t fw_state, count, MSIxIndex;
2942
2943
2944	for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
2945		if (sc->remove_in_progress) {
2946			mrsas_dprint(sc, MRSAS_OCR,
2947			    "Driver remove or shutdown called.\n");
2948			retval = 1;
2949			goto out;
2950		}
2951		/* Check if firmware is in fault state */
2952		fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2953		    outbound_scratch_pad)) & MFI_STATE_MASK;
2954		if (fw_state == MFI_STATE_FAULT) {
2955			mrsas_dprint(sc, MRSAS_OCR,
2956			    "Found FW in FAULT state, will reset adapter.\n");
2957			retval = 1;
2958			goto out;
2959		}
2960		outstanding = mrsas_atomic_read(&sc->fw_outstanding);
2961		if (!outstanding)
2962			goto out;
2963
2964		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
2965			mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
2966			    "commands to complete\n", i, outstanding);
2967			count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
2968			for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
2969				mrsas_complete_cmd(sc, MSIxIndex);
2970		}
2971		DELAY(1000 * 1000);
2972	}
2973
2974	if (mrsas_atomic_read(&sc->fw_outstanding)) {
2975		mrsas_dprint(sc, MRSAS_OCR,
2976		    " pending commands remain after waiting,"
2977		    " will reset adapter.\n");
2978		retval = 1;
2979	}
2980out:
2981	return retval;
2982}
2983
2984/*
2985 * mrsas_release_mfi_cmd:	Return a cmd to free command pool
2986 * input:					Command packet for return to free cmd pool
2987 *
2988 * This function returns the MFI command to the command list.
2989 */
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
{
	struct mrsas_softc *sc = cmd->sc;

	/* Clear per-use state and return the command to the free pool. */
	mtx_lock(&sc->mfi_cmd_pool_lock);
	cmd->ccb_ptr = NULL;
	cmd->cmd_id.frame_count = 0;
	TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return;
}
3003
3004/*
3005 * mrsas_get_controller_info:	Returns FW's controller structure
3006 * input:						Adapter soft state
3007 * 								Controller information structure
3008 *
3009 * Issues an internal command (DCMD) to get the FW's controller structure. This
3010 * information is mainly used to find out the maximum IO transfer per command
3011 * supported by the FW.
3012 */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA-able scratch buffer that firmware fills with controller info. */
	if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build MR_DCMD_CTRL_GET_INFO as a single-SGE read DCMD. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;	/* "pending" sentinel for polled completion */
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
	dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);

	/* On success, copy the result out of the DMA buffer into the softc. */
	if (!mrsas_issue_polled(sc, cmd))
		memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
	else
		retcode = 1;

	/*
	 * NOTE(review): this is invoked even when the DCMD failed, so VD
	 * limits are then derived from whatever sc->ctrl_info previously
	 * held — confirm this is intentional.
	 */
	mrsas_update_ext_vd_details(sc);

	mrsas_free_ctlr_info_cmd(sc);
	mrsas_release_mfi_cmd(cmd);
	return (retcode);
}
3057
3058/*
3059 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
3060 * input:
3061 *	sc - Controller's softc
3062*/
3063static void
3064mrsas_update_ext_vd_details(struct mrsas_softc *sc)
3065{
3066	sc->max256vdSupport =
3067	sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
3068	/* Below is additional check to address future FW enhancement */
3069	if (sc->ctrl_info->max_lds > 64)
3070		sc->max256vdSupport = 1;
3071
3072	sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
3073	    * MRSAS_MAX_DEV_PER_CHANNEL;
3074	sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
3075	    * MRSAS_MAX_DEV_PER_CHANNEL;
3076	if (sc->max256vdSupport) {
3077		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
3078		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3079	} else {
3080		sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
3081		sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
3082	}
3083
3084	sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
3085	    (sizeof(MR_LD_SPAN_MAP) *
3086	    (sc->fw_supported_vd_count - 1));
3087	sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
3088	sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
3089	    (sizeof(MR_LD_SPAN_MAP) *
3090	    (sc->drv_supported_vd_count - 1));
3091
3092	sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);
3093
3094	if (sc->max256vdSupport)
3095		sc->current_map_sz = sc->new_map_sz;
3096	else
3097		sc->current_map_sz = sc->old_map_sz;
3098}
3099
3100/*
3101 * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
3102 * input:						Adapter soft state
3103 *
3104 * Allocates DMAable memory for the controller info internal command.
3105 */
3106int
3107mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
3108{
3109	int ctlr_info_size;
3110
3111	/* Allocate get controller info command */
3112	ctlr_info_size = sizeof(struct mrsas_ctrl_info);
3113	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3114	    1, 0,
3115	    BUS_SPACE_MAXADDR_32BIT,
3116	    BUS_SPACE_MAXADDR,
3117	    NULL, NULL,
3118	    ctlr_info_size,
3119	    1,
3120	    ctlr_info_size,
3121	    BUS_DMA_ALLOCNOW,
3122	    NULL, NULL,
3123	    &sc->ctlr_info_tag)) {
3124		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
3125		return (ENOMEM);
3126	}
3127	if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
3128	    BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
3129		device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
3130		return (ENOMEM);
3131	}
3132	if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
3133	    sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
3134	    &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
3135		device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
3136		return (ENOMEM);
3137	}
3138	memset(sc->ctlr_info_mem, 0, ctlr_info_size);
3139	return (0);
3140}
3141
3142/*
3143 * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
3144 * input:						Adapter soft state
3145 *
3146 * Deallocates memory of the get controller info cmd.
3147 */
void
mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{
	/* Each step is guarded, so this is safe after a partial allocation. */
	if (sc->ctlr_info_phys_addr)
		bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_mem != NULL)
		bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
	if (sc->ctlr_info_tag != NULL)
		bus_dma_tag_destroy(sc->ctlr_info_tag);
}
3158
3159/*
3160 * mrsas_issue_polled:	Issues a polling command
3161 * inputs:				Adapter soft state
3162 * 						Command packet to be issued
3163 *
3164 * This function is for posting of internal commands to Firmware.  MFI requires
3165 * the cmd_status to be set to 0xFF before posting.  The maximun wait time of
3166 * the poll response timer is 180 seconds.
3167 */
3168int
3169mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3170{
3171	struct mrsas_header *frame_hdr = &cmd->frame->hdr;
3172	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3173	int i, retcode = 0;
3174
3175	frame_hdr->cmd_status = 0xFF;
3176	frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
3177
3178	/* Issue the frame using inbound queue port */
3179	if (mrsas_issue_dcmd(sc, cmd)) {
3180		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
3181		return (1);
3182	}
3183	/*
3184	 * Poll response timer to wait for Firmware response.  While this
3185	 * timer with the DELAY call could block CPU, the time interval for
3186	 * this is only 1 millisecond.
3187	 */
3188	if (frame_hdr->cmd_status == 0xFF) {
3189		for (i = 0; i < (max_wait * 1000); i++) {
3190			if (frame_hdr->cmd_status == 0xFF)
3191				DELAY(1000);
3192			else
3193				break;
3194		}
3195	}
3196	if (frame_hdr->cmd_status != 0) {
3197		if (frame_hdr->cmd_status == 0xFF)
3198			device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
3199		else
3200			device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
3201		retcode = 1;
3202	}
3203	return (retcode);
3204}
3205
3206/*
3207 * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
3208 * input:				Adapter soft state mfi cmd pointer
3209 *
3210 * This function is called by mrsas_issued_blocked_cmd() and
3211 * mrsas_issued_polled(), to build the MPT command and then fire the command
3212 * to Firmware.
3213 */
3214int
3215mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3216{
3217	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3218
3219	req_desc = mrsas_build_mpt_cmd(sc, cmd);
3220	if (!req_desc) {
3221		device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
3222		return (1);
3223	}
3224	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
3225
3226	return (0);
3227}
3228
3229/*
3230 * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
3231 * input:				Adapter soft state mfi cmd to build
3232 *
3233 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
3234 * command and prepares the MPT command to send to Firmware.
3235 */
3236MRSAS_REQUEST_DESCRIPTOR_UNION *
3237mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3238{
3239	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3240	u_int16_t index;
3241
3242	if (mrsas_build_mptmfi_passthru(sc, cmd)) {
3243		device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
3244		return NULL;
3245	}
3246	index = cmd->cmd_id.context.smid;
3247
3248	req_desc = mrsas_get_request_desc(sc, index - 1);
3249	if (!req_desc)
3250		return NULL;
3251
3252	req_desc->addr.Words = 0;
3253	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3254
3255	req_desc->SCSIIO.SMID = index;
3256
3257	return (req_desc);
3258}
3259
3260/*
3261 * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
3262 * input:						Adapter soft state mfi cmd pointer
3263 *
3264 * The MPT command and the io_request are setup as a passthru command. The SGE
3265 * chain address is set to frame_phys_addr of the MFI command.
3266 */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	/* An MPT command slot is consumed to carry this MFI frame to FW. */
	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return (1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link so completion can find the MFI cmd from the MPT cmd. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check on
	 * completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
		/* Clear the Flags of the last SGE of the main message frame. */
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}
	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain;

	/* The io_request is a passthru whose SGL chains to the MFI frame. */
	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;	/* in DWORDs */
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* Chain element points at the DMA address of the MFI frame. */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;

	return (0);
}
3316
3317/*
3318 * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
3319 * input:					Adapter soft state Command to be issued
3320 *
3321 * This function waits on an event for the command to be returned from the ISR.
3322 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
3323 * internal and ioctl commands.
3324 */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = 0;

	/* Initialize cmd_status; ECONNREFUSED doubles as "still pending". */
	cmd->cmd_status = ECONNREFUSED;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return (1);
	}
	/*
	 * NOTE(review): sc->chan is assigned the address of the local
	 * parameter 'cmd', but the actual sleep channel used below is
	 * &sc->chan itself (mrsas_wakeup() wakes the same address), so the
	 * stored value is never dereferenced.
	 */
	sc->chan = (void *)&cmd;

	/* Sleep up to 1s at a time until the ISR completes the command. */
	while (1) {
		if (cmd->cmd_status == ECONNREFUSED) {
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;
		total_time++;	/* counts ~1s sleep intervals */
		if (total_time >= max_wait) {
			device_printf(sc->mrsas_dev,
			    "Internal command timed out after %d seconds.\n", max_wait);
			retcode = 1;
			break;
		}
	}
	return (retcode);
}
3357
3358/*
3359 * mrsas_complete_mptmfi_passthru:	Completes a command
3360 * input:	@sc:					Adapter soft state
3361 * 			@cmd:					Command to be completed
3362 * 			@status:				cmd completion status
3363 *
3364 * This function is called from mrsas_complete_cmd() after an interrupt is
3365 * received from Firmware, and io_request->Function is
3366 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
3367 */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
	/* NOTE(review): 'status' is unused; the frame's cmd_status is read. */
	struct mrsas_header *hdr = &cmd->frame->hdr;
	u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

	/* Reset the retry counter for future re-tries */
	cmd->retry_for_fw_reset = 0;

	if (cmd->ccb_ptr)
		cmd->ccb_ptr = NULL;

	switch (hdr->cmd) {
	case MFI_CMD_INVALID:
		device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
		break;
	case MFI_CMD_PD_SCSI_IO:
	case MFI_CMD_LD_SCSI_IO:
		/*
		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
		 * issued either through an IO path or an IOCTL path. If it
		 * was via IOCTL, we will send it to internal completion.
		 */
		if (cmd->sync_cmd) {
			cmd->sync_cmd = 0;
			mrsas_wakeup(sc, cmd);
			break;
		}
		/* FALLTHROUGH: non-IOCTL SCSI cmds share the handling below. */
	case MFI_CMD_SMP:
	case MFI_CMD_STP:
	case MFI_CMD_DCMD:
		/* Check for LD map update */
		if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
		    (cmd->frame->dcmd.mbox.b[1] == 1)) {
			/* Disable fast path until the new map validates. */
			sc->fast_path_io = 0;
			mtx_lock(&sc->raidmap_lock);
			if (cmd_status != 0) {
				if (cmd_status != MFI_STAT_NOT_FOUND)
					device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
				else {
					/* No map change reported; recycle the cmd. */
					mrsas_release_mfi_cmd(cmd);
					mtx_unlock(&sc->raidmap_lock);
					break;
				}
			} else
				sc->map_id++;
			mrsas_release_mfi_cmd(cmd);
			/* Re-validate, then immediately re-arm the map sync. */
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 0;
			else
				sc->fast_path_io = 1;
			mrsas_sync_map_info(sc);
			mtx_unlock(&sc->raidmap_lock);
			break;
		}
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
		    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
			sc->mrsas_aen_triggered = 0;
		}
		/* See if got an event notification */
		if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
			mrsas_complete_aen(sc, cmd);
		else
			mrsas_wakeup(sc, cmd);
		break;
	case MFI_CMD_ABORT:
		/* Command issued to abort another cmd return */
		mrsas_complete_abort(sc, cmd);
		break;
	default:
		device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
		break;
	}
}
3443
3444/*
3445 * mrsas_wakeup:	Completes an internal command
3446 * input:			Adapter soft state
3447 * 					Command to be completed
3448 *
3449 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
3450 * timer is started.  This function is called from
3451 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
3452 * from the command wait.
3453 */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	cmd->cmd_status = cmd->frame->io.cmd_status;

	/*
	 * ECONNREFUSED is the "pending" sentinel checked by the sleeper in
	 * mrsas_issue_blocked_cmd(); if firmware's status byte happens to
	 * collide with it, force 0 so the sleeper sees completion.
	 */
	if (cmd->cmd_status == ECONNREFUSED)
		cmd->cmd_status = 0;

	/*
	 * NOTE(review): sc->chan stores the address of the local 'cmd'
	 * parameter, but the wakeup channel is &sc->chan itself — matching
	 * the tsleep() in mrsas_issue_blocked_cmd().
	 */
	sc->chan = (void *)&cmd;
	wakeup_one((void *)&sc->chan);
	return;
}
3466
3467/*
3468 * mrsas_shutdown_ctlr:       Instructs FW to shutdown the controller input:
3469 * Adapter soft state Shutdown/Hibernate
3470 *
3471 * This function issues a DCMD internal command to Firmware to initiate shutdown
3472 * of the controller.
3473 */
3474static void
3475mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3476{
3477	struct mrsas_mfi_cmd *cmd;
3478	struct mrsas_dcmd_frame *dcmd;
3479
3480	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3481		return;
3482
3483	cmd = mrsas_get_mfi_cmd(sc);
3484	if (!cmd) {
3485		device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
3486		return;
3487	}
3488	if (sc->aen_cmd)
3489		mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3490
3491	if (sc->map_update_cmd)
3492		mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3493
3494	dcmd = &cmd->frame->dcmd;
3495	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3496
3497	dcmd->cmd = MFI_CMD_DCMD;
3498	dcmd->cmd_status = 0x0;
3499	dcmd->sge_count = 0;
3500	dcmd->flags = MFI_FRAME_DIR_NONE;
3501	dcmd->timeout = 0;
3502	dcmd->pad_0 = 0;
3503	dcmd->data_xfer_len = 0;
3504	dcmd->opcode = opcode;
3505
3506	device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");
3507
3508	mrsas_issue_blocked_cmd(sc, cmd);
3509	mrsas_release_mfi_cmd(cmd);
3510
3511	return;
3512}
3513
3514/*
3515 * mrsas_flush_cache:         Requests FW to flush all its caches input:
3516 * Adapter soft state
3517 *
3518 * This function is issues a DCMD internal command to Firmware to initiate
3519 * flushing of all caches.
3520 */
3521static void
3522mrsas_flush_cache(struct mrsas_softc *sc)
3523{
3524	struct mrsas_mfi_cmd *cmd;
3525	struct mrsas_dcmd_frame *dcmd;
3526
3527	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3528		return;
3529
3530	cmd = mrsas_get_mfi_cmd(sc);
3531	if (!cmd) {
3532		device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
3533		return;
3534	}
3535	dcmd = &cmd->frame->dcmd;
3536	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3537
3538	dcmd->cmd = MFI_CMD_DCMD;
3539	dcmd->cmd_status = 0x0;
3540	dcmd->sge_count = 0;
3541	dcmd->flags = MFI_FRAME_DIR_NONE;
3542	dcmd->timeout = 0;
3543	dcmd->pad_0 = 0;
3544	dcmd->data_xfer_len = 0;
3545	dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3546	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3547
3548	mrsas_issue_blocked_cmd(sc, cmd);
3549	mrsas_release_mfi_cmd(cmd);
3550
3551	return;
3552}
3553
3554/*
3555 * mrsas_get_map_info:        Load and validate RAID map input:
3556 * Adapter instance soft state
3557 *
3558 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
3559 * and validate RAID map.  It returns 0 if successful, 1 other- wise.
3560 */
3561static int
3562mrsas_get_map_info(struct mrsas_softc *sc)
3563{
3564	uint8_t retcode = 0;
3565
3566	sc->fast_path_io = 0;
3567	if (!mrsas_get_ld_map_info(sc)) {
3568		retcode = MR_ValidateMapInfo(sc);
3569		if (retcode == 0) {
3570			sc->fast_path_io = 1;
3571			return 0;
3572		}
3573	}
3574	return 1;
3575}
3576
3577/*
3578 * mrsas_get_ld_map_info:      Get FW's ld_map structure input:
3579 * Adapter instance soft state
3580 *
3581 * Issues an internal command (DCMD) to get the FW's controller PD list
3582 * structure.
3583 */
3584static int
3585mrsas_get_ld_map_info(struct mrsas_softc *sc)
3586{
3587	int retcode = 0;
3588	struct mrsas_mfi_cmd *cmd;
3589	struct mrsas_dcmd_frame *dcmd;
3590	void *map;
3591	bus_addr_t map_phys_addr = 0;
3592
3593	cmd = mrsas_get_mfi_cmd(sc);
3594	if (!cmd) {
3595		device_printf(sc->mrsas_dev,
3596		    "Cannot alloc for ld map info cmd.\n");
3597		return 1;
3598	}
3599	dcmd = &cmd->frame->dcmd;
3600
3601	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
3602	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3603	if (!map) {
3604		device_printf(sc->mrsas_dev,
3605		    "Failed to alloc mem for ld map info.\n");
3606		mrsas_release_mfi_cmd(cmd);
3607		return (ENOMEM);
3608	}
3609	memset(map, 0, sizeof(sc->max_map_sz));
3610	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3611
3612	dcmd->cmd = MFI_CMD_DCMD;
3613	dcmd->cmd_status = 0xFF;
3614	dcmd->sge_count = 1;
3615	dcmd->flags = MFI_FRAME_DIR_READ;
3616	dcmd->timeout = 0;
3617	dcmd->pad_0 = 0;
3618	dcmd->data_xfer_len = sc->current_map_sz;
3619	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3620	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3621	dcmd->sgl.sge32[0].length = sc->current_map_sz;
3622
3623	if (!mrsas_issue_polled(sc, cmd))
3624		retcode = 0;
3625	else {
3626		device_printf(sc->mrsas_dev,
3627		    "Fail to send get LD map info cmd.\n");
3628		retcode = 1;
3629	}
3630	mrsas_release_mfi_cmd(cmd);
3631
3632	return (retcode);
3633}
3634
3635/*
3636 * mrsas_sync_map_info:        Get FW's ld_map structure input:
3637 * Adapter instance soft state
3638 *
3639 * Issues an internal command (DCMD) to get the FW's controller PD list
3640 * structure.
3641 */
3642static int
3643mrsas_sync_map_info(struct mrsas_softc *sc)
3644{
3645	int retcode = 0, i;
3646	struct mrsas_mfi_cmd *cmd;
3647	struct mrsas_dcmd_frame *dcmd;
3648	uint32_t size_sync_info, num_lds;
3649	MR_LD_TARGET_SYNC *target_map = NULL;
3650	MR_DRV_RAID_MAP_ALL *map;
3651	MR_LD_RAID *raid;
3652	MR_LD_TARGET_SYNC *ld_sync;
3653	bus_addr_t map_phys_addr = 0;
3654
3655	cmd = mrsas_get_mfi_cmd(sc);
3656	if (!cmd) {
3657		device_printf(sc->mrsas_dev,
3658		    "Cannot alloc for sync map info cmd\n");
3659		return 1;
3660	}
3661	map = sc->ld_drv_map[sc->map_id & 1];
3662	num_lds = map->raidMap.ldCount;
3663
3664	dcmd = &cmd->frame->dcmd;
3665	size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3666	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3667
3668	target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
3669	memset(target_map, 0, sc->max_map_sz);
3670
3671	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3672
3673	ld_sync = (MR_LD_TARGET_SYNC *) target_map;
3674
3675	for (i = 0; i < num_lds; i++, ld_sync++) {
3676		raid = MR_LdRaidGet(i, map);
3677		ld_sync->targetId = MR_GetLDTgtId(i, map);
3678		ld_sync->seqNum = raid->seqNum;
3679	}
3680
3681	dcmd->cmd = MFI_CMD_DCMD;
3682	dcmd->cmd_status = 0xFF;
3683	dcmd->sge_count = 1;
3684	dcmd->flags = MFI_FRAME_DIR_WRITE;
3685	dcmd->timeout = 0;
3686	dcmd->pad_0 = 0;
3687	dcmd->data_xfer_len = sc->current_map_sz;
3688	dcmd->mbox.b[0] = num_lds;
3689	dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3690	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3691	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3692	dcmd->sgl.sge32[0].length = sc->current_map_sz;
3693
3694	sc->map_update_cmd = cmd;
3695	if (mrsas_issue_dcmd(sc, cmd)) {
3696		device_printf(sc->mrsas_dev,
3697		    "Fail to send sync map info command.\n");
3698		return (1);
3699	}
3700	return (retcode);
3701}
3702
3703/*
3704 * mrsas_get_pd_list:           Returns FW's PD list structure input:
3705 * Adapter soft state
3706 *
3707 * Issues an internal command (DCMD) to get the FW's controller PD list
3708 * structure.  This information is mainly used to find out about system
3709 * supported by Firmware.
3710 */
3711static int
3712mrsas_get_pd_list(struct mrsas_softc *sc)
3713{
3714	int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
3715	struct mrsas_mfi_cmd *cmd;
3716	struct mrsas_dcmd_frame *dcmd;
3717	struct MR_PD_LIST *pd_list_mem;
3718	struct MR_PD_ADDRESS *pd_addr;
3719	bus_addr_t pd_list_phys_addr = 0;
3720	struct mrsas_tmp_dcmd *tcmd;
3721
3722	cmd = mrsas_get_mfi_cmd(sc);
3723	if (!cmd) {
3724		device_printf(sc->mrsas_dev,
3725		    "Cannot alloc for get PD list cmd\n");
3726		return 1;
3727	}
3728	dcmd = &cmd->frame->dcmd;
3729
3730	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3731	pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3732	if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
3733		device_printf(sc->mrsas_dev,
3734		    "Cannot alloc dmamap for get PD list cmd\n");
3735		mrsas_release_mfi_cmd(cmd);
3736		return (ENOMEM);
3737	} else {
3738		pd_list_mem = tcmd->tmp_dcmd_mem;
3739		pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3740	}
3741	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3742
3743	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3744	dcmd->mbox.b[1] = 0;
3745	dcmd->cmd = MFI_CMD_DCMD;
3746	dcmd->cmd_status = 0xFF;
3747	dcmd->sge_count = 1;
3748	dcmd->flags = MFI_FRAME_DIR_READ;
3749	dcmd->timeout = 0;
3750	dcmd->pad_0 = 0;
3751	dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3752	dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
3753	dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
3754	dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3755
3756	if (!mrsas_issue_polled(sc, cmd))
3757		retcode = 0;
3758	else
3759		retcode = 1;
3760
3761	/* Get the instance PD list */
3762	pd_count = MRSAS_MAX_PD;
3763	pd_addr = pd_list_mem->addr;
3764	if (retcode == 0 && pd_list_mem->count < pd_count) {
3765		memset(sc->local_pd_list, 0,
3766		    MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3767		for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
3768			sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
3769			sc->local_pd_list[pd_addr->deviceId].driveType =
3770			    pd_addr->scsiDevType;
3771			sc->local_pd_list[pd_addr->deviceId].driveState =
3772			    MR_PD_STATE_SYSTEM;
3773			pd_addr++;
3774		}
3775	}
3776	/*
3777	 * Use mutext/spinlock if pd_list component size increase more than
3778	 * 32 bit.
3779	 */
3780	memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
3781	mrsas_free_tmp_dcmd(tcmd);
3782	mrsas_release_mfi_cmd(cmd);
3783	free(tcmd, M_MRSAS);
3784	return (retcode);
3785}
3786
3787/*
3788 * mrsas_get_ld_list:           Returns FW's LD list structure input:
3789 * Adapter soft state
3790 *
3791 * Issues an internal command (DCMD) to get the FW's controller PD list
3792 * structure.  This information is mainly used to find out about supported by
3793 * the FW.
3794 */
3795static int
3796mrsas_get_ld_list(struct mrsas_softc *sc)
3797{
3798	int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
3799	struct mrsas_mfi_cmd *cmd;
3800	struct mrsas_dcmd_frame *dcmd;
3801	struct MR_LD_LIST *ld_list_mem;
3802	bus_addr_t ld_list_phys_addr = 0;
3803	struct mrsas_tmp_dcmd *tcmd;
3804
3805	cmd = mrsas_get_mfi_cmd(sc);
3806	if (!cmd) {
3807		device_printf(sc->mrsas_dev,
3808		    "Cannot alloc for get LD list cmd\n");
3809		return 1;
3810	}
3811	dcmd = &cmd->frame->dcmd;
3812
3813	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3814	ld_list_size = sizeof(struct MR_LD_LIST);
3815	if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
3816		device_printf(sc->mrsas_dev,
3817		    "Cannot alloc dmamap for get LD list cmd\n");
3818		mrsas_release_mfi_cmd(cmd);
3819		return (ENOMEM);
3820	} else {
3821		ld_list_mem = tcmd->tmp_dcmd_mem;
3822		ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3823	}
3824	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3825
3826	if (sc->max256vdSupport)
3827		dcmd->mbox.b[0] = 1;
3828
3829	dcmd->cmd = MFI_CMD_DCMD;
3830	dcmd->cmd_status = 0xFF;
3831	dcmd->sge_count = 1;
3832	dcmd->flags = MFI_FRAME_DIR_READ;
3833	dcmd->timeout = 0;
3834	dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
3835	dcmd->opcode = MR_DCMD_LD_GET_LIST;
3836	dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
3837	dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
3838	dcmd->pad_0 = 0;
3839
3840	if (!mrsas_issue_polled(sc, cmd))
3841		retcode = 0;
3842	else
3843		retcode = 1;
3844
3845#if VD_EXT_DEBUG
3846	printf("Number of LDs %d\n", ld_list_mem->ldCount);
3847#endif
3848
3849	/* Get the instance LD list */
3850	if ((retcode == 0) &&
3851	    (ld_list_mem->ldCount <= sc->fw_supported_vd_count)) {
3852		sc->CurLdCount = ld_list_mem->ldCount;
3853		memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
3854		for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
3855			if (ld_list_mem->ldList[ld_index].state != 0) {
3856				ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3857				sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3858			}
3859		}
3860	}
3861	mrsas_free_tmp_dcmd(tcmd);
3862	mrsas_release_mfi_cmd(cmd);
3863	free(tcmd, M_MRSAS);
3864	return (retcode);
3865}
3866
3867/*
3868 * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command input:
3869 * Adapter soft state Temp command Size of alloction
3870 *
3871 * Allocates DMAable memory for a temporary internal command. The allocated
3872 * memory is initialized to all zeros upon successful loading of the dma
3873 * mapped memory.
3874 */
3875int
3876mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc,
3877    struct mrsas_tmp_dcmd *tcmd, int size)
3878{
3879	if (bus_dma_tag_create(sc->mrsas_parent_tag,
3880	    1, 0,
3881	    BUS_SPACE_MAXADDR_32BIT,
3882	    BUS_SPACE_MAXADDR,
3883	    NULL, NULL,
3884	    size,
3885	    1,
3886	    size,
3887	    BUS_DMA_ALLOCNOW,
3888	    NULL, NULL,
3889	    &tcmd->tmp_dcmd_tag)) {
3890		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
3891		return (ENOMEM);
3892	}
3893	if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
3894	    BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
3895		device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
3896		return (ENOMEM);
3897	}
3898	if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
3899	    tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
3900	    &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
3901		device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
3902		return (ENOMEM);
3903	}
3904	memset(tcmd->tmp_dcmd_mem, 0, size);
3905	return (0);
3906}
3907
3908/*
3909 * mrsas_free_tmp_dcmd:      Free memory for temporary command input:
3910 * temporary dcmd pointer
3911 *
3912 * Deallocates memory of the temporary command for use in the construction of
3913 * the internal DCMD.
3914 */
3915void
3916mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
3917{
3918	if (tmp->tmp_dcmd_phys_addr)
3919		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
3920	if (tmp->tmp_dcmd_mem != NULL)
3921		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
3922	if (tmp->tmp_dcmd_tag != NULL)
3923		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
3924}
3925
3926/*
3927 * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd input:
3928 * Adapter soft state Previously issued cmd to be aborted
3929 *
3930 * This function is used to abort previously issued commands, such as AEN and
3931 * RAID map sync map commands.  The abort command is sent as a DCMD internal
3932 * command and subsequently the driver will wait for a return status.  The
3933 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
3934 */
3935static int
3936mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
3937    struct mrsas_mfi_cmd *cmd_to_abort)
3938{
3939	struct mrsas_mfi_cmd *cmd;
3940	struct mrsas_abort_frame *abort_fr;
3941	u_int8_t retcode = 0;
3942	unsigned long total_time = 0;
3943	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3944
3945	cmd = mrsas_get_mfi_cmd(sc);
3946	if (!cmd) {
3947		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
3948		return (1);
3949	}
3950	abort_fr = &cmd->frame->abort;
3951
3952	/* Prepare and issue the abort frame */
3953	abort_fr->cmd = MFI_CMD_ABORT;
3954	abort_fr->cmd_status = 0xFF;
3955	abort_fr->flags = 0;
3956	abort_fr->abort_context = cmd_to_abort->index;
3957	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
3958	abort_fr->abort_mfi_phys_addr_hi = 0;
3959
3960	cmd->sync_cmd = 1;
3961	cmd->cmd_status = 0xFF;
3962
3963	if (mrsas_issue_dcmd(sc, cmd)) {
3964		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
3965		return (1);
3966	}
3967	/* Wait for this cmd to complete */
3968	sc->chan = (void *)&cmd;
3969	while (1) {
3970		if (cmd->cmd_status == 0xFF) {
3971			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3972		} else
3973			break;
3974		total_time++;
3975		if (total_time >= max_wait) {
3976			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
3977			retcode = 1;
3978			break;
3979		}
3980	}
3981
3982	cmd->sync_cmd = 0;
3983	mrsas_release_mfi_cmd(cmd);
3984	return (retcode);
3985}
3986
3987/*
3988 * mrsas_complete_abort:      Completes aborting a command input:
3989 * Adapter soft state Cmd that was issued to abort another cmd
3990 *
3991 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
3992 * change after sending the command.  This function is called from
3993 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
3994 */
3995void
3996mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3997{
3998	if (cmd->sync_cmd) {
3999		cmd->sync_cmd = 0;
4000		cmd->cmd_status = 0;
4001		sc->chan = (void *)&cmd;
4002		wakeup_one((void *)&sc->chan);
4003	}
4004	return;
4005}
4006
4007/*
4008 * mrsas_aen_handler:	AEN processing callback function from thread context
4009 * input:				Adapter soft state
4010 *
4011 * Asynchronous event handler
4012 */
4013void
4014mrsas_aen_handler(struct mrsas_softc *sc)
4015{
4016	union mrsas_evt_class_locale class_locale;
4017	int doscan = 0;
4018	u_int32_t seq_num;
4019	int error;
4020
4021	if (!sc) {
4022		device_printf(sc->mrsas_dev, "invalid instance!\n");
4023		return;
4024	}
4025	if (sc->evt_detail_mem) {
4026		switch (sc->evt_detail_mem->code) {
4027		case MR_EVT_PD_INSERTED:
4028			mrsas_get_pd_list(sc);
4029			mrsas_bus_scan_sim(sc, sc->sim_1);
4030			doscan = 0;
4031			break;
4032		case MR_EVT_PD_REMOVED:
4033			mrsas_get_pd_list(sc);
4034			mrsas_bus_scan_sim(sc, sc->sim_1);
4035			doscan = 0;
4036			break;
4037		case MR_EVT_LD_OFFLINE:
4038		case MR_EVT_CFG_CLEARED:
4039		case MR_EVT_LD_DELETED:
4040			mrsas_bus_scan_sim(sc, sc->sim_0);
4041			doscan = 0;
4042			break;
4043		case MR_EVT_LD_CREATED:
4044			mrsas_get_ld_list(sc);
4045			mrsas_bus_scan_sim(sc, sc->sim_0);
4046			doscan = 0;
4047			break;
4048		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
4049		case MR_EVT_FOREIGN_CFG_IMPORTED:
4050		case MR_EVT_LD_STATE_CHANGE:
4051			doscan = 1;
4052			break;
4053		default:
4054			doscan = 0;
4055			break;
4056		}
4057	} else {
4058		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
4059		return;
4060	}
4061	if (doscan) {
4062		mrsas_get_pd_list(sc);
4063		mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
4064		mrsas_bus_scan_sim(sc, sc->sim_1);
4065		mrsas_get_ld_list(sc);
4066		mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
4067		mrsas_bus_scan_sim(sc, sc->sim_0);
4068	}
4069	seq_num = sc->evt_detail_mem->seq_num + 1;
4070
4071	/* Register AEN with FW for latest sequence number plus 1 */
4072	class_locale.members.reserved = 0;
4073	class_locale.members.locale = MR_EVT_LOCALE_ALL;
4074	class_locale.members.class = MR_EVT_CLASS_DEBUG;
4075
4076	if (sc->aen_cmd != NULL)
4077		return;
4078
4079	mtx_lock(&sc->aen_lock);
4080	error = mrsas_register_aen(sc, seq_num,
4081	    class_locale.word);
4082	mtx_unlock(&sc->aen_lock);
4083
4084	if (error)
4085		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
4086
4087}
4088
4089
4090/*
4091 * mrsas_complete_aen:	Completes AEN command
4092 * input:				Adapter soft state
4093 * 						Cmd that was issued to abort another cmd
4094 *
4095 * This function will be called from ISR and will continue event processing from
4096 * thread context by enqueuing task in ev_tq (callback function
4097 * "mrsas_aen_handler").
4098 */
4099void
4100mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
4101{
4102	/*
4103	 * Don't signal app if it is just an aborted previously registered
4104	 * aen
4105	 */
4106	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
4107		sc->mrsas_aen_triggered = 1;
4108		mtx_lock(&sc->aen_lock);
4109		if (sc->mrsas_poll_waiting) {
4110			sc->mrsas_poll_waiting = 0;
4111			selwakeup(&sc->mrsas_select);
4112		}
4113		mtx_unlock(&sc->aen_lock);
4114	} else
4115		cmd->abort_aen = 0;
4116
4117	sc->aen_cmd = NULL;
4118	mrsas_release_mfi_cmd(cmd);
4119
4120	if (!sc->remove_in_progress)
4121		taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
4122
4123	return;
4124}
4125
/* Newbus device interface entry points for the mrsas(4) driver. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}	/* terminator entry */
};

/* Driver description: name, method table and per-device softc size. */
static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;

/* Register the driver on the PCI bus and declare the CAM dependency. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);
4147