1/*
2 * Copyright (c) 2014, LSI Corp.
3 * All rights reserved.
4 * Author: Marian Choy
5 * Support: freebsdraid@lsi.com
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in
15 *    the documentation and/or other materials provided with the
16 *    distribution.
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 *    contributors may be used to endorse or promote products derived
19 *    from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 *
34 * The views and conclusions contained in the software and documentation
35 * are those of the authors and should not be interpreted as representing
36 * official policies, either expressed or implied, of the FreeBSD Project.
37 *
38 * Send feedback to: <megaraidfbsd@lsi.com>
39 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
40 *    ATTN: MegaRaid FreeBSD
41 *
42 */
43
44#include <sys/cdefs.h>
45__FBSDID("$FreeBSD: stable/10/sys/dev/mrsas/mrsas.c 273736 2014-10-27 14:38:00Z hselasky $");
46
47#include <dev/mrsas/mrsas.h>
48#include <dev/mrsas/mrsas_ioctl.h>
49
50#include <cam/cam.h>
51#include <cam/cam_ccb.h>
52
53#include <sys/sysctl.h>
54#include <sys/types.h>
55#include <sys/kthread.h>
56#include <sys/taskqueue.h>
57
58
59/*
60 * Function prototypes
61 */
62static d_open_t     mrsas_open;
63static d_close_t    mrsas_close;
64static d_read_t     mrsas_read;
65static d_write_t    mrsas_write;
66static d_ioctl_t    mrsas_ioctl;
67
68static struct mrsas_ident *mrsas_find_ident(device_t);
69static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
70static void mrsas_flush_cache(struct mrsas_softc *sc);
71static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
72static void mrsas_ocr_thread(void *arg);
73static int mrsas_get_map_info(struct mrsas_softc *sc);
74static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
75static int mrsas_sync_map_info(struct mrsas_softc *sc);
76static int mrsas_get_pd_list(struct mrsas_softc *sc);
77static int mrsas_get_ld_list(struct mrsas_softc *sc);
78static int mrsas_setup_irq(struct mrsas_softc *sc);
79static int mrsas_alloc_mem(struct mrsas_softc *sc);
80static int mrsas_init_fw(struct mrsas_softc *sc);
81static int mrsas_setup_raidmap(struct mrsas_softc *sc);
82static int mrsas_complete_cmd(struct mrsas_softc *sc);
83static int mrsas_clear_intr(struct mrsas_softc *sc);
84static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
85                          struct mrsas_ctrl_info *ctrl_info);
86static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
87                         struct mrsas_mfi_cmd *cmd_to_abort);
88u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
89u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
90                         struct mrsas_mfi_cmd *mfi_cmd);
91int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
92int mrsas_init_adapter(struct mrsas_softc *sc);
93int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
94int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
95int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
96int mrsas_ioc_init(struct mrsas_softc *sc);
97int mrsas_bus_scan(struct mrsas_softc *sc);
98int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
99int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
100int mrsas_reset_ctrl(struct mrsas_softc *sc);
101int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
102int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
103                          struct mrsas_mfi_cmd *cmd);
104int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
105                          int size);
106void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
107void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
108void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110void mrsas_disable_intr(struct mrsas_softc *sc);
111void mrsas_enable_intr(struct mrsas_softc *sc);
112void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
113void mrsas_free_mem(struct mrsas_softc *sc);
114void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
115void mrsas_isr(void *arg);
116void mrsas_teardown_intr(struct mrsas_softc *sc);
117void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
118void mrsas_kill_hba (struct mrsas_softc *sc);
119void mrsas_aen_handler(struct mrsas_softc *sc);
120void mrsas_write_reg(struct mrsas_softc *sc, int offset,
121                          u_int32_t value);
122void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
123                          u_int32_t req_desc_hi);
124void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
125void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
126                          struct mrsas_mfi_cmd *cmd, u_int8_t status);
127void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
128                          u_int8_t extStatus);
129struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
130MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc,
131                          struct mrsas_mfi_cmd *cmd);
132
133extern int mrsas_cam_attach(struct mrsas_softc *sc);
134extern void mrsas_cam_detach(struct mrsas_softc *sc);
135extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
136extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
137extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
138extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
139extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
140extern int mrsas_passthru(struct mrsas_softc *sc, void *arg);
141extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
142extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
143extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
144extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
145extern void mrsas_xpt_release(struct mrsas_softc *sc);
146extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
147                         u_int16_t index);
148extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
149static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
150static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
151SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
152
153
154/**
155 * PCI device struct and table
156 *
157 */
158typedef struct mrsas_ident {
159    uint16_t    vendor;
160    uint16_t    device;
161    uint16_t    subvendor;
162    uint16_t    subdevice;
163    const char  *desc;
164} MRSAS_CTLR_ID;
165
166MRSAS_CTLR_ID device_table[] = {
167    {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
168    {0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
169    {0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
170    {0, 0, 0, 0, NULL}
171};
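/* The all-zero entry terminates the table for mrsas_find_ident(). */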
172
173/**
174 * Character device entry points
175 *
176 */
177static struct cdevsw mrsas_cdevsw = {
178    .d_version =    D_VERSION,
179    .d_open =   mrsas_open,
180    .d_close =  mrsas_close,
181    .d_read =   mrsas_read,
182    .d_write =  mrsas_write,
183    .d_ioctl =  mrsas_ioctl,
184    .d_name =   "mrsas",
185};
186
187MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");
188
189/**
190 * In the cdevsw routines, we find our softc by using the si_drv1 member
191 * of struct cdev.  We set this variable to point to our softc in our
192 * attach routine when we create the /dev entry.
193 */
194int
195mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
196{
197    struct mrsas_softc *sc;
198
199    sc = dev->si_drv1;
200    return (0);
201}
202
203int
204mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
205{
206    struct mrsas_softc *sc;
207
208    sc = dev->si_drv1;
209    return (0);
210}
211
212int
213mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
214{
215    struct mrsas_softc *sc;
216
217    sc = dev->si_drv1;
218    return (0);
219}
220int
221mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
222{
223    struct mrsas_softc *sc;
224
225    sc = dev->si_drv1;
226    return (0);
227}
228
229/**
230 * Register Read/Write Functions
231 *
232 */
233void
234mrsas_write_reg(struct mrsas_softc *sc, int offset,
235                  u_int32_t value)
236{
237    bus_space_tag_t         bus_tag = sc->bus_tag;
238    bus_space_handle_t      bus_handle = sc->bus_handle;
239
240    bus_space_write_4(bus_tag, bus_handle, offset, value);
241}
242
243u_int32_t
244mrsas_read_reg(struct mrsas_softc *sc, int offset)
245{
246    bus_space_tag_t bus_tag = sc->bus_tag;
247    bus_space_handle_t bus_handle = sc->bus_handle;
248
249    return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
250}
251
252
253/**
254 * Interrupt Disable/Enable/Clear Functions
255 *
256 */
257void mrsas_disable_intr(struct mrsas_softc *sc)
258{
259    u_int32_t mask = 0xFFFFFFFF;
260    u_int32_t status;
261
262    mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
263    /* Dummy read to force pci flush */
264    status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
265}
266
267void mrsas_enable_intr(struct mrsas_softc *sc)
268{
269    u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
270    u_int32_t status;
271
272    mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
273    status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
274
275    mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
276    status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
277}
278
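/**
 * mrsas_clear_intr:    Acknowledge and classify an interrupt
 * input:               Adapter instance soft state
 *
 * Returns 1 if the interrupt was ours (a FW state change or a reply
 * interrupt), or 0 if it was not our interrupt.
 */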
279static int mrsas_clear_intr(struct mrsas_softc *sc)
280{
281    u_int32_t status, fw_status, fw_state;
282
283    /* Read received interrupt */
284    status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
285
286    /* If a FW state change interrupt was received, write the status back to clear it */
287    if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
288        fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
289                                   outbound_scratch_pad));
290        fw_state = fw_status & MFI_STATE_MASK;
291        if (fw_state == MFI_STATE_FAULT) {
292            device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
293            if(sc->ocr_thread_active)
294                wakeup(&sc->ocr_chan);
295        }
296        mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
297        mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
298        return(1);
299    }
300
301    /* Not our interrupt, so just return */
302    if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
303        return(0);
304
305    /* We got a reply interrupt */
306    return(1);
307}
308
309/**
310 * PCI Support Functions
311 *
312 */
313static struct mrsas_ident * mrsas_find_ident(device_t dev)
314{
315    struct mrsas_ident *pci_device;
316
317    for (pci_device=device_table; pci_device->vendor != 0; pci_device++)
318    {
319        if ((pci_device->vendor == pci_get_vendor(dev)) &&
320            (pci_device->device == pci_get_device(dev)) &&
321            ((pci_device->subvendor == pci_get_subvendor(dev)) ||
322            (pci_device->subvendor == 0xffff)) &&
323            ((pci_device->subdevice == pci_get_subdevice(dev)) ||
324            (pci_device->subdevice == 0xffff)))
325        return (pci_device);
326    }
327    return (NULL);
328}
329
330static int mrsas_probe(device_t dev)
331{
332    static u_int8_t first_ctrl = 1;
333    struct mrsas_ident *id;
334
335    if ((id = mrsas_find_ident(dev)) != NULL) {
336        if (first_ctrl) {
337            printf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n", MRSAS_VERSION);
338            first_ctrl = 0;
339        }
340        device_set_desc(dev, id->desc);
341    	/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
342    	return (-30);
343    }
344    return (ENXIO);
345}
346
347/**
348 * mrsas_setup_sysctl:  setup sysctl values for mrsas
349 * input:               Adapter instance soft state
350 *
351 * Set up sysctl entries for the mrsas driver.
352 */
353static void
354mrsas_setup_sysctl(struct mrsas_softc *sc)
355{
356    struct sysctl_ctx_list  *sysctl_ctx = NULL;
357    struct sysctl_oid       *sysctl_tree = NULL;
358    char tmpstr[80], tmpstr2[80];
359
360    /*
361     * Setup the sysctl variable so the user can change the debug level
362     * on the fly.
363     */
364    snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
365    device_get_unit(sc->mrsas_dev));
366    snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));
367
368    sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
369    if (sysctl_ctx != NULL)
370        sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);
371
372    if (sysctl_tree == NULL) {
373        sysctl_ctx_init(&sc->sysctl_ctx);
374        sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
375            SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
376            CTLFLAG_RD, 0, tmpstr);
377        if (sc->sysctl_tree == NULL)
378             return;
379        sysctl_ctx = &sc->sysctl_ctx;
380        sysctl_tree = sc->sysctl_tree;
381    }
382    SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
383        OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
384        "Disable the use of OCR");
385
386    SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
387        OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
388        strlen(MRSAS_VERSION), "driver version");
389
390    SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
391        OID_AUTO, "reset_count", CTLFLAG_RD,
392        &sc->reset_count, 0, "Number of OCRs since driver load");
393
394    SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
395        OID_AUTO, "fw_outstanding", CTLFLAG_RD,
396        &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");
397
398	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
399        OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
400        &sc->io_cmds_highwater, 0, "Max FW outstanding commands");
401
402    SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
403        OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
404        "Driver debug level");
405
406    SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
407        OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
408        0, "Driver IO timeout value in mili-second.");
409
410    SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
411        OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
412        &sc->mrsas_fw_fault_check_delay,
413        0, "FW fault check thread delay in seconds. <default is 1 sec>");
414
415    SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
416        OID_AUTO, "reset_in_progress", CTLFLAG_RD,
417        &sc->reset_in_progress, 0, "ocr in progress status");
418
419}
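/*
 * Example (hypothetical unit 0): the nodes above normally appear under the
 * device sysctl tree, e.g. "sysctl dev.mrsas.0.mrsas_debug=1" raises the
 * debug level at runtime and "sysctl dev.mrsas.0.fw_outstanding" reports the
 * outstanding FW commands; the hw.mrsas.<unit> node is created only when no
 * device sysctl tree is available.
 */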
420
421/**
422 * mrsas_get_tunables:  get tunable parameters.
423 * input:               Adapter instance soft state
424 *
425 * Get tunable parameters. These help with debugging the driver at boot time.
426 */
427static void
428mrsas_get_tunables(struct mrsas_softc *sc)
429{
430    char tmpstr[80];
431
432    /* XXX default to some debugging for now */
433    sc->mrsas_debug = MRSAS_FAULT;
434    sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
435    sc->mrsas_fw_fault_check_delay = 1;
436    sc->reset_count = 0;
437    sc->reset_in_progress = 0;
438
439    /*
440     * Grab the global variables.
441     */
442    TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);
443
444    /* Grab the unit-instance variables */
445    snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
446        device_get_unit(sc->mrsas_dev));
447    TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
448}
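/*
 * Example loader.conf entries for the tunables fetched above (values are
 * illustrative only):
 *   hw.mrsas.debug_level="1"
 *   dev.mrsas.0.debug_level="1"
 */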
449
450/**
451 * mrsas_alloc_evt_log_info_cmd:	Allocates memory to get event log information.
452 * 					Used to get the sequence number at driver load time.
453 * input:                      	  	Adapter soft state
454 *
455 * Allocates DMAable memory for the event log info internal command.
456 */
457int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
458{
459    int el_info_size;
460
461    /* Allocate get event log info command */
462    el_info_size = sizeof(struct mrsas_evt_log_info);
463    if (bus_dma_tag_create( sc->mrsas_parent_tag,   // parent
464                            1, 0,                   // algnmnt, boundary
465                            BUS_SPACE_MAXADDR_32BIT,// lowaddr
466                            BUS_SPACE_MAXADDR,      // highaddr
467                            NULL, NULL,             // filter, filterarg
468                            el_info_size,          // maxsize
469                            1,                      // nsegments
470                            el_info_size,          // maxsegsize
471                            BUS_DMA_ALLOCNOW,       // flags
472                            NULL, NULL,             // lockfunc, lockarg
473                            &sc->el_info_tag)) {
474        device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
475        return (ENOMEM);
476    }
477    if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
478            BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
479        device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
480        return (ENOMEM);
481    }
482    if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
483            sc->el_info_mem, el_info_size, mrsas_addr_cb,
484            &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
485        device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
486        return (ENOMEM);
487    }
488
489    memset(sc->el_info_mem, 0, el_info_size);
490    return (0);
491}
492
493/**
494 * mrsas_free_evt_log_info_cmd:	Frees memory for the event log info command
495 * input:                    	Adapter soft state
496 *
497 * Deallocates memory for the event log info internal command.
498 */
499void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
500{
501    if (sc->el_info_phys_addr)
502        bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
503    if (sc->el_info_mem != NULL)
504        bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
505    if (sc->el_info_tag != NULL)
506        bus_dma_tag_destroy(sc->el_info_tag);
507}
508
509/**
510 *  mrsas_get_seq_num:	Get latest event sequence number
511 *  @sc:				Adapter soft state
512 *  @eli:				Firmware event log sequence number information.
513 *						Firmware maintains a log of all events in a non-volatile area.
514 *						The driver gets the sequence number using the DCMD
515 *						"MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
516 */
517
518static int
519mrsas_get_seq_num(struct mrsas_softc *sc,
520		    struct mrsas_evt_log_info *eli)
521{
522	struct mrsas_mfi_cmd *cmd;
523	struct mrsas_dcmd_frame *dcmd;
524
525	cmd =  mrsas_get_mfi_cmd(sc);
526
527	if (!cmd) {
528		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
529		return -ENOMEM;
530	}
531
532	dcmd = &cmd->frame->dcmd;
533
534	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
535		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
536		mrsas_release_mfi_cmd(cmd);
537		return -ENOMEM;
538	}
539
540	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
541
542	dcmd->cmd = MFI_CMD_DCMD;
543	dcmd->cmd_status = 0x0;
544	dcmd->sge_count = 1;
545	dcmd->flags = MFI_FRAME_DIR_READ;
546	dcmd->timeout = 0;
547	dcmd->pad_0 = 0;
548	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
549	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
550	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
551	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
552
553	mrsas_issue_blocked_cmd(sc, cmd);
554
555	/*
556 	 * Copy the data back into the caller's buffer
557 	 */
558	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
559	mrsas_free_evt_log_info_cmd(sc);
560	mrsas_release_mfi_cmd(cmd);
561
562	return 0;
563}
564
565
566/**
567 *  mrsas_register_aen:		Register for asynchronous event notification
568 *  @sc:					Adapter soft state
569 *  @seq_num:				Starting sequence number
570 *  @class_locale:			Class of the event
571 *  						This function subscribes to events beyond @seq_num
572 *  						and of type @class_locale.
573 *
574 * */
575static int
576mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
577		     u_int32_t class_locale_word)
578{
579	int ret_val;
580	struct mrsas_mfi_cmd *cmd;
581	struct mrsas_dcmd_frame *dcmd;
582	union mrsas_evt_class_locale curr_aen;
583	union mrsas_evt_class_locale prev_aen;
584
585/*
586 *  If there is an AEN pending already (aen_cmd), check whether the
587 *  class_locale of that pending AEN is inclusive of the new
588 *  AEN request we currently have. If it is, then we don't have
589 *  to do anything. In other words, whichever events the current
590 *  AEN request is subscribing to have already been subscribed
591 *  to.
592 *  If the old_cmd is _not_ inclusive, then we have to abort
593 *  that command, form a class_locale that is a superset of both
594 *  old and current and re-issue to the FW
595 * */
596
597	curr_aen.word = class_locale_word;
598
599	if (sc->aen_cmd) {
600
601		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];
602
603/*
604 * A class whose enum value is smaller is inclusive of all
605 * higher values. If a PROGRESS (= -1) was previously
606 * registered, then new registration requests for higher
607 * classes need not be sent to the FW. They are automatically
608 * included.
609 * Locale numbers don't have such a hierarchy; they are bitmap values.
610 */
611		if ((prev_aen.members.class <= curr_aen.members.class) &&
612	    	!((prev_aen.members.locale & curr_aen.members.locale) ^
613	      	curr_aen.members.locale)) {
614			/*
615  			 * Previously issued event registration includes
616  			 * current request. Nothing to do.
617  			 */
618			return 0;
619		} else {
620			curr_aen.members.locale |= prev_aen.members.locale;
621
622			if (prev_aen.members.class < curr_aen.members.class)
623				curr_aen.members.class = prev_aen.members.class;
624
625			sc->aen_cmd->abort_aen = 1;
626			ret_val = mrsas_issue_blocked_abort_cmd(sc,
627				  sc->aen_cmd);
628
629			if (ret_val) {
630				printf("mrsas: Failed to abort "
631					   "previous AEN command\n");
632				return ret_val;
633			}
634		}
635	}
636
637	cmd =  mrsas_get_mfi_cmd(sc);
638
639	if (!cmd)
640		return -ENOMEM;
641
642	dcmd = &cmd->frame->dcmd;
643
644	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));
645
646/*
647 * Prepare DCMD for aen registration
648 */
649	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
650
651	dcmd->cmd = MFI_CMD_DCMD;
652	dcmd->cmd_status = 0x0;
653	dcmd->sge_count = 1;
654	dcmd->flags = MFI_FRAME_DIR_READ;
655	dcmd->timeout = 0;
656	dcmd->pad_0 = 0;
657	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
658	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
659	dcmd->mbox.w[0] = seq_num;
660    sc->last_seq_num = seq_num;
661	dcmd->mbox.w[1] = curr_aen.word;
662	dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr;
663	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);
664
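	/* Another AEN command is already registered; release ours and return. */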
665	if (sc->aen_cmd != NULL) {
666		mrsas_release_mfi_cmd(cmd);
667		return 0;
668	}
669
670	/*
671  	 * Store reference to the cmd used to register for AEN. When an
672  	 * application wants us to register for AEN, we have to abort this
673   	 * cmd and re-register with a new EVENT LOCALE supplied by that app
674  	 */
675	sc->aen_cmd = cmd;
676
677	/*
678  	  Issue the aen registration frame
679  	*/
680  	if (mrsas_issue_dcmd(sc, cmd)){
681       	device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
682       	return(1);
683   	}
684
685	return 0;
686}
687/**
688 * mrsas_start_aen - Subscribes to AEN during driver load time
689 * @sc:              Adapter soft state
690 */
691static int mrsas_start_aen(struct mrsas_softc *sc)
692{
693	struct mrsas_evt_log_info eli;
694	union mrsas_evt_class_locale class_locale;
695
696
697	/* Get the latest sequence number from FW*/
698
699	memset(&eli, 0, sizeof(eli));
700
701	if (mrsas_get_seq_num(sc, &eli))
702		return -1;
703
704	/* Register AEN with FW for latest sequence number plus 1*/
705	class_locale.members.reserved = 0;
706	class_locale.members.locale = MR_EVT_LOCALE_ALL;
707	class_locale.members.class = MR_EVT_CLASS_DEBUG;
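	/*
	 * MR_EVT_CLASS_DEBUG is the lowest class value, so combined with
	 * MR_EVT_LOCALE_ALL this subscribes to every event class and locale
	 * (see the class/locale notes in mrsas_register_aen()).
	 */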
708
709	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
710				class_locale.word);
711}
712
713/**
714 * mrsas_attach:            PCI entry point
715 * input:                   device struct pointer
716 *
717 * Performs setup of PCI and registers, initializes mutexes and
718 * linked lists, registers interrupts and CAM, and initializes
719 * the adapter/controller to its proper state.
720 */
721static int mrsas_attach(device_t dev)
722{
723    struct mrsas_softc *sc = device_get_softc(dev);
724    uint32_t cmd, bar, error;
725
726    /* Look up our softc and initialize its fields. */
727    sc->mrsas_dev = dev;
728    sc->device_id = pci_get_device(dev);
729
730    mrsas_get_tunables(sc);
731
732    /*
733     * Set up PCI and registers
734     */
735    cmd = pci_read_config(dev, PCIR_COMMAND, 2);
736    if ( (cmd & PCIM_CMD_PORTEN) == 0) {
737        return (ENXIO);
738    }
739    /* Force the busmaster enable bit on. */
740    cmd |= PCIM_CMD_BUSMASTEREN;
741    pci_write_config(dev, PCIR_COMMAND, cmd, 2);
742
743    //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4);
744    bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
745
746    sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */
747    if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
748                                &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
749                                == NULL) {
750        device_printf(dev, "Cannot allocate PCI registers\n");
751        goto attach_fail;
752    }
753    sc->bus_tag = rman_get_bustag(sc->reg_res);
754    sc->bus_handle = rman_get_bushandle(sc->reg_res);
755
756    /* Initialize mutexes */
757    mtx_init(&sc->sim_lock,  "mrsas_sim_lock", NULL, MTX_DEF);
758    mtx_init(&sc->pci_lock,  "mrsas_pci_lock", NULL, MTX_DEF);
759    mtx_init(&sc->io_lock,  "mrsas_io_lock", NULL, MTX_DEF);
760    mtx_init(&sc->aen_lock,  "mrsas_aen_lock", NULL, MTX_DEF);
761    mtx_init(&sc->ioctl_lock,  "mrsas_ioctl_lock", NULL, MTX_SPIN);
762    mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
763    mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
764    mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
765
766    /* Initialize linked lists */
767    TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
768    TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
769
770    atomic_set(&sc->fw_outstanding,0);
771
772	sc->io_cmds_highwater = 0;
773
774    /* Create a /dev entry for this device. */
775    sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
776        GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
777        device_get_unit(dev));
778    if (sc->mrsas_cdev)
779    	sc->mrsas_cdev->si_drv1 = sc;
780
781    sc->adprecovery = MRSAS_HBA_OPERATIONAL;
782	sc->UnevenSpanSupport = 0;
783
784    /* Initialize Firmware */
785    if (mrsas_init_fw(sc) != SUCCESS) {
786        goto attach_fail_fw;
787    }
788
789    /* Register SCSI mid-layer */
790    if ((mrsas_cam_attach(sc) != SUCCESS)) {
791        goto attach_fail_cam;
792    }
793
794    /* Register IRQs */
795    if (mrsas_setup_irq(sc) != SUCCESS) {
796        goto attach_fail_irq;
797    }
798
799    /* Enable Interrupts */
800    mrsas_enable_intr(sc);
801
802    error = mrsas_kproc_create(mrsas_ocr_thread, sc,
803        &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
804        device_get_unit(sc->mrsas_dev));
805    if (error) {
806        printf("Error %d starting rescan thread\n", error);
807        goto attach_fail_irq;
808    }
809
810    mrsas_setup_sysctl(sc);
811
812	/* Initiate AEN (Asynchronous Event Notification)*/
813
814	if (mrsas_start_aen(sc)) {
815		printf("Error: start aen failed\n");
816		goto fail_start_aen;
817	}
818
819    return (0);
820
821fail_start_aen:
822attach_fail_irq:
823    mrsas_teardown_intr(sc);
824attach_fail_cam:
825    mrsas_cam_detach(sc);
826attach_fail_fw:
827//attach_fail_raidmap:
828    mrsas_free_mem(sc);
829    mtx_destroy(&sc->sim_lock);
830    mtx_destroy(&sc->aen_lock);
831    mtx_destroy(&sc->pci_lock);
832    mtx_destroy(&sc->io_lock);
833    mtx_destroy(&sc->ioctl_lock);
834    mtx_destroy(&sc->mpt_cmd_pool_lock);
835    mtx_destroy(&sc->mfi_cmd_pool_lock);
836    mtx_destroy(&sc->raidmap_lock);
837attach_fail:
838    destroy_dev(sc->mrsas_cdev);
839    if (sc->reg_res){
840        bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
841                             sc->reg_res_id, sc->reg_res);
842    }
843    return (ENXIO);
844}
845
846/**
847 * mrsas_detach:            De-allocates and tears down resources
848 * input:                   device struct pointer
849 *
850 * This function is the entry point for device disconnect and detach.  It
851 * performs memory de-allocations, shutdown of the controller and various
852 * teardown and destroy resource functions.
853 */
854static int mrsas_detach(device_t dev)
855{
856    struct mrsas_softc *sc;
857    int i = 0;
858
859    sc = device_get_softc(dev);
860    sc->remove_in_progress = 1;
861    if(sc->ocr_thread_active)
862        wakeup(&sc->ocr_chan);
863    while(sc->reset_in_progress){
864        i++;
865        if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
866            mrsas_dprint(sc, MRSAS_INFO,
867                "[%2d]waiting for ocr to be finished\n",i);
868        }
869        pause("mr_shutdown", hz);
870    }
871    i = 0;
872    while(sc->ocr_thread_active){
873        i++;
874        if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
875            mrsas_dprint(sc, MRSAS_INFO,
876                "[%2d]waiting for "
877                "mrsas_ocr thread to quit ocr %d\n",i,
878                 sc->ocr_thread_active);
879        }
880        pause("mr_shutdown", hz);
881    }
882    mrsas_flush_cache(sc);
883    mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
884    mrsas_disable_intr(sc);
885    mrsas_cam_detach(sc);
886    mrsas_teardown_intr(sc);
887    mrsas_free_mem(sc);
888    mtx_destroy(&sc->sim_lock);
889    mtx_destroy(&sc->aen_lock);
890    mtx_destroy(&sc->pci_lock);
891    mtx_destroy(&sc->io_lock);
892    mtx_destroy(&sc->ioctl_lock);
893    mtx_destroy(&sc->mpt_cmd_pool_lock);
894    mtx_destroy(&sc->mfi_cmd_pool_lock);
895    mtx_destroy(&sc->raidmap_lock);
896    if (sc->reg_res){
897        bus_release_resource(sc->mrsas_dev,
898                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
899    }
900    destroy_dev(sc->mrsas_cdev);
901    if (sc->sysctl_tree != NULL)
902        sysctl_ctx_free(&sc->sysctl_ctx);
903    return (0);
904}
905
906/**
907 * mrsas_free_mem:          Frees allocated memory
908 * input:                   Adapter instance soft state
909 *
910 * This function is called from mrsas_detach() to free previously allocated
911 * memory.
912 */
913void mrsas_free_mem(struct mrsas_softc *sc)
914{
915    int i;
916    u_int32_t max_cmd;
917    struct mrsas_mfi_cmd *mfi_cmd;
918    struct mrsas_mpt_cmd *mpt_cmd;
919
920	/*
921     * Free RAID map memory
922     */
923    for (i=0; i < 2; i++)
924    {
925        if (sc->raidmap_phys_addr[i])
926            bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
927        if (sc->raidmap_mem[i] != NULL)
928            bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
929        if (sc->raidmap_tag[i] != NULL)
930            bus_dma_tag_destroy(sc->raidmap_tag[i]);
931    }
932
933    /*
934     * Free version buffer memory
935     */
936    if (sc->verbuf_phys_addr)
937        bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
938    if (sc->verbuf_mem != NULL)
939        bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
940    if (sc->verbuf_tag != NULL)
941        bus_dma_tag_destroy(sc->verbuf_tag);
942
943
944    /*
945     * Free sense buffer memory
946     */
947    if (sc->sense_phys_addr)
948        bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
949    if (sc->sense_mem != NULL)
950        bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
951    if (sc->sense_tag != NULL)
952        bus_dma_tag_destroy(sc->sense_tag);
953
954    /*
955     * Free chain frame memory
956     */
957    if (sc->chain_frame_phys_addr)
958        bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
959    if (sc->chain_frame_mem != NULL)
960        bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
961    if (sc->chain_frame_tag != NULL)
962        bus_dma_tag_destroy(sc->chain_frame_tag);
963
964    /*
965     * Free IO Request memory
966     */
967    if (sc->io_request_phys_addr)
968        bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
969    if (sc->io_request_mem != NULL)
970        bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
971    if (sc->io_request_tag != NULL)
972        bus_dma_tag_destroy(sc->io_request_tag);
973
974    /*
975     * Free Reply Descriptor memory
976     */
977    if (sc->reply_desc_phys_addr)
978        bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
979    if (sc->reply_desc_mem != NULL)
980        bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
981    if (sc->reply_desc_tag != NULL)
982        bus_dma_tag_destroy(sc->reply_desc_tag);
983
984    /*
985     * Free event detail memory
986     */
987    if (sc->evt_detail_phys_addr)
988        bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
989    if (sc->evt_detail_mem != NULL)
990        bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
991    if (sc->evt_detail_tag != NULL)
992        bus_dma_tag_destroy(sc->evt_detail_tag);
993
994    /*
995     * Free MFI frames
996     */
997	if (sc->mfi_cmd_list) {
998    	for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
999        	mfi_cmd = sc->mfi_cmd_list[i];
1000        	mrsas_free_frame(sc, mfi_cmd);
1001		}
1002    }
1003    if (sc->mficmd_frame_tag != NULL)
1004        bus_dma_tag_destroy(sc->mficmd_frame_tag);
1005
1006    /*
1007     * Free MPT internal command list
1008     */
1009    max_cmd = sc->max_fw_cmds;
1010	if (sc->mpt_cmd_list) {
1011    	for (i = 0; i < max_cmd; i++) {
1012        	mpt_cmd = sc->mpt_cmd_list[i];
1013        	bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1014        	free(sc->mpt_cmd_list[i], M_MRSAS);
1015    	}
1016    	free(sc->mpt_cmd_list, M_MRSAS);
1017    	sc->mpt_cmd_list = NULL;
1018	}
1019
1020    /*
1021     * Free MFI internal command list
1022     */
1023
1024	if (sc->mfi_cmd_list) {
1025    	for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1026        	free(sc->mfi_cmd_list[i], M_MRSAS);
1027    	}
1028    	free(sc->mfi_cmd_list, M_MRSAS);
1029    	sc->mfi_cmd_list = NULL;
1030	}
1031
1032    /*
1033     * Free request descriptor memory
1034     */
1035    free(sc->req_desc, M_MRSAS);
1036    sc->req_desc = NULL;
1037
1038    /*
1039     * Destroy parent tag
1040     */
1041    if (sc->mrsas_parent_tag != NULL)
1042        bus_dma_tag_destroy(sc->mrsas_parent_tag);
1043}
1044
1045/**
1046 * mrsas_teardown_intr:        Teardown interrupt
1047 * input:                      Adapter instance soft state
1048 *
1049 * This function is called from mrsas_detach() to tear down and release
1050 * the bus interrupt resource.
1051 */
1052void mrsas_teardown_intr(struct mrsas_softc *sc)
1053{
1054    if (sc->intr_handle)
1055        bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle);
1056    if (sc->mrsas_irq != NULL)
1057        bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq);
1058    sc->intr_handle = NULL;
1059}
1060
1061/**
1062 * mrsas_suspend:          Suspend entry point
1063 * input:                  Device struct pointer
1064 *
1065 * This function is the entry point for system suspend from the OS.
1066 */
1067static int mrsas_suspend(device_t dev)
1068{
1069    struct mrsas_softc *sc;
1070
1071    sc = device_get_softc(dev);
1072    return (0);
1073}
1074
1075/**
1076 * mrsas_resume:           Resume entry point
1077 * input:                  Device struct pointer
1078 *
1079 * This function is the entry point for system resume from the OS.
1080 */
1081static int mrsas_resume(device_t dev)
1082{
1083    struct mrsas_softc *sc;
1084
1085    sc = device_get_softc(dev);
1086    return (0);
1087}
1088
1089/**
1090 * mrsas_ioctl:       IOCtl commands entry point.
1091 *
1092 * This function is the entry point for IOCtls from the OS.  It calls the
1093 * appropriate function for processing depending on the command received.
1094 */
1095static int
1096mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td)
1097{
1098    struct mrsas_softc *sc;
1099    int ret = 0, i = 0;
1100
1101    sc = (struct mrsas_softc *)(dev->si_drv1);
1102
1103    if (sc->remove_in_progress) {
1104        mrsas_dprint(sc, MRSAS_INFO,
1105            "Driver remove or shutdown called.\n");
1106        return ENOENT;
1107    }
1108
1109    mtx_lock_spin(&sc->ioctl_lock);
1110    if (!sc->reset_in_progress) {
1111        mtx_unlock_spin(&sc->ioctl_lock);
1112        goto do_ioctl;
1113    }
1114
1115    /* Release ioctl_lock, and wait for OCR
1116     * to be finished */
1117    mtx_unlock_spin(&sc->ioctl_lock);
1118    while(sc->reset_in_progress){
1119        i++;
1120        if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
1121            mrsas_dprint(sc, MRSAS_INFO,
1122                "[%2d]waiting for "
1123                "OCR to be finished %d\n",i,
1124                 sc->ocr_thread_active);
1125        }
1126        pause("mr_ioctl", hz);
1127    }
1128
1129do_ioctl:
1130    switch (cmd) {
1131        case MRSAS_IOC_FIRMWARE_PASS_THROUGH:
1132            ret = mrsas_passthru(sc, (void *)arg);
1133            break;
1134        case MRSAS_IOC_SCAN_BUS:
1135            ret = mrsas_bus_scan(sc);
1136            break;
1137    }
1138
1139    return (ret);
1140}
1141
1142/**
1143 * mrsas_setup_irq:   Set up interrupt.
1144 * input:             Adapter instance soft state
1145 *
1146 * This function allocates the interrupt as a shareable bus resource
1147 * (RF_SHAREABLE) that is activated immediately (RF_ACTIVE) and then
1148 * hooks up the interrupt handler.
1149 */
1150static int mrsas_setup_irq(struct mrsas_softc *sc)
1151{
1152    sc->irq_id = 0;
1153    sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ,
1154                        &sc->irq_id, RF_SHAREABLE | RF_ACTIVE);
1155    if (sc->mrsas_irq == NULL){
1156        device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n");
1157        return (FAIL);
1158    }
1159    if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE|INTR_TYPE_CAM,
1160                       NULL, mrsas_isr, sc, &sc->intr_handle)) {
1161        device_printf(sc->mrsas_dev, "Cannot set up interrupt\n");
1162        return (FAIL);
1163    }
1164
1165    return (0);
1166}
1167
1168/*
1169 * mrsas_isr:        ISR entry point
1170 * input:            argument pointer
1171 *
1172 * This function is the interrupt service routine entry point.  There
1173 * are two types of interrupts, state change interrupt and response
1174 * interrupt.  If an interrupt is not ours, we just return.
1175 */
1176void mrsas_isr(void *arg)
1177{
1178    struct mrsas_softc *sc = (struct mrsas_softc *)arg;
1179    int status;
1180
1181    /* Clear FW state change interrupt */
1182    status = mrsas_clear_intr(sc);
1183
1184    /* Not our interrupt */
1185    if (!status)
1186        return;
1187
1188    /* If we are resetting, bail */
1189    if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1190        printf("Entered ISR while OCR is in progress.\n");
1191        mrsas_clear_intr(sc);
1192        return;
1193    }
1194    /* Process for reply request and clear response interrupt */
1195    if (mrsas_complete_cmd(sc) != SUCCESS)
1196        mrsas_clear_intr(sc);
1197
1198    return;
1199}
1200
1201/*
1202 * mrsas_complete_cmd:        Process reply request
1203 * input:                     Adapter instance soft state
1204 *
1205 * This function is called from mrsas_isr() to process reply request and
1206 * clear response interrupt. Processing of the reply request entails
1207 * walking through the reply descriptor array for the command request
1208 * pended from Firmware.  We look at the Function field to determine
1209 * the command type and perform the appropriate action.  Before we
1210 * return, we clear the response interrupt.
1211 */
1212static int mrsas_complete_cmd(struct mrsas_softc *sc)
1213{
1214    Mpi2ReplyDescriptorsUnion_t *desc;
1215    MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
1216    MRSAS_RAID_SCSI_IO_REQUEST  *scsi_io_req;
1217    struct mrsas_mpt_cmd *cmd_mpt;
1218    struct mrsas_mfi_cmd *cmd_mfi;
1219    u_int8_t arm, reply_descript_type;
1220    u_int16_t smid, num_completed;
1221    u_int8_t status, extStatus;
1222    union desc_value desc_val;
1223    PLD_LOAD_BALANCE_INFO lbinfo;
1224    u_int32_t device_id;
1225    int threshold_reply_count = 0;
1226
1227
1228    /* If we have a hardware error, there is no need to continue */
1229    if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
1230        return (DONE);
1231
1232    desc = sc->reply_desc_mem;
1233    desc += sc->last_reply_idx;
1234
1235    reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1236
1237    desc_val.word = desc->Words;
1238    num_completed = 0;
1239
1240    reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1241
1242    /* Find our reply descriptor for the command and process */
1243    while((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
1244    {
1245        smid = reply_desc->SMID;
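        /* SMIDs are 1-based, so index the 0-based mpt_cmd_list with smid - 1. */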
1246        cmd_mpt = sc->mpt_cmd_list[smid -1];
1247        scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;
1248
1249        status = scsi_io_req->RaidContext.status;
1250        extStatus = scsi_io_req->RaidContext.exStatus;
1251
1252        switch (scsi_io_req->Function)
1253        {
1254            case MPI2_FUNCTION_SCSI_IO_REQUEST :  /*Fast Path IO.*/
1255                device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
1256                lbinfo = &sc->load_balance_info[device_id];
1257                if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
1258                    arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
1259                    atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
1260                    cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
1261                }
1262                // Fall through and complete the IO
1263            case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
1264                mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
1265                mrsas_cmd_done(sc, cmd_mpt);
1266                scsi_io_req->RaidContext.status = 0;
1267                scsi_io_req->RaidContext.exStatus = 0;
1268                atomic_dec(&sc->fw_outstanding);
1269                break;
1270            case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
1271                cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
1272                mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
1273                cmd_mpt->flags = 0;
1274                mrsas_release_mpt_cmd(cmd_mpt);
1275                break;
1276        }
1277
1278        sc->last_reply_idx++;
1279        if (sc->last_reply_idx >= sc->reply_q_depth)
1280            sc->last_reply_idx = 0;
1281
1282        desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
1283        num_completed++;
1284        threshold_reply_count++;
1285
1286        /* Get the next reply descriptor */
1287        if (!sc->last_reply_idx)
1288            desc = sc->reply_desc_mem;
1289        else
1290            desc++;
1291
1292        reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1293        desc_val.word = desc->Words;
1294
1295        reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1296
1297        if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1298            break;
1299
1300        /*
1301         * Write to reply post index after completing threshold reply count
1302         * and still there are more replies in reply queue pending to be
1303         * completed.
1304         */
1305        if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
1306            mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
1307                            sc->last_reply_idx);
1308            threshold_reply_count = 0;
1309        }
1310    }
1311
1312    /* No match, just return */
1313    if (num_completed == 0)
1314        return (DONE);
1315
1316    /* Clear response interrupt */
1317    mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),sc->last_reply_idx);
1318
1319    return(0);
1320}
1321
1322/*
1323 * mrsas_map_mpt_cmd_status:  Map FW command status to CAM status.
1324 * input:                     MPT command, FW status and extended status
1325 *
1326 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
1327 * It checks the command status and maps the appropriate CAM status for the CCB.
1328 */
1329void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
1330{
1331    struct mrsas_softc *sc = cmd->sc;
1332    u_int8_t *sense_data;
1333
1334    switch (status) {
1335        case MFI_STAT_OK:
1336            cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
1337            break;
1338        case MFI_STAT_SCSI_IO_FAILED:
1339        case MFI_STAT_SCSI_DONE_WITH_ERROR:
1340            cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
1341            sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
1342            if (sense_data) {
1343                /* For now just copy 18 bytes back */
1344                memcpy(sense_data, cmd->sense, 18);
1345                cmd->ccb_ptr->csio.sense_len = 18;
1346                cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
1347            }
1348            break;
1349        case MFI_STAT_LD_OFFLINE:
1350        case MFI_STAT_DEVICE_NOT_FOUND:
1351            if (cmd->ccb_ptr->ccb_h.target_lun)
1352                cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
1353            else
1354                cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
1355            break;
1356        case MFI_STAT_CONFIG_SEQ_MISMATCH:
1357            /* Send status to the CAM layer to retry sending the command
1358             * without decrementing the retry counter. */
1359            cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
1360            break;
1361        default:
1362            device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
1363            cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
1364            cmd->ccb_ptr->csio.scsi_status = status;
1365    }
1366    return;
1367}
1368
1369/*
1370 * mrsas_alloc_mem:  Allocate DMAable memory.
1371 * input:            Adapter instance soft state
1372 *
1373 * This function creates the parent DMA tag and allocates DMAable memory.
1374 * The DMA tag describes the constraints of the DMA mapping. Allocated memory is
1375 * mapped into kernel virtual address space; the callback receives the physical address.
1376 */
1377static int mrsas_alloc_mem(struct mrsas_softc *sc)
1378{
1379    u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
1380              chain_frame_size, evt_detail_size;
1381
1382    /*
1383     * Allocate parent DMA tag
1384     */
1385    if (bus_dma_tag_create(NULL,                   /* parent */
1386                           1,         /* alignment */
1387                           0,                      /* boundary */
1388                           BUS_SPACE_MAXADDR,     /* lowaddr */
1389                           BUS_SPACE_MAXADDR,      /* highaddr */
1390                           NULL, NULL,             /* filter, filterarg */
1391                           MRSAS_MAX_IO_SIZE,/* maxsize */
1392                           MRSAS_MAX_SGL, /* nsegments */
1393                           MRSAS_MAX_IO_SIZE,/* maxsegsize */
1394                           0,                      /* flags */
1395                           NULL, NULL,             /* lockfunc, lockarg */
1396                           &sc->mrsas_parent_tag   /* tag */
1397                           )) {
1398           device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
1399           return(ENOMEM);
1400    }
1401
1402    /*
1403     * Allocate for version buffer
1404     */
1405    verbuf_size = MRSAS_MAX_NAME_LENGTH*(sizeof(bus_addr_t));
1406    if (bus_dma_tag_create(sc->mrsas_parent_tag,   // parent
1407                           1, 0,                   // algnmnt, boundary
1408                           BUS_SPACE_MAXADDR_32BIT,// lowaddr
1409                           BUS_SPACE_MAXADDR,      // highaddr
1410                           NULL, NULL,             // filter, filterarg
1411                           verbuf_size,           // maxsize
1412                           1,                      // nsegments
1413                           verbuf_size,           // maxsegsize
1414                           BUS_DMA_ALLOCNOW,       // flags
1415                           NULL, NULL,             // lockfunc, lockarg
1416                           &sc->verbuf_tag)) {
1417            device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
1418            return (ENOMEM);
1419    }
1420    if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
1421        BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
1422            device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
1423            return (ENOMEM);
1424    }
1425    bzero(sc->verbuf_mem, verbuf_size);
1426    if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
1427        verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)){
1428            device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
1429            return(ENOMEM);
1430    }
1431
1432    /*
1433     * Allocate IO Request Frames
1434     */
1435    io_req_size = sc->io_frames_alloc_sz;
1436    if (bus_dma_tag_create( sc->mrsas_parent_tag,   // parent
1437                            16, 0,                   // algnmnt, boundary
1438                            BUS_SPACE_MAXADDR_32BIT,// lowaddr
1439                            BUS_SPACE_MAXADDR,      // highaddr
1440                            NULL, NULL,             // filter, filterarg
1441                            io_req_size,            // maxsize
1442                            1,                      // nsegments
1443                            io_req_size,            // maxsegsize
1444                            BUS_DMA_ALLOCNOW,       // flags
1445                            NULL, NULL,             // lockfunc, lockarg
1446                            &sc->io_request_tag)) {
1447        device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
1448        return (ENOMEM);
1449    }
1450    if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
1451                    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
1452        device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
1453        return (ENOMEM);
1454    }
1455    bzero(sc->io_request_mem, io_req_size);
1456    if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
1457                        sc->io_request_mem, io_req_size, mrsas_addr_cb,
1458                        &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
1459        device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
1460        return (ENOMEM);
1461    }
1462
1463    /*
1464     * Allocate Chain Frames
1465     */
1466    chain_frame_size = sc->chain_frames_alloc_sz;
1467    if (bus_dma_tag_create( sc->mrsas_parent_tag,   // parent
1468                            4, 0,                   // algnmnt, boundary
1469                            BUS_SPACE_MAXADDR_32BIT,// lowaddr
1470                            BUS_SPACE_MAXADDR,      // highaddr
1471                            NULL, NULL,             // filter, filterarg
1472                            chain_frame_size,       // maxsize
1473                            1,                      // nsegments
1474                            chain_frame_size,       // maxsegsize
1475                            BUS_DMA_ALLOCNOW,       // flags
1476                            NULL, NULL,             // lockfunc, lockarg
1477                            &sc->chain_frame_tag)) {
1478        device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
1479        return (ENOMEM);
1480    }
1481    if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
1482                    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
1483        device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
1484        return (ENOMEM);
1485    }
1486    bzero(sc->chain_frame_mem, chain_frame_size);
1487    if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
1488                        sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
1489                        &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
1490        device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
1491        return (ENOMEM);
1492    }
1493
1494    /*
1495     * Allocate Reply Descriptor Array
1496     */
1497    reply_desc_size = sc->reply_alloc_sz;
1498    if (bus_dma_tag_create( sc->mrsas_parent_tag,   // parent
1499                            16, 0,                   // algnmnt, boundary
1500                            BUS_SPACE_MAXADDR_32BIT,// lowaddr
1501                            BUS_SPACE_MAXADDR,      // highaddr
1502                            NULL, NULL,             // filter, filterarg
1503                            reply_desc_size,        // maxsize
1504                            1,                      // nsegments
1505                            reply_desc_size,        // maxsegsize
1506                            BUS_DMA_ALLOCNOW,       // flags
1507                            NULL, NULL,             // lockfunc, lockarg
1508                            &sc->reply_desc_tag)) {
1509        device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
1510        return (ENOMEM);
1511    }
1512    if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
1513                    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
1514        device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
1515        return (ENOMEM);
1516    }
1517    if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
1518                        sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
1519                        &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
1520        device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
1521        return (ENOMEM);
1522    }
1523
1524    /*
1525     * Allocate Sense Buffer Array.  Keep in lower 4GB
1526     */
1527    sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
1528    if (bus_dma_tag_create(sc->mrsas_parent_tag,    // parent
1529                            64, 0,                   // algnmnt, boundary
1530                            BUS_SPACE_MAXADDR_32BIT,// lowaddr
1531                            BUS_SPACE_MAXADDR,      // highaddr
1532                            NULL, NULL,             // filter, filterarg
1533                            sense_size,             // maxsize
1534                            1,                      // nsegments
1535                            sense_size,             // maxsegsize
1536                            BUS_DMA_ALLOCNOW,       // flags
1537                            NULL, NULL,             // lockfunc, lockarg
1538                            &sc->sense_tag)) {
1539        device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
1540        return (ENOMEM);
1541    }
1542    if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
1543            BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
1544        device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
1545        return (ENOMEM);
1546    }
1547    if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
1548            sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
1549            BUS_DMA_NOWAIT)){
1550        device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
1551        return (ENOMEM);
1552    }
1553
1554    /*
1555     * Allocate for Event detail structure
1556     */
1557    evt_detail_size = sizeof(struct mrsas_evt_detail);
1558    if (bus_dma_tag_create( sc->mrsas_parent_tag,   // parent
1559                            1, 0,                   // algnmnt, boundary
1560                            BUS_SPACE_MAXADDR_32BIT,// lowaddr
1561                            BUS_SPACE_MAXADDR,      // highaddr
1562                            NULL, NULL,             // filter, filterarg
1563                            evt_detail_size,        // maxsize
1564                            1,                      // nsegments
1565                            evt_detail_size,        // maxsegsize
1566                            BUS_DMA_ALLOCNOW,       // flags
1567                            NULL, NULL,             // lockfunc, lockarg
1568                            &sc->evt_detail_tag)) {
1569        device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
1570        return (ENOMEM);
1571    }
1572    if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
1573                    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
1574        device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
1575        return (ENOMEM);
1576    }
1577    bzero(sc->evt_detail_mem, evt_detail_size);
1578    if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
1579                        sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
1580                        &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
1581        device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
1582        return (ENOMEM);
1583    }
1584
1585
1586   /*
1587    * Create a dma tag for data buffers; size will be the maximum
1588    * possible I/O size (280kB).
1589    */
1590    if (bus_dma_tag_create(sc->mrsas_parent_tag,   // parent
1591                           1,         // alignment
1592                           0,                      // boundary
1593                           BUS_SPACE_MAXADDR,      // lowaddr
1594                           BUS_SPACE_MAXADDR,      // highaddr
1595                           NULL, NULL,             // filter, filterarg
1596                           MRSAS_MAX_IO_SIZE,      // maxsize
1597                           MRSAS_MAX_SGL,          // nsegments
1598                           MRSAS_MAX_IO_SIZE,      // maxsegsize
1599                           BUS_DMA_ALLOCNOW,       // flags
1600                           busdma_lock_mutex,      // lockfunc
1601                           &sc->io_lock,           // lockfuncarg
1602                           &sc->data_tag)) {
1603        device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
1604        return(ENOMEM);
1605    }
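    /*
     * Each of the fixed DMA buffers above follows the same busdma pattern:
     * create a tag, allocate DMA-safe memory, then load the map to obtain
     * the bus address via mrsas_addr_cb().  The data_tag is different: only
     * the tag is created here, and per-I/O maps are created later in
     * mrsas_alloc_mpt_cmds().  A sketch of the matching teardown (assuming
     * the usual reverse-order cleanup, as done for the IOC Init buffer in
     * mrsas_free_ioc_cmd() below):
     *
     *     bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
     *     bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem,
     *         sc->chain_frame_dmamap);
     *     bus_dma_tag_destroy(sc->chain_frame_tag);
     */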
1606
1607    return(0);
1608}
1609
1610/*
1611 * mrsas_addr_cb:   Callback function of bus_dmamap_load()
1612 * input:           callback argument,
1613 *                  machine dependent type that describes DMA segments,
1614 *                  number of segments,
1615 *                  error code.
1616 *
1617 * This callback receives the mapping created by bus_dmamap_load().  Only
1618 * the bus address of the first segment is of interest here, so that
1619 * address is saved and the remaining information is ignored.
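 * The tags loaded through this callback are created with nsegments == 1,
 * so segs[0] is the only segment.  The error argument is not checked here;
 * callers check the return value of bus_dmamap_load() instead.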
1620 */
1621void
1622mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1623{
1624    bus_addr_t *addr;
1625
1626    addr = arg;
1627    *addr = segs[0].ds_addr;
1628}
1629
1630/*
1631 * mrsas_setup_raidmap:  Set up RAID map.
1632 * input:                Adapter instance soft state
1633 *
1634 * Allocate DMA memory for the RAID maps and perform setup.
1635 */
1636static int mrsas_setup_raidmap(struct mrsas_softc *sc)
1637{
1638    sc->map_sz = sizeof(MR_FW_RAID_MAP) +
1639                (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
1640
1641    for (int i=0; i < 2; i++)
1642    {
1643        if (bus_dma_tag_create(sc->mrsas_parent_tag,    // parent
1644                            4, 0,                   // algnmnt, boundary
1645                            BUS_SPACE_MAXADDR_32BIT,// lowaddr
1646                            BUS_SPACE_MAXADDR,      // highaddr
1647                            NULL, NULL,             // filter, filterarg
1648                            sc->map_sz,             // maxsize
1649                            1,                      // nsegments
1650                            sc->map_sz,             // maxsegsize
1651                            BUS_DMA_ALLOCNOW,       // flags
1652                            NULL, NULL,             // lockfunc, lockarg
1653                            &sc->raidmap_tag[i])) {
1654            device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n");
1655            return (ENOMEM);
1656        }
1657        if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i],
1658                BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
1659            device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n");
1660            return (ENOMEM);
1661        }
1662        if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
1663                sc->raidmap_mem[i], sc->map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i],
1664                BUS_DMA_NOWAIT)){
1665            device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
1666            return (ENOMEM);
1667        }
1668        if (!sc->raidmap_mem[i]) {
1669            device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n");
1670            return (ENOMEM);
1671        }
1672    }
1673
1674    if (!mrsas_get_map_info(sc))
1675        mrsas_sync_map_info(sc);
1676
1677    return (0);
1678}
1679
1680/**
1681 * mrsas_init_fw:      Initialize Firmware
1682 * input:              Adapter soft state
1683 *
1684 * Calls mrsas_transition_to_ready() to make sure Firmware is in an
1685 * operational state and calls mrsas_init_adapter() to send the IOC_INIT
1686 * command to Firmware.  Once the IOC_INIT response is received from
1687 * Firmware, internal commands are issued to get the controller info.
1688 * Note: the code relating to get_pdlist, get_ld_list and max_sectors
1689 * is currently not being used and is left here as a placeholder.
1690 */
1691static int mrsas_init_fw(struct mrsas_softc *sc)
1692{
1693    u_int32_t max_sectors_1;
1694    u_int32_t max_sectors_2;
1695    u_int32_t tmp_sectors;
1696    struct mrsas_ctrl_info *ctrl_info;
1697
1698    int ret, ocr = 0;
1699
1700
1701    /* Make sure Firmware is ready */
1702    ret = mrsas_transition_to_ready(sc, ocr);
1703    if (ret != SUCCESS) {
1704        return(ret);
1705    }
1706
1707    /* Get operational params, sge flags, send init cmd to ctlr */
1708    if (mrsas_init_adapter(sc) != SUCCESS){
1709        device_printf(sc->mrsas_dev, "Adapter initialization failed.\n");
1710        return(1);
1711    }
1712
1713    /* Allocate internal commands for pass-thru */
1714    if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
1715        device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
1716        return(1);
1717    }
1718
1719    if (mrsas_setup_raidmap(sc) != SUCCESS) {
1720        device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
1721        return(1);
1722    }
1723
1724    /* For pass-thru, get PD/LD list and controller info */
1725    memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
1726    mrsas_get_pd_list(sc);
1727
1728    memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
1729    mrsas_get_ld_list(sc);
1730
1733    ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);
1734
1735    /*
1736     * Compute the max allowed sectors per IO: The controller info has two
1737     * limits on max sectors. Driver should use the minimum of these two.
1738     *
1739     * 1 << stripe_sz_ops.min = max sectors per strip
1740     *
1741     * Note that older firmware (< FW ver 30) did not report the information
1742     * needed to calculate max_sectors_1, so that value always ended up as zero.
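     *
     * As a purely hypothetical example: with stripe_sz_ops.min == 7
     * (1 << 7 = 128 sectors, i.e. 64 KB strips) and max_strips_per_io == 42,
     * max_sectors_1 = 128 * 42 = 5376 sectors, and the smaller of that and
     * max_request_size is used.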
1743     */
1744    tmp_sectors = 0;
1745    if (ctrl_info && !mrsas_get_ctrl_info(sc, ctrl_info)) {
1746        max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
1747                    ctrl_info->max_strips_per_io;
1748        max_sectors_2 = ctrl_info->max_request_size;
1749        tmp_sectors = min(max_sectors_1 , max_sectors_2);
1750        sc->disableOnlineCtrlReset =
1751            ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
1752        sc->UnevenSpanSupport =
1753            ctrl_info->adapterOperations2.supportUnevenSpans;
1754        if(sc->UnevenSpanSupport) {
1755            device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n",
1756                sc->UnevenSpanSupport);
1757            if (MR_ValidateMapInfo(sc))
1758           	    sc->fast_path_io = 1;
1759            else
1760                sc->fast_path_io = 0;
1761
1762        }
1763    }
1764    sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;
1765
1766    if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
1767        sc->max_sectors_per_req = tmp_sectors;
1768
1769    if (ctrl_info)
1770        free(ctrl_info, M_MRSAS);
1771
1772    return(0);
1773}
1774
1775/**
1776 * mrsas_init_adapter:     Initializes the adapter/controller
1777 * input:                  Adapter soft state
1778 *
1779 * Prepares for issuing the IOC Init cmd to FW to initialize the
1780 * ROC/controller.  The FW status register is read to determine the number
1781 * of commands that are supported.  All memory allocations for IO are based
1782 * on max_cmd.  The related sizing calculations are performed here.
1783 */
1784int mrsas_init_adapter(struct mrsas_softc *sc)
1785{
1786    uint32_t status;
1787    u_int32_t max_cmd;
1788    int ret;
1789
1790    /* Read FW status register */
1791    status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
1792
1793    /* Get operational params from status register */
1794    sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;
1795
1796    /* Decrement the max supported by 1, to correlate with FW */
1797    sc->max_fw_cmds = sc->max_fw_cmds-1;
1798    max_cmd = sc->max_fw_cmds;
1799
1800    /* Determine allocation size of command frames */
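    /*
     * The reply queue depth is (2 * max_cmd + 1) rounded up to a multiple
     * of 16; for a hypothetical max_cmd of 927 that is 1855 entries, which
     * rounds up to 1856.
     */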
1801    sc->reply_q_depth = ((max_cmd *2 +1 +15)/16*16);
1802    sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
1803    sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
1804    sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
1805    sc->chain_frames_alloc_sz = 1024 * max_cmd;
1806    sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1807        offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;
1808
1809    sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
1810    sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
1811
1812    /* Used for pass thru MFI frame (DCMD) */
1813    sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;
1814
1815    sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1816        sizeof(MPI2_SGE_IO_UNION))/16;
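    /*
     * Both chain offsets above are expressed in 16-byte units (hence the
     * division by 16), which appears to be the granularity the firmware
     * expects for these offset fields.
     */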
1817
1818    sc->last_reply_idx = 0;
1819
1820    ret = mrsas_alloc_mem(sc);
1821    if (ret != SUCCESS)
1822        return(ret);
1823
1824    ret = mrsas_alloc_mpt_cmds(sc);
1825    if (ret != SUCCESS)
1826        return(ret);
1827
1828    ret = mrsas_ioc_init(sc);
1829    if (ret != SUCCESS)
1830        return(ret);
1831
1832
1833    return(0);
1834}
1835
1836/**
1837 * mrsas_alloc_ioc_cmd:   Allocates memory for IOC Init command
1838 * input:                 Adapter soft state
1839 *
1840 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
1841 */
1842int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
1843{
1844    int ioc_init_size;
1845
1846    /* Allocate IOC INIT command */
1847    ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
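    /*
     * The extra 1024 bytes at the start of this buffer hold the MFI init
     * frame; the MPI2 IOC INIT request itself is placed at offset 1024
     * (see mrsas_ioc_init() below).
     */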
1848    if (bus_dma_tag_create( sc->mrsas_parent_tag,   // parent
1849                            1, 0,                   // algnmnt, boundary
1850                            BUS_SPACE_MAXADDR_32BIT,// lowaddr
1851                            BUS_SPACE_MAXADDR,      // highaddr
1852                            NULL, NULL,             // filter, filterarg
1853                            ioc_init_size,          // maxsize
1854                            1,                      // nsegments
1855                            ioc_init_size,          // maxsegsize
1856                            BUS_DMA_ALLOCNOW,       // flags
1857                            NULL, NULL,             // lockfunc, lockarg
1858                            &sc->ioc_init_tag)) {
1859        device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
1860        return (ENOMEM);
1861    }
1862    if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
1863            BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
1864        device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
1865        return (ENOMEM);
1866    }
1867    bzero(sc->ioc_init_mem, ioc_init_size);
1868    if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
1869            sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
1870            &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
1871        device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
1872        return (ENOMEM);
1873    }
1874
1875    return (0);
1876}
1877
1878/**
1879 * mrsas_free_ioc_cmd:   Frees memory allocated for the IOC Init command
1880 * input:                Adapter soft state
1881 *
1882 * Deallocates memory of the IOC Init cmd.
1883 */
1884void mrsas_free_ioc_cmd(struct mrsas_softc *sc)
1885{
1886    if (sc->ioc_init_phys_mem)
1887        bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
1888    if (sc->ioc_init_mem != NULL)
1889        bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
1890    if (sc->ioc_init_tag != NULL)
1891        bus_dma_tag_destroy(sc->ioc_init_tag);
1892}
1893
1894/**
1895 * mrsas_ioc_init:     Sends IOC Init command to FW
1896 * input:              Adapter soft state
1897 *
1898 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
1899 */
1900int mrsas_ioc_init(struct mrsas_softc *sc)
1901{
1902    struct mrsas_init_frame *init_frame;
1903    pMpi2IOCInitRequest_t   IOCInitMsg;
1904    MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
1905    u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
1906    bus_addr_t phys_addr;
1907    int i, retcode = 0;
1908
1909    /* Allocate memory for the IOC INIT command */
1910    if (mrsas_alloc_ioc_cmd(sc)) {
1911        device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
1912        return(1);
1913    }
1914
1915    IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
1916    IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
1917    IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
1918    IOCInitMsg->MsgVersion = MPI2_VERSION;
1919    IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
1920    IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
1921    IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
1922    IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
1923    IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
1924
1925    init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
1926    init_frame->cmd = MFI_CMD_INIT;
1927    init_frame->cmd_status = 0xFF;
1928    init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
1929
1930    if (sc->verbuf_mem) {
1931        snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
1932                MRSAS_VERSION);
1933        init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
1934        init_frame->driver_ver_hi = 0;
1935    }
1936
1937    phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
1938    init_frame->queue_info_new_phys_addr_lo = phys_addr;
1939    init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
1940
1941    req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
1942    req_desc.MFAIo.RequestFlags =
1943        (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1944
1945    mrsas_disable_intr(sc);
1946    mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
1948    mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);
1949
1950    /*
1951     * Poll for the Firmware response.  Although the DELAY() busy-wait can
1952     * block the CPU, each individual delay interval is only 1 millisecond
1953     * (for at most max_wait seconds in total).
1954     */
1955    if (init_frame->cmd_status == 0xFF) {
1956        for (i=0; i < (max_wait * 1000); i++){
1957            if (init_frame->cmd_status == 0xFF)
1958                DELAY(1000);
1959            else
1960                break;
1961        }
1962    }
1963
1964    if (init_frame->cmd_status == 0)
1965         mrsas_dprint(sc, MRSAS_OCR,
1966               "IOC INIT response received from FW.\n");
1968    else
1969    {
1970        if (init_frame->cmd_status == 0xFF)
1971            device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
1972        else
1973            device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
1974        retcode = 1;
1975    }
1976
1977    mrsas_free_ioc_cmd(sc);
1978    return (retcode);
1979}
1980
1981/**
1982 * mrsas_alloc_mpt_cmds:  Allocates the command packets
1983 * input:                 Adapter instance soft state
1984 *
1985 * This function allocates the internal commands for IOs. Each command that is
1986 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
1987 * An array is allocated with mrsas_mpt_cmd context.  The free commands are
1988 * maintained in a linked list (cmd pool). SMID value range is from 1 to
1989 * max_fw_cmds.
1990 */
1991int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
1992{
1993    int i, j;
1994    u_int32_t max_cmd;
1995    struct mrsas_mpt_cmd *cmd;
1996    pMpi2ReplyDescriptorsUnion_t reply_desc;
1997    u_int32_t offset, chain_offset, sense_offset;
1998    bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
1999    u_int8_t *io_req_base, *chain_frame_base, *sense_base;
2000
2001    max_cmd = sc->max_fw_cmds;
2002
2003    sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
2004    if (!sc->req_desc) {
2005        device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
2006        return(ENOMEM);
2007    }
2008    memset(sc->req_desc, 0, sc->request_alloc_sz);
2009
2010    /*
2011     * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the
2012     * dynamic array first and then allocate individual commands.
2013     */
2014    sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
2015    if (!sc->mpt_cmd_list) {
2016        device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
2017        return(ENOMEM);
2018    }
2019    memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
2020    for (i = 0; i < max_cmd; i++) {
2021        sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
2022                                 M_MRSAS, M_NOWAIT);
2023        if (!sc->mpt_cmd_list[i]) {
2024            for (j = 0; j < i; j++)
2025                free(sc->mpt_cmd_list[j],M_MRSAS);
2026            free(sc->mpt_cmd_list, M_MRSAS);
2027            sc->mpt_cmd_list = NULL;
2028            return(ENOMEM);
2029        }
2030    }
2031
2032    io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2033    io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
2034    chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
2035    chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
2036    sense_base = (u_int8_t*)sc->sense_mem;
2037    sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
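    /*
     * Carve the pre-allocated regions into per-command slices: command i
     * gets one MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE IO frame (the very
     * first frame is skipped, hence the offset added to io_req_base above),
     * a 1024-byte chain frame and an MRSAS_SENSE_LEN sense buffer.  SMIDs
     * are 1-based (cmd->index = i + 1).
     */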
2038    for (i = 0; i < max_cmd; i++) {
2039        cmd = sc->mpt_cmd_list[i];
2040        offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
2041        chain_offset = 1024 * i;
2042        sense_offset = MRSAS_SENSE_LEN * i;
2043        memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
2044        cmd->index = i + 1;
2045        cmd->ccb_ptr = NULL;
2046        callout_init(&cmd->cm_callout, 0);
2047        cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
2048        cmd->sc = sc;
2049        cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
2050        memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
2051        cmd->io_request_phys_addr = io_req_base_phys + offset;
2052        cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
2053        cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
2054        cmd->sense = sense_base + sense_offset;
2055        cmd->sense_phys_addr = sense_base_phys + sense_offset;
2056        if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
2057            return(FAIL);
2058        }
2059        TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
2060    }
2061
2062    /* Initialize reply descriptor array to 0xFFFFFFFF */
2063    reply_desc = sc->reply_desc_mem;
2064    for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2065        reply_desc->Words = MRSAS_ULONG_MAX;
2066    }
2067    return(0);
2068}
2069
2070/**
2071 * mrsas_fire_cmd:     Sends command to FW
2072 * input:              Adapter soft state
2073 *                     request descriptor address low
2074 *                     request descriptor address high
2075 *
2076 * This functions fires the command to Firmware by writing to the
2077 * inbound_low_queue_port and inbound_high_queue_port.
2078 */
2079void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
2080                   u_int32_t req_desc_hi)
2081{
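    /*
     * The 64-bit request descriptor is posted as two 32-bit register
     * writes; pci_lock keeps the low/high pair from interleaving with
     * descriptors posted concurrently from other contexts.
     */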
2082    mtx_lock(&sc->pci_lock);
2083    mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
2084                    req_desc_lo);
2085    mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
2086                    req_desc_hi);
2087    mtx_unlock(&sc->pci_lock);
2088}
2089
2090/**
2091 * mrsas_transition_to_ready:  Move FW to Ready state
2092 * input:                      Adapter instance soft state
2093 *
2094 * During initialization, the FW can be in any one of several possible
2095 * states.  If the FW is in the operational or waiting-for-handshake
2096 * states, the driver must take steps to bring it to the ready state.
2097 * Otherwise, it simply waits for the FW to reach the ready state.
2098 */
2099int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
2100{
2101    int i;
2102    u_int8_t max_wait;
2103    u_int32_t val, fw_state;
2104    u_int32_t cur_state;
2105    u_int32_t abs_state, curr_abs_state;
2106
2107    val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2108    fw_state = val & MFI_STATE_MASK;
2109    max_wait = MRSAS_RESET_WAIT_TIME;
2110
2111    if (fw_state != MFI_STATE_READY)
2112        device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");
2113
2114    while (fw_state != MFI_STATE_READY) {
2115	abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
2116	switch (fw_state) {
2117	    case MFI_STATE_FAULT:
2118		device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
2119		if (ocr) {
2120		    cur_state = MFI_STATE_FAULT;
2121		    break;
2122		}
2123		else
2124		    return -ENODEV;
2125	    case MFI_STATE_WAIT_HANDSHAKE:
2126		/* Set the CLR bit in inbound doorbell */
2127                mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2128		        MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG);
2129		cur_state = MFI_STATE_WAIT_HANDSHAKE;
2130		break;
2131	    case MFI_STATE_BOOT_MESSAGE_PENDING:
2132                mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2133		        MFI_INIT_HOTPLUG);
2134		cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
2135		break;
2136	    case MFI_STATE_OPERATIONAL:
2137		/* Bring it to READY state; assuming max wait 10 secs */
2138		mrsas_disable_intr(sc);
2139                mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
2140                for (i=0; i < max_wait * 1000; i++) {
2141	            if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
2142                        DELAY(1000);
2143		    else
2144		        break;
2145                }
2146		cur_state = MFI_STATE_OPERATIONAL;
2147	        break;
2148	    case MFI_STATE_UNDEFINED:
2149	        /* This state should not last for more than 2 seconds */
2150	        cur_state = MFI_STATE_UNDEFINED;
2151	        break;
2152	    case MFI_STATE_BB_INIT:
2153		cur_state = MFI_STATE_BB_INIT;
2154		break;
2155	    case MFI_STATE_FW_INIT:
2156		cur_state = MFI_STATE_FW_INIT;
2157		break;
2158	    case MFI_STATE_FW_INIT_2:
2159		cur_state = MFI_STATE_FW_INIT_2;
2160		break;
2161	    case MFI_STATE_DEVICE_SCAN:
2162		cur_state = MFI_STATE_DEVICE_SCAN;
2163		break;
2164	    case MFI_STATE_FLUSH_CACHE:
2165		cur_state = MFI_STATE_FLUSH_CACHE;
2166		break;
2167	    default:
2168		device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
2169		return -ENODEV;
2170	}
2171
2172	/*
2173	 * The cur_state should not last for more than max_wait secs
2174	 */
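	/*
	 * abs_state snapshots the whole scratch-pad register (state field
	 * plus the extra status bits), so the loop below notices any change
	 * in the register, not just a change of the MFI state field.
	 */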
2175	for (i = 0; i < (max_wait * 1000); i++) {
2176            fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2177                    outbound_scratch_pad))& MFI_STATE_MASK);
2178	    curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2179                    outbound_scratch_pad));
2180            if (abs_state == curr_abs_state)
2181                DELAY(1000);
2182	    else
2183		break;
2184	}
2185
2186	/*
2187	 * Return error if fw_state hasn't changed after max_wait
2188	 */
2189	if (curr_abs_state == abs_state) {
2190            device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
2191		       "in %d secs\n", fw_state, max_wait);
2192	    return -ENODEV;
2193	}
2194    }
2195    mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
2197    return 0;
2198}
2199
2200/**
2201 * mrsas_get_mfi_cmd:      Get a cmd from free command pool
2202 * input:                  Adapter soft state
2203 *
2204 * This function removes an MFI command from the command list.
2205 */
2206struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2207{
2208    struct mrsas_mfi_cmd *cmd = NULL;
2209
2210    mtx_lock(&sc->mfi_cmd_pool_lock);
2211    if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
2212        cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2213        TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2214    }
2215    mtx_unlock(&sc->mfi_cmd_pool_lock);
2216
2217    return cmd;
2218}
2219
2220/**
2221 * mrsas_ocr_thread             Thread to handle OCR/Kill Adapter.
2222 * input:               Adapter Context.
2223 *
2224 * This function periodically checks the FW status register and the
2225 * do_timedout_reset flag.  It performs OCR or kills the adapter if the
2226 * FW is in a fault state or an IO timeout has triggered a reset.
2227 */
2228static void
2229mrsas_ocr_thread(void *arg)
2230{
2231    struct mrsas_softc *sc;
2232    u_int32_t  fw_status, fw_state;
2233
2234    sc = (struct mrsas_softc *)arg;
2235
2236    mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);
2237
2238    sc->ocr_thread_active = 1;
2239    mtx_lock(&sc->sim_lock);
2240    for (;;) {
2241        /* Sleep for mrsas_fw_fault_check_delay seconds (or until woken), then check the FW state */
2242        msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
2243               "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
2244        if (sc->remove_in_progress) {
2245            mrsas_dprint(sc, MRSAS_OCR,
2246				"Exit due to shutdown from %s\n", __func__);
2247            break;
2248        }
2249        fw_status = mrsas_read_reg(sc,
2250				offsetof(mrsas_reg_set, outbound_scratch_pad));
2251        fw_state = fw_status & MFI_STATE_MASK;
2252        if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
2253            device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
2254                 sc->do_timedout_reset?"IO Timeout":
2255                 "FW fault detected");
2256            mtx_lock_spin(&sc->ioctl_lock);
2257            sc->reset_in_progress = 1;
2258            sc->reset_count++;
2259            mtx_unlock_spin(&sc->ioctl_lock);
2260            mrsas_xpt_freeze(sc);
2261            mrsas_reset_ctrl(sc);
2262            mrsas_xpt_release(sc);
2263            sc->reset_in_progress = 0;
2264            sc->do_timedout_reset = 0;
2265        }
2266    }
2267    mtx_unlock(&sc->sim_lock);
2268    sc->ocr_thread_active = 0;
2269    mrsas_kproc_exit(0);
2270}
2271
2272/**
2273 * mrsas_reset_reply_desc       Reset Reply descriptor as part of OCR.
2274 * input:                       Adapter Context.
2275 *
2276 * This function clears the reply descriptors so that, after OCR, the
2277 * driver and FW do not see stale reply history.
2278 */
2279void  mrsas_reset_reply_desc(struct mrsas_softc *sc)
2280{
2281    int i;
2282    pMpi2ReplyDescriptorsUnion_t reply_desc;
2283
2284    sc->last_reply_idx = 0;
2285    reply_desc = sc->reply_desc_mem;
2286    for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2287        reply_desc->Words = MRSAS_ULONG_MAX;
2288    }
2289}
2290
2291/**
2292 * mrsas_reset_ctrl     Core function to OCR/Kill adapter.
2293 * input:               Adapter Context.
2294 *
2295 * This function will run from thread context so that it can sleep.
2296 * 1. Do not handle OCR if the FW is in a HW critical error state.
2297 * 2. Wait up to 180 seconds for outstanding commands to complete.
2298 * 3. If #2 finds no outstanding commands, the controller is in a working
2299 * state, so skip OCR.
2300 * Otherwise, do OCR/kill adapter based on the flag disableOnlineCtrlReset.
2301 * 4. At the start of OCR, return all SCSI commands that have a ccb_ptr
2302 * back to the CAM layer.
2303 * 5. After OCR, re-fire management commands and move the controller to the
2304 * operational state.
2305 */
2306int mrsas_reset_ctrl(struct mrsas_softc *sc)
2307{
2308    int retval = SUCCESS, i, j, retry = 0;
2309    u_int32_t       host_diag, abs_state, status_reg, reset_adapter;
2310    union ccb   *ccb;
2311    struct mrsas_mfi_cmd *mfi_cmd;
2312    struct mrsas_mpt_cmd *mpt_cmd;
2313    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2314
2315    if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
2316        device_printf(sc->mrsas_dev,
2317                        "mrsas: Hardware critical error, returning FAIL.\n");
2318        return FAIL;
2319    }
2320
2321    set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2322    sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
2323    mrsas_disable_intr(sc);
2324    DELAY(1000 * 1000);
2325
2326    /* First try waiting for commands to complete */
2327    if (mrsas_wait_for_outstanding(sc)) {
2328        mrsas_dprint(sc, MRSAS_OCR,
2329                     "resetting adapter from %s.\n",
2330                      __func__);
2331        /* Now return commands back to the CAM layer */
2332        for (i = 0 ; i < sc->max_fw_cmds; i++) {
2333            mpt_cmd = sc->mpt_cmd_list[i];
2334            if (mpt_cmd->ccb_ptr) {
2335                ccb = (union ccb *)(mpt_cmd->ccb_ptr);
2336                ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
2337                mrsas_cmd_done(sc, mpt_cmd);
2338                atomic_dec(&sc->fw_outstanding);
2339            }
2340        }
2341
2342        status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2343                                                           outbound_scratch_pad));
2344        abs_state = status_reg & MFI_STATE_MASK;
2345        reset_adapter = status_reg & MFI_RESET_ADAPTER;
2346        if (sc->disableOnlineCtrlReset ||
2347                        (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2348            /* Reset not supported, kill adapter */
2349            mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n");
2350            mrsas_kill_hba(sc);
2351            sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
2352            retval = FAIL;
2353             goto out;
2354        }
2355
2356        /* Now try to reset the chip */
2357        for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
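            /*
             * Unlock the host diag register by writing the fixed key
             * sequence below, wait for the diag-write-enable (DRWE) bit,
             * and only then request the adapter reset.
             */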
2358            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2359                MPI2_WRSEQ_FLUSH_KEY_VALUE);
2360            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2361                MPI2_WRSEQ_1ST_KEY_VALUE);
2362            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2363                MPI2_WRSEQ_2ND_KEY_VALUE);
2364            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2365                MPI2_WRSEQ_3RD_KEY_VALUE);
2366            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2367                MPI2_WRSEQ_4TH_KEY_VALUE);
2368            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2369                MPI2_WRSEQ_5TH_KEY_VALUE);
2370            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
2371                MPI2_WRSEQ_6TH_KEY_VALUE);
2372
2373            /* Check that the diag write enable (DRWE) bit is on */
2374            host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2375                                                        fusion_host_diag));
2376            retry = 0;
2377            while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
2378                DELAY(100 * 1000);
2379                host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2380                                                        fusion_host_diag));
2381                if (retry++ == 100) {
2382                    mrsas_dprint(sc, MRSAS_OCR,
2383                    "Host diag unlock failed!\n");
2384                    break;
2385                }
2386            }
2387            if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
2388                continue;
2389
2390            /* Send chip reset command */
2391            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
2392               host_diag | HOST_DIAG_RESET_ADAPTER);
2393            DELAY(3000 * 1000);
2394
2395            /* Make sure reset adapter bit is cleared */
2396            host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2397                                                        fusion_host_diag));
2398            retry = 0;
2399            while (host_diag & HOST_DIAG_RESET_ADAPTER) {
2400                DELAY(100 * 1000);
2401                host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2402                                                        fusion_host_diag));
2403                if (retry++ == 1000) {
2404                    mrsas_dprint(sc, MRSAS_OCR,
2405                                        "Diag reset adapter never cleared!\n");
2406                    break;
2407                }
2408            }
2409            if (host_diag & HOST_DIAG_RESET_ADAPTER)
2410                continue;
2411
2412            abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2413                                    outbound_scratch_pad)) & MFI_STATE_MASK;
2414            retry = 0;
2415
2416            while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
2417                DELAY(100 * 1000);
2418                abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2419                                     outbound_scratch_pad)) & MFI_STATE_MASK;
2420            }
2421            if (abs_state <= MFI_STATE_FW_INIT) {
2422                mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
2423                                " state = 0x%x\n", abs_state);
2424                continue;
2425            }
2426
2427            /* Wait for FW to become ready */
2428            if (mrsas_transition_to_ready(sc, 1)) {
2429                mrsas_dprint(sc, MRSAS_OCR,
2430                           "mrsas: Failed to transition controller to ready.\n");
2431                continue;
2432            }
2433
2434            mrsas_reset_reply_desc(sc);
2435            if (mrsas_ioc_init(sc)) {
2436                mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
2437                continue;
2438            }
2439
2440            clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2441            mrsas_enable_intr(sc);
2442            sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2443
2444            /* Re-fire management commands */
2445            for (j = 0 ; j < sc->max_fw_cmds; j++) {
2446                mpt_cmd = sc->mpt_cmd_list[j];
2447                if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
2448                    mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
2449                    if (mfi_cmd->frame->dcmd.opcode ==
2450                                          MR_DCMD_LD_MAP_GET_INFO) {
2451                        mrsas_release_mfi_cmd(mfi_cmd);
2452                        mrsas_release_mpt_cmd(mpt_cmd);
2453                    } else  {
2454                        req_desc = mrsas_get_request_desc(sc,
2455                            mfi_cmd->cmd_id.context.smid - 1);
2456                        mrsas_dprint(sc, MRSAS_OCR,
2457                            "Re-fire command DCMD opcode 0x%x index %d\n ",
2458                             mfi_cmd->frame->dcmd.opcode, j);
2459                        if (!req_desc)
2460                            device_printf(sc->mrsas_dev,
2461                                          "Cannot build MPT cmd.\n");
2462                        else
2463                            mrsas_fire_cmd(sc, req_desc->addr.u.low,
2464                                                     req_desc->addr.u.high);
2465                    }
2466                }
2467            }
2468
2469            /* Reset load balance info */
2470            memset(sc->load_balance_info, 0,
2471                   sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES);
2472
2473            if (!mrsas_get_map_info(sc))
2474                mrsas_sync_map_info(sc);
2475
2476            /* Adapter reset completed successfully */
2477            device_printf(sc->mrsas_dev, "Reset successful\n");
2478            retval = SUCCESS;
2479            goto out;
2480        }
2481        /* Reset failed, kill the adapter */
2482        device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
2483        mrsas_kill_hba(sc);
2484        retval = FAIL;
2485    } else {
2486        clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2487        mrsas_enable_intr(sc);
2488        sc->adprecovery = MRSAS_HBA_OPERATIONAL;
2489    }
2490out:
2491    clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
2492    mrsas_dprint(sc, MRSAS_OCR,
2493            "Reset Exit with %d.\n", retval);
2494    return retval;
2495}
2496
2497/**
2498 * mrsas_kill_hba       Kill HBA when OCR is not supported.
2499 * input:               Adapter Context.
2500 *
2501 * This function will kill HBA when OCR is not supported.
2502 */
2503void mrsas_kill_hba (struct mrsas_softc *sc)
2504{
2505    mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
2506    mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
2507                        MFI_STOP_ADP);
2508    /* Flush */
2509    mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
2510}
2511
2512/**
2513 * mrsas_wait_for_outstanding           Wait for outstanding commands
2514 * input:                               Adapter Context.
2515 *
2516 * This function will wait for 180 seconds for outstanding
2517 * commands to be completed.
2518 */
2519int mrsas_wait_for_outstanding(struct mrsas_softc *sc)
2520{
2521    int i, outstanding, retval = 0;
2522    u_int32_t fw_state;
2523
2524    for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
2525        if (sc->remove_in_progress) {
2526            mrsas_dprint(sc, MRSAS_OCR,
2527                "Driver remove or shutdown called.\n");
2528            retval = 1;
2529            goto out;
2530        }
2531        /* Check if firmware is in fault state */
2532        fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2533                                  outbound_scratch_pad)) & MFI_STATE_MASK;
2534        if (fw_state == MFI_STATE_FAULT) {
2535            mrsas_dprint(sc, MRSAS_OCR,
2536                         "Found FW in FAULT state, will reset adapter.\n");
2537            retval = 1;
2538            goto out;
2539        }
2540        outstanding = atomic_read(&sc->fw_outstanding);
2541        if (!outstanding)
2542            goto out;
2543
2544        if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
2545            mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
2546                                "commands to complete\n",i,outstanding);
2547            mrsas_complete_cmd(sc);
2548        }
2549        DELAY(1000 * 1000);
2550    }
2551
2552    if (atomic_read(&sc->fw_outstanding)) {
2553        mrsas_dprint(sc, MRSAS_OCR,
2554                        " pending commands remain after waiting,"
2555                        " will reset adapter.\n");
2556        retval = 1;
2557    }
2558out:
2559    return retval;
2560}
2561
2562/**
2563 * mrsas_release_mfi_cmd: Return a cmd to free command pool
2564 * input:                 Command packet for return to free cmd pool
2565 *
2566 * This function returns the MFI command to the command list.
2567 */
2568void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
2569{
2570    struct mrsas_softc *sc = cmd->sc;
2571
2572    mtx_lock(&sc->mfi_cmd_pool_lock);
2573    cmd->ccb_ptr = NULL;
2574    cmd->cmd_id.frame_count = 0;
2575    TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
2576    mtx_unlock(&sc->mfi_cmd_pool_lock);
2577
2578    return;
2579}
2580
2581/**
2582 * mrsas_get_ctrl_info -              Returns FW's controller structure
2583 * input:                             Adapter soft state
2584 *                                    Controller information structure
2585 *
2586 * Issues an internal command (DCMD) to get the FW's controller structure.
2587 * This information is mainly used to find out the maximum IO transfer per
2588 * command supported by the FW.
2589 */
2590static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
2591                      struct mrsas_ctrl_info *ctrl_info)
2592{
2593    int retcode = 0;
2594    struct mrsas_mfi_cmd *cmd;
2595    struct mrsas_dcmd_frame *dcmd;
2596
2597    cmd = mrsas_get_mfi_cmd(sc);
2598
2599    if (!cmd) {
2600        device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
2601        return -ENOMEM;
2602    }
2603    dcmd = &cmd->frame->dcmd;
2604
2605    if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
2606        device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
2607        mrsas_release_mfi_cmd(cmd);
2608        return -ENOMEM;
2609    }
2610    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2611
2612    dcmd->cmd = MFI_CMD_DCMD;
2613    dcmd->cmd_status = 0xFF;
2614    dcmd->sge_count = 1;
2615    dcmd->flags = MFI_FRAME_DIR_READ;
2616    dcmd->timeout = 0;
2617    dcmd->pad_0 = 0;
2618    dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
2619    dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2620    dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
2621    dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
2622
2623    if (!mrsas_issue_polled(sc, cmd))
2624        memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
2625    else
2626        retcode = 1;
2627
2628    mrsas_free_ctlr_info_cmd(sc);
2629    mrsas_release_mfi_cmd(cmd);
2630    return(retcode);
2631}
2632
2633/**
2634 * mrsas_alloc_ctlr_info_cmd:  Allocates memory for controller info command
2635 * input:                      Adapter soft state
2636 *
2637 * Allocates DMAable memory for the controller info internal command.
2638 */
2639int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
2640{
2641    int ctlr_info_size;
2642
2643    /* Allocate get controller info command */
2644    ctlr_info_size = sizeof(struct mrsas_ctrl_info);
2645    if (bus_dma_tag_create( sc->mrsas_parent_tag,   // parent
2646                            1, 0,                   // algnmnt, boundary
2647                            BUS_SPACE_MAXADDR_32BIT,// lowaddr
2648                            BUS_SPACE_MAXADDR,      // highaddr
2649                            NULL, NULL,             // filter, filterarg
2650                            ctlr_info_size,          // maxsize
2651                            1,                      // nsegments
2652                            ctlr_info_size,          // maxsegsize
2653                            BUS_DMA_ALLOCNOW,       // flags
2654                            NULL, NULL,             // lockfunc, lockarg
2655                            &sc->ctlr_info_tag)) {
2656        device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
2657        return (ENOMEM);
2658    }
2659    if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
2660            BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
2661        device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
2662        return (ENOMEM);
2663    }
2664    if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
2665            sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
2666            &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
2667        device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
2668        return (ENOMEM);
2669    }
2670
2671    memset(sc->ctlr_info_mem, 0, ctlr_info_size);
2672    return (0);
2673}
2674
2675/**
2676 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
2677 * input:                    Adapter soft state
2678 *
2679 * Deallocates memory of the get controller info cmd.
2680 */
2681void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
2682{
2683    if (sc->ctlr_info_phys_addr)
2684        bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
2685    if (sc->ctlr_info_mem != NULL)
2686        bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
2687    if (sc->ctlr_info_tag != NULL)
2688        bus_dma_tag_destroy(sc->ctlr_info_tag);
2689}
2690
2691/**
2692 * mrsas_issue_polled:        Issues a polling command
2693 * inputs:                    Adapter soft state
2694 *                            Command packet to be issued
2695 *
2696 * This function is for posting of internal commands to Firmware.  MFI
2697 * requires the cmd_status to be set to 0xFF before posting.  The maximum
2698 * wait time of the poll response timer is 180 seconds.
2699 */
2700int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2701{
2702    struct mrsas_header *frame_hdr = &cmd->frame->hdr;
2703    u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2704    int i, retcode = 0;
2705
2706    frame_hdr->cmd_status = 0xFF;
2707    frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2708
2709    /* Issue the frame using inbound queue port */
2710    if (mrsas_issue_dcmd(sc, cmd)) {
2711        device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2712        return(1);
2713    }
2714
2715    /*
2716     * Poll for the Firmware response.  Although the DELAY() busy-wait can
2717     * block the CPU, each individual delay interval is only 1 millisecond
2718     * (for at most max_wait seconds in total).
2719     */
2720    if (frame_hdr->cmd_status == 0xFF) {
2721        for (i=0; i < (max_wait * 1000); i++){
2722            if (frame_hdr->cmd_status == 0xFF)
2723                DELAY(1000);
2724            else
2725                break;
2726        }
2727    }
2728    if (frame_hdr->cmd_status != 0)
2729    {
2730        if (frame_hdr->cmd_status == 0xFF)
2731            device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
2732        else
2733            device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
2734        retcode = 1;
2735    }
2736    return(retcode);
2737}
2738
2739/**
2740 * mrsas_issue_dcmd -     Issues a MFI Pass thru cmd
2741 * input:                 Adapter soft state
2742 *                        mfi cmd pointer
2743 *
2744 * This function is called by mrsas_issue_blocked_cmd() and
2745 * mrsas_issue_polled() to build the MPT command and then fire the
2746 * command to Firmware.
2747 */
2748int
2749mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2750{
2751    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2752
2753    req_desc = mrsas_build_mpt_cmd(sc, cmd);
2754    if (!req_desc) {
2755        device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
2756        return(1);
2757    }
2758
2759    mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
2760
2761    return(0);
2762}
2763
2764/**
2765 * mrsas_build_mpt_cmd - Calls helper function to build Passthru cmd
2766 * input:                Adapter soft state
2767 *                       mfi cmd to build
2768 *
2769 * This function is called by mrsas_issue_dcmd() to build the MPT-MFI
2770 * passthru command and prepare the MPT command to send to Firmware.
2771 */
2772MRSAS_REQUEST_DESCRIPTOR_UNION *
2773mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2774{
2775    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2776    u_int16_t index;
2777
2778    if (mrsas_build_mptmfi_passthru(sc, cmd)) {
2779        device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
2780        return NULL;
2781    }
2782
2783    index = cmd->cmd_id.context.smid;
2784
2785    req_desc = mrsas_get_request_desc(sc, index-1);
2786    if(!req_desc)
2787        return NULL;
2788
2789    req_desc->addr.Words = 0;
2790    req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2791
2792    req_desc->SCSIIO.SMID = index;
2793
2794    return(req_desc);
2795}
2796
2797/**
2798 * mrsas_build_mptmfi_passthru - Builds a MPT MFI Passthru command
2799 * input:                        Adapter soft state
2800 *                               mfi cmd pointer
2801 *
2802 * The MPT command and the io_request are setup as a passthru command.
2803 * The SGE chain address is set to frame_phys_addr of the MFI command.
2804 */
2805u_int8_t
2806mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
2807{
2808    MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
2809    PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
2810    struct mrsas_mpt_cmd *mpt_cmd;
2811    struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
2812
2813    mpt_cmd = mrsas_get_mpt_cmd(sc);
2814    if (!mpt_cmd)
2815        return(1);
2816
2817    /* Save the smid. To be used for returning the cmd */
2818    mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
2819
2820    mpt_cmd->sync_cmd_idx = mfi_cmd->index;
2821
2822    /*
2823     * For cmds where the flag is set, store the flag and check
2824     * on completion. For cmds with this flag, don't call
2825     * mrsas_complete_cmd.
2826     */
2827
2828    if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
2829        mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2830
2831    io_req = mpt_cmd->io_request;
2832
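    /*
     * On Invader/Fury controllers the last SGE of the main message is
     * cleared up front, presumably so that a stale Flags value left over
     * from a previous command is not misinterpreted by the firmware (an
     * assumption; the firmware requirement is not spelled out here).
     */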
2833    if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
2834        pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
2835        sgl_ptr_end += sc->max_sge_in_main_msg - 1;
2836        sgl_ptr_end->Flags = 0;
2837    }
2838
2839    mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
2840
2841    io_req->Function    = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
2842    io_req->SGLOffset0  = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
2843    io_req->ChainOffset = sc->chain_offset_mfi_pthru;
2844
2845    mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
2846
2847    mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2848              MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
2849
2850    mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
2851
2852    return(0);
2853}
2854
2855/**
2856 * mrsas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
2857 * input:                    Adapter soft state
2858 *                           Command to be issued
2859 *
2860 * This function waits on an event for the command to be returned
2861 * from the ISR. Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.
2862 * Used for issuing internal and ioctl commands.
2863 */
2864int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2865{
2866    u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2867    unsigned long total_time = 0;
2868    int retcode = 0;
2869
2870    /* Initialize cmd_status */
2871    cmd->cmd_status = ECONNREFUSED;
2872
2873    /* Build MPT-MFI command for issue to FW */
2874    if (mrsas_issue_dcmd(sc, cmd)){
2875        device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2876        return(1);
2877    }
2878
2879    sc->chan = (void*)&cmd;
2880
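    /*
     * The sleep channel is the address &sc->chan itself (not the pointer
     * value just stored in it); mrsas_wakeup() calls wakeup_one() on the
     * same &sc->chan address to end the tsleep() below.
     */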
2885    while (1) {
2886       if (cmd->cmd_status == ECONNREFUSED){
2887           tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
2888       }
2889       else
2890           break;
2891       total_time++;
2892       if (total_time >= max_wait) {
2893           device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
2894           retcode = 1;
2895           break;
2896       }
2897    }
2898    return(retcode);
2899}
2900
2901/**
2902 * mrsas_complete_mptmfi_passthru - Completes a command
2903 * input:                           sc: Adapter soft state
2904 *                                  cmd: Command to be completed
2905 *                                  status: cmd completion status
2906 *
2907 * This function is called from mrsas_complete_cmd() after an interrupt
2908 * is received from Firmware, and io_request->Function is
2909 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
2910 */
2911void
2912mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
2913                     u_int8_t status)
2914{
2915    struct mrsas_header *hdr = &cmd->frame->hdr;
2916    u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
2917
2918    /* Reset the retry counter for future re-tries */
2919    cmd->retry_for_fw_reset = 0;
2920
2921    if (cmd->ccb_ptr)
2922        cmd->ccb_ptr = NULL;
2923
2924    switch (hdr->cmd) {
2925        case MFI_CMD_INVALID:
2926            device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
2927            break;
2928        case MFI_CMD_PD_SCSI_IO:
2929        case MFI_CMD_LD_SCSI_IO:
2930            /*
2931             * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
2932             * issued either through an IO path or an IOCTL path. If it
2933             * was via IOCTL, we will send it to internal completion.
2934             */
2935            if (cmd->sync_cmd) {
2936                cmd->sync_cmd = 0;
2937                mrsas_wakeup(sc, cmd);
2938                break;
2939            }
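            /*
             * A non-sync (non-IOCTL) SCSI pass-through falls through to the
             * DCMD handling below and ends up in the generic wakeup.
             */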
2940        case MFI_CMD_SMP:
2941        case MFI_CMD_STP:
2942        case MFI_CMD_DCMD:
2943            /* Check for LD map update */
2944            if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
2945                (cmd->frame->dcmd.mbox.b[1] == 1)) {
2946                sc->fast_path_io = 0;
2947                mtx_lock(&sc->raidmap_lock);
2948                if (cmd_status != 0) {
2949                    if (cmd_status != MFI_STAT_NOT_FOUND)
2950                        device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
2951                    else {
2952                        mrsas_release_mfi_cmd(cmd);
2953                        mtx_unlock(&sc->raidmap_lock);
2954                        break;
2955                    }
2956                }
2957                else
2958                    sc->map_id++;
2959                mrsas_release_mfi_cmd(cmd);
2960                if (MR_ValidateMapInfo(sc))
2961                    sc->fast_path_io = 0;
2962                else
2963                    sc->fast_path_io = 1;
2964                mrsas_sync_map_info(sc);
2965                mtx_unlock(&sc->raidmap_lock);
2966                break;
2967            }
2968#if 0 //currently not supporting event handling, so commenting out
2969            if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
2970                    cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
2971                mrsas_poll_wait_aen = 0;
2972            }
2973#endif
2974            /* See if we got an event notification */
2975            if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
2976                mrsas_complete_aen(sc, cmd);
2977            else
2978                mrsas_wakeup(sc, cmd);
2979            break;
2980        case MFI_CMD_ABORT:
2981            /* A command issued to abort another cmd has returned */
2982            mrsas_complete_abort(sc, cmd);
2983            break;
2984        default:
2985            device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
2986            break;
2987    }
2988}
2989
2990/**
2991 * mrsas_wakeup -         Completes an internal command
2992 * input:                 Adapter soft state
2993 *                        Command to be completed
2994 *
2995 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware,
2996 * a wait timer is started.  This function is called from
2997 * mrsas_complete_mptmfi_passthru() as it completes the command,
2998 * to wake up from the command wait.
2999 */
3000void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3001{
3002    cmd->cmd_status = cmd->frame->io.cmd_status;
3003
3004    if (cmd->cmd_status == ECONNREFUSED)
3005        cmd->cmd_status = 0;
3006
3007    /* For debug only ... */
3008    //device_printf(sc->mrsas_dev,"DCMD rec'd for wakeup, sc->chan=%p\n", sc->chan);
3009
3010    sc->chan = (void*)&cmd;
3011    wakeup_one((void *)&sc->chan);
3012    return;
3013}
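/*
 * Editor's note: ECONNREFUSED doubles as the "still pending" sentinel that
 * mrsas_issue_blocked_cmd() sets before issuing the DCMD and polls on while
 * sleeping, so a frame status that happens to equal that value is normalized
 * to 0 above to guarantee the waiter actually wakes up.
 */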
3014
3015/**
3016 * mrsas_shutdown_ctlr:       Instructs FW to shut down the controller
3017 * input:                     Adapter soft state
3018 *                            Shutdown/Hibernate
3019 *
3020 * This function issues a DCMD internal command to Firmware to initiate
3021 * shutdown of the controller.
3022 */
3023static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3024{
3025    struct mrsas_mfi_cmd *cmd;
3026    struct mrsas_dcmd_frame *dcmd;
3027
3028    if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3029        return;
3030
3031    cmd = mrsas_get_mfi_cmd(sc);
3032    if (!cmd) {
3033        device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n");
3034        return;
3035    }
3036
3037    if (sc->aen_cmd)
3038        mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3039
3040    if (sc->map_update_cmd)
3041        mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3042
3043    dcmd = &cmd->frame->dcmd;
3044    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3045
3046    dcmd->cmd = MFI_CMD_DCMD;
3047    dcmd->cmd_status = 0x0;
3048    dcmd->sge_count = 0;
3049    dcmd->flags = MFI_FRAME_DIR_NONE;
3050    dcmd->timeout = 0;
3051    dcmd->pad_0 = 0;
3052    dcmd->data_xfer_len = 0;
3053    dcmd->opcode = opcode;
3054
3055    device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n");
3056
3057    mrsas_issue_blocked_cmd(sc, cmd);
3058    mrsas_release_mfi_cmd(cmd);
3059
3060    return;
3061}
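/*
 * Editor's note: illustrative sketch only, not part of the driver.  The
 * shutdown helper above is presumably invoked from the detach/shutdown paths
 * together with a cache flush; the shutdown opcode macro named below is an
 * assumption and may differ in mrsas.h.
 */
#if 0
static void
mrsas_example_teardown(struct mrsas_softc *sc)
{
    mrsas_flush_cache(sc);                           /* defined just below */
    mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);  /* assumed opcode macro */
}
#endif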
3062
3063/**
3064 * mrsas_flush_cache:         Requests FW to flush all its caches
3065 * input:                     Adapter soft state
3066 *
3067 * This function issues a DCMD internal command to Firmware to initiate
3068 * flushing of all caches.
3069 */
3070static void mrsas_flush_cache(struct mrsas_softc *sc)
3071{
3072    struct mrsas_mfi_cmd *cmd;
3073    struct mrsas_dcmd_frame *dcmd;
3074
3075    if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3076        return;
3077
3078    cmd = mrsas_get_mfi_cmd(sc);
3079    if (!cmd) {
3080        device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n");
3081        return;
3082    }
3083
3084    dcmd = &cmd->frame->dcmd;
3085    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3086
3087    dcmd->cmd = MFI_CMD_DCMD;
3088    dcmd->cmd_status = 0x0;
3089    dcmd->sge_count = 0;
3090    dcmd->flags = MFI_FRAME_DIR_NONE;
3091    dcmd->timeout = 0;
3092    dcmd->pad_0 = 0;
3093    dcmd->data_xfer_len = 0;
3094    dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3095    dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3096
3097    mrsas_issue_blocked_cmd(sc, cmd);
3098    mrsas_release_mfi_cmd(cmd);
3099
3100    return;
3101}
3102
3103/**
3104 * mrsas_get_map_info:        Load and validate RAID map
3105 * input:                     Adapter instance soft state
3106 *
3107 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo()
3108 * to load and validate the RAID map.  It returns 0 on success and 1
3109 * otherwise.
3110 */
3111static int mrsas_get_map_info(struct mrsas_softc *sc)
3112{
3113    uint8_t retcode = 0;
3114
3115    sc->fast_path_io = 0;
3116    if (!mrsas_get_ld_map_info(sc)) {
3117        retcode = MR_ValidateMapInfo(sc);
3118        if (retcode == 0) {
3119            sc->fast_path_io = 1;
3120            return 0;
3121        }
3122    }
3123    return 1;
3124}
3125
3126/**
3127 * mrsas_get_ld_map_info:      Get FW's ld_map structure
3128 * input:                      Adapter instance soft state
3129 *
3130 * Issues an internal command (DCMD) to get the FW's logical drive (LD)
3131 * RAID map structure.
3132 */
3133static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
3134{
3135    int retcode = 0;
3136    struct mrsas_mfi_cmd *cmd;
3137    struct mrsas_dcmd_frame *dcmd;
3138    MR_FW_RAID_MAP_ALL *map;
3139    bus_addr_t map_phys_addr = 0;
3140
3141    cmd = mrsas_get_mfi_cmd(sc);
3142    if (!cmd) {
3143        device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n");
3144        return 1;
3145    }
3146
3147    dcmd = &cmd->frame->dcmd;
3148
3149    map = sc->raidmap_mem[(sc->map_id & 1)];
3150    map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3151    if (!map) {
3152        device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n");
3153        mrsas_release_mfi_cmd(cmd);
3154        return (ENOMEM);
3155    }
3156    memset(map, 0, sizeof(*map));
3157    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3158
3159    dcmd->cmd = MFI_CMD_DCMD;
3160    dcmd->cmd_status = 0xFF;
3161    dcmd->sge_count = 1;
3162    dcmd->flags = MFI_FRAME_DIR_READ;
3163    dcmd->timeout = 0;
3164    dcmd->pad_0 = 0;
3165    dcmd->data_xfer_len = sc->map_sz;
3166    dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3167    dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3168    dcmd->sgl.sge32[0].length = sc->map_sz;
3169    if (!mrsas_issue_polled(sc, cmd))
3170        retcode = 0;
3171    else
3172    {
3173        device_printf(sc->mrsas_dev, "Fail to send get LD map info cmd.\n");
3174        retcode = 1;
3175    }
3176    mrsas_release_mfi_cmd(cmd);
3177    return(retcode);
3178}
3179
3180/**
3181 * mrsas_sync_map_info:        Sync LD map info with FW
3182 * input:                      Adapter instance soft state
3183 *
3184 * Issues a pending internal command (DCMD) that writes the driver's LD
3185 * target IDs and sequence numbers to FW, so FW can signal RAID map changes.
3186 */
3187static int mrsas_sync_map_info(struct mrsas_softc *sc)
3188{
3189    int retcode = 0, i;
3190    struct mrsas_mfi_cmd *cmd;
3191    struct mrsas_dcmd_frame *dcmd;
3192    uint32_t size_sync_info, num_lds;
3193    MR_LD_TARGET_SYNC *target_map = NULL;
3194    MR_FW_RAID_MAP_ALL *map;
3195    MR_LD_RAID  *raid;
3196    MR_LD_TARGET_SYNC *ld_sync;
3197    bus_addr_t map_phys_addr = 0;
3198
3199    cmd = mrsas_get_mfi_cmd(sc);
3200    if (!cmd) {
3201        device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
3202        return 1;
3203    }
3204
3205    map = sc->raidmap_mem[sc->map_id & 1];
3206    num_lds = map->raidMap.ldCount;
3207
3208    dcmd = &cmd->frame->dcmd;
3209    size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3210    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3211
3212    target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
3213    memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL));
3214
3215    map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3216
3217    ld_sync = (MR_LD_TARGET_SYNC *)target_map;
3218
3219    for (i = 0; i < num_lds; i++, ld_sync++) {
3220        raid = MR_LdRaidGet(i, map);
3221        ld_sync->targetId = MR_GetLDTgtId(i, map);
3222        ld_sync->seqNum = raid->seqNum;
3223    }
3224
3225    dcmd->cmd = MFI_CMD_DCMD;
3226    dcmd->cmd_status = 0xFF;
3227    dcmd->sge_count = 1;
3228    dcmd->flags = MFI_FRAME_DIR_WRITE;
3229    dcmd->timeout = 0;
3230    dcmd->pad_0 = 0;
3231    dcmd->data_xfer_len = sc->map_sz;
3232    dcmd->mbox.b[0] = num_lds;
3233    dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3234    dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3235    dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3236    dcmd->sgl.sge32[0].length = sc->map_sz;
3237
3238    sc->map_update_cmd = cmd;
3239    if (mrsas_issue_dcmd(sc, cmd)) {
3240        device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n");
3241        return(1);
3242    }
3243    return(retcode);
3244}
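/*
 * Editor's note: the driver double-buffers the RAID map and ping-pongs on the
 * low bit of map_id: raidmap_mem[map_id & 1] holds the map most recently
 * fetched in mrsas_get_ld_map_info(), while raidmap_mem[(map_id - 1) & 1] is
 * reused above as scratch space for the MR_LD_TARGET_SYNC array written back
 * to FW.  When FW completes this pending DCMD on a map change,
 * mrsas_complete_mptmfi_passthru() increments map_id, flipping which buffer
 * is current, and issues a fresh sync.
 */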
3245
3246/**
3247 * mrsas_get_pd_list:           Returns FW's PD list structure
3248 * input:                       Adapter soft state
3249 *
3250 * Issues an internal command (DCMD) to get the FW's controller PD
3251 * list structure.  This information is mainly used to find out about
3252 * system supported by Firmware.
3253 */
3254static int mrsas_get_pd_list(struct mrsas_softc *sc)
3255{
3256    int retcode = 0, pd_index = 0, pd_count=0, pd_list_size;
3257    struct mrsas_mfi_cmd *cmd;
3258    struct mrsas_dcmd_frame *dcmd;
3259    struct MR_PD_LIST *pd_list_mem;
3260    struct MR_PD_ADDRESS *pd_addr;
3261    bus_addr_t pd_list_phys_addr = 0;
3262    struct mrsas_tmp_dcmd *tcmd;
3263
3264    cmd = mrsas_get_mfi_cmd(sc);
3265    if (!cmd) {
3266        device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n");
3267        return 1;
3268    }
3269
3270    dcmd = &cmd->frame->dcmd;
3271
3272    tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3273    pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3274    if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
3275        device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n");
3276        mrsas_release_mfi_cmd(cmd);
            free(tcmd, M_MRSAS);
3277        return(ENOMEM);
3278    }
3279    else {
3280        pd_list_mem = tcmd->tmp_dcmd_mem;
3281        pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3282    }
3283    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3284
3285    dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3286    dcmd->mbox.b[1] = 0;
3287    dcmd->cmd = MFI_CMD_DCMD;
3288    dcmd->cmd_status = 0xFF;
3289    dcmd->sge_count = 1;
3290    dcmd->flags = MFI_FRAME_DIR_READ;
3291    dcmd->timeout = 0;
3292    dcmd->pad_0 = 0;
3293    dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3294    dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
3295    dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
3296    dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3297
3298    if (!mrsas_issue_polled(sc, cmd))
3299        retcode = 0;
3300    else
3301        retcode = 1;
3302
3303    /* Get the instance PD list */
3304    pd_count = MRSAS_MAX_PD;
3305    pd_addr = pd_list_mem->addr;
3306    if (retcode == 0 && pd_list_mem->count < pd_count) {
3307        memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3308        for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
3309            sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
3310            sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType;
3311            sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM;
3312            pd_addr++;
3313        }
3314    }
3315
3316    /* Use a mutex/spinlock if the pd_list component size grows beyond 32 bits. */
3317    memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
3318    mrsas_free_tmp_dcmd(tcmd);
3319    mrsas_release_mfi_cmd(cmd);
3320    free(tcmd, M_MRSAS);
3321    return(retcode);
3322}
3323
3324/**
3325 * mrsas_get_ld_list:           Returns FW's LD list structure
3326 * input:                       Adapter soft state
3327 *
3328 * Issues an internal command (DCMD) to get the FW's controller LD
3329 * list structure.  This information is mainly used to find out the
3330 * logical drives supported by the FW.
3331 */
3332static int mrsas_get_ld_list(struct mrsas_softc *sc)
3333{
3334    int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
3335    struct mrsas_mfi_cmd *cmd;
3336    struct mrsas_dcmd_frame *dcmd;
3337    struct MR_LD_LIST *ld_list_mem;
3338    bus_addr_t ld_list_phys_addr = 0;
3339    struct mrsas_tmp_dcmd *tcmd;
3340
3341    cmd = mrsas_get_mfi_cmd(sc);
3342    if (!cmd) {
3343        device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n");
3344        return 1;
3345    }
3346
3347    dcmd = &cmd->frame->dcmd;
3348
3349    tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3350    ld_list_size = sizeof(struct MR_LD_LIST);
3351    if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
3352        device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n");
3353        mrsas_release_mfi_cmd(cmd);
            free(tcmd, M_MRSAS);
3354        return(ENOMEM);
3355    }
3356    else {
3357        ld_list_mem = tcmd->tmp_dcmd_mem;
3358        ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3359    }
3360    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3361
3362    dcmd->cmd = MFI_CMD_DCMD;
3363    dcmd->cmd_status = 0xFF;
3364    dcmd->sge_count = 1;
3365    dcmd->flags = MFI_FRAME_DIR_READ;
3366    dcmd->timeout = 0;
3367    dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
3368    dcmd->opcode = MR_DCMD_LD_GET_LIST;
3369    dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
3370    dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
3371    dcmd->pad_0  = 0;
3372
3373    if (!mrsas_issue_polled(sc, cmd))
3374        retcode = 0;
3375    else
3376        retcode = 1;
3377
3378    /* Get the instance LD list */
3379    if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))) {
3380        sc->CurLdCount = ld_list_mem->ldCount;
3381        memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
3382        for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
3383            if (ld_list_mem->ldList[ld_index].state != 0) {
3384                ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3385                sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3386            }
3387        }
3388    }
3389
3390    mrsas_free_tmp_dcmd(tcmd);
3391    mrsas_release_mfi_cmd(cmd);
3392    free(tcmd, M_MRSAS);
3393    return(retcode);
3394}
3395
3396/**
3397 * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command
3398 * input:                      Adapter soft state
3399 *                             Temp command
3400 *                             Size of allocation
3401 *
3402 * Allocates DMAable memory for a temporary internal command. The allocated
3403 * memory is initialized to all zeros upon successful loading of the dma
3404 * mapped memory.
3405 */
3406int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
3407          int size)
3408{
3409    if (bus_dma_tag_create( sc->mrsas_parent_tag,   // parent
3410                            1, 0,                   // algnmnt, boundary
3411                            BUS_SPACE_MAXADDR_32BIT,// lowaddr
3412                            BUS_SPACE_MAXADDR,      // highaddr
3413                            NULL, NULL,             // filter, filterarg
3414                            size,                   // maxsize
3415                            1,                      // nsegments
3416                            size,                   // maxsegsize
3417                            BUS_DMA_ALLOCNOW,       // flags
3418                            NULL, NULL,             // lockfunc, lockarg
3419                            &tcmd->tmp_dcmd_tag)) {
3420        device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
3421        return (ENOMEM);
3422    }
3423    if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
3424            BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
3425        device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
3426        return (ENOMEM);
3427    }
3428    if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
3429            tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
3430            &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
3431        device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
3432        return (ENOMEM);
3433    }
3434
3435    memset(tcmd->tmp_dcmd_mem, 0, size);
3436    return (0);
3437}
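/*
 * Editor's note: illustrative sketch only, not part of the driver.  It shows
 * the temporary-DCMD buffer lifecycle as used by mrsas_get_pd_list() and
 * mrsas_get_ld_list() above; buf_size and the SGL wiring are placeholders.
 */
#if 0
    struct mrsas_tmp_dcmd *tcmd;

    tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
    if (tcmd != NULL &&
        mrsas_alloc_tmp_dcmd(sc, tcmd, buf_size) == SUCCESS) {
        /* Point dcmd->sgl.sge32[0].phys_addr at tcmd->tmp_dcmd_phys_addr,
         * issue the command, and read results from tcmd->tmp_dcmd_mem. */
        mrsas_free_tmp_dcmd(tcmd);
    }
    if (tcmd != NULL)
        free(tcmd, M_MRSAS);
#endif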
3438
3439/**
3440 * mrsas_free_tmp_dcmd:      Free memory for temporary command
3441 * input:                    temporary dcmd pointer
3442 *
3443 * Deallocates memory of the temporary command for use in the construction
3444 * of the internal DCMD.
3445 */
3446void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
3447{
3448    if (tmp->tmp_dcmd_phys_addr)
3449        bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
3450    if (tmp->tmp_dcmd_mem != NULL)
3451        bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
3452    if (tmp->tmp_dcmd_tag != NULL)
3453        bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
3454}
3455
3456/**
3457 * mrsas_issue_blocked_abort_cmd:       Aborts previously issued cmd
3458 * input:                               Adapter soft state
3459 *                                      Previously issued cmd to be aborted
3460 *
3461 * This function is used to abort previously issued commands, such as AEN and
3462 * RAID map sync map commands.  The abort command is sent as a DCMD internal
3463 * command and subsequently the driver will wait for a return status.  The
3464 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
3465 */
3466static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
3467                                         struct mrsas_mfi_cmd *cmd_to_abort)
3468{
3469    struct mrsas_mfi_cmd *cmd;
3470    struct mrsas_abort_frame *abort_fr;
3471    u_int8_t retcode = 0;
3472    unsigned long total_time = 0;
3473    u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3474
3475    cmd = mrsas_get_mfi_cmd(sc);
3476    if (!cmd) {
3477        device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
3478        return(1);
3479    }
3480
3481    abort_fr = &cmd->frame->abort;
3482
3483    /* Prepare and issue the abort frame */
3484    abort_fr->cmd = MFI_CMD_ABORT;
3485    abort_fr->cmd_status = 0xFF;
3486    abort_fr->flags = 0;
3487    abort_fr->abort_context = cmd_to_abort->index;
3488    abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
3489    abort_fr->abort_mfi_phys_addr_hi = 0;
3490
3491    cmd->sync_cmd = 1;
3492    cmd->cmd_status = 0xFF;
3493
3494    if (mrsas_issue_dcmd(sc, cmd)) {
3495        device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
3496        return(1);
3497    }
3498
3499    /* Wait for this cmd to complete */
3500    sc->chan = (void*)&cmd;
3501    while (1) {
3502       if (cmd->cmd_status == 0xFF){
3503           tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3504       }
3505       else
3506           break;
3507       total_time++;
3508       if (total_time >= max_wait) {
3509           device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
3510           retcode = 1;
3511           break;
3512       }
3513    }
3514
3515    cmd->sync_cmd = 0;
3516    mrsas_release_mfi_cmd(cmd);
3517    return(retcode);
3518}
3519
3520/**
3521 * mrsas_complete_abort:      Completes aborting a command
3522 * input:                     Adapter soft state
3523 *                            Cmd that was issued to abort another cmd
3524 *
3525 * The mrsas_issue_blocked_abort_cmd() function waits for the command status
3526 * to change after sending the command.  This function is called from
3527 * mrsas_complete_mptmfi_passthru() to wake up the associated sleeping thread.
3528 */
3529void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3530{
3531    if (cmd->sync_cmd) {
3532        cmd->sync_cmd = 0;
3533        cmd->cmd_status = 0;
3534        sc->chan = (void*)&cmd;
3535        wakeup_one((void *)&sc->chan);
3536    }
3537    return;
3538}
3539
3540/**
3541 * mrsas_aen_handler:		Callback function for AEN processing from thread context.
3542 * input:					Adapter soft state
3543 *
3544 */
3545void mrsas_aen_handler(struct mrsas_softc *sc)
3546{
3547	union mrsas_evt_class_locale class_locale;
3548	int     doscan = 0;
3549	u_int32_t seq_num;
3550	int error;
3551
3552	if (!sc) {
3553		printf("mrsas: invalid softc instance!\n");
3554		return;
3555	}
3556
3557	if (sc->evt_detail_mem) {
3558		switch (sc->evt_detail_mem->code) {
3559			case MR_EVT_PD_INSERTED:
3560				mrsas_get_pd_list(sc);
3561				mrsas_bus_scan_sim(sc, sc->sim_1);
3562				doscan = 0;
3563				break;
3564			case MR_EVT_PD_REMOVED:
3565				mrsas_get_pd_list(sc);
3566				mrsas_bus_scan_sim(sc, sc->sim_1);
3567				doscan = 0;
3568				break;
3569			case MR_EVT_LD_OFFLINE:
3570			case MR_EVT_CFG_CLEARED:
3571			case MR_EVT_LD_DELETED:
3572				mrsas_bus_scan_sim(sc, sc->sim_0);
3573				doscan = 0;
3574				break;
3575			case MR_EVT_LD_CREATED:
3576				mrsas_get_ld_list(sc);
3577				mrsas_bus_scan_sim(sc, sc->sim_0);
3578				doscan = 0;
3579				break;
3580			case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
3581			case MR_EVT_FOREIGN_CFG_IMPORTED:
3582			case MR_EVT_LD_STATE_CHANGE:
3583				doscan = 1;
3584				break;
3585			default:
3586				doscan = 0;
3587				break;
3588		}
3589	} else {
3590		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
3591		return;
3592	}
3593	if (doscan) {
3594		mrsas_get_pd_list(sc);
3595		mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
3596		mrsas_bus_scan_sim(sc, sc->sim_1);
3597		mrsas_get_ld_list(sc);
3598		mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
3599		mrsas_bus_scan_sim(sc, sc->sim_0);
3600	}
3601
3602	seq_num = sc->evt_detail_mem->seq_num + 1;
3603
3604	// Register AEN with FW for latest sequence number plus 1
3605	class_locale.members.reserved = 0;
3606	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3607	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3608
3609	if (sc->aen_cmd != NULL )
3610		return ;
3611
3612	mtx_lock(&sc->aen_lock);
3613	error = mrsas_register_aen(sc, seq_num,
3614					class_locale.word);
3615	mtx_unlock(&sc->aen_lock);
3616
3617	if (error)
3618		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
3619
3620}
3621
3622
3623/**
3624 * mrsas_complete_aen:        	Completes AEN command
3625 * input:                     	Adapter soft state
3626 *                            	AEN cmd that was completed by FW
3627 *
3628 * 								This function is called from the ISR and continues
3629 * 								event processing in thread context by enqueuing a task
3630 * 								in ev_tq (callback function "mrsas_aen_handler").
3631 */
3632void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3633{
3634	/*
3635	* Don't signal app if it is just an aborted previously registered aen
3636	*/
3637	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
3638		/* TO DO (?) */
3639	}
3640	else
3641		cmd->abort_aen = 0;
3642
3643	sc->aen_cmd = NULL;
3644	mrsas_release_mfi_cmd(cmd);
3645
3646	if (!sc->remove_in_progress)
3647		taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
3648
3649	return;
3650}
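/*
 * Editor's note: illustrative sketch only, not shown in this part of the
 * file.  The taskqueue_enqueue() above relies on ev_tq/ev_task having been
 * wired up during attach; the setup is assumed to look roughly like this
 * (names and priorities may differ in the actual driver):
 */
#if 0
    TASK_INIT(&sc->ev_task, 0, (task_fn_t *)mrsas_aen_handler, sc);
    sc->ev_tq = taskqueue_create("mrsas_aen_tq", M_NOWAIT,
        taskqueue_thread_enqueue, &sc->ev_tq);
    taskqueue_start_threads(&sc->ev_tq, 1, PRIBIO, "%s aenq",
        device_get_nameunit(sc->mrsas_dev));
#endif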
3651
3652static device_method_t mrsas_methods[] = {
3653    DEVMETHOD(device_probe,     mrsas_probe),
3654    DEVMETHOD(device_attach,    mrsas_attach),
3655    DEVMETHOD(device_detach,    mrsas_detach),
3656    DEVMETHOD(device_suspend,   mrsas_suspend),
3657    DEVMETHOD(device_resume,    mrsas_resume),
3658    DEVMETHOD(bus_print_child,  bus_generic_print_child),
3659    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
3660    { 0, 0 }
3661};
3662
3663static driver_t mrsas_driver = {
3664    "mrsas",
3665    mrsas_methods,
3666    sizeof(struct mrsas_softc)
3667};
3668
3669static devclass_t       mrsas_devclass;
3670DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
3671MODULE_DEPEND(mrsas, cam, 1,1,1);
3672
3673