1/*
2 * Copyright (c) 2014, University of Washington.
3 * All rights reserved.
4 *
5 * This file is distributed under the terms in the attached LICENSE file.
6 * If you do not find this file, copies can be found by writing to:
7 * ETH Zurich D-INFK, CAB F.78, Universitaetstr. 6, CH-8092 Zurich.
8 * Attn: Systems Group.
9 */
10
11#include <stdio.h>
12#include <stdlib.h>
13#include <assert.h>
14#include <stdint.h>
15#include <string.h>
16#include <sys/types.h>
17#ifdef BARRELFISH
18#include <barrelfish/barrelfish.h>
19#include <barrelfish/inthandler.h>
20#include <barrelfish/sys_debug.h>
21#include <barrelfish/deferred.h>
22#include <barrelfish/waitset.h>
23#include <barrelfish/core_state.h>
24#include <pci/pci.h>
25#include <skb/skb.h>
26#include <acpi_client/acpi_client.h>
27#else
28#include <arpa/inet.h>
29#include <pci/devids.h>
30#include <errors/errno.h>
31#include <sys/mman.h>
32#include <unistd.h>
33#include <sys/types.h>
34#include <sys/stat.h>
35#include <fcntl.h>
36#include "linux_defs.h"
37#endif
38
39#include "megaraid.h"
40
/* PCI device address passed on command line */
#ifdef BARRELFISH
static uint32_t pci_bus = PCI_DONT_CARE;
static uint32_t pci_device = PCI_DONT_CARE;
static uint32_t pci_function = 0;
/* When true, physical addresses handed to the HW are virtual addresses
 * translated by the IOMMU (see v2p() and the *_phys_addr assignments). */
static bool use_vtd = false;
static int vtd_coherency = 1;
#endif
static uint32_t pci_deviceid = MRSAS_INVADER;

#ifndef BARRELFISH
/* Size of the pre-mapped physically-contiguous pool used by the Linux
 * build's bump allocator (see alloc_map_frame() / v2p()). */
#	define MEM_SIZE	(4 * 1024 * 1024)
#endif

#ifdef BARRELFISH
#	define DELAY(x) barrelfish_usleep(x)
#else
#	define DELAY(x) usleep((x))
#endif

/* NOTE(review): shadows the platform's errno constant of the same name;
 * value 61 matches Linux's ECONNREFUSED — confirm intentional. */
#define ECONNREFUSED	61

#ifndef MIN
#define MIN(a,b)	((a) < (b) ? (a) : (b))
#endif

/* Adapter soft state; single-controller driver, hence a global. */
struct megaraid_ctrl *sc = NULL;
#ifndef BARRELFISH
/* Bump-allocator cursors over the pre-mapped DMA pool:
 * pmem_start/paddr_start are the pool origin, pmem_base/paddr_base the
 * current allocation cursor, paddr_end the exclusive physical limit. */
static uint8_t *pmem_base = NULL, *pmem_start = NULL;
static uintptr_t paddr_start = 0, paddr_base = 0, paddr_end = 0;
#endif

/* Per-client virtual SIC handle; currently just wraps the controller. */
struct megaraid_vsic {
  struct megaraid_ctrl *ctrl;
};
76
#ifdef BARRELFISH
/* Interrupt callback registered with the PCI driver; the driver currently
 * runs in polled mode only, so delivery of a real interrupt is a bug. */
static void interrupt_handler(void *arg)
{
  assert(!"NYI");
}
#endif
83
84/* allocate a single frame, mapping it into our vspace with given attributes */
85static void *alloc_map_frame(vregion_flags_t attr, size_t size,
86                             struct capref *retcap)
87{
88#ifdef BARRELFISH
89    struct capref frame;
90    errval_t r;
91
92    r = frame_alloc(&frame, size, NULL);
93    assert(err_is_ok(r));
94    void *va;
95    r = vspace_map_one_frame_attr(&va, size, frame, attr,
96                                  NULL, NULL);
97    if (err_is_fail(r)) {
98        DEBUG_ERR(r, "vspace_map_one_frame failed");
99        return NULL;
100    }
101
102    if (retcap != NULL) {
103        *retcap = frame;
104    }
105
106    return va;
107#else
108    /* void *va = mmap(NULL, size, */
109    /* 		    PROT_READ | PROT_WRITE, */
110    /* 		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, 0, 0); */
111    /* assert(va != NULL); */
112    /* assert((uintptr_t)va % getpagesize() == 0); */
113    size += getpagesize() - (size % getpagesize());
114
115    assert(pmem_base != NULL);
116    void *va = pmem_base;
117    pmem_base += size;
118
119    /* printf("va = %p, mapped size = %zu\n", va, size); */
120
121    /* int fd = open("/proc/self/pagemap", O_RDONLY); */
122    /* assert(fd > 0); */
123    /* uint64_t pagemap; */
124    /* int ret = pread(fd, &pagemap, 8, ((uintptr_t)va / getpagesize()) * 8 + 8); */
125    /* assert(ret == 8); */
126    /* assert(pagemap & (1ULL << 63)); // is present */
127    /* assert(!(pagemap & (1ULL << 62))); // not swapped */
128    /* unsigned int shift = ((pagemap >> 55) & ((1UL << 6) - 1)); */
129    /* retcap->paddr = (pagemap & ((1UL << 55) - 1)) << shift; */
130    retcap->paddr = paddr_base;
131    paddr_base += size;
132    /* printf("paddr = %p\n", (void *)retcap->paddr); */
133
134    assert(paddr_base <= paddr_end);
135    /* close(fd); */
136
137    /* uint8_t buf[256]; */
138    /* fd = open("/dev/mem", O_RDONLY); */
139    /* ret = pread(fd, buf, 64, retcap->paddr); */
140    /* assert(ret == 64); */
141    /* for(int i = 0; i < 64; i++) { */
142    /*   printf("%x\n", buf[i]); */
143    /* } */
144    /* close(fd); */
145
146    /* abort(); */
147
148    memset(va, 0, size);
149
150    return va;
151#endif
152}
153
#ifndef BARRELFISH
/*
 * Translate a virtual buffer [ptr, ptr+len) to its physical address.
 *
 * NOTE(review): this function is only compiled #ifndef BARRELFISH, yet
 * its body contains an #ifdef BARRELFISH arm — that arm is dead code in
 * this configuration and only the pool-offset translation at the bottom
 * is ever built.  Presumably kept for a future merge; confirm.
 */
lpaddr_t v2p(void *ptr, size_t len)
{
  lpaddr_t paddr;

#ifdef BARRELFISH
  if (!use_vtd) {
    // Check if it's in morecore's region
    struct morecore_state *mc_state = get_morecore_state();
    struct vspace_mmu_aware *mmu_state = &mc_state->mmu_state;
    genvaddr_t base = vregion_get_base_addr(&mmu_state->vregion);
    struct memobj_frame_list *i;

    // Walk frame list
    for(i = mmu_state->memobj.frame_list; i != NULL; i = i->next) {
      // If address is completely within frame, we can resolve
      // XXX: Everything else would be easier with an IOMMU
      /* printf("Heap: Comparing [%p:%p] against [%p:%p]\n", */
      /*        ptr, ptr + q->len, */
      /*        (void *)(base + i->offset), */
      /*        (void *)(base + i->offset + i->size)); */
      if(base + i->offset <= (genvaddr_t)ptr &&
	 ((genvaddr_t)ptr) + len < base + i->offset + i->size) {
	assert(i->pa != 0);

	/* buf->pa = id.base + ((genvaddr_t)ptr - base - i->offset); */
	paddr = i->pa + ((genvaddr_t)ptr - base - i->offset);
	return paddr;
      }
    }

    // Check if it's in text/data region
    int entry;
    for(entry = 0; entry < mc_state->v2p_entries; entry++) {
      struct v2pmap *pmap = &mc_state->v2p_mappings[entry];

      // If address is completely within frame, we can resolve
      // XXX: Everything else would be easier with an IOMMU
      /* printf("BSS: Comparing [%p:%p] against [%p:%p]\n", */
      /*        ptr, ptr + len, */
      /*        (void *)(pmap->va), */
      /*        (void *)(pmap->va + pmap->size)); */
      if(pmap->va <= (genvaddr_t)ptr &&
	 ((genvaddr_t)ptr) + len < pmap->va + pmap->size) {
	paddr = pmap->pa + ((genvaddr_t)ptr - pmap->va);
	return paddr;
      }
    }

    // Not found...
    printf("Called from %p %p %p\n",
	   __builtin_return_address(0),
	   __builtin_return_address(1),
	   __builtin_return_address(2));

    USER_PANIC("Invalid buffer! ptr = %p\n", ptr);
  } else {
    // With an IOMMU the device sees our virtual addresses directly
    return (lpaddr_t)ptr;
  }
#else
  /* Linux build: the buffer must come from the contiguous pool allocated
   * by alloc_map_frame(); translation is a constant offset. */
  assert((uint8_t *)ptr >= pmem_start);
  assert((uint8_t *)ptr < pmem_start + MEM_SIZE);
  paddr = (uintptr_t)((uint8_t *)ptr - pmem_start);
  paddr += paddr_start;
  return paddr;
#endif
}
#endif
222
#ifndef BARRELFISH
/* Linux shim for the Barrelfish capability-identify call: the "capability"
 * here is just a record carrying the physical address assigned by
 * alloc_map_frame().  Always succeeds. */
errval_t invoke_frame_identify(struct capref cap, struct frame_identity *id)
{
  id->base = cap.paddr;
  return SYS_ERR_OK;
}
229
230void *user_alloc(size_t size, uintptr_t *paddr);
231void *user_alloc(size_t size, uintptr_t *paddr)
232{
233  struct capref cap;
234  void * va = alloc_map_frame(VREGION_FLAGS_READ_WRITE_NOCACHE,
235			      size, &cap);
236  assert(va != NULL);
237  struct frame_identity id;
238  errval_t err = invoke_frame_identify(cap, &id);
239  assert(err_is_ok(err));
240  *paddr = id.base;
241  return va;
242}
243#endif
244
245/**
246 * Interrupt Disable/Enable/Clear Functions
247 *
248 */
249static void mrsas_disable_intr(void)
250{
251    u_int32_t mask = 0xFFFFFFFF;
252    megaraid_outbound_intr_mask_wr(&sc->d, mask);
253    megaraid_outbound_intr_mask_rd(&sc->d);
254}
255
256/**
257 * mrsas_fire_cmd:     Sends command to FW
258 * input:              Adapter soft state
259 *                     request descriptor address low
260 *                     request descriptor address high
261 *
262 * This functions fires the command to Firmware by writing to the
263 * inbound_low_queue_port and inbound_high_queue_port.
264 */
265void mrsas_fire_cmd(u_int32_t req_desc_lo, u_int32_t req_desc_hi)
266{
267    /* mtx_lock(&sc->pci_lock); */
268    megaraid_inbound_low_queue_port_wr(&sc->d, req_desc_lo);
269    megaraid_inbound_high_queue_port_wr(&sc->d, req_desc_hi);
270    /* mtx_unlock(&sc->pci_lock); */
271}
272
273/**
274 * mrsas_alloc_frame -   Allocates MFI Frames
275 * input:                Adapter soft state
276 *
277 * Create bus DMA memory tag and dmamap and load memory for MFI frames.
278 * Returns virtual memory pointer to allocated region.
279 */
280static void *mrsas_alloc_frame(struct mrsas_mfi_cmd *cmd)
281{
282    u_int32_t frame_size = MRSAS_MFI_FRAME_SIZE;
283    struct capref cap;
284
285    cmd->frame_mem = alloc_map_frame(VREGION_FLAGS_READ_WRITE_NOCACHE,
286				     frame_size, &cap);
287    assert(cmd->frame_mem != NULL);
288    struct frame_identity id;
289    errval_t err = invoke_frame_identify(cap, &id);
290    assert(err_is_ok(err));
291    cmd->frame_phys_addr = id.base;
292
293    return(cmd->frame_mem);
294}
295
296/**
297 * mrsas_alloc_mfi_cmds:  Allocates the command packets
298 * input:                 Adapter instance soft state
299 *
300 * Each IOCTL or passthru command that is issued to the FW are wrapped in a
301 * local data structure called mrsas_mfi_cmd.  The frame embedded in this
302 * mrsas_mfi is issued to FW. The array is used only to look up the
303 * mrsas_mfi_cmd given the context. The free commands are maintained in a
304 * linked list.
305 */
306static int mrsas_alloc_mfi_cmds(void)
307{
308    int i;
309    u_int32_t max_cmd;
310    struct mrsas_mfi_cmd *cmd;
311
312    max_cmd = MRSAS_MAX_MFI_CMDS;
313
314    /*
315     * sc->mfi_cmd_list is an array of struct mrsas_mfi_cmd pointers. Allocate the
316     * dynamic array first and then allocate individual commands.
317     */
318    sc->mfi_cmd_list = malloc(sizeof(struct mrsas_mfi_cmd*)*max_cmd);
319    assert(sc->mfi_cmd_list != NULL);
320    memset(sc->mfi_cmd_list, 0, sizeof(struct mrsas_mfi_cmd *)*max_cmd);
321    for (i = 0; i < max_cmd; i++) {
322        sc->mfi_cmd_list[i] = malloc(sizeof(struct mrsas_mfi_cmd));
323	assert(sc->mfi_cmd_list[i] != NULL);
324    }
325
326    for (i = 0; i < max_cmd; i++) {
327        cmd = sc->mfi_cmd_list[i];
328        memset(cmd, 0, sizeof(struct mrsas_mfi_cmd));
329        cmd->index = i;
330        cmd->ccb_ptr = NULL;
331        cmd->sc = sc;
332        TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
333    }
334
335    for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
336        cmd = sc->mfi_cmd_list[i];
337        cmd->frame = mrsas_alloc_frame(cmd);
338	assert(cmd->frame != NULL);
339        memset(cmd->frame, 0, MRSAS_MFI_FRAME_SIZE);
340        cmd->frame->io.context = cmd->index;
341        cmd->frame->io.pad_0 = 0;
342    }
343
344    return(0);
345}
346
347/**
348 * mrsas_alloc_mpt_cmds:  Allocates the command packets
349 * input:                 Adapter instance soft state
350 *
351 * This function allocates the internal commands for IOs. Each command that is
352 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
353 * An array is allocated with mrsas_mpt_cmd context.  The free commands are
354 * maintained in a linked list (cmd pool). SMID value range is from 1 to
355 * max_fw_cmds.
356 */
357static int mrsas_alloc_mpt_cmds(void)
358{
359    int i;
360    u_int32_t max_cmd;
361    struct mrsas_mpt_cmd *cmd;
362    pMpi2ReplyDescriptorsUnion_t reply_desc;
363    u_int32_t offset, chain_offset, sense_offset;
364    lpaddr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
365    u_int8_t *io_req_base, *chain_frame_base, *sense_base;
366
367    max_cmd = sc->max_fw_cmds;
368
369    sc->req_desc = malloc(sc->request_alloc_sz);
370    assert(sc->req_desc != NULL);
371    memset(sc->req_desc, 0, sc->request_alloc_sz);
372
373    /*
374     * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the
375     * dynamic array first and then allocate individual commands.
376     */
377    sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd);
378    assert(sc->mpt_cmd_list != NULL);
379    memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
380    for (i = 0; i < max_cmd; i++) {
381        sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd));
382        assert(sc->mpt_cmd_list[i] != NULL);
383    }
384
385    io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
386    io_req_base_phys = (lpaddr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
387    chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
388    chain_frame_base_phys = (lpaddr_t)sc->chain_frame_phys_addr;
389    sense_base = (u_int8_t*)sc->sense_mem;
390    sense_base_phys = (lpaddr_t)sc->sense_phys_addr;
391    for (i = 0; i < max_cmd; i++) {
392        cmd = sc->mpt_cmd_list[i];
393        offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
394	chain_offset = 1024 * i;
395        sense_offset = MRSAS_SENSE_LEN * i;
396        memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
397        cmd->index = i + 1;
398        cmd->ccb_ptr = NULL;
399        /* callout_init(&cmd->cm_callout, 0); */
400        cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
401        cmd->sc = sc;
402        cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
403        memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
404        cmd->io_request_phys_addr = io_req_base_phys + offset;
405	cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
406	cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
407        cmd->sense = sense_base + sense_offset;
408        cmd->sense_phys_addr = sense_base_phys + sense_offset;
409        /* if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) { */
410        /*     return(FAIL); */
411        /* } */
412        TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
413    }
414
415    /* Initialize reply descriptor array to 0xFFFFFFFF */
416    reply_desc = sc->reply_desc_mem;
417    for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
418        reply_desc->Words = MRSAS_ULONG_MAX;
419    }
420
421    return 0;
422}
423
424/**
425 * mrsas_get_mfi_cmd:      Get a cmd from free command pool
426 * input:                  Adapter soft state
427 *
428 * This function removes an MFI command from the command list.
429 */
430static struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(void)
431{
432    struct mrsas_mfi_cmd *cmd = NULL;
433
434    /* mtx_lock(&sc->mfi_cmd_pool_lock); */
435    if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
436        cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
437        TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
438    }
439    /* mtx_unlock(&sc->mfi_cmd_pool_lock); */
440
441    return cmd;
442}
443
444/**
445 * mrsas_get_mpt_cmd:            Get a cmd from free command pool
446 * input:                        Adapter instance soft state
447 *
448 * This function removes an MPT command from the command free list and
449 * initializes it.
450 */
451struct mrsas_mpt_cmd* mrsas_get_mpt_cmd(void)
452{
453    struct mrsas_mpt_cmd *cmd = NULL;
454
455    /* mtx_lock(&sc->mpt_cmd_pool_lock); */
456    if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)){
457        cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
458        TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
459    }
460    memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
461    cmd->data = NULL;
462    cmd->length = 0;
463    cmd->flags = 0;
464    cmd->error_code = 0;
465    cmd->load_balance = 0;
466    cmd->ccb_ptr = NULL;
467    /* mtx_unlock(&sc->mpt_cmd_pool_lock); */
468
469    return cmd;
470}
471
472/**
473 * mrsas_build_mptmfi_passthru - Builds a MPT MFI Passthru command
474 * input:                        Adapter soft state
475 *                               mfi cmd pointer
476 *
477 * The MPT command and the io_request are setup as a passthru command.
478 * The SGE chain address is set to frame_phys_addr of the MFI command.
479 */
480static u_int8_t
481mrsas_build_mptmfi_passthru(struct mrsas_mfi_cmd *mfi_cmd)
482{
483    MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
484    PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
485    struct mrsas_mpt_cmd *mpt_cmd;
486    struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;
487
488    mpt_cmd = mrsas_get_mpt_cmd();
489    if (!mpt_cmd)
490        return(1);
491
492    /* Save the smid. To be used for returning the cmd */
493    mfi_cmd->cmd_id.context.smid = mpt_cmd->index;
494
495    mpt_cmd->sync_cmd_idx = mfi_cmd->index;
496
497    /* DEBUG("Building sync cmd #%u, from %p, %p, %p, %p\n", mpt_cmd->sync_cmd_idx, */
498    /* 	  __builtin_return_address(0), */
499    /* 	  __builtin_return_address(1), */
500    /* 	  __builtin_return_address(2), */
501    /* 	  __builtin_return_address(3)); */
502
503    /*
504     * For cmds where the flag is set, store the flag and check
505     * on completion. For cmds with this flag, don't call
506     * mrsas_complete_cmd.
507     */
508
509    if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
510        mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
511
512    io_req = mpt_cmd->io_request;
513
514    if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
515		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
516                sgl_ptr_end += sc->max_sge_in_main_msg - 1;
517                sgl_ptr_end->Flags = 0;
518    }
519
520    mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
521
522    io_req->Function    = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
523    io_req->SGLOffset0  = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
524    io_req->ChainOffset = sc->chain_offset_mfi_pthru;
525
526    mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;
527
528    mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
529              MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
530
531    mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
532
533    return(0);
534}
535
536/**
537 * mrsas_get_request_desc:     Get request descriptor from array
538 * input:                      Adapter instance soft state
539 *                             SMID index
540 *
541 * This function returns a pointer to the request descriptor.
542 */
543MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(u_int16_t idx)
544{
545    u_int8_t *p;
546
547    if (idx >= sc->max_fw_cmds) {
548        DEBUG("Invalid SMID (0x%x)request for desc\n", idx);
549        return NULL;
550    }
551    p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * idx;
552
553    return (MRSAS_REQUEST_DESCRIPTOR_UNION *)p;
554}
555
556/**
557 * mrsas_build_mpt_cmd - Calls helper function to build Passthru cmd
558 * input:                Adapter soft state
559 *                       mfi cmd to build
560 *
561 * This function is called by mrsas_issue_cmd() to build the MPT-MFI
562 * passthru command and prepares the MPT command to send to Firmware.
563 */
564static MRSAS_REQUEST_DESCRIPTOR_UNION *
565mrsas_build_mpt_cmd(struct mrsas_mfi_cmd *cmd)
566{
567    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
568    u_int16_t idx;
569
570    if (mrsas_build_mptmfi_passthru(cmd)) {
571        DEBUG("Cannot build MPT-MFI passthru cmd.\n");
572        return NULL;
573    }
574
575    idx = cmd->cmd_id.context.smid;
576
577    req_desc = mrsas_get_request_desc(idx-1);
578    if(!req_desc)
579        return NULL;
580
581    req_desc->addr.Words = 0;
582    req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
583
584    req_desc->SCSIIO.SMID = idx;
585
586    return(req_desc);
587}
588
589/**
590 * mrsas_issue_dcmd -     Issues a MFI Pass thru cmd
591 * input:                 Adapter soft state
592 *                        mfi cmd pointer
593 *
594 * This function is called by mrsas_issued_blocked_cmd() and
595 * mrsas_issued_polled(), to build the MPT command and then fire the
596 * command to Firmware.
597 */
598static int
599mrsas_issue_dcmd(struct mrsas_mfi_cmd *cmd)
600{
601    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
602
603    req_desc = mrsas_build_mpt_cmd(cmd);
604    if (!req_desc) {
605        DEBUG("Cannot build MPT cmd.\n");
606        return(1);
607    }
608
609    mrsas_fire_cmd(req_desc->addr.u.low, req_desc->addr.u.high);
610
611    return(0);
612}
613
614/**
615 * mrsas_issue_polled:        Issues a polling command
616 * inputs:                    Adapter soft state
617 *                            Command packet to be issued
618 *
619 * This function is for posting of internal commands to Firmware.  MFI
620 * requires the cmd_status to be set to 0xFF before posting.  The maximun
621 * wait time of the poll response timer is 180 seconds.
622 */
623static int mrsas_issue_polled(struct mrsas_mfi_cmd *cmd)
624{
625    struct mrsas_header *frame_hdr = &cmd->frame->hdr;
626    u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
627    int i, retcode = 0;
628
629    frame_hdr->cmd_status = 0xFF;
630    frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
631
632    /* Issue the frame using inbound queue port */
633    if (mrsas_issue_dcmd(cmd)) {
634        DEBUG("Cannot issue DCMD internal command.\n");
635        return(1);
636    }
637
638    DEBUG("Waiting for return of polled command...\n");
639
640    /*
641     * Poll response timer to wait for Firmware response.  While this
642     * timer with the DELAY call could block CPU, the time interval for
643     * this is only 1 millisecond.
644     */
645    if (frame_hdr->cmd_status == 0xFF) {
646        for (i=0; i < (max_wait * 1000); i++){
647            if (frame_hdr->cmd_status == 0xFF)
648                DELAY(1000);
649            else
650                break;
651        }
652    }
653
654    DEBUG("Polled command returned.\n");
655
656    if (frame_hdr->cmd_status != 0)
657    {
658        if (frame_hdr->cmd_status == 0xFF)
659            DEBUG("DCMD timed out after %d seconds.\n", max_wait);
660        else
661            DEBUG("DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
662        retcode = 1;
663    }
664
665    return(retcode);
666}
667
668/**
669 * mrsas_release_mfi_cmd: Return a cmd to free command pool
670 * input:                 Command packet for return to free cmd pool
671 *
672 * This function returns the MFI command to the command list.
673 */
674static void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
675{
676    struct megaraid_ctrl *s = cmd->sc;
677
678    /* mtx_lock(&sc->mfi_cmd_pool_lock); */
679    cmd->ccb_ptr = NULL;
680    cmd->cmd_id.frame_count = 0;
681    TAILQ_INSERT_TAIL(&(s->mrsas_mfi_cmd_list_head), cmd, next);
682    /* mtx_unlock(&sc->mfi_cmd_pool_lock); */
683
684    return;
685}
686
687/**
688 * MR_ValidateMapInfo:        Validate RAID map
689 * input:                     Adapter instance soft state
690 *
691 * This function checks and validates the loaded RAID map. It returns 0 if
692 * successful, and 1 otherwise.
693 */
694static u_int8_t MR_ValidateMapInfo(void)
695{
696    uint32_t total_map_sz;
697    MR_FW_RAID_MAP_ALL *map = sc->raidmap_mem[(sc->map_id & 1)];
698    MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
699    /* PLD_SPAN_INFO ldSpanInfo = (PLD_SPAN_INFO) &sc->log_to_span; */
700
701    total_map_sz = (sizeof(MR_FW_RAID_MAP) - sizeof(MR_LD_SPAN_MAP) +
702                     (sizeof(MR_LD_SPAN_MAP) * pFwRaidMap->ldCount));
703
704    if (pFwRaidMap->totalSize != total_map_sz) {
705        DEBUG("map size %x not matching ld count\n", total_map_sz);
706        DEBUG("span map= %x\n", (unsigned int)sizeof(MR_LD_SPAN_MAP));
707        DEBUG("pFwRaidMap->totalSize=%x\n", pFwRaidMap->totalSize);
708        return 1;
709    }
710
711    printf("Max logical drives = %u\n", pFwRaidMap->raid_desc.validationInfo.maxLd);
712    printf("Num Logical drives = %u\n", pFwRaidMap->ldCount);
713
714    /* if (sc->UnevenSpanSupport) { */
715    /*     mr_update_span_set(map, ldSpanInfo); */
716    /* } */
717
718    /* mrsas_update_load_balance_params(map, sc->load_balance_info); */
719
720    return 0;
721}
722
723/*
724 * Various RAID map access functions.  These functions access the various
725 * parts of the RAID map and returns the appropriate parameters.
726 */
727
728static MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
729{
730    return (&map->raidMap.ldSpanMap[ld].ldRaid);
731}
732
733static u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map)
734{
735    return (map->raidMap.ldSpanMap[ld].ldRaid.targetId);
736}
737
738/**
739 * mrsas_sync_map_info:        Get FW's ld_map structure
740 * input:                      Adapter instance soft state
741 *
742 * Issues an internal command (DCMD) to get the FW's controller PD
743 * list structure.
744 */
745static int mrsas_sync_map_info(void)
746{
747    int retcode = 0, i;
748    struct mrsas_mfi_cmd *cmd;
749    struct mrsas_dcmd_frame *dcmd;
750    uint32_t num_lds;
751    MR_LD_TARGET_SYNC *target_map = NULL;
752    MR_FW_RAID_MAP_ALL *map;
753    MR_LD_RAID  *raid;
754    MR_LD_TARGET_SYNC *ld_sync;
755    lpaddr_t map_phys_addr = 0;
756
757    cmd = mrsas_get_mfi_cmd();
758    assert(cmd != NULL);
759
760    map = sc->raidmap_mem[sc->map_id & 1];
761    num_lds = map->raidMap.ldCount;
762
763    dcmd = &cmd->frame->dcmd;
764    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
765
766    target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
767    memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL));
768
769    map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
770
771    ld_sync = (MR_LD_TARGET_SYNC *)target_map;
772
773    for (i = 0; i < num_lds; i++, ld_sync++) {
774        raid = MR_LdRaidGet(i, map);
775        ld_sync->targetId = MR_GetLDTgtId(i, map);
776        ld_sync->seqNum = raid->seqNum;
777    }
778
779    dcmd->cmd = MFI_CMD_DCMD;
780    dcmd->cmd_status = 0xFF;
781    dcmd->sge_count = 1;
782    dcmd->flags = MFI_FRAME_DIR_WRITE;
783    dcmd->timeout = 0;
784    dcmd->pad_0 = 0;
785    dcmd->data_xfer_len = sc->map_sz;
786    dcmd->mbox.b[0] = num_lds;
787    dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
788    dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
789    dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
790    dcmd->sgl.sge32[0].length = sc->map_sz;
791
792    sc->map_update_cmd = cmd;
793    if (mrsas_issue_dcmd(cmd)) {
794        DEBUG("Fail to send sync map info command.\n");
795        return(1);
796    }
797    return(retcode);
798}
799
800/**
801 * mrsas_alloc_tmp_dcmd:       Allocates memory for temporary command
802 * input:                      Adapter soft state
803 *                             Temp command
804 *                             Size of alloction
805 *
806 * Allocates DMAable memory for a temporary internal command. The allocated
807 * memory is initialized to all zeros upon successful loading of the dma
808 * mapped memory.
809 */
810static int mrsas_alloc_tmp_dcmd(struct mrsas_tmp_dcmd *tcmd, int size)
811{
812    struct capref cap;
813    tcmd->tmp_dcmd_mem =
814      alloc_map_frame(VREGION_FLAGS_READ_WRITE_NOCACHE, size, &cap);
815    if(use_vtd) {
816        tcmd->tmp_dcmd_phys_addr = (lpaddr_t)tcmd->tmp_dcmd_mem;
817    } else {
818        struct frame_identity id;
819        errval_t err = invoke_frame_identify(cap, &id);
820        assert(err_is_ok(err));
821        tcmd->tmp_dcmd_phys_addr = id.base;
822    }
823
824    memset(tcmd->tmp_dcmd_mem, 0, size);
825    return (0);
826}
827
828/**
829 * mrsas_alloc_ctlr_info_cmd:  Allocates memory for controller info command
830 * input:                      Adapter soft state
831 *
832 * Allocates DMAable memory for the controller info internal command.
833 */
834static int mrsas_alloc_ctlr_info_cmd(void)
835{
836    int ctlr_info_size;
837
838    /* Allocate get controller info command */
839    ctlr_info_size = sizeof(struct mrsas_ctrl_info);
840    struct capref cap;
841    sc->ctlr_info_mem =
842      alloc_map_frame(VREGION_FLAGS_READ_WRITE_NOCACHE, ctlr_info_size, &cap);
843    if(use_vtd) {
844        sc->ctlr_info_phys_addr = (lpaddr_t)sc->ctlr_info_mem;
845    } else {
846        struct frame_identity id;
847        errval_t err = invoke_frame_identify(cap, &id);
848        assert(err_is_ok(err));
849        sc->ctlr_info_phys_addr = id.base;
850    }
851
852    memset(sc->ctlr_info_mem, 0, ctlr_info_size);
853    return (0);
854}
855
856/**
857 * mrsas_get_controller_info -        Returns FW's controller structure
858 * input:                             Adapter soft state
859 *                                    Controller information structure
860 *
861 * Issues an internal command (DCMD) to get the FW's controller structure.
862 * This information is mainly used to find out the maximum IO transfer per
863 * command supported by the FW.
864 */
865static int mrsas_get_ctrl_info(struct mrsas_ctrl_info *ctrl_info)
866{
867    int retcode = 0;
868    struct mrsas_mfi_cmd *cmd;
869    struct mrsas_dcmd_frame *dcmd;
870
871    cmd = mrsas_get_mfi_cmd();
872    assert(cmd != NULL);
873
874    dcmd = &cmd->frame->dcmd;
875
876    if (mrsas_alloc_ctlr_info_cmd() != SUCCESS) {
877        DEBUG("Cannot allocate get ctlr info cmd\n");
878        mrsas_release_mfi_cmd(cmd);
879	abort();
880    }
881    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
882
883    dcmd->cmd = MFI_CMD_DCMD;
884    dcmd->cmd_status = 0xFF;
885    dcmd->sge_count = 1;
886    dcmd->flags = MFI_FRAME_DIR_READ;
887    dcmd->timeout = 0;
888    dcmd->pad_0 = 0;
889    dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
890    dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
891    dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
892    dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
893
894    if (!mrsas_issue_polled(cmd))
895        memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
896    else
897        retcode = 1;
898
899    /* mrsas_free_ctlr_info_cmd(sc); */
900    mrsas_release_mfi_cmd(cmd);
901    return(retcode);
902}
903
904/**
905 * mrsas_get_ld_list:           Returns FW's LD list structure
906 * input:                       Adapter soft state
907 *
908 * Issues an internal command (DCMD) to get the FW's controller PD
909 * list structure.  This information is mainly used to find out about
910 * supported by the FW.
911 */
912static int mrsas_get_ld_list(void)
913{
914    int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
915    struct mrsas_mfi_cmd *cmd;
916    struct mrsas_dcmd_frame *dcmd;
917    struct MR_LD_LIST *ld_list_mem;
918    lpaddr_t ld_list_phys_addr = 0;
919    struct mrsas_tmp_dcmd *tcmd;
920
921    cmd = mrsas_get_mfi_cmd();
922    assert(cmd != NULL);
923
924    dcmd = &cmd->frame->dcmd;
925
926    tcmd = malloc(sizeof(struct mrsas_tmp_dcmd));
927    ld_list_size = sizeof(struct MR_LD_LIST);
928    if (mrsas_alloc_tmp_dcmd(tcmd, ld_list_size) != SUCCESS) {
929        DEBUG("Cannot alloc dmamap for get LD list cmd\n");
930        mrsas_release_mfi_cmd(cmd);
931	abort();
932    }
933    else {
934        ld_list_mem = tcmd->tmp_dcmd_mem;
935        ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
936    }
937    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
938
939    dcmd->cmd = MFI_CMD_DCMD;
940    dcmd->cmd_status = 0xFF;
941    dcmd->sge_count = 1;
942    dcmd->flags = MFI_FRAME_DIR_READ;
943    dcmd->timeout = 0;
944    dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
945    dcmd->opcode = MR_DCMD_LD_GET_LIST;
946    dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
947    dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
948    dcmd->pad_0  = 0;
949
950    if (!mrsas_issue_polled(cmd))
951        retcode = 0;
952    else
953        retcode = 1;
954
955    DEBUG("Logical drive list: retcode = %d, ldcount = %d\n",
956	  retcode, ld_list_mem->ldCount);
957
958     /* Get the instance LD list */
959     if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))){
960        sc->CurLdCount = ld_list_mem->ldCount;
961        memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
962        for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
963            if (ld_list_mem->ldList[ld_index].state != 0) {
964                ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
965                sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
966		DEBUG("Logical drive %d, ID = %d, state = %u, size = %" PRIu64 "\n",
967		      ld_index, ids,
968		      ld_list_mem->ldList[ld_index].state,
969		      ld_list_mem->ldList[ld_index].size);
970            }
971        }
972    }
973
974    /* mrsas_free_tmp_dcmd(tcmd); */
975    mrsas_release_mfi_cmd(cmd);
976    free(tcmd);
977    return(retcode);
978}
979
980static void mrsas_enable_intr(void)
981{
982    u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
983
984    megaraid_outbound_intr_status_wr(&sc->d, ~0);
985    /* mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0); */
986    megaraid_outbound_intr_status_rd(&sc->d);
987    /* status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); */
988
989    megaraid_outbound_intr_mask_wr(&sc->d, ~mask);
990    /* mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask); */
991    megaraid_outbound_intr_mask_rd(&sc->d);
992    /* status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); */
993}
994
/*
 * pci_init_card: One-shot controller bring-up, called once the PCI BARs
 * are available (Barrelfish: PCI driver callback; Linux: directly with a
 * BAR count).  Allocates the global soft state 'sc', maps the register
 * BAR, transitions the FW to ready, sizes and allocates all DMA pools,
 * issues IOC INIT, allocates MFI/MPT command pools, fetches the LD list
 * and controller info, and finally enables interrupts.
 *
 * NOTE(review): past the BAR-mapping #ifdef this function unconditionally
 * uses Barrelfish-only symbols (use_vtd, struct capref, alloc_map_frame,
 * invoke_frame_identify, errval_t) — the Linux build presumably supplies
 * equivalents via linux_defs.h; verify.
 */
#ifdef BARRELFISH
static void pci_init_card(void *arg, struct device_mem *bar_info, int bar_count)
#else
static void pci_init_card(int bar_count)
#endif
{
    errval_t err;

    /* Global controller soft state — the rest of the driver keys off 'sc'. */
    sc = malloc(sizeof(struct megaraid_ctrl));
    assert(sc != NULL);
    sc->device_id = pci_deviceid;

    /* Initialize mutexes (kept from the FreeBSD origin; this port runs
     * without locking) */
    /* mtx_init(&sc->sim_lock,  "mrsas_sim_lock", NULL, MTX_DEF); */
    /* mtx_init(&sc->pci_lock,  "mrsas_pci_lock", NULL, MTX_DEF); */
    /* mtx_init(&sc->io_lock,  "mrsas_io_lock", NULL, MTX_DEF); */
    /* mtx_init(&sc->aen_lock,  "mrsas_aen_lock", NULL, MTX_DEF); */
    /* mtx_init(&sc->ioctl_lock,  "mrsas_ioctl_lock", NULL, MTX_SPIN); */
    /* mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF); */
    /* mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF); */
    /* mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF); */

    /* Initialize the free-command linked lists */
    TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
    TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

    /* No commands outstanding at the firmware yet. */
    sc->fw_outstanding.val = 0;
    /* atomic_set(&sc->fw_outstanding,0); */

    // Map first memory BAR for memory mapped register access
    assert(bar_count >= 2);
#ifdef BARRELFISH
    map_device(&bar_info[0]);
    DEBUG("BAR[0] mapped (v=%p p=%llx l=%llx)\n",
            bar_info[0].vaddr,
            (unsigned long long) bar_info[0].paddr,
            (unsigned long long) bar_info[0].bytes);

    // Initialize Mackerel binding
    megaraid_initialize(&sc->d, (void *)bar_info[0].vaddr);
#else
    /* Linux: reach the device through UIO + sysfs resource files.
     * NOTE(review): device path "0000:09:00.0" and uio0 are hard-coded. */
    sc->uiofd = open("/dev/uio0", O_RDWR);
    assert(sc->uiofd > 0);
    sc->configfd = open("/sys/class/uio/uio0/device/config", O_RDWR | O_SYNC);
    assert(sc->configfd > 0);
    int resfd = open("/sys/bus/pci/devices/0000:09:00.0/resource1", O_RDWR | O_SYNC);
    assert(resfd > 0);

    /* uint16_t *cfgptr = mmap(NULL, 256, PROT_READ | PROT_WRITE, MAP_SHARED, sc->configfd, 0); */
    /* assert(cfgptr != MAP_FAILED); */

    /* uint16_t cfgptr[256]; */
    /* int ret = pread(sc->configfd, cfgptr, 256, 0); */
    /* assert(ret == 256); */

    /* printf("vendor = %x\n", cfgptr[0]); */
    /* printf("device = %x\n", cfgptr[1]); */
    /* printf("cmd = %x\n", cfgptr[2]); */

    /* Map the 64 KiB register BAR into our address space. */
    void *vaddr = mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED, resfd, 0);
    if(vaddr == (void *)-1) {
      perror("mmap");
    }
    assert(vaddr != (void *)-1);
    megaraid_initialize(&sc->d, (void *)vaddr);
#endif

    // TODO: Transition device to ready state
    // See mrsas.c:mrsas_transition_to_ready()
#ifdef MEGARAID_DEBUG
    uint32_t status = megaraid_status_rd(&sc->d);
#endif
    megaraid_status_t state = megaraid_status_state_rdf(&sc->d);
    /* NOTE(review): 'status' only exists when MEGARAID_DEBUG is defined —
     * this line presumably compiles because DEBUG() expands to nothing
     * otherwise; confirm. */
    DEBUG("Status register = 0x%x, state = 0x%x\n", status, state);
    switch(state) {
    case megaraid_state_ready:
      break;

    case megaraid_state_operational:
      /* FW already operational (e.g. warm start): reset it back to ready
       * via the doorbell, polling up to MRSAS_RESET_WAIT_TIME seconds. */
      /* DEBUG("Transitioning\n"); */
      mrsas_disable_intr();
      megaraid_doorbell_wr(&sc->d, MFI_RESET_FLAGS);
      for (int i=0; i < MRSAS_RESET_WAIT_TIME * 1000; i++) {
	if (megaraid_doorbell_rd(&sc->d) & 1)
	  DELAY(1000);
	else
	  break;
      }
      break;

    default:
      /* Any other FW state is unsupported by this port. */
      assert(state == megaraid_state_ready);
      break;
    }

    // -1 is needed (cf. mrsas.c:mrsas_init_adapter())
    sc->max_fw_cmds = megaraid_status_max_cmds_rdf(&sc->d) - 1;
    DEBUG("Max commands = %u\n", sc->max_fw_cmds);

    /* Determine allocation size of command frames.
     * Reply queue depth is 2*cmds+1 rounded up to a multiple of 16. */
    sc->reply_q_depth = ((sc->max_fw_cmds *2 +1 +15)/16*16);
    sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * sc->max_fw_cmds;
    sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
    sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (sc->max_fw_cmds + 1));
    sc->chain_frames_alloc_sz = 1024 * sc->max_fw_cmds;
    sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
        offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;

    sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
    sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

    /* Used for pass thru MFI frame (DCMD) */
    sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;

    sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
        sizeof(MPI2_SGE_IO_UNION))/16;

    sc->last_reply_idx = 0;

    u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
              chain_frame_size, evt_detail_size;
    /*
     * Allocate for version buffer.
     *
     * Each of the allocations below follows the same pattern: map a
     * DMA-able frame, then record its device-visible address — the
     * virtual address under VT-d, otherwise the physical address
     * obtained from the frame capability.
     */
    struct capref cap;
    struct frame_identity id;
    verbuf_size = MRSAS_MAX_NAME_LENGTH*(sizeof(lpaddr_t));
    sc->verbuf_mem = alloc_map_frame(VREGION_FLAGS_READ_WRITE_NOCACHE, verbuf_size, &cap);
    if(use_vtd) {
        sc->verbuf_phys_addr = (lpaddr_t)sc->verbuf_mem;
    } else {
        err = invoke_frame_identify(cap, &id);
        assert(err_is_ok(err));
        sc->verbuf_phys_addr = id.base;
    }

    /*
     * Allocate IO Request Frames
     */
    io_req_size = sc->io_frames_alloc_sz;
    sc->io_request_mem = alloc_map_frame(VREGION_FLAGS_READ_WRITE_NOCACHE, io_req_size, &cap);
    if(use_vtd) {
        sc->io_request_phys_addr = (lpaddr_t)sc->io_request_mem;
    } else {
        err = invoke_frame_identify(cap, &id);
        assert(err_is_ok(err));
        sc->io_request_phys_addr = id.base;
    }

    /*
     * Allocate Chain Frames
     */
    chain_frame_size = sc->chain_frames_alloc_sz;
    sc->chain_frame_mem = alloc_map_frame(VREGION_FLAGS_READ_WRITE_NOCACHE, chain_frame_size, &cap);
    if(use_vtd) {
        sc->chain_frame_phys_addr = (lpaddr_t)sc->chain_frame_mem;
    } else {
        err = invoke_frame_identify(cap, &id);
        assert(err_is_ok(err));
        sc->chain_frame_phys_addr = id.base;
    }

    /*
     * Allocate Reply Descriptor Array
     */
    reply_desc_size = sc->reply_alloc_sz;
    sc->reply_desc_mem = alloc_map_frame(VREGION_FLAGS_READ_WRITE_NOCACHE, reply_desc_size, &cap);
    if(use_vtd) {
        sc->reply_desc_phys_addr = (lpaddr_t)sc->reply_desc_mem;
    } else {
        err = invoke_frame_identify(cap, &id);
        assert(err_is_ok(err));
        sc->reply_desc_phys_addr = id.base;
    }

    /*
     * Allocate Sense Buffer Array.  Keep in lower 4GB
     */
    sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
    sc->sense_mem = alloc_map_frame(VREGION_FLAGS_READ_WRITE_NOCACHE, sense_size, &cap);
    if(use_vtd) {
        sc->sense_phys_addr = (lpaddr_t)sc->sense_mem;
    } else {
        err = invoke_frame_identify(cap, &id);
        assert(err_is_ok(err));
        sc->sense_phys_addr = id.base;
    }

    /*
     * Allocate for Event detail structure
     */
    evt_detail_size = sizeof(struct mrsas_evt_detail);
    sc->evt_detail_mem = alloc_map_frame(VREGION_FLAGS_READ_WRITE_NOCACHE, evt_detail_size, &cap);
    if(use_vtd) {
        sc->evt_detail_phys_addr = (lpaddr_t)sc->evt_detail_mem;
    } else {
        err = invoke_frame_identify(cap, &id);
        assert(err_is_ok(err));
        sc->evt_detail_phys_addr = id.base;
    }

    mrsas_alloc_mpt_cmds();

    /* Allocate memory for the IOC INIT command.  The first 1024 bytes
     * hold the MFI init frame; the MPI2 IOC INIT request follows it. */
    int ioc_init_size;
    ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
    sc->ioc_init_mem = alloc_map_frame(VREGION_FLAGS_READ_WRITE_NOCACHE, ioc_init_size, &cap);
    memset(sc->ioc_init_mem, 0, ioc_init_size);
    if(use_vtd) {
        sc->ioc_init_phys_mem = (lpaddr_t)sc->ioc_init_mem;
    } else {
        err = invoke_frame_identify(cap, &id);
        assert(err_is_ok(err));
        sc->ioc_init_phys_mem = id.base;
    }

    DEBUG("IOC Init frame at 0x%" PRIxLPADDR " in phys mem\n", sc->ioc_init_phys_mem);

    // Issue IOC init command to firmware
    pMpi2IOCInitRequest_t   IOCInitMsg;
    IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
    IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
    IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
    IOCInitMsg->MsgVersion = MPI2_VERSION;
    IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
    IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
    IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
    IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
    IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;

    /* MFI wrapper frame; cmd_status 0xFF is overwritten by the FW on
     * completion, which is what the poll loop below watches for. */
    volatile struct mrsas_init_frame *init_frame;
    init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
    init_frame->cmd = MFI_CMD_INIT;
    init_frame->cmd_status = 0xFF;
    init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

    if (sc->verbuf_mem != NULL) {
        snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
                MRSAS_VERSION);
        init_frame->driver_ver_lo = (lpaddr_t)sc->verbuf_phys_addr;
        init_frame->driver_ver_hi = 0;
    }

    /* Point the MFI frame at the embedded MPI2 IOC INIT request. */
    lpaddr_t phys_addr;
    phys_addr = (lpaddr_t)sc->ioc_init_phys_mem + 1024;
    init_frame->queue_info_new_phys_addr_lo = phys_addr;
    init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

    MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
    req_desc.addr.Words = (lpaddr_t)sc->ioc_init_phys_mem;
    req_desc.MFAIo.RequestFlags =
        (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

    mrsas_disable_intr();
    DEBUG("Issuing IOC INIT command to FW. 0x%x low, 0x%x high\n",
          req_desc.addr.u.low, req_desc.addr.u.high);
    assert(init_frame->cmd_status == 0xFF);

    /* Make the frame visible to the device before ringing the doorbell. */
    sys_debug_flush_cache();

    mrsas_fire_cmd(req_desc.addr.u.low, req_desc.addr.u.high);

    DEBUG("Waiting for status\n");

    /*
     * Poll response timer to wait for Firmware response.  While this
     * timer with the DELAY call could block CPU, the time interval for
     * this is only 1 millisecond.
     */
    /* while((volatile u_int8_t)init_frame->cmd_status == 0xFF); */
    u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
    if (init_frame->cmd_status == 0xFF) {
        for (int i=0; i < (max_wait * 1000); i++){
	  if ((volatile u_int8_t)init_frame->cmd_status == 0xFF)
                DELAY(1000);
            else
                break;
        }
    }

    if (init_frame->cmd_status == 0)
        DEBUG("IOC INIT response received from FW.\n");
    else {
        /* Cannot continue without a functioning IOC. */
        DEBUG("IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
	abort();
    }

    /* mrsas_free_ioc_cmd(sc); */

    mrsas_alloc_mfi_cmds();

    // Allocate DMA memory for RAID maps (double-buffered: two maps)
    sc->map_sz = sizeof(MR_FW_RAID_MAP) +
                (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
    for(int i = 0; i < 2; i++) {
        sc->raidmap_mem[i] = alloc_map_frame(VREGION_FLAGS_READ_WRITE_NOCACHE,
                                             sc->map_sz, &cap);
        if(use_vtd) {
            sc->raidmap_phys_addr[i] = (lpaddr_t)sc->raidmap_mem[i];
        } else {
            err = invoke_frame_identify(cap, &id);
            assert(err_is_ok(err));
            sc->raidmap_phys_addr[i] = id.base;
        }
    }

    /* DEBUG("Getting RAID map\n"); */

    /* if (!mrsas_get_map_info()) */
    /*     mrsas_sync_map_info(); */

    /* DEBUG("Getting physical drive list\n"); */

    /* memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); */
    /* mrsas_get_pd_list(); */

    /* Discover the logical drives exported by the controller. */
    memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
    mrsas_get_ld_list();

    struct mrsas_ctrl_info *ctrl_info;
    ctrl_info = malloc(sizeof(struct mrsas_ctrl_info));

    /*
     * Compute the max allowed sectors per IO: The controller info has two
     * limits on max sectors. Driver should use the minimum of these two.
     *
     * 1 << stripe_sz_ops.min = max sectors per strip
     *
     * Note that older firmwares ( < FW ver 30) didn't report information
     * to calculate max_sectors_1. So the number ended up as zero always.
     */
    u_int32_t max_sectors_1;
    u_int32_t max_sectors_2;
    u_int32_t tmp_sectors;
    tmp_sectors = 0;
    if (ctrl_info && !mrsas_get_ctrl_info(ctrl_info)) {
      DEBUG("Ctrl name '%s'\n",
	    ctrl_info->image_component[0].name);
      DEBUG("Product name '%s'\n",
	    ctrl_info->product_name);
      DEBUG("NVRAM size %u MB\n",
	    ctrl_info->nvram_size);
      DEBUG("Memory size %u MB\n",
	    ctrl_info->memory_size);
      DEBUG("Flash size %u MB\n",
	    ctrl_info->flash_size);
      /* DEBUG("port count = %u\n", ctrl_info->host_interface.port_count); */
      /* DEBUG("port count = %u\n", ctrl_info->device_interface.port_count); */
      /* for(int i = 0; i < 8; i++) { */
      /* 	DEBUG("image name %d = '%s'\n", i, ctrl_info->image_component[i].name); */
      /* } */
      /* DEBUG("UART present = %u\n", ctrl_info->hw_present.uart); */
      /* DEBUG("PD present = %u\n", ctrl_info->pd_present_count); */
      /* DEBUG("Package version = '%s'\n", ctrl_info->package_version); */

        max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
                    ctrl_info->max_strips_per_io;
        max_sectors_2 = ctrl_info->max_request_size;
        tmp_sectors = MIN(max_sectors_1 , max_sectors_2);
        sc->disableOnlineCtrlReset =
            ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
        sc->UnevenSpanSupport =
            ctrl_info->adapterOperations2.supportUnevenSpans;
        if(sc->UnevenSpanSupport) {
	  DEBUG("FW supports: UnevenSpanSupport=%x\n",
                sc->UnevenSpanSupport);
            if (MR_ValidateMapInfo())
           	    sc->fast_path_io = 1;
            else
                sc->fast_path_io = 0;

        }
    }
    /* Default limit from SGE capacity; clamp to FW-reported limit. */
    sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;

    if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
        sc->max_sectors_per_req = tmp_sectors;

    if (ctrl_info)
        free(ctrl_info);

    mrsas_enable_intr();

    /* Drain any replies already posted before interrupts were enabled. */
    mrsas_complete_cmd();

    /* mrsas_start_aen(); */
}
1382
/*
 * megaraid_driver_init: Driver entry point.
 *
 * Barrelfish: connects to the PCI service, registers pci_init_card() as
 * the init callback for the MegaRAID device, waits for it to run, then
 * queries the SKB for VT-d and sets up an IOMMU domain if available.
 *
 * Linux: maps a caller-supplied physical memory window from /dev/mem to
 * back the driver's DMA allocator and calls pci_init_card() directly.
 *
 * Returns 0 on success (failures abort via assert/exit).
 */
int megaraid_driver_init(int argc, const char **argv)
{
#ifdef BARRELFISH
    errval_t r;

    r = pci_client_connect();
    assert(err_is_ok(r));
    DEBUG("connected to pci\n");

    /* pci_init_card() runs as a callback once the device is found. */
    r = pci_register_driver_irq(pci_init_card, NULL, PCI_CLASS_MASS_STORAGE,
                                PCI_SUB_RAID, PCI_DONT_CARE,
                                PCI_VENDOR_LSI, pci_deviceid,
                                pci_bus, pci_device, pci_function,
                                interrupt_handler, NULL);
    assert(err_is_ok(r));

    /* Dispatch events until the init callback has populated 'sc'. */
    while(sc == NULL) {
        event_dispatch(get_default_waitset());
    }

    errval_t err = skb_client_connect();
    assert(err_is_ok(err));

    /* VT-d is assumed usable iff the SKB knows about it. */
    err = skb_execute_query("vtd_enabled(0,C), write(vtd_coherency(C)).");
    if (err_is_ok(err)) {
        use_vtd = true;
        /* for(int i = 0; i < *argc; i++) {  */
	/*     if(!strncmp((*argv)[i], "use_vtd=", strlen("use_vtd=") - 1)) { */
	/*       use_vtd = !!atol((*argv)[i] + strlen("use_vtd=")); */
        /*         break; */
        /*     } */
        /* } */
	err = skb_read_output("vtd_coherency(%d)", &vtd_coherency);
	assert(err_is_ok(err));
    }

    if (use_vtd) {
        err = connect_to_acpi();
	assert(err_is_ok(err));
	err = vtd_create_domain(cap_vroot);
	assert(err_is_ok(err));
        /* NOTE(review): PCI bus 9, device 0, function 0 is hard-coded
         * here (matches the Linux path's "0000:09:00.0"). */
        printf("megaraid: Using VT-d on bus 9\n");
	err = vtd_domain_add_device(0, 9, 0, 0, cap_vroot);
	assert(err_is_ok(err));
    }
#else
    if(argc < 2) {
      printf("Usage: %s PADDR\n", argv[0]);
      exit(1);
    }

    /* fd stays open for the process lifetime; the mapping below must
     * remain valid. */
    int fd = open("/dev/mem", O_RDWR | O_SYNC);
    assert(fd > 0);

    uintptr_t paddr = strtoul(argv[1], NULL, 0);

    /* Map MEM_SIZE bytes of physical memory at PADDR to serve as the
     * pool behind the driver's DMA allocations. */
    uint32_t *ptr = mmap(NULL, MEM_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, paddr);
    if(ptr == MAP_FAILED) {
      perror("mmap");
      exit(1);
    }
    assert(ptr != MAP_FAILED);
    /* printf("ptr = %x\n", *ptr); */
    pmem_start = pmem_base = (uint8_t *)ptr;
    paddr_start = paddr_base = paddr;
    paddr_end = paddr + MEM_SIZE;

    pci_init_card(3);
#endif

    return 0;
}
1455
1456/**
1457 * mrsas_release_mpt_cmd:      Return a cmd to free command pool
1458 * input:                      Command packet for return to free command pool
1459 *
1460 * This function returns an MPT command to the free command list.
1461 */
1462static void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd)
1463{
1464    /* struct mrsas_softc *sc = cmd->sc; */
1465
1466    /* mtx_lock(&sc->mpt_cmd_pool_lock); */
1467    cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
1468    TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
1469    /* mtx_unlock(&sc->mpt_cmd_pool_lock); */
1470
1471    return;
1472}
1473
1474/**
1475 * mrsas_complete_abort:      Completes aborting a command
1476 * input:                     Adapter soft state
1477 *                            Cmd that was issued to abort another cmd
1478 *
1479 * The mrsas_issue_blocked_abort_cmd() function waits for the command status
1480 * to change after sending the command.  This function is called from
1481 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
1482 */
1483static void mrsas_complete_abort(struct mrsas_mfi_cmd *cmd)
1484{
1485    if (cmd->sync_cmd) {
1486        cmd->sync_cmd = 0;
1487        cmd->cmd_status = 0;
1488        sc->chan = (void*)&cmd;
1489        /* wakeup_one((void *)&sc->chan); */
1490	assert(!"NYI");
1491    }
1492    return;
1493}
1494
1495/**
1496 * mrsas_complete_aen:        	Completes AEN command
1497 * input:                     	Adapter soft state
1498 *                            	Cmd that was issued to abort another cmd
1499 *
1500 * 								This function will be called from ISR and will continue
1501 * 								event processing from thread context by enqueuing task
1502 * 								in ev_tq (callback function "mrsas_aen_handler").
1503 */
1504static void mrsas_complete_aen(struct mrsas_mfi_cmd *cmd)
1505{
1506	/*
1507	* Don't signal app if it is just an aborted previously registered aen
1508	*/
1509	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
1510		/* TO DO (?) */
1511	}
1512	else
1513		cmd->abort_aen = 0;
1514
1515	sc->aen_cmd = NULL;
1516	mrsas_release_mfi_cmd(cmd);
1517
1518	/* if (!sc->remove_in_progress) */
1519	/* 	taskqueue_enqueue(sc->ev_tq, &sc->ev_task); */
1520
1521	return;
1522}
1523
1524/**
1525 * mrsas_wakeup -         Completes an internal command
1526 * input:                 Adapter soft state
1527 *                        Command to be completed
1528 *
1529 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware,
1530 * a wait timer is started.  This function is called from
1531 * mrsas_complete_mptmfi_passthru() as it completes the command,
1532 * to wake up from the command wait.
1533 */
1534static void mrsas_wakeup(struct mrsas_mfi_cmd *cmd)
1535{
1536    cmd->cmd_status = cmd->frame->io.cmd_status;
1537
1538    if (cmd->cmd_status == ECONNREFUSED)
1539        cmd->cmd_status = 0;
1540
1541    /* For debug only ... */
1542    //device_printf(sc->mrsas_dev,"DCMD rec'd for wakeup, sc->chan=%p\n", sc->chan);
1543
1544    sc->chan = (void*)&cmd;
1545    /* wakeup_one((void *)&sc->chan); */
1546    return;
1547}
1548
1549/**
1550 * mrsas_complete_mptmfi_passthru - Completes a command
1551 * input:                           sc: Adapter soft state
1552 *                                  cmd: Command to be completed
1553 *                                  status: cmd completion status
1554 *
1555 * This function is called from mrsas_complete_cmd() after an interrupt
1556 * is received from Firmware, and io_request->Function is
1557 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
1558 */
1559static void
1560mrsas_complete_mptmfi_passthru(struct mrsas_mfi_cmd *cmd,
1561			       u_int8_t status)
1562{
1563    struct mrsas_header *hdr = &cmd->frame->hdr;
1564    u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
1565
1566    /* Reset the retry counter for future re-tries */
1567    cmd->retry_for_fw_reset = 0;
1568
1569    if (cmd->ccb_ptr)
1570        cmd->ccb_ptr = NULL;
1571
1572    switch (hdr->cmd) {
1573        case MFI_CMD_INVALID:
1574	  DEBUG("MFI_CMD_INVALID command.\n");
1575            break;
1576        case MFI_CMD_PD_SCSI_IO:
1577        case MFI_CMD_LD_SCSI_IO:
1578            /*
1579             * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
1580             * issued either through an IO path or an IOCTL path. If it
1581             * was via IOCTL, we will send it to internal completion.
1582             */
1583            if (cmd->sync_cmd) {
1584                cmd->sync_cmd = 0;
1585                /* mrsas_wakeup(sc, cmd); */
1586		assert(!"NYI");
1587                break;
1588            }
1589        case MFI_CMD_SMP:
1590        case MFI_CMD_STP:
1591        case MFI_CMD_DCMD:
1592            /* Check for LD map update */
1593            if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
1594                (cmd->frame->dcmd.mbox.b[1] == 1)) {
1595                sc->fast_path_io = 0;
1596		/* mtx_lock(&sc->raidmap_lock); */
1597                if (cmd_status != 0) {
1598                    if (cmd_status != MFI_STAT_NOT_FOUND)
1599		      DEBUG("map sync failed, status=%x\n",cmd_status);
1600                    else {
1601                        mrsas_release_mfi_cmd(cmd);
1602		        /* mtx_unlock(&sc->raidmap_lock); */
1603                        break;
1604                    }
1605                }
1606                else
1607                    sc->map_id++;
1608                mrsas_release_mfi_cmd(cmd);
1609                if (MR_ValidateMapInfo())
1610                    sc->fast_path_io = 0;
1611                else
1612                    sc->fast_path_io = 1;
1613                mrsas_sync_map_info();
1614                /* mtx_unlock(&sc->raidmap_lock); */
1615                break;
1616            }
1617            /* See if got an event notification */
1618            if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
1619                mrsas_complete_aen(cmd);
1620            else
1621                mrsas_wakeup(cmd);
1622            break;
1623        case MFI_CMD_ABORT:
1624            /* Command issued to abort another cmd return */
1625            mrsas_complete_abort(cmd);
1626            break;
1627        default:
1628	  DEBUG("Unknown command completed! [0x%X]\n", hdr->cmd);
1629            break;
1630    }
1631}
1632
/* When true, mrsas_complete_cmd() processes at most one reply per call
 * instead of draining the whole reply queue. */
bool poll_mode = false;
1634
1635/*
1636 * mrsas_complete_cmd:        Process reply request
1637 * input:                     Adapter instance soft state
1638 *
1639 * This function is called from mrsas_isr() to process reply request and
1640 * clear response interrupt. Processing of the reply request entails
1641 * walking through the reply descriptor array for the command request
1642 * pended from Firmware.  We look at the Function field to determine
1643 * the command type and perform the appropriate action.  Before we
1644 * return, we clear the response interrupt.
1645 */
int mrsas_complete_cmd(void)
{
    Mpi2ReplyDescriptorsUnion_t *desc;
    MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
    MRSAS_RAID_SCSI_IO_REQUEST  *scsi_io_req;
    struct mrsas_mpt_cmd *cmd_mpt;
    struct mrsas_mfi_cmd *cmd_mfi;
    u_int8_t arm, reply_descript_type;
    u_int16_t smid, num_completed;
    u_int8_t status, extStatus;
    union desc_value desc_val;
    PLD_LOAD_BALANCE_INFO lbinfo;
    u_int32_t device_id;
    int threshold_reply_count = 0;

    /* Start at the reply descriptor we stopped at last time. */
    desc = sc->reply_desc_mem;
    desc += sc->last_reply_idx;

    reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

    desc_val.word = desc->Words;
    num_completed = 0;

    reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

    /* Find our reply descriptor for the command and process.
     * An all-ones descriptor means "unused" — the ring is drained. */
    while((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
    {
        /* SMIDs are 1-based; index into the MPT command list. */
        smid = reply_desc->SMID;
        cmd_mpt = sc->mpt_cmd_list[smid -1];
        scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;

        status = scsi_io_req->RaidContext.status;
        extStatus = scsi_io_req->RaidContext.exStatus;

        /* NOTE(review): no default case — unknown Function values are
         * silently skipped; confirm that is acceptable. */
        switch (scsi_io_req->Function)
        {
            case MPI2_FUNCTION_SCSI_IO_REQUEST :  /*Fast Path IO.*/
	      device_id = TARGET_DEVICE_ID;
                lbinfo = &sc->load_balance_info[device_id];
                if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
                    /* Account the completion against the mirror arm that
                     * carried this fast-path I/O. */
                    arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
                    /* atomic_dec(&lbinfo->scsi_pending_cmds[arm]); */
		    lbinfo->scsi_pending_cmds[arm].val--;
                    cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
                }
                //Fall thru and complete IO
            case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
                /* mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus); */
	      if(status != MFI_STAT_OK) {
		DEBUG("Command SMID %u failed with status 0x%x, extStatus 0x%x\n",
		      smid, status, extStatus);
	      }
	      /* This port treats any I/O failure as fatal. */
	      assert(status == MFI_STAT_OK);
	      cmd_mpt->ccb_ptr = NULL;
	      mrsas_release_mpt_cmd(cmd_mpt);
                /* mrsas_cmd_done(sc, cmd_mpt); */
                scsi_io_req->RaidContext.status = 0;
                scsi_io_req->RaidContext.exStatus = 0;
                /* atomic_dec(&sc->fw_outstanding); */
		sc->fw_outstanding.val--;
                break;
            case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
	      DEBUG("sync_cmd_idx = %x\n", cmd_mpt->sync_cmd_idx);
                cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
                mrsas_complete_mptmfi_passthru(cmd_mfi, status);
                cmd_mpt->flags = 0;
                mrsas_release_mpt_cmd(cmd_mpt);
                break;
        }

        /* Advance the ring index, wrapping at the queue depth. */
        sc->last_reply_idx++;
        if (sc->last_reply_idx >= sc->reply_q_depth)
            sc->last_reply_idx = 0;

        desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
        num_completed++;
        threshold_reply_count++;

        /* Get the next reply descriptor */
        if (!sc->last_reply_idx)
            desc = sc->reply_desc_mem;
	else
            desc++;

        reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
        desc_val.word = desc->Words;

        reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

        if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
            break;

        /*
         * Write to reply post index after completing threshold reply count
         * and still there are more replies in reply queue pending to be
         * completed.
         */
        if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
	  megaraid_reply_post_host_index_wr(&sc->d, sc->last_reply_idx);
	  threshold_reply_count = 0;
        }

        /* In poll mode, hand back control after a single completion. */
        if(poll_mode) {
            break;
        }
    }

    /* No match, just return */
    if (num_completed == 0)
        return (DONE);

    /* Clear response interrupt by publishing our final ring position. */
    megaraid_reply_post_host_index_wr(&sc->d, sc->last_reply_idx);

    return(0);
}
1763